/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_kvargs.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"

/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI    0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO    0x40

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680

/* Default value of Max Rx Queue */
#define IXGBE_MAX_RX_QUEUE_NUM 128

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

#define IXGBE_MMW_SIZE_DEFAULT		0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME	0x14
#define IXGBE_MAX_RING_DESC		4096 /* replicate define from rxtx */

/*
 *  Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH  32
#define IXGBE_DEFAULT_RX_PTHRESH      8
#define IXGBE_DEFAULT_RX_HTHRESH      8
#define IXGBE_DEFAULT_RX_WTHRESH      0

#define IXGBE_DEFAULT_TX_FREE_THRESH  32
#define IXGBE_DEFAULT_TX_PTHRESH      32
#define IXGBE_DEFAULT_TX_HTHRESH      0
#define IXGBE_DEFAULT_TX_WTHRESH      0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32

/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH  CHAR_BIT
#define IXGBE_8_BIT_MASK   UINT8_MAX

#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
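
/*
 * Illustrative note (added comment, not part of the original header): the
 * flow-control thresholds above are expressed in 1 KB units, so IXGBE_FC_HI
 * and IXGBE_FC_LO correspond to 128 KB and 64 KB respectively, and with
 * CHAR_BIT == 8 the helper IXGBE_4_BIT_MASK expands to the nibble mask 0x0f.
 */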

/* Additional timesync values. */
#define NSEC_PER_SEC             1000000000L
#define IXGBE_INCVAL_10GB        0x66666666
#define IXGBE_INCVAL_1GB         0x40000000
#define IXGBE_INCVAL_100         0x50000000
#define IXGBE_INCVAL_SHIFT_10GB  28
#define IXGBE_INCVAL_SHIFT_1GB   24
#define IXGBE_INCVAL_SHIFT_100   21
#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24

#define IXGBE_CYCLECOUNTER_MASK  0xffffffffffffffffULL

#define IXGBE_VT_CTL_POOLING_MODE_MASK   0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG   0x00010000
#define IXGBE_ETAG_ETYPE                 0x00005084
#define IXGBE_ETAG_ETYPE_MASK            0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID           0x80000000
#define IXGBE_RAH_ADTYPE                 0x40000000
#define IXGBE_RAL_ETAG_FILTER_MASK       0x00003fff
#define IXGBE_VMVIR_TAGA_MASK            0x18000000
#define IXGBE_VMVIR_TAGA_ETAG_INSERT     0x08000000
#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG              0x00000004
#define IXGBE_VTEICR_MASK                0x07

#define IXGBE_EXVET_VET_EXT_SHIFT        16
#define IXGBE_DMATXCTL_VT_MASK           0xFFFF0000

#define IXGBEVF_DEVARG_PFLINK_FULLCHK    "pflink_fullchk"

static const char * const ixgbevf_valid_arguments[] = {
	IXGBEVF_DEVARG_PFLINK_FULLCHK,
	NULL
};

static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static int ixgbe_dev_stop(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int ixgbe_dev_close(struct rte_eth_dev *dev);
static int ixgbe_dev_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
				  struct rte_eth_xstat *xstats, unsigned n);
static int
ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int n);
static int ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
				      struct rte_eth_xstat_name *xstats_names,
				      unsigned int size);
static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names, unsigned limit);
static int ixgbe_dev_xstats_get_names_by_id(
	struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids,
	unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
					     uint16_t queue_id,
					     uint8_t stat_idx,
					     uint8_t is_rx);
static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
				size_t fw_size);
static int ixgbe_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int ixgbevf_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
				 uint16_t vlan_id, int on);
static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
			       enum rte_vlan_type vlan_type,
			       uint16_t tpid_id);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
					   uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
				       int on);
static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
						  int mask);
static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
			       struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
					struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void *ixgbe_dev_setup_link_thread_handler(void *param);
static int ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev,
					      uint32_t timeout_ms);

static int ixgbe_add_rar(struct rte_eth_dev *dev,
			 struct rte_ether_addr *mac_addr,
			 uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
				struct rte_pci_driver *drv);

/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
				   int wait_to_complete);
static int ixgbevf_dev_stop(struct rte_eth_dev *dev);
static int ixgbevf_dev_close(struct rte_eth_dev *dev);
static int ixgbevf_dev_reset(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
				 struct rte_eth_stats *stats);
static int ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
				   uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
					 uint16_t queue, int on);
static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					    uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					     uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
				 uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
static int ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);

/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
		rte_ether_addr * mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
				 struct rte_eth_mirror_conf *mirror_conf,
				 uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
				   uint8_t rule_id);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
					  uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
					   uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
			       uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);

static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
				struct rte_ether_addr *mac_addr,
				uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
					struct rte_ether_addr *mac_addr);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
				   struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
				       struct ixgbe_5tuple_filter *filter);
static int ixgbe_dev_flow_ops_get(struct rte_eth_dev *dev,
				  const struct rte_flow_ops **ops);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
				  struct rte_eth_dcb_info *dcb_info);

static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
			  struct rte_dev_reg_info *regs);
static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom);
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
			    struct rte_dev_eeprom_info *eeprom);

static int ixgbe_get_module_info(struct rte_eth_dev *dev,
				 struct rte_eth_dev_module_info *modinfo);
static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
				   struct rte_dev_eeprom_info *info);

static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
			    struct rte_dev_reg_info *regs);

static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
					    struct timespec *timestamp,
					    uint32_t flags);
static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
					    struct timespec *timestamp);
static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
				    struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
				     const struct timespec *timestamp);
static void ixgbevf_dev_interrupt_handler(void *param);

static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					 struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					 struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_filter_restore(struct rte_eth_dev *dev);
static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
static int ixgbe_wait_for_link_up(struct ixgbe_hw *hw);

/*
 * Define VF stats macros for registers that are not "clear on read".
 */
#define UPDATE_VF_STAT(reg, last, cur)                          \
{                                                               \
	uint32_t latest = IXGBE_READ_REG(hw, reg);              \
	cur += (latest - last) & UINT_MAX;                      \
	last = latest;                                          \
}

/*
 * The counters read here are 36 bits wide (a 4-bit MSB register on top of a
 * 32-bit LSB register); the 2^36 addend keeps the delta non-negative when the
 * 36-bit counter wraps around.
 */
#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
{                                                                \
	u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
	u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
	u64 latest = ((new_msb << 32) | new_lsb);                \
	cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
	last = latest;                                           \
}

#define IXGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define IXGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ixgbe_map[] = {
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI) },
#ifdef RTE_LIBRTE_IXGBE_BYPASS
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
	{ .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_TXD_ALIGN,
	.nb_seg_max = IXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ixgbe_eth_dev_ops = {
	.dev_configure        = ixgbe_dev_configure,
	.dev_start            = ixgbe_dev_start,
	.dev_stop             = ixgbe_dev_stop,
	.dev_set_link_up      = ixgbe_dev_set_link_up,
	.dev_set_link_down    = ixgbe_dev_set_link_down,
	.dev_close            = ixgbe_dev_close,
	.dev_reset            = ixgbe_dev_reset,
	.promiscuous_enable   = ixgbe_dev_promiscuous_enable,
	.promiscuous_disable  = ixgbe_dev_promiscuous_disable,
	.allmulticast_enable  = ixgbe_dev_allmulticast_enable,
	.allmulticast_disable = ixgbe_dev_allmulticast_disable,
	.link_update          = ixgbe_dev_link_update,
	.stats_get            = ixgbe_dev_stats_get,
	.xstats_get           = ixgbe_dev_xstats_get,
	.xstats_get_by_id     = ixgbe_dev_xstats_get_by_id,
	.stats_reset          = ixgbe_dev_stats_reset,
	.xstats_reset         = ixgbe_dev_xstats_reset,
	.xstats_get_names     = ixgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
	.fw_version_get       = ixgbe_fw_version_get,
	.dev_infos_get        = ixgbe_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set              = ixgbe_dev_mtu_set,
	.vlan_filter_set      = ixgbe_vlan_filter_set,
	.vlan_tpid_set        = ixgbe_vlan_tpid_set,
	.vlan_offload_set     = ixgbe_vlan_offload_set,
	.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
	.rx_queue_start       = ixgbe_dev_rx_queue_start,
	.rx_queue_stop        = ixgbe_dev_rx_queue_stop,
	.tx_queue_start       = ixgbe_dev_tx_queue_start,
	.tx_queue_stop        = ixgbe_dev_tx_queue_stop,
	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
	.rx_queue_release     = ixgbe_dev_rx_queue_release,
	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
	.tx_queue_release     = ixgbe_dev_tx_queue_release,
	.dev_led_on           = ixgbe_dev_led_on,
	.dev_led_off          = ixgbe_dev_led_off,
	.flow_ctrl_get        = ixgbe_flow_ctrl_get,
	.flow_ctrl_set        = ixgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
	.mac_addr_add         = ixgbe_add_rar,
	.mac_addr_remove      = ixgbe_remove_rar,
	.mac_addr_set         = ixgbe_set_default_mac_addr,
	.uc_hash_table_set    = ixgbe_uc_hash_table_set,
	.uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
	.mirror_rule_set      = ixgbe_mirror_rule_set,
	.mirror_rule_reset    = ixgbe_mirror_rule_reset,
	.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
	.reta_update          = ixgbe_dev_rss_reta_update,
	.reta_query           = ixgbe_dev_rss_reta_query,
	.rss_hash_update      = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
	.flow_ops_get         = ixgbe_dev_flow_ops_get,
	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get         = ixgbe_rxq_info_get,
	.txq_info_get         = ixgbe_txq_info_get,
	.timesync_enable      = ixgbe_timesync_enable,
	.timesync_disable     = ixgbe_timesync_disable,
	.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
	.get_reg              = ixgbe_get_regs,
	.get_eeprom_length    = ixgbe_get_eeprom_length,
	.get_eeprom           = ixgbe_get_eeprom,
	.set_eeprom           = ixgbe_set_eeprom,
	.get_module_info      = ixgbe_get_module_info,
	.get_module_eeprom    = ixgbe_get_module_eeprom,
	.get_dcb_info         = ixgbe_dev_get_dcb_info,
	.timesync_adjust_time = ixgbe_timesync_adjust_time,
	.timesync_read_time   = ixgbe_timesync_read_time,
	.timesync_write_time  = ixgbe_timesync_write_time,
	.udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
	.tm_ops_get           = ixgbe_tm_ops_get,
	.tx_done_cleanup      = ixgbe_dev_tx_done_cleanup,
	.get_monitor_addr     = ixgbe_get_monitor_addr,
};

/*
 * dev_ops for virtual function; only the bare necessities for basic VF
 * operation are implemented.
 */
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
	.dev_configure        = ixgbevf_dev_configure,
	.dev_start            = ixgbevf_dev_start,
	.dev_stop             = ixgbevf_dev_stop,
	.link_update          = ixgbevf_dev_link_update,
	.stats_get            = ixgbevf_dev_stats_get,
	.xstats_get           = ixgbevf_dev_xstats_get,
	.stats_reset          = ixgbevf_dev_stats_reset,
	.xstats_reset         = ixgbevf_dev_stats_reset,
	.xstats_get_names     = ixgbevf_dev_xstats_get_names,
	.dev_close            = ixgbevf_dev_close,
	.dev_reset            = ixgbevf_dev_reset,
	.promiscuous_enable   = ixgbevf_dev_promiscuous_enable,
	.promiscuous_disable  = ixgbevf_dev_promiscuous_disable,
	.allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
	.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
	.dev_infos_get        = ixgbevf_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set              = ixgbevf_dev_set_mtu,
	.vlan_filter_set      = ixgbevf_vlan_filter_set,
	.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
	.vlan_offload_set     = ixgbevf_vlan_offload_set,
	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
	.rx_queue_release     = ixgbe_dev_rx_queue_release,
	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
	.tx_queue_release     = ixgbe_dev_tx_queue_release,
	.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
	.mac_addr_add         = ixgbevf_add_mac_addr,
	.mac_addr_remove      = ixgbevf_remove_mac_addr,
	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get         = ixgbe_rxq_info_get,
	.txq_info_get         = ixgbe_txq_info_get,
	.mac_addr_set         = ixgbevf_set_default_mac_addr,
	.get_reg              = ixgbevf_get_regs,
	.reta_update          = ixgbe_dev_rss_reta_update,
	.reta_query           = ixgbe_dev_rss_reta_query,
	.rss_hash_update      = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
	.tx_done_cleanup      = ixgbe_dev_tx_done_cleanup,
	.get_monitor_addr     = ixgbe_get_monitor_addr,
};

/* store statistics names and their offsets in the stats structure */
struct rte_ixgbe_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
	{"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
	{"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
	{"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
	{"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
	{"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
	{"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
	{"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
	{"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
	{"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
	{"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
	{"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
	{"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
	{"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
	{"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
	{"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
	{"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
	{"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
	{"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
	{"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
		ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
	{"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
	{"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},

	{"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
		fdirustat_add)},
	{"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
		fdirustat_remove)},
	{"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
		fdirfstat_fadd)},
	{"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
		fdirfstat_fremove)},
	{"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
		fdirmatch)},
	{"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
		fdirmiss)},

	{"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
	{"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
	{"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
		fclast)},
	{"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
	{"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
	{"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
	{"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
	{"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
		fcoe_noddp)},
	{"rx_fcoe_no_direct_data_placement_ext_buff",
		offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},

	{"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
		lxontxc)},
	{"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
		lxonrxc)},
	{"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
		lxofftxc)},
	{"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
		lxoffrxc)},
	{"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};

#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
			   sizeof(rte_ixgbe_stats_strings[0]))

/* MACsec statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
		out_pkts_untagged)},
	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
		out_pkts_encrypted)},
	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
		out_pkts_protected)},
	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
		out_octets_encrypted)},
	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
		out_octets_protected)},
	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
		in_pkts_untagged)},
	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
		in_pkts_badtag)},
	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
		in_pkts_nosci)},
	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unknownsci)},
	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
		in_octets_decrypted)},
	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
		in_octets_validated)},
	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unchecked)},
	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
		in_pkts_delayed)},
	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
		in_pkts_late)},
	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
		in_pkts_ok)},
	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
		in_pkts_invalid)},
	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
		in_pkts_notvalid)},
	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
		in_pkts_unusedsa)},
	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
		in_pkts_notusingsa)},
};

#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
			   sizeof(rte_ixgbe_macsec_strings[0]))

/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
	{"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
};

#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
			   sizeof(rte_ixgbe_rxq_strings[0]))
#define IXGBE_NB_RXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
	{"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
		pxon2offc)},
};

#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
			   sizeof(rte_ixgbe_txq_strings[0]))
#define IXGBE_NB_TXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};

#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
		sizeof(rte_ixgbevf_stats_strings[0]))

/*
 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
		return 1;
	default:
		return 0;
	}
}

static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = ixgbe_reset_hw(hw);

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	if (status == IXGBE_ERR_SFP_NOT_PRESENT)
		status = IXGBE_SUCCESS;
	return status;
}

static inline void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
	IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(hw);
}
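
/*
 * Added note for clarity (not from the base code comments): 82599 and later
 * MACs expose more than 32 interrupt causes, which is why ixgbe_disable_intr()
 * above masks the extended EIMC_EX register pair in addition to EIMC, while
 * the 82598 only needs its single EIMC register cleared.
 */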

/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
	uint32_t i;

	for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
	}
}


static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	if ((hw->mac.type != ixgbe_mac_82599EB) &&
	    (hw->mac.type != ixgbe_mac_X540) &&
	    (hw->mac.type != ixgbe_mac_X550) &&
	    (hw->mac.type != ixgbe_mac_X550EM_x) &&
	    (hw->mac.type != ixgbe_mac_X550EM_a))
		return -ENOSYS;

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsmr[n] &= ~clearing_mask;

	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsmr[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

	/* Now write the mapping in the appropriate register */
	if (is_rx) {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
			     stat_mappings->rqsmr[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
	} else {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
			     stat_mappings->tqsm[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
	}
	return 0;
}

static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i;

	/* write whatever was in stat mapping table to the NIC */
	for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
		/* rx */
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);

		/* tx */
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
	}
}

static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
	uint8_t i;
	struct ixgbe_dcb_tc_config *tc;
	uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

	dcb_config->num_tcs.pg_tcs = dcb_max_tc;
	dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
	for (i = 0; i < dcb_max_tc; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
				 (uint8_t)(100/dcb_max_tc + (i & 1));
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
				 (uint8_t)(100/dcb_max_tc + (i & 1));
		tc->pfc = ixgbe_dcb_pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &dcb_config->tc_config[0];
	tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
	for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
		dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
		dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
	}
	dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
	dcb_config->pfc_mode_enable = false;
	dcb_config->vt_mode = true;
	dcb_config->round_robin_enable = false;
	/* support all DCB capabilities in 82599 */
	dcb_config->support.capabilities = 0xFF;

	/* we only support 4 TCs for X540, X550 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x ||
	    hw->mac.type == ixgbe_mac_X550EM_a) {
		dcb_config->num_tcs.pg_tcs = 4;
		dcb_config->num_tcs.pfc_tcs = 4;
	}
}
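
/*
 * Illustrative arithmetic for the defaults chosen in ixgbe_dcb_init() above:
 * with 8 traffic classes, 100 / 8 truncates to 12 and the "+ (i & 1)" term
 * gives every odd-numbered TC 13 percent, so the per-TC shares
 * 12+13+12+13+12+13+12+13 still sum to exactly 100.
 */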

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
{
	uint16_t mask;

	/*
	 * Phy lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock. Release of common lock
	 * is done automatically by swfw_sync function.
	 */
	mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
		PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
	}
	ixgbe_release_swfw_semaphore(hw, mask);

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
	}
	ixgbe_release_swfw_semaphore(hw, mask);
}

/*
 * This function is based on code in ixgbe_attach() in base/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct ixgbe_adapter *ad = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
	struct ixgbe_dcb_config *dcb_config =
		IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct ixgbe_bw_conf *bw_conf =
		IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
	uint32_t ctrl_ext;
	uint16_t csum;
	int diag, i, ret;

	PMD_INIT_FUNC_TRACE();

	ixgbe_dev_macsec_setting_reset(eth_dev);

	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
	eth_dev->rx_queue_count       = ixgbe_dev_rx_queue_count;
	eth_dev->rx_descriptor_done   = ixgbe_dev_rx_descriptor_done;
	eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status;
	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ixgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized
		 * Tx queue may not have been initialized by the primary process
		 */
		if (eth_dev->data->tx_queues) {
			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
			ixgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		ixgbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_atomic32_clear(&ad->link_thread_running);
	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Initialize the shared code (base driver) */
#ifdef RTE_LIBRTE_IXGBE_BYPASS
	diag = ixgbe_bypass_init_shared_code(hw);
#else
	diag = ixgbe_init_shared_code(hw);
#endif /* RTE_LIBRTE_IXGBE_BYPASS */

	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
		PMD_INIT_LOG(ERR, "\nERROR: "
			"Firmware recovery mode detected. Limiting functionality.\n"
			"Refer to the Intel(R) Ethernet Adapters and Devices "
			"User Guide for details on firmware recovery mode.");
		return -EIO;
	}

	/* pick up the PCI bus settings for reporting later */
	ixgbe_get_bus_info(hw);

	/* Unlock any pending hardware semaphore */
	ixgbe_swfw_lock_reset(hw);

#ifdef RTE_LIB_SECURITY
	/* Initialize security_ctx only for primary process*/
	if (ixgbe_ipsec_ctx_create(eth_dev))
		return -ENOMEM;
#endif

	/* Initialize DCB configuration*/
	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
	ixgbe_dcb_init(hw, dcb_config);
	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_none;
	hw->fc.current_mode = ixgbe_fc_none;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		hw->fc.low_water[i] = IXGBE_FC_LO;
		hw->fc.high_water[i] = IXGBE_FC_HI;
	}
	hw->fc.send_xon = 1;

	/* Make sure we have a good EEPROM before we read from it */
	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
		return -EIO;
	}

#ifdef RTE_LIBRTE_IXGBE_BYPASS
	diag = ixgbe_bypass_init_hw(hw);
#else
	diag = ixgbe_init_hw(hw);
#endif /* RTE_LIBRTE_IXGBE_BYPASS */

	/*
	 * Devices with copper phys will fail to initialise if ixgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in ixgbe_identify_phy_generic() for all devices,
	 * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
	 * also called. See ixgbe_identify_phy_82599(). The reason for the
	 * failure is not known, and it only occurs when virtualisation features
	 * are disabled in the bios. A delay of 100ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
		rte_delay_ms(200);
		diag = ixgbe_init_hw(hw);
	}

	if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
		diag = IXGBE_SUCCESS;

	if (diag == IXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your Intel or hardware representative "
			     "who provided you with this hardware.");
	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
	if (diag) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
		return -EIO;
	}

	/* Reset the hw statistics */
	ixgbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ixgbe_disable_intr(hw);

	/* reset mappings for queue statistics hw counters*/
	ixgbe_reset_qstat_mappings(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc(
		"ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
		return -ENOMEM;
	}

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap*/
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ret = ixgbe_pf_host_init(eth_dev);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		rte_free(eth_dev->data->hash_mac_addrs);
		eth_dev->data->hash_mac_addrs = NULL;
		return ret;
	}

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* let hardware know driver is loaded */
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int) hw->mac.type, (int) hw->phy.type,
			     (int) hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int) hw->mac.type, (int) hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ixgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ixgbe_enable_intr(eth_dev);

	/* initialize filter info */
	memset(filter_info, 0,
	       sizeof(struct ixgbe_filter_info));

	/* initialize 5tuple filter list */
	TAILQ_INIT(&filter_info->fivetuple_list);

	/* initialize flow director filter list & hash */
	ixgbe_fdir_filter_init(eth_dev);

	/* initialize l2 tunnel filter list & hash */
	ixgbe_l2_tn_filter_init(eth_dev);

	/* initialize flow filter lists */
	ixgbe_filterlist_init();

	/* initialize bandwidth configuration info */
	memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));

	/* initialize Traffic Manager configuration */
	ixgbe_tm_conf_init(eth_dev);

	return 0;
}

static int
eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ixgbe_dev_close(eth_dev);

	return 0;
}

static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct ixgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple,
			     entries);
		rte_free(p_5tuple);
	}
	memset(filter_info->fivetuple_mask, 0,
	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);

	return 0;
}

static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
	struct ixgbe_fdir_filter *fdir_filter;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_handle)
		rte_hash_free(fdir_info->hash_handle);

	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
		TAILQ_REMOVE(&fdir_info->fdir_list,
			     fdir_filter,
			     entries);
		rte_free(fdir_filter);
	}

	return 0;
}

static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
	struct ixgbe_l2_tn_filter *l2_tn_filter;

	if (l2_tn_info->hash_map)
		rte_free(l2_tn_info->hash_map);
	if (l2_tn_info->hash_handle)
		rte_hash_free(l2_tn_info->hash_handle);

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
			     l2_tn_filter,
			     entries);
		rte_free(l2_tn_filter);
	}

	return 0;
}

static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = IXGBE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(union ixgbe_atr_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", eth_dev->device->name);
	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_handle) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("ixgbe",
					  sizeof(struct ixgbe_fdir_filter *) *
					  IXGBE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		return -ENOMEM;
	}
	fdir_info->mask_added = FALSE;

	return 0;
}
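
/*
 * Added note (an observation, not taken from the original comments):
 * ixgbe_fdir_filter_init() above and ixgbe_l2_tn_filter_init() below follow
 * the same pattern: a TAILQ that owns the software filter objects plus a
 * CRC-based rte_hash whose hash_map array maps a hash position back to the
 * filter pointer, so key lookups and full traversal (e.g. for restoring
 * filters after a reset) are both cheap.
 */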
*l2_tn_info = 1406 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); 1407 char l2_tn_hash_name[RTE_HASH_NAMESIZE]; 1408 struct rte_hash_parameters l2_tn_hash_params = { 1409 .name = l2_tn_hash_name, 1410 .entries = IXGBE_MAX_L2_TN_FILTER_NUM, 1411 .key_len = sizeof(struct ixgbe_l2_tn_key), 1412 .hash_func = rte_hash_crc, 1413 .hash_func_init_val = 0, 1414 .socket_id = rte_socket_id(), 1415 }; 1416 1417 TAILQ_INIT(&l2_tn_info->l2_tn_list); 1418 snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE, 1419 "l2_tn_%s", eth_dev->device->name); 1420 l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params); 1421 if (!l2_tn_info->hash_handle) { 1422 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!"); 1423 return -EINVAL; 1424 } 1425 l2_tn_info->hash_map = rte_zmalloc("ixgbe", 1426 sizeof(struct ixgbe_l2_tn_filter *) * 1427 IXGBE_MAX_L2_TN_FILTER_NUM, 1428 0); 1429 if (!l2_tn_info->hash_map) { 1430 PMD_INIT_LOG(ERR, 1431 "Failed to allocate memory for L2 TN hash map!"); 1432 return -ENOMEM; 1433 } 1434 l2_tn_info->e_tag_en = FALSE; 1435 l2_tn_info->e_tag_fwd_en = FALSE; 1436 l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG; 1437 1438 return 0; 1439 } 1440 /* 1441 * Negotiate mailbox API version with the PF. 1442 * After reset API version is always set to the basic one (ixgbe_mbox_api_10). 1443 * Then we try to negotiate starting with the most recent one. 1444 * If all negotiation attempts fail, then we will proceed with 1445 * the default one (ixgbe_mbox_api_10). 1446 */ 1447 static void 1448 ixgbevf_negotiate_api(struct ixgbe_hw *hw) 1449 { 1450 int32_t i; 1451 1452 /* start with highest supported, proceed down */ 1453 static const enum ixgbe_pfvf_api_rev sup_ver[] = { 1454 ixgbe_mbox_api_13, 1455 ixgbe_mbox_api_12, 1456 ixgbe_mbox_api_11, 1457 ixgbe_mbox_api_10, 1458 }; 1459 1460 for (i = 0; 1461 i != RTE_DIM(sup_ver) && 1462 ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0; 1463 i++) 1464 ; 1465 } 1466 1467 static void 1468 generate_random_mac_addr(struct rte_ether_addr *mac_addr) 1469 { 1470 uint64_t random; 1471 1472 /* Set Organizationally Unique Identifier (OUI) prefix. */ 1473 mac_addr->addr_bytes[0] = 0x00; 1474 mac_addr->addr_bytes[1] = 0x09; 1475 mac_addr->addr_bytes[2] = 0xC0; 1476 /* Force indication of locally assigned MAC address. */ 1477 mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR; 1478 /* Generate the last 3 bytes of the MAC address with a random number. 
*/ 1479 random = rte_rand(); 1480 memcpy(&mac_addr->addr_bytes[3], &random, 3); 1481 } 1482 1483 static int 1484 devarg_handle_int(__rte_unused const char *key, const char *value, 1485 void *extra_args) 1486 { 1487 uint16_t *n = extra_args; 1488 1489 if (value == NULL || extra_args == NULL) 1490 return -EINVAL; 1491 1492 *n = (uint16_t)strtoul(value, NULL, 0); 1493 if (*n == USHRT_MAX && errno == ERANGE) 1494 return -1; 1495 1496 return 0; 1497 } 1498 1499 static void 1500 ixgbevf_parse_devargs(struct ixgbe_adapter *adapter, 1501 struct rte_devargs *devargs) 1502 { 1503 struct rte_kvargs *kvlist; 1504 uint16_t pflink_fullchk; 1505 1506 if (devargs == NULL) 1507 return; 1508 1509 kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments); 1510 if (kvlist == NULL) 1511 return; 1512 1513 if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 && 1514 rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK, 1515 devarg_handle_int, &pflink_fullchk) == 0 && 1516 pflink_fullchk == 1) 1517 adapter->pflink_fullchk = 1; 1518 1519 rte_kvargs_free(kvlist); 1520 } 1521 1522 /* 1523 * Virtual Function device init 1524 */ 1525 static int 1526 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) 1527 { 1528 int diag; 1529 uint32_t tc, tcs; 1530 struct ixgbe_adapter *ad = eth_dev->data->dev_private; 1531 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1532 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1533 struct ixgbe_hw *hw = 1534 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1535 struct ixgbe_vfta *shadow_vfta = 1536 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1537 struct ixgbe_hwstrip *hwstrip = 1538 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1539 struct rte_ether_addr *perm_addr = 1540 (struct rte_ether_addr *)hw->mac.perm_addr; 1541 1542 PMD_INIT_FUNC_TRACE(); 1543 1544 eth_dev->dev_ops = &ixgbevf_eth_dev_ops; 1545 eth_dev->rx_descriptor_done = ixgbe_dev_rx_descriptor_done; 1546 eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status; 1547 eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status; 1548 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1549 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1550 1551 /* for secondary processes, we don't initialise any further as primary 1552 * has already done this work. Only check we don't need a different 1553 * RX function 1554 */ 1555 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1556 struct ixgbe_tx_queue *txq; 1557 /* TX queue function in primary, set by last queue initialized 1558 * Tx queue may not initialized by primary process 1559 */ 1560 if (eth_dev->data->tx_queues) { 1561 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1]; 1562 ixgbe_set_tx_function(eth_dev, txq); 1563 } else { 1564 /* Use default TX function if we get here */ 1565 PMD_INIT_LOG(NOTICE, 1566 "No TX queues configured yet. 
Using default TX function."); 1567 } 1568 1569 ixgbe_set_rx_function(eth_dev); 1570 1571 return 0; 1572 } 1573 1574 rte_atomic32_clear(&ad->link_thread_running); 1575 ixgbevf_parse_devargs(eth_dev->data->dev_private, 1576 pci_dev->device.devargs); 1577 1578 rte_eth_copy_pci_info(eth_dev, pci_dev); 1579 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 1580 1581 hw->device_id = pci_dev->id.device_id; 1582 hw->vendor_id = pci_dev->id.vendor_id; 1583 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1584 1585 /* initialize the vfta */ 1586 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1587 1588 /* initialize the hw strip bitmap*/ 1589 memset(hwstrip, 0, sizeof(*hwstrip)); 1590 1591 /* Initialize the shared code (base driver) */ 1592 diag = ixgbe_init_shared_code(hw); 1593 if (diag != IXGBE_SUCCESS) { 1594 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag); 1595 return -EIO; 1596 } 1597 1598 /* init_mailbox_params */ 1599 hw->mbx.ops.init_params(hw); 1600 1601 /* Reset the hw statistics */ 1602 ixgbevf_dev_stats_reset(eth_dev); 1603 1604 /* Disable the interrupts for VF */ 1605 ixgbevf_intr_disable(eth_dev); 1606 1607 hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ 1608 diag = hw->mac.ops.reset_hw(hw); 1609 1610 /* 1611 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when 1612 * the underlying PF driver has not assigned a MAC address to the VF. 1613 * In this case, assign a random MAC address. 1614 */ 1615 if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) { 1616 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1617 /* 1618 * This error code will be propagated to the app by 1619 * rte_eth_dev_reset, so use a public error code rather than 1620 * the internal-only IXGBE_ERR_RESET_FAILED 1621 */ 1622 return -EAGAIN; 1623 } 1624 1625 /* negotiate mailbox API version to use with the PF. */ 1626 ixgbevf_negotiate_api(hw); 1627 1628 /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */ 1629 ixgbevf_get_queues(hw, &tcs, &tc); 1630 1631 /* Allocate memory for storing MAC addresses */ 1632 eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN * 1633 hw->mac.num_rar_entries, 0); 1634 if (eth_dev->data->mac_addrs == NULL) { 1635 PMD_INIT_LOG(ERR, 1636 "Failed to allocate %u bytes needed to store " 1637 "MAC addresses", 1638 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1639 return -ENOMEM; 1640 } 1641 1642 /* Generate a random MAC address, if none was assigned by PF. 
*/ 1643 if (rte_is_zero_ether_addr(perm_addr)) { 1644 generate_random_mac_addr(perm_addr); 1645 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1); 1646 if (diag) { 1647 rte_free(eth_dev->data->mac_addrs); 1648 eth_dev->data->mac_addrs = NULL; 1649 return diag; 1650 } 1651 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF"); 1652 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address " 1653 "%02x:%02x:%02x:%02x:%02x:%02x", 1654 perm_addr->addr_bytes[0], 1655 perm_addr->addr_bytes[1], 1656 perm_addr->addr_bytes[2], 1657 perm_addr->addr_bytes[3], 1658 perm_addr->addr_bytes[4], 1659 perm_addr->addr_bytes[5]); 1660 } 1661 1662 /* Copy the permanent MAC address */ 1663 rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]); 1664 1665 /* reset the hardware with the new settings */ 1666 diag = hw->mac.ops.start_hw(hw); 1667 switch (diag) { 1668 case 0: 1669 break; 1670 1671 default: 1672 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1673 return -EIO; 1674 } 1675 1676 rte_intr_callback_register(intr_handle, 1677 ixgbevf_dev_interrupt_handler, eth_dev); 1678 rte_intr_enable(intr_handle); 1679 ixgbevf_intr_enable(eth_dev); 1680 1681 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", 1682 eth_dev->data->port_id, pci_dev->id.vendor_id, 1683 pci_dev->id.device_id, "ixgbe_mac_82599_vf"); 1684 1685 return 0; 1686 } 1687 1688 /* Virtual Function device uninit */ 1689 1690 static int 1691 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) 1692 { 1693 PMD_INIT_FUNC_TRACE(); 1694 1695 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1696 return 0; 1697 1698 ixgbevf_dev_close(eth_dev); 1699 1700 return 0; 1701 } 1702 1703 static int 1704 eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1705 struct rte_pci_device *pci_dev) 1706 { 1707 char name[RTE_ETH_NAME_MAX_LEN]; 1708 struct rte_eth_dev *pf_ethdev; 1709 struct rte_eth_devargs eth_da; 1710 int i, retval; 1711 1712 if (pci_dev->device.devargs) { 1713 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, 1714 &eth_da); 1715 if (retval) 1716 return retval; 1717 } else 1718 memset(&eth_da, 0, sizeof(eth_da)); 1719 1720 if (eth_da.nb_representor_ports > 0 && 1721 eth_da.type != RTE_ETH_REPRESENTOR_VF) { 1722 PMD_DRV_LOG(ERR, "unsupported representor type: %s\n", 1723 pci_dev->device.devargs->args); 1724 return -ENOTSUP; 1725 } 1726 1727 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, 1728 sizeof(struct ixgbe_adapter), 1729 eth_dev_pci_specific_init, pci_dev, 1730 eth_ixgbe_dev_init, NULL); 1731 1732 if (retval || eth_da.nb_representor_ports < 1) 1733 return retval; 1734 1735 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1736 if (pf_ethdev == NULL) 1737 return -ENODEV; 1738 1739 /* probe VF representor ports */ 1740 for (i = 0; i < eth_da.nb_representor_ports; i++) { 1741 struct ixgbe_vf_info *vfinfo; 1742 struct ixgbe_vf_representor representor; 1743 1744 vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA( 1745 pf_ethdev->data->dev_private); 1746 if (vfinfo == NULL) { 1747 PMD_DRV_LOG(ERR, 1748 "no virtual functions supported by PF"); 1749 break; 1750 } 1751 1752 representor.vf_id = eth_da.representor_ports[i]; 1753 representor.switch_domain_id = vfinfo->switch_domain_id; 1754 representor.pf_ethdev = pf_ethdev; 1755 1756 /* representor port net_bdf_port */ 1757 snprintf(name, sizeof(name), "net_%s_representor_%d", 1758 pci_dev->device.name, 1759 eth_da.representor_ports[i]); 1760 1761 retval = rte_eth_dev_create(&pci_dev->device, name, 1762 sizeof(struct
ixgbe_vf_representor), NULL, NULL, 1763 ixgbe_vf_representor_init, &representor); 1764 1765 if (retval) 1766 PMD_DRV_LOG(ERR, "failed to create ixgbe vf " 1767 "representor %s.", name); 1768 } 1769 1770 return 0; 1771 } 1772 1773 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev) 1774 { 1775 struct rte_eth_dev *ethdev; 1776 1777 ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1778 if (!ethdev) 1779 return 0; 1780 1781 if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) 1782 return rte_eth_dev_pci_generic_remove(pci_dev, 1783 ixgbe_vf_representor_uninit); 1784 else 1785 return rte_eth_dev_pci_generic_remove(pci_dev, 1786 eth_ixgbe_dev_uninit); 1787 } 1788 1789 static struct rte_pci_driver rte_ixgbe_pmd = { 1790 .id_table = pci_id_ixgbe_map, 1791 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 1792 .probe = eth_ixgbe_pci_probe, 1793 .remove = eth_ixgbe_pci_remove, 1794 }; 1795 1796 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1797 struct rte_pci_device *pci_dev) 1798 { 1799 return rte_eth_dev_pci_generic_probe(pci_dev, 1800 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init); 1801 } 1802 1803 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev) 1804 { 1805 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit); 1806 } 1807 1808 /* 1809 * virtual function driver struct 1810 */ 1811 static struct rte_pci_driver rte_ixgbevf_pmd = { 1812 .id_table = pci_id_ixgbevf_map, 1813 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 1814 .probe = eth_ixgbevf_pci_probe, 1815 .remove = eth_ixgbevf_pci_remove, 1816 }; 1817 1818 static int 1819 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1820 { 1821 struct ixgbe_hw *hw = 1822 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1823 struct ixgbe_vfta *shadow_vfta = 1824 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1825 uint32_t vfta; 1826 uint32_t vid_idx; 1827 uint32_t vid_bit; 1828 1829 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 1830 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 1831 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx)); 1832 if (on) 1833 vfta |= vid_bit; 1834 else 1835 vfta &= ~vid_bit; 1836 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta); 1837 1838 /* update local VFTA copy */ 1839 shadow_vfta->vfta[vid_idx] = vfta; 1840 1841 return 0; 1842 } 1843 1844 static void 1845 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 1846 { 1847 if (on) 1848 ixgbe_vlan_hw_strip_enable(dev, queue); 1849 else 1850 ixgbe_vlan_hw_strip_disable(dev, queue); 1851 } 1852 1853 static int 1854 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 1855 enum rte_vlan_type vlan_type, 1856 uint16_t tpid) 1857 { 1858 struct ixgbe_hw *hw = 1859 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1860 int ret = 0; 1861 uint32_t reg; 1862 uint32_t qinq; 1863 1864 qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1865 qinq &= IXGBE_DMATXCTL_GDV; 1866 1867 switch (vlan_type) { 1868 case ETH_VLAN_TYPE_INNER: 1869 if (qinq) { 1870 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1871 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1872 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1873 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1874 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1875 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1876 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1877 } else { 1878 ret = -ENOTSUP; 1879 PMD_DRV_LOG(ERR, "Inner type is not supported" 1880 " by single VLAN"); 1881 } 1882 break; 1883 case ETH_VLAN_TYPE_OUTER: 1884 if (qinq) { 1885 /* Only the high 
16-bits is valid */ 1886 IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid << 1887 IXGBE_EXVET_VET_EXT_SHIFT); 1888 } else { 1889 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1890 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1891 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1892 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1893 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1894 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1895 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1896 } 1897 1898 break; 1899 default: 1900 ret = -EINVAL; 1901 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type); 1902 break; 1903 } 1904 1905 return ret; 1906 } 1907 1908 void 1909 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev) 1910 { 1911 struct ixgbe_hw *hw = 1912 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1913 uint32_t vlnctrl; 1914 1915 PMD_INIT_FUNC_TRACE(); 1916 1917 /* Filter Table Disable */ 1918 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1919 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 1920 1921 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1922 } 1923 1924 void 1925 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) 1926 { 1927 struct ixgbe_hw *hw = 1928 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1929 struct ixgbe_vfta *shadow_vfta = 1930 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1931 uint32_t vlnctrl; 1932 uint16_t i; 1933 1934 PMD_INIT_FUNC_TRACE(); 1935 1936 /* Filter Table Enable */ 1937 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1938 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 1939 vlnctrl |= IXGBE_VLNCTRL_VFE; 1940 1941 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1942 1943 /* write whatever is in local vfta copy */ 1944 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1945 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]); 1946 } 1947 1948 static void 1949 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) 1950 { 1951 struct ixgbe_hwstrip *hwstrip = 1952 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); 1953 struct ixgbe_rx_queue *rxq; 1954 1955 if (queue >= IXGBE_MAX_RX_QUEUE_NUM) 1956 return; 1957 1958 if (on) 1959 IXGBE_SET_HWSTRIP(hwstrip, queue); 1960 else 1961 IXGBE_CLEAR_HWSTRIP(hwstrip, queue); 1962 1963 if (queue >= dev->data->nb_rx_queues) 1964 return; 1965 1966 rxq = dev->data->rx_queues[queue]; 1967 1968 if (on) { 1969 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; 1970 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 1971 } else { 1972 rxq->vlan_flags = PKT_RX_VLAN; 1973 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 1974 } 1975 } 1976 1977 static void 1978 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) 1979 { 1980 struct ixgbe_hw *hw = 1981 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1982 uint32_t ctrl; 1983 1984 PMD_INIT_FUNC_TRACE(); 1985 1986 if (hw->mac.type == ixgbe_mac_82598EB) { 1987 /* No queue level support */ 1988 PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); 1989 return; 1990 } 1991 1992 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 1993 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 1994 ctrl &= ~IXGBE_RXDCTL_VME; 1995 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 1996 1997 /* record those setting for HW strip per queue */ 1998 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); 1999 } 2000 2001 static void 2002 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) 2003 { 2004 struct ixgbe_hw *hw = 2005 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2006 uint32_t ctrl; 2007 2008 PMD_INIT_FUNC_TRACE(); 2009 2010 if (hw->mac.type == ixgbe_mac_82598EB) { 2011 
/* No queue level supported */ 2012 PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); 2013 return; 2014 } 2015 2016 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 2017 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 2018 ctrl |= IXGBE_RXDCTL_VME; 2019 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 2020 2021 /* record those setting for HW strip per queue */ 2022 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); 2023 } 2024 2025 static void 2026 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev) 2027 { 2028 struct ixgbe_hw *hw = 2029 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2030 uint32_t ctrl; 2031 2032 PMD_INIT_FUNC_TRACE(); 2033 2034 /* DMATXCTRL: Generic Double VLAN Disable */ 2035 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2036 ctrl &= ~IXGBE_DMATXCTL_GDV; 2037 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); 2038 2039 /* CTRL_EXT: Global Double VLAN Disable */ 2040 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 2041 ctrl &= ~IXGBE_EXTENDED_VLAN; 2042 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); 2043 2044 } 2045 2046 static void 2047 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) 2048 { 2049 struct ixgbe_hw *hw = 2050 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2051 uint32_t ctrl; 2052 2053 PMD_INIT_FUNC_TRACE(); 2054 2055 /* DMATXCTRL: Generic Double VLAN Enable */ 2056 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2057 ctrl |= IXGBE_DMATXCTL_GDV; 2058 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); 2059 2060 /* CTRL_EXT: Global Double VLAN Enable */ 2061 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 2062 ctrl |= IXGBE_EXTENDED_VLAN; 2063 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); 2064 2065 /* Clear pooling mode of PFVTCTL. It's required by X550. */ 2066 if (hw->mac.type == ixgbe_mac_X550 || 2067 hw->mac.type == ixgbe_mac_X550EM_x || 2068 hw->mac.type == ixgbe_mac_X550EM_a) { 2069 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 2070 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 2071 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 2072 } 2073 2074 /* 2075 * VET EXT field in the EXVET register = 0x8100 by default 2076 * So no need to change.
Same to VT field of DMATXCTL register 2077 */ 2078 } 2079 2080 void 2081 ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) 2082 { 2083 struct ixgbe_hw *hw = 2084 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2085 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; 2086 uint32_t ctrl; 2087 uint16_t i; 2088 struct ixgbe_rx_queue *rxq; 2089 bool on; 2090 2091 PMD_INIT_FUNC_TRACE(); 2092 2093 if (hw->mac.type == ixgbe_mac_82598EB) { 2094 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { 2095 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2096 ctrl |= IXGBE_VLNCTRL_VME; 2097 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2098 } else { 2099 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2100 ctrl &= ~IXGBE_VLNCTRL_VME; 2101 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2102 } 2103 } else { 2104 /* 2105 * Other 10G NIC, the VLAN strip can be setup 2106 * per queue in RXDCTL 2107 */ 2108 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2109 rxq = dev->data->rx_queues[i]; 2110 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); 2111 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { 2112 ctrl |= IXGBE_RXDCTL_VME; 2113 on = TRUE; 2114 } else { 2115 ctrl &= ~IXGBE_RXDCTL_VME; 2116 on = FALSE; 2117 } 2118 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); 2119 2120 /* record those setting for HW strip per queue */ 2121 ixgbe_vlan_hw_strip_bitmap_set(dev, i, on); 2122 } 2123 } 2124 } 2125 2126 static void 2127 ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) 2128 { 2129 uint16_t i; 2130 struct rte_eth_rxmode *rxmode; 2131 struct ixgbe_rx_queue *rxq; 2132 2133 if (mask & ETH_VLAN_STRIP_MASK) { 2134 rxmode = &dev->data->dev_conf.rxmode; 2135 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 2136 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2137 rxq = dev->data->rx_queues[i]; 2138 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 2139 } 2140 else 2141 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2142 rxq = dev->data->rx_queues[i]; 2143 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 2144 } 2145 } 2146 } 2147 2148 static int 2149 ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) 2150 { 2151 struct rte_eth_rxmode *rxmode; 2152 rxmode = &dev->data->dev_conf.rxmode; 2153 2154 if (mask & ETH_VLAN_STRIP_MASK) { 2155 ixgbe_vlan_hw_strip_config(dev); 2156 } 2157 2158 if (mask & ETH_VLAN_FILTER_MASK) { 2159 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 2160 ixgbe_vlan_hw_filter_enable(dev); 2161 else 2162 ixgbe_vlan_hw_filter_disable(dev); 2163 } 2164 2165 if (mask & ETH_VLAN_EXTEND_MASK) { 2166 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 2167 ixgbe_vlan_hw_extend_enable(dev); 2168 else 2169 ixgbe_vlan_hw_extend_disable(dev); 2170 } 2171 2172 return 0; 2173 } 2174 2175 static int 2176 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2177 { 2178 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 2179 2180 ixgbe_vlan_offload_config(dev, mask); 2181 2182 return 0; 2183 } 2184 2185 static void 2186 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) 2187 { 2188 struct ixgbe_hw *hw = 2189 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2190 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ 2191 uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2192 2193 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ 2194 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); 2195 } 2196 2197 static int 2198 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) 2199 { 2200 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2201 
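	/*
	 * The switch below maps the requested per-pool Rx queue count onto a
	 * VMDq pool layout.  As an illustrative example: with nb_rx_q = 4 the
	 * device is carved into ETH_32_POOLS = 32 pools, so nb_q_per_pool
	 * becomes 128 / 32 = 4, and with 16 VFs the PF's default pool queues
	 * would start at index 16 * 4 = 64.
	 */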
2202 switch (nb_rx_q) { 2203 case 1: 2204 case 2: 2205 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS; 2206 break; 2207 case 4: 2208 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS; 2209 break; 2210 default: 2211 return -EINVAL; 2212 } 2213 2214 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 2215 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2216 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = 2217 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2218 return 0; 2219 } 2220 2221 static int 2222 ixgbe_check_mq_mode(struct rte_eth_dev *dev) 2223 { 2224 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 2225 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2226 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2227 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2228 2229 if (RTE_ETH_DEV_SRIOV(dev).active != 0) { 2230 /* check multi-queue mode */ 2231 switch (dev_conf->rxmode.mq_mode) { 2232 case ETH_MQ_RX_VMDQ_DCB: 2233 PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV"); 2234 break; 2235 case ETH_MQ_RX_VMDQ_DCB_RSS: 2236 /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */ 2237 PMD_INIT_LOG(ERR, "SRIOV active," 2238 " unsupported mq_mode rx %d.", 2239 dev_conf->rxmode.mq_mode); 2240 return -EINVAL; 2241 case ETH_MQ_RX_RSS: 2242 case ETH_MQ_RX_VMDQ_RSS: 2243 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS; 2244 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) 2245 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) { 2246 PMD_INIT_LOG(ERR, "SRIOV is active," 2247 " invalid queue number" 2248 " for VMDQ RSS, allowed" 2249 " values are 1, 2 or 4."); 2250 return -EINVAL; 2251 } 2252 break; 2253 case ETH_MQ_RX_VMDQ_ONLY: 2254 case ETH_MQ_RX_NONE: 2255 /* if no mq mode is configured, use the default scheme */ 2256 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; 2257 break; 2258 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */ 2259 /* SRIOV only works in VMDq enabled mode */ 2260 PMD_INIT_LOG(ERR, "SRIOV is active," 2261 " wrong mq_mode rx %d.", 2262 dev_conf->rxmode.mq_mode); 2263 return -EINVAL; 2264 } 2265 2266 switch (dev_conf->txmode.mq_mode) { 2267 case ETH_MQ_TX_VMDQ_DCB: 2268 PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV"); 2269 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 2270 break; 2271 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */ 2272 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY; 2273 break; 2274 } 2275 2276 /* check valid queue number */ 2277 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || 2278 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { 2279 PMD_INIT_LOG(ERR, "SRIOV is active," 2280 " nb_rx_q=%d nb_tx_q=%d queue number" 2281 " must be less than or equal to %d.", 2282 nb_rx_q, nb_tx_q, 2283 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); 2284 return -EINVAL; 2285 } 2286 } else { 2287 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { 2288 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" 2289 " not supported."); 2290 return -EINVAL; 2291 } 2292 /* check configuration for vmdq+dcb mode */ 2293 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) { 2294 const struct rte_eth_vmdq_dcb_conf *conf; 2295 2296 if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2297 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.", 2298 IXGBE_VMDQ_DCB_NB_QUEUES); 2299 return -EINVAL; 2300 } 2301 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf; 2302 if (!(conf->nb_queue_pools == ETH_16_POOLS || 2303 conf->nb_queue_pools == ETH_32_POOLS)) { 2304 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2305 " nb_queue_pools must be %d or %d.",
2306 ETH_16_POOLS, ETH_32_POOLS); 2307 return -EINVAL; 2308 } 2309 } 2310 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { 2311 const struct rte_eth_vmdq_dcb_tx_conf *conf; 2312 2313 if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2314 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d", 2315 IXGBE_VMDQ_DCB_NB_QUEUES); 2316 return -EINVAL; 2317 } 2318 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2319 if (!(conf->nb_queue_pools == ETH_16_POOLS || 2320 conf->nb_queue_pools == ETH_32_POOLS)) { 2321 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2322 " nb_queue_pools != %d and" 2323 " nb_queue_pools != %d.", 2324 ETH_16_POOLS, ETH_32_POOLS); 2325 return -EINVAL; 2326 } 2327 } 2328 2329 /* For DCB mode check our configuration before we go further */ 2330 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { 2331 const struct rte_eth_dcb_rx_conf *conf; 2332 2333 conf = &dev_conf->rx_adv_conf.dcb_rx_conf; 2334 if (!(conf->nb_tcs == ETH_4_TCS || 2335 conf->nb_tcs == ETH_8_TCS)) { 2336 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2337 " and nb_tcs != %d.", 2338 ETH_4_TCS, ETH_8_TCS); 2339 return -EINVAL; 2340 } 2341 } 2342 2343 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { 2344 const struct rte_eth_dcb_tx_conf *conf; 2345 2346 conf = &dev_conf->tx_adv_conf.dcb_tx_conf; 2347 if (!(conf->nb_tcs == ETH_4_TCS || 2348 conf->nb_tcs == ETH_8_TCS)) { 2349 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2350 " and nb_tcs != %d.", 2351 ETH_4_TCS, ETH_8_TCS); 2352 return -EINVAL; 2353 } 2354 } 2355 2356 /* 2357 * When DCB/VT is off, maximum number of queues changes, 2358 * except for 82598EB, which remains constant. 2359 */ 2360 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && 2361 hw->mac.type != ixgbe_mac_82598EB) { 2362 if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) { 2363 PMD_INIT_LOG(ERR, 2364 "Neither VT nor DCB are enabled, " 2365 "nb_tx_q > %d.", 2366 IXGBE_NONE_MODE_TX_NB_QUEUES); 2367 return -EINVAL; 2368 } 2369 } 2370 } 2371 return 0; 2372 } 2373 2374 static int 2375 ixgbe_dev_configure(struct rte_eth_dev *dev) 2376 { 2377 struct ixgbe_interrupt *intr = 2378 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2379 struct ixgbe_adapter *adapter = dev->data->dev_private; 2380 int ret; 2381 2382 PMD_INIT_FUNC_TRACE(); 2383 2384 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 2385 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 2386 2387 /* multipe queue mode checking */ 2388 ret = ixgbe_check_mq_mode(dev); 2389 if (ret != 0) { 2390 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.", 2391 ret); 2392 return ret; 2393 } 2394 2395 /* set flag to update link status after init */ 2396 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2397 2398 /* 2399 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 2400 * allocation or vector Rx preconditions we will reset it. 
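	 * (The Rx queue setup and Rx init code later clears these flags again
	 * for configurations whose descriptor counts or thresholds rule out
	 * the bulk-allocation or vector receive paths.)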
2401 */ 2402 adapter->rx_bulk_alloc_allowed = true; 2403 adapter->rx_vec_allowed = true; 2404 2405 return 0; 2406 } 2407 2408 static void 2409 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) 2410 { 2411 struct ixgbe_hw *hw = 2412 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2413 struct ixgbe_interrupt *intr = 2414 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2415 uint32_t gpie; 2416 2417 /* only set up it on X550EM_X */ 2418 if (hw->mac.type == ixgbe_mac_X550EM_x) { 2419 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 2420 gpie |= IXGBE_SDP0_GPIEN_X550EM_x; 2421 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2422 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 2423 intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x; 2424 } 2425 } 2426 2427 int 2428 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, 2429 uint16_t tx_rate, uint64_t q_msk) 2430 { 2431 struct ixgbe_hw *hw; 2432 struct ixgbe_vf_info *vfinfo; 2433 struct rte_eth_link link; 2434 uint8_t nb_q_per_pool; 2435 uint32_t queue_stride; 2436 uint32_t queue_idx, idx = 0, vf_idx; 2437 uint32_t queue_end; 2438 uint16_t total_rate = 0; 2439 struct rte_pci_device *pci_dev; 2440 int ret; 2441 2442 pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2443 ret = rte_eth_link_get_nowait(dev->data->port_id, &link); 2444 if (ret < 0) 2445 return ret; 2446 2447 if (vf >= pci_dev->max_vfs) 2448 return -EINVAL; 2449 2450 if (tx_rate > link.link_speed) 2451 return -EINVAL; 2452 2453 if (q_msk == 0) 2454 return 0; 2455 2456 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2457 vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); 2458 nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2459 queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2460 queue_idx = vf * queue_stride; 2461 queue_end = queue_idx + nb_q_per_pool - 1; 2462 if (queue_end >= hw->mac.max_tx_queues) 2463 return -EINVAL; 2464 2465 if (vfinfo) { 2466 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) { 2467 if (vf_idx == vf) 2468 continue; 2469 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); 2470 idx++) 2471 total_rate += vfinfo[vf_idx].tx_rate[idx]; 2472 } 2473 } else { 2474 return -EINVAL; 2475 } 2476 2477 /* Store tx_rate for this vf. */ 2478 for (idx = 0; idx < nb_q_per_pool; idx++) { 2479 if (((uint64_t)0x1 << idx) & q_msk) { 2480 if (vfinfo[vf].tx_rate[idx] != tx_rate) 2481 vfinfo[vf].tx_rate[idx] = tx_rate; 2482 total_rate += tx_rate; 2483 } 2484 } 2485 2486 if (total_rate > dev->data->dev_link.link_speed) { 2487 /* Reset stored TX rate of the VF if it causes exceed 2488 * link speed. 
2489 */ 2490 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); 2491 return -EINVAL; 2492 } 2493 2494 /* Set RTTBCNRC of each queue/pool for vf X */ 2495 for (; queue_idx <= queue_end; queue_idx++) { 2496 if (0x1 & q_msk) 2497 ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); 2498 q_msk = q_msk >> 1; 2499 } 2500 2501 return 0; 2502 } 2503 2504 static int 2505 ixgbe_flow_ctrl_enable(struct rte_eth_dev *dev, struct ixgbe_hw *hw) 2506 { 2507 struct ixgbe_adapter *adapter = dev->data->dev_private; 2508 int err; 2509 uint32_t mflcn; 2510 2511 ixgbe_setup_fc(hw); 2512 2513 err = ixgbe_fc_enable(hw); 2514 2515 /* Not negotiated is not an error case */ 2516 if (err == IXGBE_SUCCESS || err == IXGBE_ERR_FC_NOT_NEGOTIATED) { 2517 /* 2518 *check if we want to forward MAC frames - driver doesn't 2519 *have native capability to do that, 2520 *so we'll write the registers ourselves 2521 */ 2522 2523 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2524 2525 /* set or clear MFLCN.PMCF bit depending on configuration */ 2526 if (adapter->mac_ctrl_frame_fwd != 0) 2527 mflcn |= IXGBE_MFLCN_PMCF; 2528 else 2529 mflcn &= ~IXGBE_MFLCN_PMCF; 2530 2531 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn); 2532 IXGBE_WRITE_FLUSH(hw); 2533 2534 return 0; 2535 } 2536 return err; 2537 } 2538 2539 /* 2540 * Configure device link speed and setup link. 2541 * It returns 0 on success. 2542 */ 2543 static int 2544 ixgbe_dev_start(struct rte_eth_dev *dev) 2545 { 2546 struct ixgbe_hw *hw = 2547 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2548 struct ixgbe_vf_info *vfinfo = 2549 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2550 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2551 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2552 uint32_t intr_vector = 0; 2553 int err; 2554 bool link_up = false, negotiate = 0; 2555 uint32_t speed = 0; 2556 uint32_t allowed_speeds = 0; 2557 int mask = 0; 2558 int status; 2559 uint16_t vf, idx; 2560 uint32_t *link_speeds; 2561 struct ixgbe_tm_conf *tm_conf = 2562 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2563 struct ixgbe_macsec_setting *macsec_setting = 2564 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 2565 2566 PMD_INIT_FUNC_TRACE(); 2567 2568 /* Stop the link setup handler before resetting the HW. 
*/ 2569 ixgbe_dev_wait_setup_link_complete(dev, 0); 2570 2571 /* disable uio/vfio intr/eventfd mapping */ 2572 rte_intr_disable(intr_handle); 2573 2574 /* stop adapter */ 2575 hw->adapter_stopped = 0; 2576 ixgbe_stop_adapter(hw); 2577 2578 /* reinitialize adapter 2579 * this calls reset and start 2580 */ 2581 status = ixgbe_pf_reset_hw(hw); 2582 if (status != 0) 2583 return -1; 2584 hw->mac.ops.start_hw(hw); 2585 hw->mac.get_link_status = true; 2586 2587 /* configure PF module if SRIOV enabled */ 2588 ixgbe_pf_host_configure(dev); 2589 2590 ixgbe_dev_phy_intr_setup(dev); 2591 2592 /* check and configure queue intr-vector mapping */ 2593 if ((rte_intr_cap_multiple(intr_handle) || 2594 !RTE_ETH_DEV_SRIOV(dev).active) && 2595 dev->data->dev_conf.intr_conf.rxq != 0) { 2596 intr_vector = dev->data->nb_rx_queues; 2597 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) { 2598 PMD_INIT_LOG(ERR, "At most %d intr queues supported", 2599 IXGBE_MAX_INTR_QUEUE_NUM); 2600 return -ENOTSUP; 2601 } 2602 if (rte_intr_efd_enable(intr_handle, intr_vector)) 2603 return -1; 2604 } 2605 2606 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 2607 intr_handle->intr_vec = 2608 rte_zmalloc("intr_vec", 2609 dev->data->nb_rx_queues * sizeof(int), 0); 2610 if (intr_handle->intr_vec == NULL) { 2611 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 2612 " intr_vec", dev->data->nb_rx_queues); 2613 return -ENOMEM; 2614 } 2615 } 2616 2617 /* confiugre msix for sleep until rx interrupt */ 2618 ixgbe_configure_msix(dev); 2619 2620 /* initialize transmission unit */ 2621 ixgbe_dev_tx_init(dev); 2622 2623 /* This can fail when allocating mbufs for descriptor rings */ 2624 err = ixgbe_dev_rx_init(dev); 2625 if (err) { 2626 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 2627 goto error; 2628 } 2629 2630 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | 2631 ETH_VLAN_EXTEND_MASK; 2632 err = ixgbe_vlan_offload_config(dev, mask); 2633 if (err) { 2634 PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); 2635 goto error; 2636 } 2637 2638 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { 2639 /* Enable vlan filtering for VMDq */ 2640 ixgbe_vmdq_vlan_hw_filter_enable(dev); 2641 } 2642 2643 /* Configure DCB hw */ 2644 ixgbe_configure_dcb(dev); 2645 2646 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { 2647 err = ixgbe_fdir_configure(dev); 2648 if (err) 2649 goto error; 2650 } 2651 2652 /* Restore vf rate limit */ 2653 if (vfinfo != NULL) { 2654 for (vf = 0; vf < pci_dev->max_vfs; vf++) 2655 for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) 2656 if (vfinfo[vf].tx_rate[idx] != 0) 2657 ixgbe_set_vf_rate_limit( 2658 dev, vf, 2659 vfinfo[vf].tx_rate[idx], 2660 1 << idx); 2661 } 2662 2663 ixgbe_restore_statistics_mapping(dev); 2664 2665 err = ixgbe_flow_ctrl_enable(dev, hw); 2666 if (err < 0) { 2667 PMD_INIT_LOG(ERR, "enable flow ctrl err"); 2668 goto error; 2669 } 2670 2671 err = ixgbe_dev_rxtx_start(dev); 2672 if (err < 0) { 2673 PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); 2674 goto error; 2675 } 2676 2677 /* Skip link setup if loopback mode is enabled. 
*/ 2678 if (dev->data->dev_conf.lpbk_mode != 0) { 2679 err = ixgbe_check_supported_loopback_mode(dev); 2680 if (err < 0) { 2681 PMD_INIT_LOG(ERR, "Unsupported loopback mode"); 2682 goto error; 2683 } else { 2684 goto skip_link_setup; 2685 } 2686 } 2687 2688 if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { 2689 err = hw->mac.ops.setup_sfp(hw); 2690 if (err) 2691 goto error; 2692 } 2693 2694 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2695 /* Turn on the copper */ 2696 ixgbe_set_phy_power(hw, true); 2697 } else { 2698 /* Turn on the laser */ 2699 ixgbe_enable_tx_laser(hw); 2700 } 2701 2702 err = ixgbe_check_link(hw, &speed, &link_up, 0); 2703 if (err) 2704 goto error; 2705 dev->data->dev_link.link_status = link_up; 2706 2707 err = ixgbe_get_link_capabilities(hw, &speed, &negotiate); 2708 if (err) 2709 goto error; 2710 2711 switch (hw->mac.type) { 2712 case ixgbe_mac_X550: 2713 case ixgbe_mac_X550EM_x: 2714 case ixgbe_mac_X550EM_a: 2715 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | 2716 ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G | 2717 ETH_LINK_SPEED_10G; 2718 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 2719 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 2720 allowed_speeds = ETH_LINK_SPEED_10M | 2721 ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G; 2722 break; 2723 default: 2724 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | 2725 ETH_LINK_SPEED_10G; 2726 } 2727 2728 link_speeds = &dev->data->dev_conf.link_speeds; 2729 2730 /* Ignore autoneg flag bit and check the validity of 2731 * link_speed 2732 */ 2733 if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) { 2734 PMD_INIT_LOG(ERR, "Invalid link setting"); 2735 goto error; 2736 } 2737 2738 speed = 0x0; 2739 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) { 2740 switch (hw->mac.type) { 2741 case ixgbe_mac_82598EB: 2742 speed = IXGBE_LINK_SPEED_82598_AUTONEG; 2743 break; 2744 case ixgbe_mac_82599EB: 2745 case ixgbe_mac_X540: 2746 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2747 break; 2748 case ixgbe_mac_X550: 2749 case ixgbe_mac_X550EM_x: 2750 case ixgbe_mac_X550EM_a: 2751 speed = IXGBE_LINK_SPEED_X550_AUTONEG; 2752 break; 2753 default: 2754 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2755 } 2756 } else { 2757 if (*link_speeds & ETH_LINK_SPEED_10G) 2758 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2759 if (*link_speeds & ETH_LINK_SPEED_5G) 2760 speed |= IXGBE_LINK_SPEED_5GB_FULL; 2761 if (*link_speeds & ETH_LINK_SPEED_2_5G) 2762 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 2763 if (*link_speeds & ETH_LINK_SPEED_1G) 2764 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2765 if (*link_speeds & ETH_LINK_SPEED_100M) 2766 speed |= IXGBE_LINK_SPEED_100_FULL; 2767 if (*link_speeds & ETH_LINK_SPEED_10M) 2768 speed |= IXGBE_LINK_SPEED_10_FULL; 2769 } 2770 2771 err = ixgbe_setup_link(hw, speed, link_up); 2772 if (err) 2773 goto error; 2774 2775 skip_link_setup: 2776 2777 if (rte_intr_allow_others(intr_handle)) { 2778 /* check if lsc interrupt is enabled */ 2779 if (dev->data->dev_conf.intr_conf.lsc != 0) 2780 ixgbe_dev_lsc_interrupt_setup(dev, TRUE); 2781 else 2782 ixgbe_dev_lsc_interrupt_setup(dev, FALSE); 2783 ixgbe_dev_macsec_interrupt_setup(dev); 2784 } else { 2785 rte_intr_callback_unregister(intr_handle, 2786 ixgbe_dev_interrupt_handler, dev); 2787 if (dev->data->dev_conf.intr_conf.lsc != 0) 2788 PMD_INIT_LOG(INFO, "lsc won't enable because of" 2789 " no intr multiplex"); 2790 } 2791 2792 /* check if rxq interrupt is enabled */ 2793 if (dev->data->dev_conf.intr_conf.rxq != 0 && 2794 rte_intr_dp_is_en(intr_handle)) 2795 
ixgbe_dev_rxq_interrupt_setup(dev); 2796 2797 /* enable uio/vfio intr/eventfd mapping */ 2798 rte_intr_enable(intr_handle); 2799 2800 /* resume enabled intr since hw reset */ 2801 ixgbe_enable_intr(dev); 2802 ixgbe_l2_tunnel_conf(dev); 2803 ixgbe_filter_restore(dev); 2804 2805 if (tm_conf->root && !tm_conf->committed) 2806 PMD_DRV_LOG(WARNING, 2807 "please call hierarchy_commit() " 2808 "before starting the port"); 2809 2810 /* wait for the controller to acquire link */ 2811 err = ixgbe_wait_for_link_up(hw); 2812 if (err) 2813 goto error; 2814 2815 /* 2816 * Update link status right before return, because it may 2817 * start link configuration process in a separate thread. 2818 */ 2819 ixgbe_dev_link_update(dev, 0); 2820 2821 /* setup the macsec setting register */ 2822 if (macsec_setting->offload_en) 2823 ixgbe_dev_macsec_register_enable(dev, macsec_setting); 2824 2825 return 0; 2826 2827 error: 2828 PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err); 2829 ixgbe_dev_clear_queues(dev); 2830 return -EIO; 2831 } 2832 2833 /* 2834 * Stop device: disable rx and tx functions to allow for reconfiguring. 2835 */ 2836 static int 2837 ixgbe_dev_stop(struct rte_eth_dev *dev) 2838 { 2839 struct rte_eth_link link; 2840 struct ixgbe_adapter *adapter = dev->data->dev_private; 2841 struct ixgbe_hw *hw = 2842 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2843 struct ixgbe_vf_info *vfinfo = 2844 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2845 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2846 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2847 int vf; 2848 struct ixgbe_tm_conf *tm_conf = 2849 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2850 2851 if (hw->adapter_stopped) 2852 return 0; 2853 2854 PMD_INIT_FUNC_TRACE(); 2855 2856 ixgbe_dev_wait_setup_link_complete(dev, 0); 2857 2858 /* disable interrupts */ 2859 ixgbe_disable_intr(hw); 2860 2861 /* reset the NIC */ 2862 ixgbe_pf_reset_hw(hw); 2863 hw->adapter_stopped = 0; 2864 2865 /* stop adapter */ 2866 ixgbe_stop_adapter(hw); 2867 2868 for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) 2869 vfinfo[vf].clear_to_send = false; 2870 2871 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2872 /* Turn off the copper */ 2873 ixgbe_set_phy_power(hw, false); 2874 } else { 2875 /* Turn off the laser */ 2876 ixgbe_disable_tx_laser(hw); 2877 } 2878 2879 ixgbe_dev_clear_queues(dev); 2880 2881 /* Clear stored conf */ 2882 dev->data->scattered_rx = 0; 2883 dev->data->lro = 0; 2884 2885 /* Clear recorded link status */ 2886 memset(&link, 0, sizeof(link)); 2887 rte_eth_linkstatus_set(dev, &link); 2888 2889 if (!rte_intr_allow_others(intr_handle)) 2890 /* resume to the default handler */ 2891 rte_intr_callback_register(intr_handle, 2892 ixgbe_dev_interrupt_handler, 2893 (void *)dev); 2894 2895 /* Clean datapath event and queue/vec mapping */ 2896 rte_intr_efd_disable(intr_handle); 2897 if (intr_handle->intr_vec != NULL) { 2898 rte_free(intr_handle->intr_vec); 2899 intr_handle->intr_vec = NULL; 2900 } 2901 2902 /* reset hierarchy commit */ 2903 tm_conf->committed = false; 2904 2905 adapter->rss_reta_updated = 0; 2906 2907 hw->adapter_stopped = true; 2908 dev->data->dev_started = 0; 2909 2910 return 0; 2911 } 2912 2913 /* 2914 * Set device link up: enable tx. 
2915 */ 2916 static int 2917 ixgbe_dev_set_link_up(struct rte_eth_dev *dev) 2918 { 2919 struct ixgbe_hw *hw = 2920 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2921 if (hw->mac.type == ixgbe_mac_82599EB) { 2922 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2923 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2924 /* Not supported in bypass mode */ 2925 PMD_INIT_LOG(ERR, "Set link up is not supported " 2926 "by device id 0x%x", hw->device_id); 2927 return -ENOTSUP; 2928 } 2929 #endif 2930 } 2931 2932 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2933 /* Turn on the copper */ 2934 ixgbe_set_phy_power(hw, true); 2935 } else { 2936 /* Turn on the laser */ 2937 ixgbe_enable_tx_laser(hw); 2938 ixgbe_dev_link_update(dev, 0); 2939 } 2940 2941 return 0; 2942 } 2943 2944 /* 2945 * Set device link down: disable tx. 2946 */ 2947 static int 2948 ixgbe_dev_set_link_down(struct rte_eth_dev *dev) 2949 { 2950 struct ixgbe_hw *hw = 2951 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2952 if (hw->mac.type == ixgbe_mac_82599EB) { 2953 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2954 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2955 /* Not supported in bypass mode */ 2956 PMD_INIT_LOG(ERR, "Set link down is not supported " 2957 "by device id 0x%x", hw->device_id); 2958 return -ENOTSUP; 2959 } 2960 #endif 2961 } 2962 2963 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2964 /* Turn off the copper */ 2965 ixgbe_set_phy_power(hw, false); 2966 } else { 2967 /* Turn off the laser */ 2968 ixgbe_disable_tx_laser(hw); 2969 ixgbe_dev_link_update(dev, 0); 2970 } 2971 2972 return 0; 2973 } 2974 2975 /* 2976 * Reset and stop device. 2977 */ 2978 static int 2979 ixgbe_dev_close(struct rte_eth_dev *dev) 2980 { 2981 struct ixgbe_hw *hw = 2982 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2983 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2984 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2985 int retries = 0; 2986 int ret; 2987 2988 PMD_INIT_FUNC_TRACE(); 2989 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2990 return 0; 2991 2992 ixgbe_pf_reset_hw(hw); 2993 2994 ret = ixgbe_dev_stop(dev); 2995 2996 ixgbe_dev_free_queues(dev); 2997 2998 ixgbe_disable_pcie_master(hw); 2999 3000 /* reprogram the RAR[0] in case user changed it.
*/ 3001 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 3002 3003 /* Unlock any pending hardware semaphore */ 3004 ixgbe_swfw_lock_reset(hw); 3005 3006 /* disable uio intr before callback unregister */ 3007 rte_intr_disable(intr_handle); 3008 3009 do { 3010 ret = rte_intr_callback_unregister(intr_handle, 3011 ixgbe_dev_interrupt_handler, dev); 3012 if (ret >= 0 || ret == -ENOENT) { 3013 break; 3014 } else if (ret != -EAGAIN) { 3015 PMD_INIT_LOG(ERR, 3016 "intr callback unregister failed: %d", 3017 ret); 3018 } 3019 rte_delay_ms(100); 3020 } while (retries++ < (10 + IXGBE_LINK_UP_TIME)); 3021 3022 /* cancel the delay handler before remove dev */ 3023 rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, dev); 3024 3025 /* uninitialize PF if max_vfs not zero */ 3026 ixgbe_pf_host_uninit(dev); 3027 3028 /* remove all the fdir filters & hash */ 3029 ixgbe_fdir_filter_uninit(dev); 3030 3031 /* remove all the L2 tunnel filters & hash */ 3032 ixgbe_l2_tn_filter_uninit(dev); 3033 3034 /* Remove all ntuple filters of the device */ 3035 ixgbe_ntuple_filter_uninit(dev); 3036 3037 /* clear all the filters list */ 3038 ixgbe_filterlist_flush(); 3039 3040 /* Remove all Traffic Manager configuration */ 3041 ixgbe_tm_conf_uninit(dev); 3042 3043 #ifdef RTE_LIB_SECURITY 3044 rte_free(dev->security_ctx); 3045 #endif 3046 3047 return ret; 3048 } 3049 3050 /* 3051 * Reset PF device. 3052 */ 3053 static int 3054 ixgbe_dev_reset(struct rte_eth_dev *dev) 3055 { 3056 int ret; 3057 3058 /* When a DPDK PMD PF begin to reset PF port, it should notify all 3059 * its VF to make them align with it. The detailed notification 3060 * mechanism is PMD specific. As to ixgbe PF, it is rather complex. 3061 * To avoid unexpected behavior in VF, currently reset of PF with 3062 * SR-IOV activation is not supported. It might be supported later. 3063 */ 3064 if (dev->data->sriov.active) 3065 return -ENOTSUP; 3066 3067 ret = eth_ixgbe_dev_uninit(dev); 3068 if (ret) 3069 return ret; 3070 3071 ret = eth_ixgbe_dev_init(dev, NULL); 3072 3073 return ret; 3074 } 3075 3076 static void 3077 ixgbe_read_stats_registers(struct ixgbe_hw *hw, 3078 struct ixgbe_hw_stats *hw_stats, 3079 struct ixgbe_macsec_stats *macsec_stats, 3080 uint64_t *total_missed_rx, uint64_t *total_qbrc, 3081 uint64_t *total_qprc, uint64_t *total_qprdc) 3082 { 3083 uint32_t bprc, lxon, lxoff, total; 3084 uint32_t delta_gprc = 0; 3085 unsigned i; 3086 /* Workaround for RX byte count not including CRC bytes when CRC 3087 * strip is enabled. CRC bytes are removed from counters when crc_strip 3088 * is disabled. 
3089 */ 3090 int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & 3091 IXGBE_HLREG0_RXCRCSTRP); 3092 3093 hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 3094 hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 3095 hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 3096 hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 3097 3098 for (i = 0; i < 8; i++) { 3099 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 3100 3101 /* global total per queue */ 3102 hw_stats->mpc[i] += mp; 3103 /* Running comprehensive total for stats display */ 3104 *total_missed_rx += hw_stats->mpc[i]; 3105 if (hw->mac.type == ixgbe_mac_82598EB) { 3106 hw_stats->rnbc[i] += 3107 IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 3108 hw_stats->pxonrxc[i] += 3109 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 3110 hw_stats->pxoffrxc[i] += 3111 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 3112 } else { 3113 hw_stats->pxonrxc[i] += 3114 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 3115 hw_stats->pxoffrxc[i] += 3116 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 3117 hw_stats->pxon2offc[i] += 3118 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 3119 } 3120 hw_stats->pxontxc[i] += 3121 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 3122 hw_stats->pxofftxc[i] += 3123 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 3124 } 3125 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3126 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 3127 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 3128 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 3129 3130 delta_gprc += delta_qprc; 3131 3132 hw_stats->qprc[i] += delta_qprc; 3133 hw_stats->qptc[i] += delta_qptc; 3134 3135 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 3136 hw_stats->qbrc[i] += 3137 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); 3138 if (crc_strip == 0) 3139 hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN; 3140 3141 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 3142 hw_stats->qbtc[i] += 3143 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32); 3144 3145 hw_stats->qprdc[i] += delta_qprdc; 3146 *total_qprdc += hw_stats->qprdc[i]; 3147 3148 *total_qprc += hw_stats->qprc[i]; 3149 *total_qbrc += hw_stats->qbrc[i]; 3150 } 3151 hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); 3152 hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); 3153 hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 3154 3155 /* 3156 * An errata states that gprc actually counts good + missed packets: 3157 * Workaround to set gprc to summated queue packet receives 3158 */ 3159 hw_stats->gprc = *total_qprc; 3160 3161 if (hw->mac.type != ixgbe_mac_82598EB) { 3162 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 3163 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 3164 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 3165 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); 3166 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 3167 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 3168 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 3169 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 3170 } else { 3171 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 3172 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 3173 /* 82598 only has a counter in the high register */ 3174 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 3175 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 3176 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 3177 } 3178 uint64_t old_tpr = hw_stats->tpr; 3179 3180 hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 
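	/*
	 * The difference between the updated tpr and old_tpr (saved above) is
	 * the number of packets received since the last read; it is used below
	 * to subtract the corresponding CRC bytes from the total-octets
	 * counter (tor).
	 */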
3181 hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 3182 3183 if (crc_strip == 0) 3184 hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN; 3185 3186 uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); 3187 hw_stats->gptc += delta_gptc; 3188 hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN; 3189 hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; 3190 3191 /* 3192 * Workaround: mprc hardware is incorrectly counting 3193 * broadcasts, so for now we subtract those. 3194 */ 3195 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 3196 hw_stats->bprc += bprc; 3197 hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 3198 if (hw->mac.type == ixgbe_mac_82598EB) 3199 hw_stats->mprc -= bprc; 3200 3201 hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 3202 hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 3203 hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 3204 hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 3205 hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 3206 hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 3207 3208 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 3209 hw_stats->lxontxc += lxon; 3210 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 3211 hw_stats->lxofftxc += lxoff; 3212 total = lxon + lxoff; 3213 3214 hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 3215 hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 3216 hw_stats->gptc -= total; 3217 hw_stats->mptc -= total; 3218 hw_stats->ptc64 -= total; 3219 hw_stats->gotc -= total * RTE_ETHER_MIN_LEN; 3220 3221 hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 3222 hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 3223 hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 3224 hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 3225 hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 3226 hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 3227 hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 3228 hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 3229 hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 3230 hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 3231 hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 3232 hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 3233 hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 3234 hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); 3235 hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 3236 hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 3237 /* Only read FCOE on 82599 */ 3238 if (hw->mac.type != ixgbe_mac_82598EB) { 3239 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 3240 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 3241 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 3242 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 3243 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 3244 } 3245 3246 /* Flow Director Stats registers */ 3247 if (hw->mac.type != ixgbe_mac_82598EB) { 3248 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 3249 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 3250 hw_stats->fdirustat_add += IXGBE_READ_REG(hw, 3251 IXGBE_FDIRUSTAT) & 0xFFFF; 3252 hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw, 3253 IXGBE_FDIRUSTAT) >> 16) & 0xFFFF; 3254 hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw, 3255 IXGBE_FDIRFSTAT) & 0xFFFF; 3256 hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw, 3257 IXGBE_FDIRFSTAT) >> 16) & 0xFFFF; 3258 } 3259 /* MACsec Stats registers */ 3260 macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT); 3261 macsec_stats->out_pkts_encrypted += 
3262 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE); 3263 macsec_stats->out_pkts_protected += 3264 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP); 3265 macsec_stats->out_octets_encrypted += 3266 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE); 3267 macsec_stats->out_octets_protected += 3268 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP); 3269 macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT); 3270 macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD); 3271 macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI); 3272 macsec_stats->in_pkts_unknownsci += 3273 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI); 3274 macsec_stats->in_octets_decrypted += 3275 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD); 3276 macsec_stats->in_octets_validated += 3277 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV); 3278 macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH); 3279 macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY); 3280 macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE); 3281 for (i = 0; i < 2; i++) { 3282 macsec_stats->in_pkts_ok += 3283 IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i)); 3284 macsec_stats->in_pkts_invalid += 3285 IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i)); 3286 macsec_stats->in_pkts_notvalid += 3287 IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i)); 3288 } 3289 macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA); 3290 macsec_stats->in_pkts_notusingsa += 3291 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA); 3292 } 3293 3294 /* 3295 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c 3296 */ 3297 static int 3298 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3299 { 3300 struct ixgbe_hw *hw = 3301 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3302 struct ixgbe_hw_stats *hw_stats = 3303 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3304 struct ixgbe_macsec_stats *macsec_stats = 3305 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3306 dev->data->dev_private); 3307 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3308 unsigned i; 3309 3310 total_missed_rx = 0; 3311 total_qbrc = 0; 3312 total_qprc = 0; 3313 total_qprdc = 0; 3314 3315 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3316 &total_qbrc, &total_qprc, &total_qprdc); 3317 3318 if (stats == NULL) 3319 return -EINVAL; 3320 3321 /* Fill out the rte_eth_stats statistics structure */ 3322 stats->ipackets = total_qprc; 3323 stats->ibytes = total_qbrc; 3324 stats->opackets = hw_stats->gptc; 3325 stats->obytes = hw_stats->gotc; 3326 3327 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3328 stats->q_ipackets[i] = hw_stats->qprc[i]; 3329 stats->q_opackets[i] = hw_stats->qptc[i]; 3330 stats->q_ibytes[i] = hw_stats->qbrc[i]; 3331 stats->q_obytes[i] = hw_stats->qbtc[i]; 3332 stats->q_errors[i] = hw_stats->qprdc[i]; 3333 } 3334 3335 /* Rx Errors */ 3336 stats->imissed = total_missed_rx; 3337 stats->ierrors = hw_stats->crcerrs + 3338 hw_stats->mspdc + 3339 hw_stats->rlec + 3340 hw_stats->ruc + 3341 hw_stats->roc + 3342 hw_stats->illerrc + 3343 hw_stats->errbc + 3344 hw_stats->rfc + 3345 hw_stats->fccrc + 3346 hw_stats->fclast; 3347 3348 /* 3349 * 82599 errata, UDP frames with a 0 checksum can be marked as checksum 3350 * errors. 
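 * For this reason the XEC (checksum error) counter is folded into ierrors
 * only for MAC types other than 82599.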
3351 */ 3352 if (hw->mac.type != ixgbe_mac_82599EB) 3353 stats->ierrors += hw_stats->xec; 3354 3355 /* Tx Errors */ 3356 stats->oerrors = 0; 3357 return 0; 3358 } 3359 3360 static int 3361 ixgbe_dev_stats_reset(struct rte_eth_dev *dev) 3362 { 3363 struct ixgbe_hw_stats *stats = 3364 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3365 3366 /* HW registers are cleared on read */ 3367 ixgbe_dev_stats_get(dev, NULL); 3368 3369 /* Reset software totals */ 3370 memset(stats, 0, sizeof(*stats)); 3371 3372 return 0; 3373 } 3374 3375 /* This function calculates the number of xstats based on the current config */ 3376 static unsigned 3377 ixgbe_xstats_calc_num(void) { 3378 return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS + 3379 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + 3380 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); 3381 } 3382 3383 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3384 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size) 3385 { 3386 const unsigned cnt_stats = ixgbe_xstats_calc_num(); 3387 unsigned stat, i, count; 3388 3389 if (xstats_names != NULL) { 3390 count = 0; 3391 3392 /* Note: limit >= cnt_stats checked upstream 3393 * in rte_eth_xstats_names() 3394 */ 3395 3396 /* Extended stats from ixgbe_hw_stats */ 3397 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3398 strlcpy(xstats_names[count].name, 3399 rte_ixgbe_stats_strings[i].name, 3400 sizeof(xstats_names[count].name)); 3401 count++; 3402 } 3403 3404 /* MACsec Stats */ 3405 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3406 strlcpy(xstats_names[count].name, 3407 rte_ixgbe_macsec_strings[i].name, 3408 sizeof(xstats_names[count].name)); 3409 count++; 3410 } 3411 3412 /* RX Priority Stats */ 3413 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3414 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3415 snprintf(xstats_names[count].name, 3416 sizeof(xstats_names[count].name), 3417 "rx_priority%u_%s", i, 3418 rte_ixgbe_rxq_strings[stat].name); 3419 count++; 3420 } 3421 } 3422 3423 /* TX Priority Stats */ 3424 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3425 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3426 snprintf(xstats_names[count].name, 3427 sizeof(xstats_names[count].name), 3428 "tx_priority%u_%s", i, 3429 rte_ixgbe_txq_strings[stat].name); 3430 count++; 3431 } 3432 } 3433 } 3434 return cnt_stats; 3435 } 3436 3437 static int ixgbe_dev_xstats_get_names_by_id( 3438 struct rte_eth_dev *dev, 3439 struct rte_eth_xstat_name *xstats_names, 3440 const uint64_t *ids, 3441 unsigned int limit) 3442 { 3443 if (!ids) { 3444 const unsigned int cnt_stats = ixgbe_xstats_calc_num(); 3445 unsigned int stat, i, count; 3446 3447 if (xstats_names != NULL) { 3448 count = 0; 3449 3450 /* Note: limit >= cnt_stats checked upstream 3451 * in rte_eth_xstats_names() 3452 */ 3453 3454 /* Extended stats from ixgbe_hw_stats */ 3455 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3456 strlcpy(xstats_names[count].name, 3457 rte_ixgbe_stats_strings[i].name, 3458 sizeof(xstats_names[count].name)); 3459 count++; 3460 } 3461 3462 /* MACsec Stats */ 3463 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3464 strlcpy(xstats_names[count].name, 3465 rte_ixgbe_macsec_strings[i].name, 3466 sizeof(xstats_names[count].name)); 3467 count++; 3468 } 3469 3470 /* RX Priority Stats */ 3471 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3472 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3473 snprintf(xstats_names[count].name, 3474 sizeof(xstats_names[count].name), 3475 
"rx_priority%u_%s", i, 3476 rte_ixgbe_rxq_strings[stat].name); 3477 count++; 3478 } 3479 } 3480 3481 /* TX Priority Stats */ 3482 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3483 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3484 snprintf(xstats_names[count].name, 3485 sizeof(xstats_names[count].name), 3486 "tx_priority%u_%s", i, 3487 rte_ixgbe_txq_strings[stat].name); 3488 count++; 3489 } 3490 } 3491 } 3492 return cnt_stats; 3493 } 3494 3495 uint16_t i; 3496 uint16_t size = ixgbe_xstats_calc_num(); 3497 struct rte_eth_xstat_name xstats_names_copy[size]; 3498 3499 ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL, 3500 size); 3501 3502 for (i = 0; i < limit; i++) { 3503 if (ids[i] >= size) { 3504 PMD_INIT_LOG(ERR, "id value isn't valid"); 3505 return -1; 3506 } 3507 strcpy(xstats_names[i].name, 3508 xstats_names_copy[ids[i]].name); 3509 } 3510 return limit; 3511 } 3512 3513 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3514 struct rte_eth_xstat_name *xstats_names, unsigned limit) 3515 { 3516 unsigned i; 3517 3518 if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) 3519 return -ENOMEM; 3520 3521 if (xstats_names != NULL) 3522 for (i = 0; i < IXGBEVF_NB_XSTATS; i++) 3523 strlcpy(xstats_names[i].name, 3524 rte_ixgbevf_stats_strings[i].name, 3525 sizeof(xstats_names[i].name)); 3526 return IXGBEVF_NB_XSTATS; 3527 } 3528 3529 static int 3530 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3531 unsigned n) 3532 { 3533 struct ixgbe_hw *hw = 3534 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3535 struct ixgbe_hw_stats *hw_stats = 3536 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3537 struct ixgbe_macsec_stats *macsec_stats = 3538 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3539 dev->data->dev_private); 3540 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3541 unsigned i, stat, count = 0; 3542 3543 count = ixgbe_xstats_calc_num(); 3544 3545 if (n < count) 3546 return count; 3547 3548 total_missed_rx = 0; 3549 total_qbrc = 0; 3550 total_qprc = 0; 3551 total_qprdc = 0; 3552 3553 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3554 &total_qbrc, &total_qprc, &total_qprdc); 3555 3556 /* If this is a reset xstats is NULL, and we have cleared the 3557 * registers by reading them. 
3558 */ 3559 if (!xstats) 3560 return 0; 3561 3562 /* Extended stats from ixgbe_hw_stats */ 3563 count = 0; 3564 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3565 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3566 rte_ixgbe_stats_strings[i].offset); 3567 xstats[count].id = count; 3568 count++; 3569 } 3570 3571 /* MACsec Stats */ 3572 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3573 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) + 3574 rte_ixgbe_macsec_strings[i].offset); 3575 xstats[count].id = count; 3576 count++; 3577 } 3578 3579 /* RX Priority Stats */ 3580 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3581 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3582 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3583 rte_ixgbe_rxq_strings[stat].offset + 3584 (sizeof(uint64_t) * i)); 3585 xstats[count].id = count; 3586 count++; 3587 } 3588 } 3589 3590 /* TX Priority Stats */ 3591 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3592 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3593 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3594 rte_ixgbe_txq_strings[stat].offset + 3595 (sizeof(uint64_t) * i)); 3596 xstats[count].id = count; 3597 count++; 3598 } 3599 } 3600 return count; 3601 } 3602 3603 static int 3604 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 3605 uint64_t *values, unsigned int n) 3606 { 3607 if (!ids) { 3608 struct ixgbe_hw *hw = 3609 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3610 struct ixgbe_hw_stats *hw_stats = 3611 IXGBE_DEV_PRIVATE_TO_STATS( 3612 dev->data->dev_private); 3613 struct ixgbe_macsec_stats *macsec_stats = 3614 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3615 dev->data->dev_private); 3616 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3617 unsigned int i, stat, count = 0; 3618 3619 count = ixgbe_xstats_calc_num(); 3620 3621 if (!ids && n < count) 3622 return count; 3623 3624 total_missed_rx = 0; 3625 total_qbrc = 0; 3626 total_qprc = 0; 3627 total_qprdc = 0; 3628 3629 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, 3630 &total_missed_rx, &total_qbrc, &total_qprc, 3631 &total_qprdc); 3632 3633 /* If this is a reset xstats is NULL, and we have cleared the 3634 * registers by reading them. 
3635 */ 3636 if (!ids && !values) 3637 return 0; 3638 3639 /* Extended stats from ixgbe_hw_stats */ 3640 count = 0; 3641 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3642 values[count] = *(uint64_t *)(((char *)hw_stats) + 3643 rte_ixgbe_stats_strings[i].offset); 3644 count++; 3645 } 3646 3647 /* MACsec Stats */ 3648 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3649 values[count] = *(uint64_t *)(((char *)macsec_stats) + 3650 rte_ixgbe_macsec_strings[i].offset); 3651 count++; 3652 } 3653 3654 /* RX Priority Stats */ 3655 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3656 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3657 values[count] = 3658 *(uint64_t *)(((char *)hw_stats) + 3659 rte_ixgbe_rxq_strings[stat].offset + 3660 (sizeof(uint64_t) * i)); 3661 count++; 3662 } 3663 } 3664 3665 /* TX Priority Stats */ 3666 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3667 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3668 values[count] = 3669 *(uint64_t *)(((char *)hw_stats) + 3670 rte_ixgbe_txq_strings[stat].offset + 3671 (sizeof(uint64_t) * i)); 3672 count++; 3673 } 3674 } 3675 return count; 3676 } 3677 3678 uint16_t i; 3679 uint16_t size = ixgbe_xstats_calc_num(); 3680 uint64_t values_copy[size]; 3681 3682 ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size); 3683 3684 for (i = 0; i < n; i++) { 3685 if (ids[i] >= size) { 3686 PMD_INIT_LOG(ERR, "id value isn't valid"); 3687 return -1; 3688 } 3689 values[i] = values_copy[ids[i]]; 3690 } 3691 return n; 3692 } 3693 3694 static int 3695 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) 3696 { 3697 struct ixgbe_hw_stats *stats = 3698 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3699 struct ixgbe_macsec_stats *macsec_stats = 3700 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3701 dev->data->dev_private); 3702 3703 unsigned count = ixgbe_xstats_calc_num(); 3704 3705 /* HW registers are cleared on read */ 3706 ixgbe_dev_xstats_get(dev, NULL, count); 3707 3708 /* Reset software totals */ 3709 memset(stats, 0, sizeof(*stats)); 3710 memset(macsec_stats, 0, sizeof(*macsec_stats)); 3711 3712 return 0; 3713 } 3714 3715 static void 3716 ixgbevf_update_stats(struct rte_eth_dev *dev) 3717 { 3718 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3719 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3720 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3721 3722 /* Good Rx packet, include VF loopback */ 3723 UPDATE_VF_STAT(IXGBE_VFGPRC, 3724 hw_stats->last_vfgprc, hw_stats->vfgprc); 3725 3726 /* Good Rx octets, include VF loopback */ 3727 UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 3728 hw_stats->last_vfgorc, hw_stats->vfgorc); 3729 3730 /* Good Tx packet, include VF loopback */ 3731 UPDATE_VF_STAT(IXGBE_VFGPTC, 3732 hw_stats->last_vfgptc, hw_stats->vfgptc); 3733 3734 /* Good Tx octets, include VF loopback */ 3735 UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 3736 hw_stats->last_vfgotc, hw_stats->vfgotc); 3737 3738 /* Rx Multicst Packet */ 3739 UPDATE_VF_STAT(IXGBE_VFMPRC, 3740 hw_stats->last_vfmprc, hw_stats->vfmprc); 3741 } 3742 3743 static int 3744 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3745 unsigned n) 3746 { 3747 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3748 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3749 unsigned i; 3750 3751 if (n < IXGBEVF_NB_XSTATS) 3752 return IXGBEVF_NB_XSTATS; 3753 3754 ixgbevf_update_stats(dev); 3755 3756 if (!xstats) 3757 return 0; 3758 3759 /* Extended stats */ 3760 for (i = 0; i < 
IXGBEVF_NB_XSTATS; i++) { 3761 xstats[i].id = i; 3762 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 3763 rte_ixgbevf_stats_strings[i].offset); 3764 } 3765 3766 return IXGBEVF_NB_XSTATS; 3767 } 3768 3769 static int 3770 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3771 { 3772 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3773 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3774 3775 ixgbevf_update_stats(dev); 3776 3777 if (stats == NULL) 3778 return -EINVAL; 3779 3780 stats->ipackets = hw_stats->vfgprc; 3781 stats->ibytes = hw_stats->vfgorc; 3782 stats->opackets = hw_stats->vfgptc; 3783 stats->obytes = hw_stats->vfgotc; 3784 return 0; 3785 } 3786 3787 static int 3788 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) 3789 { 3790 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3791 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3792 3793 /* Sync HW register to the last stats */ 3794 ixgbevf_dev_stats_get(dev, NULL); 3795 3796 /* reset HW current stats*/ 3797 hw_stats->vfgprc = 0; 3798 hw_stats->vfgorc = 0; 3799 hw_stats->vfgptc = 0; 3800 hw_stats->vfgotc = 0; 3801 3802 return 0; 3803 } 3804 3805 static int 3806 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 3807 { 3808 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3809 u16 eeprom_verh, eeprom_verl; 3810 u32 etrack_id; 3811 int ret; 3812 3813 ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); 3814 ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); 3815 3816 etrack_id = (eeprom_verh << 16) | eeprom_verl; 3817 ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); 3818 if (ret < 0) 3819 return -EINVAL; 3820 3821 ret += 1; /* add the size of '\0' */ 3822 if (fw_size < (size_t)ret) 3823 return ret; 3824 else 3825 return 0; 3826 } 3827 3828 static int 3829 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 3830 { 3831 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3832 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3833 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 3834 3835 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3836 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3837 if (RTE_ETH_DEV_SRIOV(dev).active == 0) { 3838 /* 3839 * When DCB/VT is off, maximum number of queues changes, 3840 * except for 82598EB, which remains constant. 
3841 */ 3842 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && 3843 hw->mac.type != ixgbe_mac_82598EB) 3844 dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES; 3845 } 3846 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ 3847 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ 3848 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3849 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3850 dev_info->max_vfs = pci_dev->max_vfs; 3851 if (hw->mac.type == ixgbe_mac_82598EB) 3852 dev_info->max_vmdq_pools = ETH_16_POOLS; 3853 else 3854 dev_info->max_vmdq_pools = ETH_64_POOLS; 3855 dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; 3856 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 3857 dev_info->vmdq_queue_num = dev_info->max_rx_queues; 3858 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 3859 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 3860 dev_info->rx_queue_offload_capa); 3861 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 3862 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 3863 3864 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3865 .rx_thresh = { 3866 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3867 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3868 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3869 }, 3870 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3871 .rx_drop_en = 0, 3872 .offloads = 0, 3873 }; 3874 3875 dev_info->default_txconf = (struct rte_eth_txconf) { 3876 .tx_thresh = { 3877 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 3878 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 3879 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 3880 }, 3881 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 3882 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 3883 .offloads = 0, 3884 }; 3885 3886 dev_info->rx_desc_lim = rx_desc_lim; 3887 dev_info->tx_desc_lim = tx_desc_lim; 3888 3889 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3890 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3891 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3892 3893 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; 3894 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 3895 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 3896 dev_info->speed_capa = ETH_LINK_SPEED_10M | 3897 ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G; 3898 3899 if (hw->mac.type == ixgbe_mac_X540 || 3900 hw->mac.type == ixgbe_mac_X540_vf || 3901 hw->mac.type == ixgbe_mac_X550 || 3902 hw->mac.type == ixgbe_mac_X550_vf) { 3903 dev_info->speed_capa |= ETH_LINK_SPEED_100M; 3904 } 3905 if (hw->mac.type == ixgbe_mac_X550) { 3906 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G; 3907 dev_info->speed_capa |= ETH_LINK_SPEED_5G; 3908 } 3909 3910 /* Driver-preferred Rx/Tx parameters */ 3911 dev_info->default_rxportconf.burst_size = 32; 3912 dev_info->default_txportconf.burst_size = 32; 3913 dev_info->default_rxportconf.nb_queues = 1; 3914 dev_info->default_txportconf.nb_queues = 1; 3915 dev_info->default_rxportconf.ring_size = 256; 3916 dev_info->default_txportconf.ring_size = 256; 3917 3918 return 0; 3919 } 3920 3921 static const uint32_t * 3922 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) 3923 { 3924 static const uint32_t ptypes[] = { 3925 /* For non-vec functions, 3926 * refers to ixgbe_rxd_pkt_info_to_pkt_type(); 3927 * for vec functions, 3928 * refers to _recv_raw_pkts_vec(). 
3929 */ 3930 RTE_PTYPE_L2_ETHER, 3931 RTE_PTYPE_L3_IPV4, 3932 RTE_PTYPE_L3_IPV4_EXT, 3933 RTE_PTYPE_L3_IPV6, 3934 RTE_PTYPE_L3_IPV6_EXT, 3935 RTE_PTYPE_L4_SCTP, 3936 RTE_PTYPE_L4_TCP, 3937 RTE_PTYPE_L4_UDP, 3938 RTE_PTYPE_TUNNEL_IP, 3939 RTE_PTYPE_INNER_L3_IPV6, 3940 RTE_PTYPE_INNER_L3_IPV6_EXT, 3941 RTE_PTYPE_INNER_L4_TCP, 3942 RTE_PTYPE_INNER_L4_UDP, 3943 RTE_PTYPE_UNKNOWN 3944 }; 3945 3946 if (dev->rx_pkt_burst == ixgbe_recv_pkts || 3947 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || 3948 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || 3949 dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) 3950 return ptypes; 3951 3952 #if defined(RTE_ARCH_X86) || defined(__ARM_NEON) 3953 if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec || 3954 dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec) 3955 return ptypes; 3956 #endif 3957 return NULL; 3958 } 3959 3960 static int 3961 ixgbevf_dev_info_get(struct rte_eth_dev *dev, 3962 struct rte_eth_dev_info *dev_info) 3963 { 3964 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3965 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3966 3967 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3968 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3969 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ 3970 dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */ 3971 dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; 3972 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3973 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3974 dev_info->max_vfs = pci_dev->max_vfs; 3975 if (hw->mac.type == ixgbe_mac_82598EB) 3976 dev_info->max_vmdq_pools = ETH_16_POOLS; 3977 else 3978 dev_info->max_vmdq_pools = ETH_64_POOLS; 3979 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 3980 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 3981 dev_info->rx_queue_offload_capa); 3982 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 3983 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 3984 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3985 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3986 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3987 3988 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3989 .rx_thresh = { 3990 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3991 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3992 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3993 }, 3994 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3995 .rx_drop_en = 0, 3996 .offloads = 0, 3997 }; 3998 3999 dev_info->default_txconf = (struct rte_eth_txconf) { 4000 .tx_thresh = { 4001 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 4002 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 4003 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 4004 }, 4005 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 4006 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 4007 .offloads = 0, 4008 }; 4009 4010 dev_info->rx_desc_lim = rx_desc_lim; 4011 dev_info->tx_desc_lim = tx_desc_lim; 4012 4013 return 0; 4014 } 4015 4016 static int 4017 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 4018 bool *link_up, int wait_to_complete) 4019 { 4020 struct ixgbe_adapter *adapter = container_of(hw, 4021 struct ixgbe_adapter, hw); 4022 struct ixgbe_mbx_info *mbx = &hw->mbx; 4023 struct ixgbe_mac_info *mac = &hw->mac; 4024 uint32_t links_reg, in_msg; 4025 int ret_val = 0; 4026 4027 /* If we were hit with a reset drop the link */ 4028 if (!mbx->ops.check_for_rst(hw, 0) || 
!mbx->timeout) 4029 mac->get_link_status = true; 4030 4031 if (!mac->get_link_status) 4032 goto out; 4033 4034 /* if link status is down no point in checking to see if pf is up */ 4035 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 4036 if (!(links_reg & IXGBE_LINKS_UP)) 4037 goto out; 4038 4039 /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs 4040 * before the link status is correct 4041 */ 4042 if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) { 4043 int i; 4044 4045 for (i = 0; i < 5; i++) { 4046 rte_delay_us(100); 4047 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 4048 4049 if (!(links_reg & IXGBE_LINKS_UP)) 4050 goto out; 4051 } 4052 } 4053 4054 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 4055 case IXGBE_LINKS_SPEED_10G_82599: 4056 *speed = IXGBE_LINK_SPEED_10GB_FULL; 4057 if (hw->mac.type >= ixgbe_mac_X550) { 4058 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4059 *speed = IXGBE_LINK_SPEED_2_5GB_FULL; 4060 } 4061 break; 4062 case IXGBE_LINKS_SPEED_1G_82599: 4063 *speed = IXGBE_LINK_SPEED_1GB_FULL; 4064 break; 4065 case IXGBE_LINKS_SPEED_100_82599: 4066 *speed = IXGBE_LINK_SPEED_100_FULL; 4067 if (hw->mac.type == ixgbe_mac_X550) { 4068 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4069 *speed = IXGBE_LINK_SPEED_5GB_FULL; 4070 } 4071 break; 4072 case IXGBE_LINKS_SPEED_10_X550EM_A: 4073 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4074 /* Since Reserved in older MAC's */ 4075 if (hw->mac.type >= ixgbe_mac_X550) 4076 *speed = IXGBE_LINK_SPEED_10_FULL; 4077 break; 4078 default: 4079 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4080 } 4081 4082 if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) { 4083 if (*speed == IXGBE_LINK_SPEED_UNKNOWN) 4084 mac->get_link_status = true; 4085 else 4086 mac->get_link_status = false; 4087 4088 goto out; 4089 } 4090 4091 /* if the read failed it could just be a mailbox collision, best wait 4092 * until we are called again and don't report an error 4093 */ 4094 if (mbx->ops.read(hw, &in_msg, 1, 0)) 4095 goto out; 4096 4097 if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { 4098 /* msg is not CTS and is NACK we must have lost CTS status */ 4099 if (in_msg & IXGBE_VT_MSGTYPE_NACK) 4100 mac->get_link_status = false; 4101 goto out; 4102 } 4103 4104 /* the pf is talking, if we timed out in the past we reinit */ 4105 if (!mbx->timeout) { 4106 ret_val = -1; 4107 goto out; 4108 } 4109 4110 /* if we passed all the tests above then the link is up and we no 4111 * longer need to check for link 4112 */ 4113 mac->get_link_status = false; 4114 4115 out: 4116 *link_up = !mac->get_link_status; 4117 return ret_val; 4118 } 4119 4120 /* 4121 * If @timeout_ms was 0, it means that it will not return until link complete. 4122 * It returns 1 on complete, return 0 on timeout. 4123 */ 4124 static int 4125 ixgbe_dev_wait_setup_link_complete(struct rte_eth_dev *dev, uint32_t timeout_ms) 4126 { 4127 #define WARNING_TIMEOUT 9000 /* 9s in total */ 4128 struct ixgbe_adapter *ad = dev->data->dev_private; 4129 uint32_t timeout = timeout_ms ? 
timeout_ms : WARNING_TIMEOUT; 4130 4131 while (rte_atomic32_read(&ad->link_thread_running)) { 4132 msec_delay(1); 4133 timeout--; 4134 4135 if (timeout_ms) { 4136 if (!timeout) 4137 return 0; 4138 } else if (!timeout) { 4139 /* It will not return until link complete */ 4140 timeout = WARNING_TIMEOUT; 4141 PMD_DRV_LOG(ERR, "IXGBE link thread not complete too long time!"); 4142 } 4143 } 4144 4145 return 1; 4146 } 4147 4148 static void * 4149 ixgbe_dev_setup_link_thread_handler(void *param) 4150 { 4151 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4152 struct ixgbe_adapter *ad = dev->data->dev_private; 4153 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4154 struct ixgbe_interrupt *intr = 4155 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4156 u32 speed; 4157 bool autoneg = false; 4158 4159 pthread_detach(pthread_self()); 4160 speed = hw->phy.autoneg_advertised; 4161 if (!speed) 4162 ixgbe_get_link_capabilities(hw, &speed, &autoneg); 4163 4164 ixgbe_setup_link(hw, speed, true); 4165 4166 intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 4167 rte_atomic32_clear(&ad->link_thread_running); 4168 return NULL; 4169 } 4170 4171 /* 4172 * In freebsd environment, nic_uio drivers do not support interrupts, 4173 * rte_intr_callback_register() will fail to register interrupts. 4174 * We can not make link status to change from down to up by interrupt 4175 * callback. So we need to wait for the controller to acquire link 4176 * when ports start. 4177 * It returns 0 on link up. 4178 */ 4179 static int 4180 ixgbe_wait_for_link_up(struct ixgbe_hw *hw) 4181 { 4182 #ifdef RTE_EXEC_ENV_FREEBSD 4183 int err, i; 4184 bool link_up = false; 4185 uint32_t speed = 0; 4186 const int nb_iter = 25; 4187 4188 for (i = 0; i < nb_iter; i++) { 4189 err = ixgbe_check_link(hw, &speed, &link_up, 0); 4190 if (err) 4191 return err; 4192 if (link_up) 4193 return 0; 4194 msec_delay(200); 4195 } 4196 4197 return 0; 4198 #else 4199 RTE_SET_USED(hw); 4200 return 0; 4201 #endif 4202 } 4203 4204 /* return 0 means link status changed, -1 means not changed */ 4205 int 4206 ixgbe_dev_link_update_share(struct rte_eth_dev *dev, 4207 int wait_to_complete, int vf) 4208 { 4209 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4210 struct ixgbe_adapter *ad = dev->data->dev_private; 4211 struct rte_eth_link link; 4212 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 4213 struct ixgbe_interrupt *intr = 4214 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4215 bool link_up; 4216 int diag; 4217 int wait = 1; 4218 u32 esdp_reg; 4219 4220 memset(&link, 0, sizeof(link)); 4221 link.link_status = ETH_LINK_DOWN; 4222 link.link_speed = ETH_SPEED_NUM_NONE; 4223 link.link_duplex = ETH_LINK_HALF_DUPLEX; 4224 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 4225 ETH_LINK_SPEED_FIXED); 4226 4227 hw->mac.get_link_status = true; 4228 4229 if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) 4230 return rte_eth_linkstatus_set(dev, &link); 4231 4232 /* check if it needs to wait to complete, if lsc interrupt is enabled */ 4233 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) 4234 wait = 0; 4235 4236 /* BSD has no interrupt mechanism, so force NIC status synchronization. 
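 * Forcing wait = 1 here makes the link check below poll for a settled
 * status instead of relying on the LSC interrupt.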
*/ 4237 #ifdef RTE_EXEC_ENV_FREEBSD 4238 wait = 1; 4239 #endif 4240 4241 if (vf) 4242 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); 4243 else 4244 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait); 4245 4246 if (diag != 0) { 4247 link.link_speed = ETH_SPEED_NUM_100M; 4248 link.link_duplex = ETH_LINK_FULL_DUPLEX; 4249 return rte_eth_linkstatus_set(dev, &link); 4250 } 4251 4252 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { 4253 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 4254 if ((esdp_reg & IXGBE_ESDP_SDP3)) 4255 link_up = 0; 4256 } 4257 4258 if (link_up == 0) { 4259 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { 4260 ixgbe_dev_wait_setup_link_complete(dev, 0); 4261 if (rte_atomic32_test_and_set(&ad->link_thread_running)) { 4262 /* To avoid race condition between threads, set 4263 * the IXGBE_FLAG_NEED_LINK_CONFIG flag only 4264 * when there is no link thread running. 4265 */ 4266 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; 4267 if (rte_ctrl_thread_create(&ad->link_thread_tid, 4268 "ixgbe-link-handler", 4269 NULL, 4270 ixgbe_dev_setup_link_thread_handler, 4271 dev) < 0) { 4272 PMD_DRV_LOG(ERR, 4273 "Create link thread failed!"); 4274 rte_atomic32_clear(&ad->link_thread_running); 4275 } 4276 } else { 4277 PMD_DRV_LOG(ERR, 4278 "Other link thread is running now!"); 4279 } 4280 } 4281 return rte_eth_linkstatus_set(dev, &link); 4282 } 4283 4284 link.link_status = ETH_LINK_UP; 4285 link.link_duplex = ETH_LINK_FULL_DUPLEX; 4286 4287 switch (link_speed) { 4288 default: 4289 case IXGBE_LINK_SPEED_UNKNOWN: 4290 link.link_speed = ETH_SPEED_NUM_UNKNOWN; 4291 break; 4292 4293 case IXGBE_LINK_SPEED_10_FULL: 4294 link.link_speed = ETH_SPEED_NUM_10M; 4295 break; 4296 4297 case IXGBE_LINK_SPEED_100_FULL: 4298 link.link_speed = ETH_SPEED_NUM_100M; 4299 break; 4300 4301 case IXGBE_LINK_SPEED_1GB_FULL: 4302 link.link_speed = ETH_SPEED_NUM_1G; 4303 break; 4304 4305 case IXGBE_LINK_SPEED_2_5GB_FULL: 4306 link.link_speed = ETH_SPEED_NUM_2_5G; 4307 break; 4308 4309 case IXGBE_LINK_SPEED_5GB_FULL: 4310 link.link_speed = ETH_SPEED_NUM_5G; 4311 break; 4312 4313 case IXGBE_LINK_SPEED_10GB_FULL: 4314 link.link_speed = ETH_SPEED_NUM_10G; 4315 break; 4316 } 4317 4318 return rte_eth_linkstatus_set(dev, &link); 4319 } 4320 4321 static int 4322 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4323 { 4324 return ixgbe_dev_link_update_share(dev, wait_to_complete, 0); 4325 } 4326 4327 static int 4328 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4329 { 4330 return ixgbe_dev_link_update_share(dev, wait_to_complete, 1); 4331 } 4332 4333 static int 4334 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 4335 { 4336 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4337 uint32_t fctrl; 4338 4339 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4340 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4341 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4342 4343 return 0; 4344 } 4345 4346 static int 4347 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 4348 { 4349 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4350 uint32_t fctrl; 4351 4352 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4353 fctrl &= (~IXGBE_FCTRL_UPE); 4354 if (dev->data->all_multicast == 1) 4355 fctrl |= IXGBE_FCTRL_MPE; 4356 else 4357 fctrl &= (~IXGBE_FCTRL_MPE); 4358 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4359 4360 return 0; 4361 } 4362 4363 static int 4364 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) 4365 { 4366 struct ixgbe_hw *hw = 
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4367 uint32_t fctrl;
4368
4369 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4370 fctrl |= IXGBE_FCTRL_MPE;
4371 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4372
4373 return 0;
4374 }
4375
4376 static int
4377 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
4378 {
4379 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4380 uint32_t fctrl;
4381
4382 if (dev->data->promiscuous == 1)
4383 return 0; /* must remain in all_multicast mode */
4384
4385 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4386 fctrl &= (~IXGBE_FCTRL_MPE);
4387 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4388
4389 return 0;
4390 }
4391
4392 /**
4393 * It clears the interrupt causes and enables the interrupt.
4394 * It will be called only once during NIC initialization.
4395 *
4396 * @param dev
4397 * Pointer to struct rte_eth_dev.
4398 * @param on
4399 * Enable or Disable.
4400 *
4401 * @return
4402 * - On success, zero.
4403 * - On failure, a negative value.
4404 */
4405 static int
4406 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
4407 {
4408 struct ixgbe_interrupt *intr =
4409 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4410
4411 ixgbe_dev_link_status_print(dev);
4412 if (on)
4413 intr->mask |= IXGBE_EICR_LSC;
4414 else
4415 intr->mask &= ~IXGBE_EICR_LSC;
4416
4417 return 0;
4418 }
4419
4420 /**
4421 * It clears the interrupt causes and enables the interrupt.
4422 * It will be called only once during NIC initialization.
4423 *
4424 * @param dev
4425 * Pointer to struct rte_eth_dev.
4426 *
4427 * @return
4428 * - On success, zero.
4429 * - On failure, a negative value.
4430 */
4431 static int
4432 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
4433 {
4434 struct ixgbe_interrupt *intr =
4435 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4436
4437 intr->mask |= IXGBE_EICR_RTX_QUEUE;
4438
4439 return 0;
4440 }
4441
4442 /**
4443 * It clears the interrupt causes and enables the interrupt.
4444 * It will be called only once during NIC initialization.
4445 *
4446 * @param dev
4447 * Pointer to struct rte_eth_dev.
4448 *
4449 * @return
4450 * - On success, zero.
4451 * - On failure, a negative value.
4452 */
4453 static int
4454 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
4455 {
4456 struct ixgbe_interrupt *intr =
4457 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4458
4459 intr->mask |= IXGBE_EICR_LINKSEC;
4460
4461 return 0;
4462 }
4463
4464 /*
4465 * It reads the ICR and sets the flag (IXGBE_EICR_LSC) for link_update.
4466 *
4467 * @param dev
4468 * Pointer to struct rte_eth_dev.
4469 *
4470 * @return
4471 * - On success, zero.
4472 * - On failure, a negative value.
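 *
 * Note: EICR is read-on-clear, so the causes observed here are cached in
 * intr->flags for ixgbe_dev_interrupt_action() to consume.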
4473 */ 4474 static int 4475 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) 4476 { 4477 uint32_t eicr; 4478 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4479 struct ixgbe_interrupt *intr = 4480 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4481 4482 /* clear all cause mask */ 4483 ixgbe_disable_intr(hw); 4484 4485 /* read-on-clear nic registers here */ 4486 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4487 PMD_DRV_LOG(DEBUG, "eicr %x", eicr); 4488 4489 intr->flags = 0; 4490 4491 /* set flag for async link update */ 4492 if (eicr & IXGBE_EICR_LSC) 4493 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 4494 4495 if (eicr & IXGBE_EICR_MAILBOX) 4496 intr->flags |= IXGBE_FLAG_MAILBOX; 4497 4498 if (eicr & IXGBE_EICR_LINKSEC) 4499 intr->flags |= IXGBE_FLAG_MACSEC; 4500 4501 if (hw->mac.type == ixgbe_mac_X550EM_x && 4502 hw->phy.type == ixgbe_phy_x550em_ext_t && 4503 (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x)) 4504 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT; 4505 4506 return 0; 4507 } 4508 4509 /** 4510 * It gets and then prints the link status. 4511 * 4512 * @param dev 4513 * Pointer to struct rte_eth_dev. 4514 * 4515 * @return 4516 * - On success, zero. 4517 * - On failure, a negative value. 4518 */ 4519 static void 4520 ixgbe_dev_link_status_print(struct rte_eth_dev *dev) 4521 { 4522 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4523 struct rte_eth_link link; 4524 4525 rte_eth_linkstatus_get(dev, &link); 4526 4527 if (link.link_status) { 4528 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", 4529 (int)(dev->data->port_id), 4530 (unsigned)link.link_speed, 4531 link.link_duplex == ETH_LINK_FULL_DUPLEX ? 4532 "full-duplex" : "half-duplex"); 4533 } else { 4534 PMD_INIT_LOG(INFO, " Port %d: Link Down", 4535 (int)(dev->data->port_id)); 4536 } 4537 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, 4538 pci_dev->addr.domain, 4539 pci_dev->addr.bus, 4540 pci_dev->addr.devid, 4541 pci_dev->addr.function); 4542 } 4543 4544 /* 4545 * It executes link_update after knowing an interrupt occurred. 4546 * 4547 * @param dev 4548 * Pointer to struct rte_eth_dev. 4549 * 4550 * @return 4551 * - On success, zero. 4552 * - On failure, a negative value. 
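 *
 * When a link change is pending, the LSC notification is deferred via
 * rte_eal_alarm_set() so that the link state can stabilize first.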
4553 */
4554 static int
4555 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
4556 {
4557 struct ixgbe_interrupt *intr =
4558 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4559 int64_t timeout;
4560 struct ixgbe_hw *hw =
4561 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4562
4563 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
4564
4565 if (intr->flags & IXGBE_FLAG_MAILBOX) {
4566 ixgbe_pf_mbx_process(dev);
4567 intr->flags &= ~IXGBE_FLAG_MAILBOX;
4568 }
4569
4570 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4571 ixgbe_handle_lasi(hw);
4572 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4573 }
4574
4575 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4576 struct rte_eth_link link;
4577
4578 /* get the link status before link update, for predicting later */
4579 rte_eth_linkstatus_get(dev, &link);
4580
4581 ixgbe_dev_link_update(dev, 0);
4582
4583 /* link is likely to come up */
4584 if (!link.link_status)
4585 /* handle it 1 sec later, to wait for it to become stable */
4586 timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
4587 /* link is likely to go down */
4588 else
4589 /* handle it 4 sec later, to wait for it to become stable */
4590 timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
4591
4592 ixgbe_dev_link_status_print(dev);
4593 if (rte_eal_alarm_set(timeout * 1000,
4594 ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
4595 PMD_DRV_LOG(ERR, "Error setting alarm");
4596 else {
4597 /* remember original mask */
4598 intr->mask_original = intr->mask;
4599 /* only disable lsc interrupt */
4600 intr->mask &= ~IXGBE_EIMS_LSC;
4601 }
4602 }
4603
4604 PMD_DRV_LOG(DEBUG, "enable intr immediately");
4605 ixgbe_enable_intr(dev);
4606
4607 return 0;
4608 }
4609
4610 /**
4611 * Interrupt handler to be registered as an alarm callback for delayed
4612 * handling of a specific interrupt, waiting for the NIC state to become
4613 * stable. As the ixgbe interrupt state is not stable right after the link
4614 * goes down, it waits 4 seconds for the status to stabilize.
4615 *
4616 * @param handle
4617 * Pointer to interrupt handle.
4618 * @param param
4619 * The address of parameter (struct rte_eth_dev *) registered before.
4620 *
4621 * @return
4622 * void
4623 */
4624 static void
4625 ixgbe_dev_interrupt_delayed_handler(void *param)
4626 {
4627 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4628 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
4629 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
4630 struct ixgbe_interrupt *intr =
4631 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
4632 struct ixgbe_hw *hw =
4633 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4634 uint32_t eicr;
4635
4636 ixgbe_disable_intr(hw);
4637
4638 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4639 if (eicr & IXGBE_EICR_MAILBOX)
4640 ixgbe_pf_mbx_process(dev);
4641
4642 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
4643 ixgbe_handle_lasi(hw);
4644 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
4645 }
4646
4647 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4648 ixgbe_dev_link_update(dev, 0);
4649 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4650 ixgbe_dev_link_status_print(dev);
4651 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
4652 }
4653
4654 if (intr->flags & IXGBE_FLAG_MACSEC) {
4655 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, NULL);
4656 intr->flags &= ~IXGBE_FLAG_MACSEC;
4657 }
4658
4659 /* restore original mask */
4660 intr->mask = intr->mask_original;
4661 intr->mask_original = 0;
4662
4663 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
4664 ixgbe_enable_intr(dev);
4665 rte_intr_ack(intr_handle);
4666 }
4667
4668 /**
4669 * Interrupt handler triggered by the NIC for handling a
4670 * specific interrupt.
4671 *
4672 * @param handle
4673 * Pointer to interrupt handle.
4674 * @param param
4675 * The address of parameter (struct rte_eth_dev *) registered before.
4676 *
4677 * @return
4678 * void
4679 */
4680 static void
4681 ixgbe_dev_interrupt_handler(void *param)
4682 {
4683 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
4684
4685 ixgbe_dev_interrupt_get_status(dev);
4686 ixgbe_dev_interrupt_action(dev);
4687 }
4688
4689 static int
4690 ixgbe_dev_led_on(struct rte_eth_dev *dev)
4691 {
4692 struct ixgbe_hw *hw;
4693
4694 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4695 return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4696 }
4697
4698 static int
4699 ixgbe_dev_led_off(struct rte_eth_dev *dev)
4700 {
4701 struct ixgbe_hw *hw;
4702
4703 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4704 return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
4705 }
4706
4707 static int
4708 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
4709 {
4710 struct ixgbe_hw *hw;
4711 uint32_t mflcn_reg;
4712 uint32_t fccfg_reg;
4713 int rx_pause;
4714 int tx_pause;
4715
4716 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4717
4718 fc_conf->pause_time = hw->fc.pause_time;
4719 fc_conf->high_water = hw->fc.high_water[0];
4720 fc_conf->low_water = hw->fc.low_water[0];
4721 fc_conf->send_xon = hw->fc.send_xon;
4722 fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
4723
4724 /*
4725 * Return rx_pause status according to the actual setting of
4726 * the MFLCN register.
4727 */
4728 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4729 if (mflcn_reg & IXGBE_MFLCN_PMCF)
4730 fc_conf->mac_ctrl_frame_fwd = 1;
4731 else
4732 fc_conf->mac_ctrl_frame_fwd = 0;
4733
4734 if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
4735 rx_pause = 1;
4736 else
4737 rx_pause = 0;
4738
4739 /*
4740 * Return tx_pause status according to the actual setting of
4741 * the FCCFG register.
4742 */ 4743 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4744 if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY)) 4745 tx_pause = 1; 4746 else 4747 tx_pause = 0; 4748 4749 if (rx_pause && tx_pause) 4750 fc_conf->mode = RTE_FC_FULL; 4751 else if (rx_pause) 4752 fc_conf->mode = RTE_FC_RX_PAUSE; 4753 else if (tx_pause) 4754 fc_conf->mode = RTE_FC_TX_PAUSE; 4755 else 4756 fc_conf->mode = RTE_FC_NONE; 4757 4758 return 0; 4759 } 4760 4761 static int 4762 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 4763 { 4764 struct ixgbe_hw *hw; 4765 struct ixgbe_adapter *adapter = dev->data->dev_private; 4766 int err; 4767 uint32_t rx_buf_size; 4768 uint32_t max_high_water; 4769 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4770 ixgbe_fc_none, 4771 ixgbe_fc_rx_pause, 4772 ixgbe_fc_tx_pause, 4773 ixgbe_fc_full 4774 }; 4775 4776 PMD_INIT_FUNC_TRACE(); 4777 4778 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4779 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); 4780 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4781 4782 /* 4783 * At least reserve one Ethernet frame for watermark 4784 * high_water/low_water in kilo bytes for ixgbe 4785 */ 4786 max_high_water = (rx_buf_size - 4787 RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 4788 if ((fc_conf->high_water > max_high_water) || 4789 (fc_conf->high_water < fc_conf->low_water)) { 4790 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 4791 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 4792 return -EINVAL; 4793 } 4794 4795 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode]; 4796 hw->fc.pause_time = fc_conf->pause_time; 4797 hw->fc.high_water[0] = fc_conf->high_water; 4798 hw->fc.low_water[0] = fc_conf->low_water; 4799 hw->fc.send_xon = fc_conf->send_xon; 4800 hw->fc.disable_fc_autoneg = !fc_conf->autoneg; 4801 adapter->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd; 4802 4803 err = ixgbe_flow_ctrl_enable(dev, hw); 4804 if (err < 0) { 4805 PMD_INIT_LOG(ERR, "ixgbe_flow_ctrl_enable = 0x%x", err); 4806 return -EIO; 4807 } 4808 return err; 4809 } 4810 4811 /** 4812 * ixgbe_pfc_enable_generic - Enable flow control 4813 * @hw: pointer to hardware structure 4814 * @tc_num: traffic class number 4815 * Enable flow control according to the current settings. 
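 *
 * Returns 0 on success, or IXGBE_ERR_INVALID_LINK_SETTINGS/IXGBE_ERR_CONFIG
 * when the water marks, pause time or fc mode are invalid.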
4816 */ 4817 static int 4818 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num) 4819 { 4820 int ret_val = 0; 4821 uint32_t mflcn_reg, fccfg_reg; 4822 uint32_t reg; 4823 uint32_t fcrtl, fcrth; 4824 uint8_t i; 4825 uint8_t nb_rx_en; 4826 4827 /* Validate the water mark configuration */ 4828 if (!hw->fc.pause_time) { 4829 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4830 goto out; 4831 } 4832 4833 /* Low water mark of zero causes XOFF floods */ 4834 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 4835 /* High/Low water can not be 0 */ 4836 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) { 4837 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 4838 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4839 goto out; 4840 } 4841 4842 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { 4843 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 4844 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4845 goto out; 4846 } 4847 } 4848 /* Negotiate the fc mode to use */ 4849 ixgbe_fc_autoneg(hw); 4850 4851 /* Disable any previous flow control settings */ 4852 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4853 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE); 4854 4855 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4856 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 4857 4858 switch (hw->fc.current_mode) { 4859 case ixgbe_fc_none: 4860 /* 4861 * If the count of enabled RX Priority Flow control >1, 4862 * and the TX pause can not be disabled 4863 */ 4864 nb_rx_en = 0; 4865 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 4866 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 4867 if (reg & IXGBE_FCRTH_FCEN) 4868 nb_rx_en++; 4869 } 4870 if (nb_rx_en > 1) 4871 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4872 break; 4873 case ixgbe_fc_rx_pause: 4874 /* 4875 * Rx Flow control is enabled and Tx Flow control is 4876 * disabled by software override. Since there really 4877 * isn't a way to advertise that we are capable of RX 4878 * Pause ONLY, we will advertise that we support both 4879 * symmetric and asymmetric Rx PAUSE. Later, we will 4880 * disable the adapter's ability to send PAUSE frames. 4881 */ 4882 mflcn_reg |= IXGBE_MFLCN_RPFCE; 4883 /* 4884 * If the count of enabled RX Priority Flow control >1, 4885 * and the TX pause can not be disabled 4886 */ 4887 nb_rx_en = 0; 4888 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 4889 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 4890 if (reg & IXGBE_FCRTH_FCEN) 4891 nb_rx_en++; 4892 } 4893 if (nb_rx_en > 1) 4894 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4895 break; 4896 case ixgbe_fc_tx_pause: 4897 /* 4898 * Tx Flow control is enabled, and Rx Flow control is 4899 * disabled by software override. 4900 */ 4901 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4902 break; 4903 case ixgbe_fc_full: 4904 /* Flow control (both Rx and Tx) is enabled by SW override. */ 4905 mflcn_reg |= IXGBE_MFLCN_RPFCE; 4906 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4907 break; 4908 default: 4909 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); 4910 ret_val = IXGBE_ERR_CONFIG; 4911 goto out; 4912 } 4913 4914 /* Set 802.3x based flow control settings. */ 4915 mflcn_reg |= IXGBE_MFLCN_DPF; 4916 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 4917 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 4918 4919 /* Set up and enable Rx high/low water mark thresholds, enable XON. 
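 * The water marks are kept in KB, hence the << 10 shift when programming
 * FCRTL/FCRTH below.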
*/ 4920 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 4921 hw->fc.high_water[tc_num]) { 4922 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE; 4923 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl); 4924 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN; 4925 } else { 4926 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0); 4927 /* 4928 * In order to prevent Tx hangs when the internal Tx 4929 * switch is enabled we must set the high water mark 4930 * to the maximum FCRTH value. This allows the Tx 4931 * switch to function even under heavy Rx workloads. 4932 */ 4933 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32; 4934 } 4935 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth); 4936 4937 /* Configure pause time (2 TCs per register) */ 4938 reg = hw->fc.pause_time * 0x00010001; 4939 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) 4940 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 4941 4942 /* Configure flow control refresh threshold value */ 4943 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 4944 4945 out: 4946 return ret_val; 4947 } 4948 4949 static int 4950 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) 4951 { 4952 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4953 int32_t ret_val = IXGBE_NOT_IMPLEMENTED; 4954 4955 if (hw->mac.type != ixgbe_mac_82598EB) { 4956 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); 4957 } 4958 return ret_val; 4959 } 4960 4961 static int 4962 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) 4963 { 4964 int err; 4965 uint32_t rx_buf_size; 4966 uint32_t max_high_water; 4967 uint8_t tc_num; 4968 uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; 4969 struct ixgbe_hw *hw = 4970 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4971 struct ixgbe_dcb_config *dcb_config = 4972 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 4973 4974 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4975 ixgbe_fc_none, 4976 ixgbe_fc_rx_pause, 4977 ixgbe_fc_tx_pause, 4978 ixgbe_fc_full 4979 }; 4980 4981 PMD_INIT_FUNC_TRACE(); 4982 4983 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); 4984 tc_num = map[pfc_conf->priority]; 4985 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)); 4986 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4987 /* 4988 * At least reserve one Ethernet frame for watermark 4989 * high_water/low_water in kilo bytes for ixgbe 4990 */ 4991 max_high_water = (rx_buf_size - 4992 RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 4993 if ((pfc_conf->fc.high_water > max_high_water) || 4994 (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { 4995 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 4996 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 4997 return -EINVAL; 4998 } 4999 5000 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode]; 5001 hw->fc.pause_time = pfc_conf->fc.pause_time; 5002 hw->fc.send_xon = pfc_conf->fc.send_xon; 5003 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; 5004 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; 5005 5006 err = ixgbe_dcb_pfc_enable(dev, tc_num); 5007 5008 /* Not negotiated is not an error case */ 5009 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) 5010 return 0; 5011 5012 PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err); 5013 return -EIO; 5014 } 5015 5016 static int 5017 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 5018 struct rte_eth_rss_reta_entry64 *reta_conf, 5019 uint16_t 
reta_size) 5020 { 5021 uint16_t i, sp_reta_size; 5022 uint8_t j, mask; 5023 uint32_t reta, r; 5024 uint16_t idx, shift; 5025 struct ixgbe_adapter *adapter = dev->data->dev_private; 5026 struct rte_eth_dev_data *dev_data = dev->data; 5027 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5028 uint32_t reta_reg; 5029 5030 PMD_INIT_FUNC_TRACE(); 5031 5032 if (!dev_data->dev_started) { 5033 PMD_DRV_LOG(ERR, 5034 "port %d must be started before rss reta update", 5035 dev_data->port_id); 5036 return -EIO; 5037 } 5038 5039 if (!ixgbe_rss_update_sp(hw->mac.type)) { 5040 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this " 5041 "NIC."); 5042 return -ENOTSUP; 5043 } 5044 5045 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 5046 if (reta_size != sp_reta_size) { 5047 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 5048 "(%d) doesn't match the number hardware can supported " 5049 "(%d)", reta_size, sp_reta_size); 5050 return -EINVAL; 5051 } 5052 5053 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 5054 idx = i / RTE_RETA_GROUP_SIZE; 5055 shift = i % RTE_RETA_GROUP_SIZE; 5056 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 5057 IXGBE_4_BIT_MASK); 5058 if (!mask) 5059 continue; 5060 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 5061 if (mask == IXGBE_4_BIT_MASK) 5062 r = 0; 5063 else 5064 r = IXGBE_READ_REG(hw, reta_reg); 5065 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) { 5066 if (mask & (0x1 << j)) 5067 reta |= reta_conf[idx].reta[shift + j] << 5068 (CHAR_BIT * j); 5069 else 5070 reta |= r & (IXGBE_8_BIT_MASK << 5071 (CHAR_BIT * j)); 5072 } 5073 IXGBE_WRITE_REG(hw, reta_reg, reta); 5074 } 5075 adapter->rss_reta_updated = 1; 5076 5077 return 0; 5078 } 5079 5080 static int 5081 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 5082 struct rte_eth_rss_reta_entry64 *reta_conf, 5083 uint16_t reta_size) 5084 { 5085 uint16_t i, sp_reta_size; 5086 uint8_t j, mask; 5087 uint32_t reta; 5088 uint16_t idx, shift; 5089 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5090 uint32_t reta_reg; 5091 5092 PMD_INIT_FUNC_TRACE(); 5093 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 5094 if (reta_size != sp_reta_size) { 5095 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 5096 "(%d) doesn't match the number hardware can supported " 5097 "(%d)", reta_size, sp_reta_size); 5098 return -EINVAL; 5099 } 5100 5101 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 5102 idx = i / RTE_RETA_GROUP_SIZE; 5103 shift = i % RTE_RETA_GROUP_SIZE; 5104 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 5105 IXGBE_4_BIT_MASK); 5106 if (!mask) 5107 continue; 5108 5109 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 5110 reta = IXGBE_READ_REG(hw, reta_reg); 5111 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { 5112 if (mask & (0x1 << j)) 5113 reta_conf[idx].reta[shift + j] = 5114 ((reta >> (CHAR_BIT * j)) & 5115 IXGBE_8_BIT_MASK); 5116 } 5117 } 5118 5119 return 0; 5120 } 5121 5122 static int 5123 ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 5124 uint32_t index, uint32_t pool) 5125 { 5126 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5127 uint32_t enable_addr = 1; 5128 5129 return ixgbe_set_rar(hw, index, mac_addr->addr_bytes, 5130 pool, enable_addr); 5131 } 5132 5133 static void 5134 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) 5135 { 5136 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5137 5138 ixgbe_clear_rar(hw, index); 5139 } 5140 5141 static int 5142 
ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr) 5143 { 5144 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5145 5146 ixgbe_remove_rar(dev, 0); 5147 ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs); 5148 5149 return 0; 5150 } 5151 5152 static bool 5153 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 5154 { 5155 if (strcmp(dev->device->driver->name, drv->driver.name)) 5156 return false; 5157 5158 return true; 5159 } 5160 5161 bool 5162 is_ixgbe_supported(struct rte_eth_dev *dev) 5163 { 5164 return is_device_supported(dev, &rte_ixgbe_pmd); 5165 } 5166 5167 static int 5168 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 5169 { 5170 uint32_t hlreg0; 5171 uint32_t maxfrs; 5172 struct ixgbe_hw *hw; 5173 struct rte_eth_dev_info dev_info; 5174 uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD; 5175 struct rte_eth_dev_data *dev_data = dev->data; 5176 int ret; 5177 5178 ret = ixgbe_dev_info_get(dev, &dev_info); 5179 if (ret != 0) 5180 return ret; 5181 5182 /* check that mtu is within the allowed range */ 5183 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) 5184 return -EINVAL; 5185 5186 /* If device is started, refuse mtu that requires the support of 5187 * scattered packets when this feature has not been enabled before. 5188 */ 5189 if (dev_data->dev_started && !dev_data->scattered_rx && 5190 (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > 5191 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 5192 PMD_INIT_LOG(ERR, "Stop port first."); 5193 return -EINVAL; 5194 } 5195 5196 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5197 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 5198 5199 /* switch to jumbo mode if needed */ 5200 if (frame_size > IXGBE_ETH_MAX_LEN) { 5201 dev->data->dev_conf.rxmode.offloads |= 5202 DEV_RX_OFFLOAD_JUMBO_FRAME; 5203 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 5204 } else { 5205 dev->data->dev_conf.rxmode.offloads &= 5206 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 5207 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; 5208 } 5209 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 5210 5211 /* update max frame size */ 5212 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 5213 5214 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); 5215 maxfrs &= 0x0000FFFF; 5216 maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16); 5217 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); 5218 5219 return 0; 5220 } 5221 5222 /* 5223 * Virtual Function operations 5224 */ 5225 static void 5226 ixgbevf_intr_disable(struct rte_eth_dev *dev) 5227 { 5228 struct ixgbe_interrupt *intr = 5229 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5230 struct ixgbe_hw *hw = 5231 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5232 5233 PMD_INIT_FUNC_TRACE(); 5234 5235 /* Clear interrupt mask to stop from interrupts being generated */ 5236 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); 5237 5238 IXGBE_WRITE_FLUSH(hw); 5239 5240 /* Clear mask value. 
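 * The mask is rebuilt when interrupts are re-armed: ixgbevf_intr_enable()
 * reloads IXGBE_VF_IRQ_ENABLE_MASK, and the Rx queue interrupt
 * enable/disable callbacks later in this file set or clear the
 * per-vector bits.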
*/ 5241 intr->mask = 0; 5242 } 5243 5244 static void 5245 ixgbevf_intr_enable(struct rte_eth_dev *dev) 5246 { 5247 struct ixgbe_interrupt *intr = 5248 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5249 struct ixgbe_hw *hw = 5250 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5251 5252 PMD_INIT_FUNC_TRACE(); 5253 5254 /* VF enable interrupt autoclean */ 5255 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK); 5256 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK); 5257 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); 5258 5259 IXGBE_WRITE_FLUSH(hw); 5260 5261 /* Save IXGBE_VTEIMS value to mask. */ 5262 intr->mask = IXGBE_VF_IRQ_ENABLE_MASK; 5263 } 5264 5265 static int 5266 ixgbevf_dev_configure(struct rte_eth_dev *dev) 5267 { 5268 struct rte_eth_conf *conf = &dev->data->dev_conf; 5269 struct ixgbe_adapter *adapter = dev->data->dev_private; 5270 5271 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 5272 dev->data->port_id); 5273 5274 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 5275 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 5276 5277 /* 5278 * VF has no ability to enable/disable HW CRC 5279 * Keep the persistent behavior the same as Host PF 5280 */ 5281 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC 5282 if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { 5283 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 5284 conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC; 5285 } 5286 #else 5287 if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) { 5288 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 5289 conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC; 5290 } 5291 #endif 5292 5293 /* 5294 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 5295 * allocation or vector Rx preconditions we will reset it. 5296 */ 5297 adapter->rx_bulk_alloc_allowed = true; 5298 adapter->rx_vec_allowed = true; 5299 5300 return 0; 5301 } 5302 5303 static int 5304 ixgbevf_dev_start(struct rte_eth_dev *dev) 5305 { 5306 struct ixgbe_hw *hw = 5307 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5308 uint32_t intr_vector = 0; 5309 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5310 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5311 5312 int err, mask = 0; 5313 5314 PMD_INIT_FUNC_TRACE(); 5315 5316 /* Stop the link setup handler before resetting the HW. */ 5317 ixgbe_dev_wait_setup_link_complete(dev, 0); 5318 5319 err = hw->mac.ops.reset_hw(hw); 5320 5321 /** 5322 * In this case, reuses the MAC address assigned by VF 5323 * initialization. 5324 */ 5325 if (err != IXGBE_SUCCESS && err != IXGBE_ERR_INVALID_MAC_ADDR) { 5326 PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err); 5327 return err; 5328 } 5329 5330 hw->mac.get_link_status = true; 5331 5332 /* negotiate mailbox API version to use with the PF. 
 */
	ixgbevf_negotiate_api(hw);

	ixgbevf_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ixgbevf_dev_rx_init(dev);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
		ixgbe_dev_clear_queues(dev);
		return err;
	}

	/* Set vfta */
	ixgbevf_set_vfta_all(dev, 1);

	/* Set HW strip */
	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK;
	err = ixgbevf_vlan_offload_config(dev, mask);
	if (err) {
		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
		ixgbe_dev_clear_queues(dev);
		return err;
	}

	ixgbevf_dev_rxtx_start(dev);

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		/* According to the datasheet, only vectors 0/1/2 can be used;
		 * for now a single vector is used for all Rx queues.
		 */
		intr_vector = 1;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}
	ixgbevf_configure_msix(dev);

	/* When a VF port is bound to VFIO-PCI, only the miscellaneous
	 * interrupt is mapped to VFIO vector 0 in eth_ixgbevf_dev_init().
	 * If that VFIO interrupt mapping is not cleared first, the following
	 * rte_intr_enable() fails when it tries to map the Rx queue interrupt
	 * to another VFIO vector.
	 * So clear the uio/vfio intr/eventfd first to avoid the failure.
	 */
	rte_intr_disable(intr_handle);

	rte_intr_enable(intr_handle);

	/* Re-enable interrupt for VF */
	ixgbevf_intr_enable(dev);

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
5400 */ 5401 ixgbevf_dev_link_update(dev, 0); 5402 5403 hw->adapter_stopped = false; 5404 5405 return 0; 5406 } 5407 5408 static int 5409 ixgbevf_dev_stop(struct rte_eth_dev *dev) 5410 { 5411 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5412 struct ixgbe_adapter *adapter = dev->data->dev_private; 5413 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5414 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5415 5416 if (hw->adapter_stopped) 5417 return 0; 5418 5419 PMD_INIT_FUNC_TRACE(); 5420 5421 ixgbe_dev_wait_setup_link_complete(dev, 0); 5422 5423 ixgbevf_intr_disable(dev); 5424 5425 dev->data->dev_started = 0; 5426 hw->adapter_stopped = 1; 5427 ixgbe_stop_adapter(hw); 5428 5429 /* 5430 * Clear what we set, but we still keep shadow_vfta to 5431 * restore after device starts 5432 */ 5433 ixgbevf_set_vfta_all(dev, 0); 5434 5435 /* Clear stored conf */ 5436 dev->data->scattered_rx = 0; 5437 5438 ixgbe_dev_clear_queues(dev); 5439 5440 /* Clean datapath event and queue/vec mapping */ 5441 rte_intr_efd_disable(intr_handle); 5442 if (intr_handle->intr_vec != NULL) { 5443 rte_free(intr_handle->intr_vec); 5444 intr_handle->intr_vec = NULL; 5445 } 5446 5447 adapter->rss_reta_updated = 0; 5448 5449 return 0; 5450 } 5451 5452 static int 5453 ixgbevf_dev_close(struct rte_eth_dev *dev) 5454 { 5455 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5456 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5457 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5458 int ret; 5459 5460 PMD_INIT_FUNC_TRACE(); 5461 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 5462 return 0; 5463 5464 ixgbe_reset_hw(hw); 5465 5466 ret = ixgbevf_dev_stop(dev); 5467 5468 ixgbe_dev_free_queues(dev); 5469 5470 /** 5471 * Remove the VF MAC address ro ensure 5472 * that the VF traffic goes to the PF 5473 * after stop, close and detach of the VF 5474 **/ 5475 ixgbevf_remove_mac_addr(dev, 0); 5476 5477 rte_intr_disable(intr_handle); 5478 rte_intr_callback_unregister(intr_handle, 5479 ixgbevf_dev_interrupt_handler, dev); 5480 5481 return ret; 5482 } 5483 5484 /* 5485 * Reset VF device 5486 */ 5487 static int 5488 ixgbevf_dev_reset(struct rte_eth_dev *dev) 5489 { 5490 int ret; 5491 5492 ret = eth_ixgbevf_dev_uninit(dev); 5493 if (ret) 5494 return ret; 5495 5496 ret = eth_ixgbevf_dev_init(dev); 5497 5498 return ret; 5499 } 5500 5501 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) 5502 { 5503 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5504 struct ixgbe_vfta *shadow_vfta = 5505 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5506 int i = 0, j = 0, vfta = 0, mask = 1; 5507 5508 for (i = 0; i < IXGBE_VFTA_SIZE; i++) { 5509 vfta = shadow_vfta->vfta[i]; 5510 if (vfta) { 5511 mask = 1; 5512 for (j = 0; j < 32; j++) { 5513 if (vfta & mask) 5514 ixgbe_set_vfta(hw, (i<<5)+j, 0, 5515 on, false); 5516 mask <<= 1; 5517 } 5518 } 5519 } 5520 5521 } 5522 5523 static int 5524 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 5525 { 5526 struct ixgbe_hw *hw = 5527 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5528 struct ixgbe_vfta *shadow_vfta = 5529 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5530 uint32_t vid_idx = 0; 5531 uint32_t vid_bit = 0; 5532 int ret = 0; 5533 5534 PMD_INIT_FUNC_TRACE(); 5535 5536 /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ 5537 ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); 5538 if (ret) { 5539 PMD_INIT_LOG(ERR, "Unable to set VF 
vlan"); 5540 return ret; 5541 } 5542 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 5543 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 5544 5545 /* Save what we set and retore it after device reset */ 5546 if (on) 5547 shadow_vfta->vfta[vid_idx] |= vid_bit; 5548 else 5549 shadow_vfta->vfta[vid_idx] &= ~vid_bit; 5550 5551 return 0; 5552 } 5553 5554 static void 5555 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 5556 { 5557 struct ixgbe_hw *hw = 5558 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5559 uint32_t ctrl; 5560 5561 PMD_INIT_FUNC_TRACE(); 5562 5563 if (queue >= hw->mac.max_rx_queues) 5564 return; 5565 5566 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 5567 if (on) 5568 ctrl |= IXGBE_RXDCTL_VME; 5569 else 5570 ctrl &= ~IXGBE_RXDCTL_VME; 5571 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 5572 5573 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); 5574 } 5575 5576 static int 5577 ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask) 5578 { 5579 struct ixgbe_rx_queue *rxq; 5580 uint16_t i; 5581 int on = 0; 5582 5583 /* VF function only support hw strip feature, others are not support */ 5584 if (mask & ETH_VLAN_STRIP_MASK) { 5585 for (i = 0; i < dev->data->nb_rx_queues; i++) { 5586 rxq = dev->data->rx_queues[i]; 5587 on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); 5588 ixgbevf_vlan_strip_queue_set(dev, i, on); 5589 } 5590 } 5591 5592 return 0; 5593 } 5594 5595 static int 5596 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) 5597 { 5598 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 5599 5600 ixgbevf_vlan_offload_config(dev, mask); 5601 5602 return 0; 5603 } 5604 5605 int 5606 ixgbe_vt_check(struct ixgbe_hw *hw) 5607 { 5608 uint32_t reg_val; 5609 5610 /* if Virtualization Technology is enabled */ 5611 reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 5612 if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { 5613 PMD_INIT_LOG(ERR, "VT must be enabled for this setting"); 5614 return -1; 5615 } 5616 5617 return 0; 5618 } 5619 5620 static uint32_t 5621 ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr) 5622 { 5623 uint32_t vector = 0; 5624 5625 switch (hw->mac.mc_filter_type) { 5626 case 0: /* use bits [47:36] of the address */ 5627 vector = ((uc_addr->addr_bytes[4] >> 4) | 5628 (((uint16_t)uc_addr->addr_bytes[5]) << 4)); 5629 break; 5630 case 1: /* use bits [46:35] of the address */ 5631 vector = ((uc_addr->addr_bytes[4] >> 3) | 5632 (((uint16_t)uc_addr->addr_bytes[5]) << 5)); 5633 break; 5634 case 2: /* use bits [45:34] of the address */ 5635 vector = ((uc_addr->addr_bytes[4] >> 2) | 5636 (((uint16_t)uc_addr->addr_bytes[5]) << 6)); 5637 break; 5638 case 3: /* use bits [43:32] of the address */ 5639 vector = ((uc_addr->addr_bytes[4]) | 5640 (((uint16_t)uc_addr->addr_bytes[5]) << 8)); 5641 break; 5642 default: /* Invalid mc_filter_type */ 5643 break; 5644 } 5645 5646 /* vector can only be 12-bits or boundary will be exceeded */ 5647 vector &= 0xFFF; 5648 return vector; 5649 } 5650 5651 static int 5652 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, 5653 struct rte_ether_addr *mac_addr, uint8_t on) 5654 { 5655 uint32_t vector; 5656 uint32_t uta_idx; 5657 uint32_t reg_val; 5658 uint32_t uta_shift; 5659 uint32_t rc; 5660 const uint32_t ixgbe_uta_idx_mask = 0x7F; 5661 const uint32_t ixgbe_uta_bit_shift = 5; 5662 const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1; 5663 const uint32_t bit1 = 0x1; 5664 5665 struct ixgbe_hw *hw = 5666 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5667 struct ixgbe_uta_info 
*uta_info = 5668 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5669 5670 /* The UTA table only exists on 82599 hardware and newer */ 5671 if (hw->mac.type < ixgbe_mac_82599EB) 5672 return -ENOTSUP; 5673 5674 vector = ixgbe_uta_vector(hw, mac_addr); 5675 uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; 5676 uta_shift = vector & ixgbe_uta_bit_mask; 5677 5678 rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0); 5679 if (rc == on) 5680 return 0; 5681 5682 reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx)); 5683 if (on) { 5684 uta_info->uta_in_use++; 5685 reg_val |= (bit1 << uta_shift); 5686 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift); 5687 } else { 5688 uta_info->uta_in_use--; 5689 reg_val &= ~(bit1 << uta_shift); 5690 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift); 5691 } 5692 5693 IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val); 5694 5695 if (uta_info->uta_in_use > 0) 5696 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 5697 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 5698 else 5699 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 5700 5701 return 0; 5702 } 5703 5704 static int 5705 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) 5706 { 5707 int i; 5708 struct ixgbe_hw *hw = 5709 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5710 struct ixgbe_uta_info *uta_info = 5711 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5712 5713 /* The UTA table only exists on 82599 hardware and newer */ 5714 if (hw->mac.type < ixgbe_mac_82599EB) 5715 return -ENOTSUP; 5716 5717 if (on) { 5718 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5719 uta_info->uta_shadow[i] = ~0; 5720 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); 5721 } 5722 } else { 5723 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5724 uta_info->uta_shadow[i] = 0; 5725 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 5726 } 5727 } 5728 return 0; 5729 5730 } 5731 5732 uint32_t 5733 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) 5734 { 5735 uint32_t new_val = orig_val; 5736 5737 if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) 5738 new_val |= IXGBE_VMOLR_AUPE; 5739 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC) 5740 new_val |= IXGBE_VMOLR_ROMPE; 5741 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC) 5742 new_val |= IXGBE_VMOLR_ROPE; 5743 if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST) 5744 new_val |= IXGBE_VMOLR_BAM; 5745 if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST) 5746 new_val |= IXGBE_VMOLR_MPE; 5747 5748 return new_val; 5749 } 5750 5751 #define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */ 5752 #define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */ 5753 #define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */ 5754 #define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. 
*/ 5755 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \ 5756 ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \ 5757 ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN)) 5758 5759 static int 5760 ixgbe_mirror_rule_set(struct rte_eth_dev *dev, 5761 struct rte_eth_mirror_conf *mirror_conf, 5762 uint8_t rule_id, uint8_t on) 5763 { 5764 uint32_t mr_ctl, vlvf; 5765 uint32_t mp_lsb = 0; 5766 uint32_t mv_msb = 0; 5767 uint32_t mv_lsb = 0; 5768 uint32_t mp_msb = 0; 5769 uint8_t i = 0; 5770 int reg_index = 0; 5771 uint64_t vlan_mask = 0; 5772 5773 const uint8_t pool_mask_offset = 32; 5774 const uint8_t vlan_mask_offset = 32; 5775 const uint8_t dst_pool_offset = 8; 5776 const uint8_t rule_mr_offset = 4; 5777 const uint8_t mirror_rule_mask = 0x0F; 5778 5779 struct ixgbe_mirror_info *mr_info = 5780 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); 5781 struct ixgbe_hw *hw = 5782 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5783 uint8_t mirror_type = 0; 5784 5785 if (ixgbe_vt_check(hw) < 0) 5786 return -ENOTSUP; 5787 5788 if (rule_id >= IXGBE_MAX_MIRROR_RULES) 5789 return -EINVAL; 5790 5791 if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) { 5792 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.", 5793 mirror_conf->rule_type); 5794 return -EINVAL; 5795 } 5796 5797 if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { 5798 mirror_type |= IXGBE_MRCTL_VLME; 5799 /* Check if vlan id is valid and find conresponding VLAN ID 5800 * index in VLVF 5801 */ 5802 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) { 5803 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { 5804 /* search vlan id related pool vlan filter 5805 * index 5806 */ 5807 reg_index = ixgbe_find_vlvf_slot( 5808 hw, 5809 mirror_conf->vlan.vlan_id[i], 5810 false); 5811 if (reg_index < 0) 5812 return -EINVAL; 5813 vlvf = IXGBE_READ_REG(hw, 5814 IXGBE_VLVF(reg_index)); 5815 if ((vlvf & IXGBE_VLVF_VIEN) && 5816 ((vlvf & IXGBE_VLVF_VLANID_MASK) == 5817 mirror_conf->vlan.vlan_id[i])) 5818 vlan_mask |= (1ULL << reg_index); 5819 else 5820 return -EINVAL; 5821 } 5822 } 5823 5824 if (on) { 5825 mv_lsb = vlan_mask & 0xFFFFFFFF; 5826 mv_msb = vlan_mask >> vlan_mask_offset; 5827 5828 mr_info->mr_conf[rule_id].vlan.vlan_mask = 5829 mirror_conf->vlan.vlan_mask; 5830 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) { 5831 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) 5832 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 5833 mirror_conf->vlan.vlan_id[i]; 5834 } 5835 } else { 5836 mv_lsb = 0; 5837 mv_msb = 0; 5838 mr_info->mr_conf[rule_id].vlan.vlan_mask = 0; 5839 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) 5840 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0; 5841 } 5842 } 5843 5844 /** 5845 * if enable pool mirror, write related pool mask register,if disable 5846 * pool mirror, clear PFMRVM register 5847 */ 5848 if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { 5849 mirror_type |= IXGBE_MRCTL_VPME; 5850 if (on) { 5851 mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF; 5852 mp_msb = mirror_conf->pool_mask >> pool_mask_offset; 5853 mr_info->mr_conf[rule_id].pool_mask = 5854 mirror_conf->pool_mask; 5855 5856 } else { 5857 mp_lsb = 0; 5858 mp_msb = 0; 5859 mr_info->mr_conf[rule_id].pool_mask = 0; 5860 } 5861 } 5862 if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT) 5863 mirror_type |= IXGBE_MRCTL_UPME; 5864 if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT) 5865 mirror_type |= IXGBE_MRCTL_DPME; 5866 5867 /* read mirror control register and recalculate it */ 5868 mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id)); 5869 5870 if (on) { 5871 
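		/* MRCTL layout as programmed below: the low nibble carries the
		 * mirror-type bits defined above (VPME/UPME/DPME/VLME) and
		 * bits [15:8] carry the destination pool. For example, pool
		 * mirroring into pool 7 yields
		 * IXGBE_MRCTL_VPME | (7 << dst_pool_offset) = 0x0701.
		 */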
mr_ctl |= mirror_type; 5872 mr_ctl &= mirror_rule_mask; 5873 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset; 5874 } else { 5875 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask); 5876 } 5877 5878 mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type; 5879 mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool; 5880 5881 /* write mirrror control register */ 5882 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); 5883 5884 /* write pool mirrror control register */ 5885 if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { 5886 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); 5887 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), 5888 mp_msb); 5889 } 5890 /* write VLAN mirrror control register */ 5891 if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { 5892 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); 5893 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), 5894 mv_msb); 5895 } 5896 5897 return 0; 5898 } 5899 5900 static int 5901 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) 5902 { 5903 int mr_ctl = 0; 5904 uint32_t lsb_val = 0; 5905 uint32_t msb_val = 0; 5906 const uint8_t rule_mr_offset = 4; 5907 5908 struct ixgbe_hw *hw = 5909 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5910 struct ixgbe_mirror_info *mr_info = 5911 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); 5912 5913 if (ixgbe_vt_check(hw) < 0) 5914 return -ENOTSUP; 5915 5916 if (rule_id >= IXGBE_MAX_MIRROR_RULES) 5917 return -EINVAL; 5918 5919 memset(&mr_info->mr_conf[rule_id], 0, 5920 sizeof(struct rte_eth_mirror_conf)); 5921 5922 /* clear PFVMCTL register */ 5923 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); 5924 5925 /* clear pool mask register */ 5926 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val); 5927 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val); 5928 5929 /* clear vlan mask register */ 5930 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val); 5931 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val); 5932 5933 return 0; 5934 } 5935 5936 static int 5937 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5938 { 5939 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5940 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5941 struct ixgbe_interrupt *intr = 5942 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5943 struct ixgbe_hw *hw = 5944 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5945 uint32_t vec = IXGBE_MISC_VEC_ID; 5946 5947 if (rte_intr_allow_others(intr_handle)) 5948 vec = IXGBE_RX_VEC_START; 5949 intr->mask |= (1 << vec); 5950 RTE_SET_USED(queue_id); 5951 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5952 5953 rte_intr_ack(intr_handle); 5954 5955 return 0; 5956 } 5957 5958 static int 5959 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5960 { 5961 struct ixgbe_interrupt *intr = 5962 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5963 struct ixgbe_hw *hw = 5964 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5965 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5966 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5967 uint32_t vec = IXGBE_MISC_VEC_ID; 5968 5969 if (rte_intr_allow_others(intr_handle)) 5970 vec = IXGBE_RX_VEC_START; 5971 intr->mask &= ~(1 << vec); 5972 RTE_SET_USED(queue_id); 5973 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5974 5975 return 0; 5976 } 5977 5978 static int 5979 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5980 { 5981 struct 
rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5982 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5983 uint32_t mask; 5984 struct ixgbe_hw *hw = 5985 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5986 struct ixgbe_interrupt *intr = 5987 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5988 5989 if (queue_id < 16) { 5990 ixgbe_disable_intr(hw); 5991 intr->mask |= (1 << queue_id); 5992 ixgbe_enable_intr(dev); 5993 } else if (queue_id < 32) { 5994 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 5995 mask &= (1 << queue_id); 5996 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 5997 } else if (queue_id < 64) { 5998 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 5999 mask &= (1 << (queue_id - 32)); 6000 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 6001 } 6002 rte_intr_ack(intr_handle); 6003 6004 return 0; 6005 } 6006 6007 static int 6008 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 6009 { 6010 uint32_t mask; 6011 struct ixgbe_hw *hw = 6012 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6013 struct ixgbe_interrupt *intr = 6014 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 6015 6016 if (queue_id < 16) { 6017 ixgbe_disable_intr(hw); 6018 intr->mask &= ~(1 << queue_id); 6019 ixgbe_enable_intr(dev); 6020 } else if (queue_id < 32) { 6021 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 6022 mask &= ~(1 << queue_id); 6023 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 6024 } else if (queue_id < 64) { 6025 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 6026 mask &= ~(1 << (queue_id - 32)); 6027 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 6028 } 6029 6030 return 0; 6031 } 6032 6033 static void 6034 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 6035 uint8_t queue, uint8_t msix_vector) 6036 { 6037 uint32_t tmp, idx; 6038 6039 if (direction == -1) { 6040 /* other causes */ 6041 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 6042 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 6043 tmp &= ~0xFF; 6044 tmp |= msix_vector; 6045 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); 6046 } else { 6047 /* rx or tx cause */ 6048 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 6049 idx = ((16 * (queue & 1)) + (8 * direction)); 6050 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 6051 tmp &= ~(0xFF << idx); 6052 tmp |= (msix_vector << idx); 6053 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); 6054 } 6055 } 6056 6057 /** 6058 * set the IVAR registers, mapping interrupt causes to vectors 6059 * @param hw 6060 * pointer to ixgbe_hw struct 6061 * @direction 6062 * 0 for Rx, 1 for Tx, -1 for other causes 6063 * @queue 6064 * queue to map the corresponding interrupt to 6065 * @msix_vector 6066 * the vector to map to the corresponding queue 6067 */ 6068 static void 6069 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 6070 uint8_t queue, uint8_t msix_vector) 6071 { 6072 uint32_t tmp, idx; 6073 6074 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 6075 if (hw->mac.type == ixgbe_mac_82598EB) { 6076 if (direction == -1) 6077 direction = 0; 6078 idx = (((direction * 64) + queue) >> 2) & 0x1F; 6079 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); 6080 tmp &= ~(0xFF << (8 * (queue & 0x3))); 6081 tmp |= (msix_vector << (8 * (queue & 0x3))); 6082 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); 6083 } else if ((hw->mac.type == ixgbe_mac_82599EB) || 6084 (hw->mac.type == ixgbe_mac_X540) || 6085 (hw->mac.type == ixgbe_mac_X550) || 6086 (hw->mac.type == ixgbe_mac_X550EM_x)) { 6087 if (direction == -1) { 6088 /* other causes */ 6089 idx = ((queue & 1) * 8); 6090 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 6091 tmp 
&= ~(0xFF << idx); 6092 tmp |= (msix_vector << idx); 6093 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); 6094 } else { 6095 /* rx or tx causes */ 6096 idx = ((16 * (queue & 1)) + (8 * direction)); 6097 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); 6098 tmp &= ~(0xFF << idx); 6099 tmp |= (msix_vector << idx); 6100 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); 6101 } 6102 } 6103 } 6104 6105 static void 6106 ixgbevf_configure_msix(struct rte_eth_dev *dev) 6107 { 6108 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 6109 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 6110 struct ixgbe_hw *hw = 6111 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6112 uint32_t q_idx; 6113 uint32_t vector_idx = IXGBE_MISC_VEC_ID; 6114 uint32_t base = IXGBE_MISC_VEC_ID; 6115 6116 /* Configure VF other cause ivar */ 6117 ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); 6118 6119 /* won't configure msix register if no mapping is done 6120 * between intr vector and event fd. 6121 */ 6122 if (!rte_intr_dp_is_en(intr_handle)) 6123 return; 6124 6125 if (rte_intr_allow_others(intr_handle)) { 6126 base = IXGBE_RX_VEC_START; 6127 vector_idx = IXGBE_RX_VEC_START; 6128 } 6129 6130 /* Configure all RX queues of VF */ 6131 for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { 6132 /* Force all queue use vector 0, 6133 * as IXGBE_VF_MAXMSIVECOTR = 1 6134 */ 6135 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); 6136 intr_handle->intr_vec[q_idx] = vector_idx; 6137 if (vector_idx < base + intr_handle->nb_efd - 1) 6138 vector_idx++; 6139 } 6140 6141 /* As RX queue setting above show, all queues use the vector 0. 6142 * Set only the ITR value of IXGBE_MISC_VEC_ID. 6143 */ 6144 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID), 6145 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 6146 | IXGBE_EITR_CNT_WDIS); 6147 } 6148 6149 /** 6150 * Sets up the hardware to properly generate MSI-X interrupts 6151 * @hw 6152 * board private structure 6153 */ 6154 static void 6155 ixgbe_configure_msix(struct rte_eth_dev *dev) 6156 { 6157 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 6158 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 6159 struct ixgbe_hw *hw = 6160 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6161 uint32_t queue_id, base = IXGBE_MISC_VEC_ID; 6162 uint32_t vec = IXGBE_MISC_VEC_ID; 6163 uint32_t mask; 6164 uint32_t gpie; 6165 6166 /* won't configure msix register if no mapping is done 6167 * between intr vector and event fd 6168 * but if misx has been enabled already, need to configure 6169 * auto clean, auto mask and throttling. 6170 */ 6171 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 6172 if (!rte_intr_dp_is_en(intr_handle) && 6173 !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT))) 6174 return; 6175 6176 if (rte_intr_allow_others(intr_handle)) 6177 vec = base = IXGBE_RX_VEC_START; 6178 6179 /* setup GPIE for MSI-x mode */ 6180 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 6181 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | 6182 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME; 6183 /* auto clearing and auto setting corresponding bits in EIMS 6184 * when MSI-X interrupt is triggered 6185 */ 6186 if (hw->mac.type == ixgbe_mac_82598EB) { 6187 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 6188 } else { 6189 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 6190 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 6191 } 6192 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 6193 6194 /* Populate the IVAR table and set the ITR values to the 6195 * corresponding register. 
6196 */ 6197 if (rte_intr_dp_is_en(intr_handle)) { 6198 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; 6199 queue_id++) { 6200 /* by default, 1:1 mapping */ 6201 ixgbe_set_ivar_map(hw, 0, queue_id, vec); 6202 intr_handle->intr_vec[queue_id] = vec; 6203 if (vec < base + intr_handle->nb_efd - 1) 6204 vec++; 6205 } 6206 6207 switch (hw->mac.type) { 6208 case ixgbe_mac_82598EB: 6209 ixgbe_set_ivar_map(hw, -1, 6210 IXGBE_IVAR_OTHER_CAUSES_INDEX, 6211 IXGBE_MISC_VEC_ID); 6212 break; 6213 case ixgbe_mac_82599EB: 6214 case ixgbe_mac_X540: 6215 case ixgbe_mac_X550: 6216 case ixgbe_mac_X550EM_x: 6217 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); 6218 break; 6219 default: 6220 break; 6221 } 6222 } 6223 IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), 6224 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 6225 | IXGBE_EITR_CNT_WDIS); 6226 6227 /* set up to autoclear timer, and the vectors */ 6228 mask = IXGBE_EIMS_ENABLE_MASK; 6229 mask &= ~(IXGBE_EIMS_OTHER | 6230 IXGBE_EIMS_MAILBOX | 6231 IXGBE_EIMS_LSC); 6232 6233 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 6234 } 6235 6236 int 6237 ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, 6238 uint16_t queue_idx, uint16_t tx_rate) 6239 { 6240 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6241 struct rte_eth_rxmode *rxmode; 6242 uint32_t rf_dec, rf_int; 6243 uint32_t bcnrc_val; 6244 uint16_t link_speed = dev->data->dev_link.link_speed; 6245 6246 if (queue_idx >= hw->mac.max_tx_queues) 6247 return -EINVAL; 6248 6249 if (tx_rate != 0) { 6250 /* Calculate the rate factor values to set */ 6251 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; 6252 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; 6253 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; 6254 6255 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; 6256 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & 6257 IXGBE_RTTBCNRC_RF_INT_MASK_M); 6258 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); 6259 } else { 6260 bcnrc_val = 0; 6261 } 6262 6263 rxmode = &dev->data->dev_conf.rxmode; 6264 /* 6265 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM 6266 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise 6267 * set as 0x4. 6268 */ 6269 if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) && 6270 (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE)) 6271 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 6272 IXGBE_MMW_SIZE_JUMBO_FRAME); 6273 else 6274 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 6275 IXGBE_MMW_SIZE_DEFAULT); 6276 6277 /* Set RTTBCNRC of queue X */ 6278 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); 6279 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); 6280 IXGBE_WRITE_FLUSH(hw); 6281 6282 return 0; 6283 } 6284 6285 static int 6286 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 6287 __rte_unused uint32_t index, 6288 __rte_unused uint32_t pool) 6289 { 6290 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6291 int diag; 6292 6293 /* 6294 * On a 82599 VF, adding again the same MAC addr is not an idempotent 6295 * operation. Trap this case to avoid exhausting the [very limited] 6296 * set of PF resources used to store VF MAC addresses. 
6297 */ 6298 if (memcmp(hw->mac.perm_addr, mac_addr, 6299 sizeof(struct rte_ether_addr)) == 0) 6300 return -1; 6301 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6302 if (diag != 0) 6303 PMD_DRV_LOG(ERR, "Unable to add MAC address " 6304 "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d", 6305 mac_addr->addr_bytes[0], 6306 mac_addr->addr_bytes[1], 6307 mac_addr->addr_bytes[2], 6308 mac_addr->addr_bytes[3], 6309 mac_addr->addr_bytes[4], 6310 mac_addr->addr_bytes[5], 6311 diag); 6312 return diag; 6313 } 6314 6315 static void 6316 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) 6317 { 6318 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6319 struct rte_ether_addr *perm_addr = 6320 (struct rte_ether_addr *)hw->mac.perm_addr; 6321 struct rte_ether_addr *mac_addr; 6322 uint32_t i; 6323 int diag; 6324 6325 /* 6326 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does 6327 * not support the deletion of a given MAC address. 6328 * Instead, it imposes to delete all MAC addresses, then to add again 6329 * all MAC addresses with the exception of the one to be deleted. 6330 */ 6331 (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); 6332 6333 /* 6334 * Add again all MAC addresses, with the exception of the deleted one 6335 * and of the permanent MAC address. 6336 */ 6337 for (i = 0, mac_addr = dev->data->mac_addrs; 6338 i < hw->mac.num_rar_entries; i++, mac_addr++) { 6339 /* Skip the deleted MAC address */ 6340 if (i == index) 6341 continue; 6342 /* Skip NULL MAC addresses */ 6343 if (rte_is_zero_ether_addr(mac_addr)) 6344 continue; 6345 /* Skip the permanent MAC address */ 6346 if (memcmp(perm_addr, mac_addr, 6347 sizeof(struct rte_ether_addr)) == 0) 6348 continue; 6349 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6350 if (diag != 0) 6351 PMD_DRV_LOG(ERR, 6352 "Adding again MAC address " 6353 "%02x:%02x:%02x:%02x:%02x:%02x failed " 6354 "diag=%d", 6355 mac_addr->addr_bytes[0], 6356 mac_addr->addr_bytes[1], 6357 mac_addr->addr_bytes[2], 6358 mac_addr->addr_bytes[3], 6359 mac_addr->addr_bytes[4], 6360 mac_addr->addr_bytes[5], 6361 diag); 6362 } 6363 } 6364 6365 static int 6366 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, 6367 struct rte_ether_addr *addr) 6368 { 6369 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6370 6371 hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); 6372 6373 return 0; 6374 } 6375 6376 int 6377 ixgbe_syn_filter_set(struct rte_eth_dev *dev, 6378 struct rte_eth_syn_filter *filter, 6379 bool add) 6380 { 6381 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6382 struct ixgbe_filter_info *filter_info = 6383 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6384 uint32_t syn_info; 6385 uint32_t synqf; 6386 6387 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6388 return -EINVAL; 6389 6390 syn_info = filter_info->syn_info; 6391 6392 if (add) { 6393 if (syn_info & IXGBE_SYN_FILTER_ENABLE) 6394 return -EINVAL; 6395 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & 6396 IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); 6397 6398 if (filter->hig_pri) 6399 synqf |= IXGBE_SYN_FILTER_SYNQFP; 6400 else 6401 synqf &= ~IXGBE_SYN_FILTER_SYNQFP; 6402 } else { 6403 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); 6404 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE)) 6405 return -ENOENT; 6406 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); 6407 } 6408 6409 filter_info->syn_info = synqf; 6410 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 6411 IXGBE_WRITE_FLUSH(hw); 6412 
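	/* Note: SYNQF is a single per-port register, so only one TCP SYN
	 * filter can be active at a time; the add path above rejects a
	 * second enable with -EINVAL.
	 */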
return 0; 6413 } 6414 6415 6416 static inline enum ixgbe_5tuple_protocol 6417 convert_protocol_type(uint8_t protocol_value) 6418 { 6419 if (protocol_value == IPPROTO_TCP) 6420 return IXGBE_FILTER_PROTOCOL_TCP; 6421 else if (protocol_value == IPPROTO_UDP) 6422 return IXGBE_FILTER_PROTOCOL_UDP; 6423 else if (protocol_value == IPPROTO_SCTP) 6424 return IXGBE_FILTER_PROTOCOL_SCTP; 6425 else 6426 return IXGBE_FILTER_PROTOCOL_NONE; 6427 } 6428 6429 /* inject a 5-tuple filter to HW */ 6430 static inline void 6431 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, 6432 struct ixgbe_5tuple_filter *filter) 6433 { 6434 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6435 int i; 6436 uint32_t ftqf, sdpqf; 6437 uint32_t l34timir = 0; 6438 uint8_t mask = 0xff; 6439 6440 i = filter->index; 6441 6442 sdpqf = (uint32_t)(filter->filter_info.dst_port << 6443 IXGBE_SDPQF_DSTPORT_SHIFT); 6444 sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); 6445 6446 ftqf = (uint32_t)(filter->filter_info.proto & 6447 IXGBE_FTQF_PROTOCOL_MASK); 6448 ftqf |= (uint32_t)((filter->filter_info.priority & 6449 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); 6450 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */ 6451 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; 6452 if (filter->filter_info.dst_ip_mask == 0) 6453 mask &= IXGBE_FTQF_DEST_ADDR_MASK; 6454 if (filter->filter_info.src_port_mask == 0) 6455 mask &= IXGBE_FTQF_SOURCE_PORT_MASK; 6456 if (filter->filter_info.dst_port_mask == 0) 6457 mask &= IXGBE_FTQF_DEST_PORT_MASK; 6458 if (filter->filter_info.proto_mask == 0) 6459 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; 6460 ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; 6461 ftqf |= IXGBE_FTQF_POOL_MASK_EN; 6462 ftqf |= IXGBE_FTQF_QUEUE_ENABLE; 6463 6464 IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); 6465 IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); 6466 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); 6467 IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); 6468 6469 l34timir |= IXGBE_L34T_IMIR_RESERVE; 6470 l34timir |= (uint32_t)(filter->queue << 6471 IXGBE_L34T_IMIR_QUEUE_SHIFT); 6472 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); 6473 } 6474 6475 /* 6476 * add a 5tuple filter 6477 * 6478 * @param 6479 * dev: Pointer to struct rte_eth_dev. 6480 * index: the index the filter allocates. 6481 * filter: ponter to the filter that will be added. 6482 * rx_queue: the queue id the filter assigned to. 6483 * 6484 * @return 6485 * - On success, zero. 6486 * - On failure, a negative value. 6487 */ 6488 static int 6489 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 6490 struct ixgbe_5tuple_filter *filter) 6491 { 6492 struct ixgbe_filter_info *filter_info = 6493 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6494 int i, idx, shift; 6495 6496 /* 6497 * look for an unused 5tuple filter index, 6498 * and insert the filter to list. 
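	 * fivetuple_mask is a bitmap of the FTQF slots already in use: each
	 * 32-bit word tracks 32 filter indexes, so the scan below walks the
	 * words and picks the first clear bit as the new filter index.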
6499 */ 6500 for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { 6501 idx = i / (sizeof(uint32_t) * NBBY); 6502 shift = i % (sizeof(uint32_t) * NBBY); 6503 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { 6504 filter_info->fivetuple_mask[idx] |= 1 << shift; 6505 filter->index = i; 6506 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 6507 filter, 6508 entries); 6509 break; 6510 } 6511 } 6512 if (i >= IXGBE_MAX_FTQF_FILTERS) { 6513 PMD_DRV_LOG(ERR, "5tuple filters are full."); 6514 return -ENOSYS; 6515 } 6516 6517 ixgbe_inject_5tuple_filter(dev, filter); 6518 6519 return 0; 6520 } 6521 6522 /* 6523 * remove a 5tuple filter 6524 * 6525 * @param 6526 * dev: Pointer to struct rte_eth_dev. 6527 * filter: the pointer of the filter will be removed. 6528 */ 6529 static void 6530 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 6531 struct ixgbe_5tuple_filter *filter) 6532 { 6533 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6534 struct ixgbe_filter_info *filter_info = 6535 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6536 uint16_t index = filter->index; 6537 6538 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= 6539 ~(1 << (index % (sizeof(uint32_t) * NBBY))); 6540 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); 6541 rte_free(filter); 6542 6543 IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); 6544 IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); 6545 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); 6546 IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); 6547 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); 6548 } 6549 6550 static int 6551 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 6552 { 6553 struct ixgbe_hw *hw; 6554 uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD; 6555 struct rte_eth_dev_data *dev_data = dev->data; 6556 6557 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6558 6559 if (mtu < RTE_ETHER_MIN_MTU || 6560 max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) 6561 return -EINVAL; 6562 6563 /* If device is started, refuse mtu that requires the support of 6564 * scattered packets when this feature has not been enabled before. 6565 */ 6566 if (dev_data->dev_started && !dev_data->scattered_rx && 6567 (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > 6568 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 6569 PMD_INIT_LOG(ERR, "Stop port first."); 6570 return -EINVAL; 6571 } 6572 6573 /* 6574 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU 6575 * request of the version 2.0 of the mailbox API. 6576 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 6577 * of the mailbox API. 
6578 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers 6579 * prior to 3.11.33 which contains the following change: 6580 * "ixgbe: Enable jumbo frames support w/ SR-IOV" 6581 */ 6582 if (ixgbevf_rlpml_set_vf(hw, max_frame)) 6583 return -EINVAL; 6584 6585 /* update max frame size */ 6586 dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame; 6587 return 0; 6588 } 6589 6590 static inline struct ixgbe_5tuple_filter * 6591 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, 6592 struct ixgbe_5tuple_filter_info *key) 6593 { 6594 struct ixgbe_5tuple_filter *it; 6595 6596 TAILQ_FOREACH(it, filter_list, entries) { 6597 if (memcmp(key, &it->filter_info, 6598 sizeof(struct ixgbe_5tuple_filter_info)) == 0) { 6599 return it; 6600 } 6601 } 6602 return NULL; 6603 } 6604 6605 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/ 6606 static inline int 6607 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, 6608 struct ixgbe_5tuple_filter_info *filter_info) 6609 { 6610 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || 6611 filter->priority > IXGBE_5TUPLE_MAX_PRI || 6612 filter->priority < IXGBE_5TUPLE_MIN_PRI) 6613 return -EINVAL; 6614 6615 switch (filter->dst_ip_mask) { 6616 case UINT32_MAX: 6617 filter_info->dst_ip_mask = 0; 6618 filter_info->dst_ip = filter->dst_ip; 6619 break; 6620 case 0: 6621 filter_info->dst_ip_mask = 1; 6622 break; 6623 default: 6624 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 6625 return -EINVAL; 6626 } 6627 6628 switch (filter->src_ip_mask) { 6629 case UINT32_MAX: 6630 filter_info->src_ip_mask = 0; 6631 filter_info->src_ip = filter->src_ip; 6632 break; 6633 case 0: 6634 filter_info->src_ip_mask = 1; 6635 break; 6636 default: 6637 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 6638 return -EINVAL; 6639 } 6640 6641 switch (filter->dst_port_mask) { 6642 case UINT16_MAX: 6643 filter_info->dst_port_mask = 0; 6644 filter_info->dst_port = filter->dst_port; 6645 break; 6646 case 0: 6647 filter_info->dst_port_mask = 1; 6648 break; 6649 default: 6650 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 6651 return -EINVAL; 6652 } 6653 6654 switch (filter->src_port_mask) { 6655 case UINT16_MAX: 6656 filter_info->src_port_mask = 0; 6657 filter_info->src_port = filter->src_port; 6658 break; 6659 case 0: 6660 filter_info->src_port_mask = 1; 6661 break; 6662 default: 6663 PMD_DRV_LOG(ERR, "invalid src_port mask."); 6664 return -EINVAL; 6665 } 6666 6667 switch (filter->proto_mask) { 6668 case UINT8_MAX: 6669 filter_info->proto_mask = 0; 6670 filter_info->proto = 6671 convert_protocol_type(filter->proto); 6672 break; 6673 case 0: 6674 filter_info->proto_mask = 1; 6675 break; 6676 default: 6677 PMD_DRV_LOG(ERR, "invalid protocol mask."); 6678 return -EINVAL; 6679 } 6680 6681 filter_info->priority = (uint8_t)filter->priority; 6682 return 0; 6683 } 6684 6685 /* 6686 * add or delete a ntuple filter 6687 * 6688 * @param 6689 * dev: Pointer to struct rte_eth_dev. 6690 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 6691 * add: if true, add filter, if false, remove filter 6692 * 6693 * @return 6694 * - On success, zero. 6695 * - On failure, a negative value. 
 */
int
ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter,
			bool add)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct ixgbe_5tuple_filter_info filter_5tuple;
	struct ixgbe_5tuple_filter *filter;
	int ret;

	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
		return -EINVAL;
	}

	memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
	if (ret < 0)
		return ret;

	filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
					    &filter_5tuple);
	if (filter != NULL && add) {
		PMD_DRV_LOG(ERR, "filter exists.");
		return -EEXIST;
	}
	if (filter == NULL && !add) {
		PMD_DRV_LOG(ERR, "filter doesn't exist.");
		return -ENOENT;
	}

	if (add) {
		filter = rte_zmalloc("ixgbe_5tuple_filter",
				sizeof(struct ixgbe_5tuple_filter), 0);
		if (filter == NULL)
			return -ENOMEM;
		rte_memcpy(&filter->filter_info,
			   &filter_5tuple,
			   sizeof(struct ixgbe_5tuple_filter_info));
		filter->queue = ntuple_filter->queue;
		ret = ixgbe_add_5tuple_filter(dev, filter);
		if (ret < 0) {
			rte_free(filter);
			return ret;
		}
	} else
		ixgbe_remove_5tuple_filter(dev, filter);

	return 0;
}

int
ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t etqf = 0;
	uint32_t etqs = 0;
	int ret;
	struct ixgbe_ethertype_filter ethertype_filter;

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
		return -EINVAL;

	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
	    filter->ether_type == RTE_ETHER_TYPE_IPV6) {
		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
			" ethertype filter.", filter->ether_type);
		return -EINVAL;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
		return -EINVAL;
	}
	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		PMD_DRV_LOG(ERR, "drop option is unsupported.");
		return -EINVAL;
	}

	ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
	if (ret >= 0 && add) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
			filter->ether_type);
		return -EEXIST;
	}
	if (ret < 0 && !add) {
		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
			filter->ether_type);
		return -ENOENT;
	}

	if (add) {
		etqf = IXGBE_ETQF_FILTER_EN;
		etqf |= (uint32_t)filter->ether_type;
		etqs |= (uint32_t)((filter->queue <<
				    IXGBE_ETQS_RX_QUEUE_SHIFT) &
				   IXGBE_ETQS_RX_QUEUE);
		etqs |= IXGBE_ETQS_QUEUE_EN;

		ethertype_filter.ethertype = filter->ether_type;
		ethertype_filter.etqf = etqf;
		ethertype_filter.etqs = etqs;
		ethertype_filter.conf = FALSE;
		ret = ixgbe_ethertype_filter_insert(filter_info,
						    &ethertype_filter);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "ethertype filters are full.");
			return -ENOSPC;
		}
	} else {
		ret = ixgbe_ethertype_filter_remove(filter_info,
(uint8_t)ret); 6813 if (ret < 0) 6814 return -ENOSYS; 6815 } 6816 IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); 6817 IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); 6818 IXGBE_WRITE_FLUSH(hw); 6819 6820 return 0; 6821 } 6822 6823 static int 6824 ixgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev, 6825 const struct rte_flow_ops **ops) 6826 { 6827 *ops = &ixgbe_flow_ops; 6828 return 0; 6829 } 6830 6831 static u8 * 6832 ixgbe_dev_addr_list_itr(__rte_unused struct ixgbe_hw *hw, 6833 u8 **mc_addr_ptr, u32 *vmdq) 6834 { 6835 u8 *mc_addr; 6836 6837 *vmdq = 0; 6838 mc_addr = *mc_addr_ptr; 6839 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); 6840 return mc_addr; 6841 } 6842 6843 static int 6844 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 6845 struct rte_ether_addr *mc_addr_set, 6846 uint32_t nb_mc_addr) 6847 { 6848 struct ixgbe_hw *hw; 6849 u8 *mc_addr_list; 6850 6851 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6852 mc_addr_list = (u8 *)mc_addr_set; 6853 return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, 6854 ixgbe_dev_addr_list_itr, TRUE); 6855 } 6856 6857 static uint64_t 6858 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) 6859 { 6860 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6861 uint64_t systime_cycles; 6862 6863 switch (hw->mac.type) { 6864 case ixgbe_mac_X550: 6865 case ixgbe_mac_X550EM_x: 6866 case ixgbe_mac_X550EM_a: 6867 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ 6868 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6869 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6870 * NSEC_PER_SEC; 6871 break; 6872 default: 6873 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6874 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6875 << 32; 6876 } 6877 6878 return systime_cycles; 6879 } 6880 6881 static uint64_t 6882 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6883 { 6884 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6885 uint64_t rx_tstamp_cycles; 6886 6887 switch (hw->mac.type) { 6888 case ixgbe_mac_X550: 6889 case ixgbe_mac_X550EM_x: 6890 case ixgbe_mac_X550EM_a: 6891 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6892 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6893 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6894 * NSEC_PER_SEC; 6895 break; 6896 default: 6897 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6898 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6899 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6900 << 32; 6901 } 6902 6903 return rx_tstamp_cycles; 6904 } 6905 6906 static uint64_t 6907 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6908 { 6909 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6910 uint64_t tx_tstamp_cycles; 6911 6912 switch (hw->mac.type) { 6913 case ixgbe_mac_X550: 6914 case ixgbe_mac_X550EM_x: 6915 case ixgbe_mac_X550EM_a: 6916 /* TXSTMPL stores ns and TXSTMPH stores seconds. */ 6917 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6918 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6919 * NSEC_PER_SEC; 6920 break; 6921 default: 6922 /* TXSTMPL stores ns and TXSTMPH stores seconds. 
*/ 6923 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6924 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6925 << 32; 6926 } 6927 6928 return tx_tstamp_cycles; 6929 } 6930 6931 static void 6932 ixgbe_start_timecounters(struct rte_eth_dev *dev) 6933 { 6934 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6935 struct ixgbe_adapter *adapter = dev->data->dev_private; 6936 struct rte_eth_link link; 6937 uint32_t incval = 0; 6938 uint32_t shift = 0; 6939 6940 /* Get current link speed. */ 6941 ixgbe_dev_link_update(dev, 1); 6942 rte_eth_linkstatus_get(dev, &link); 6943 6944 switch (link.link_speed) { 6945 case ETH_SPEED_NUM_100M: 6946 incval = IXGBE_INCVAL_100; 6947 shift = IXGBE_INCVAL_SHIFT_100; 6948 break; 6949 case ETH_SPEED_NUM_1G: 6950 incval = IXGBE_INCVAL_1GB; 6951 shift = IXGBE_INCVAL_SHIFT_1GB; 6952 break; 6953 case ETH_SPEED_NUM_10G: 6954 default: 6955 incval = IXGBE_INCVAL_10GB; 6956 shift = IXGBE_INCVAL_SHIFT_10GB; 6957 break; 6958 } 6959 6960 switch (hw->mac.type) { 6961 case ixgbe_mac_X550: 6962 case ixgbe_mac_X550EM_x: 6963 case ixgbe_mac_X550EM_a: 6964 /* Independent of link speed. */ 6965 incval = 1; 6966 /* Cycles read will be interpreted as ns. */ 6967 shift = 0; 6968 /* Fall-through */ 6969 case ixgbe_mac_X540: 6970 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); 6971 break; 6972 case ixgbe_mac_82599EB: 6973 incval >>= IXGBE_INCVAL_SHIFT_82599; 6974 shift -= IXGBE_INCVAL_SHIFT_82599; 6975 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 6976 (1 << IXGBE_INCPER_SHIFT_82599) | incval); 6977 break; 6978 default: 6979 /* Not supported. */ 6980 return; 6981 } 6982 6983 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 6984 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6985 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6986 6987 adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6988 adapter->systime_tc.cc_shift = shift; 6989 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 6990 6991 adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6992 adapter->rx_tstamp_tc.cc_shift = shift; 6993 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6994 6995 adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6996 adapter->tx_tstamp_tc.cc_shift = shift; 6997 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6998 } 6999 7000 static int 7001 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 7002 { 7003 struct ixgbe_adapter *adapter = dev->data->dev_private; 7004 7005 adapter->systime_tc.nsec += delta; 7006 adapter->rx_tstamp_tc.nsec += delta; 7007 adapter->tx_tstamp_tc.nsec += delta; 7008 7009 return 0; 7010 } 7011 7012 static int 7013 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 7014 { 7015 uint64_t ns; 7016 struct ixgbe_adapter *adapter = dev->data->dev_private; 7017 7018 ns = rte_timespec_to_ns(ts); 7019 /* Set the timecounters to a new value. 
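	 * Only the software timecounters are rewritten here; the SYSTIME
	 * registers keep running, and subsequent reads are adjusted against
	 * these offsets in ixgbe_timesync_read_time().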
*/
7020 adapter->systime_tc.nsec = ns;
7021 adapter->rx_tstamp_tc.nsec = ns;
7022 adapter->tx_tstamp_tc.nsec = ns;
7023
7024 return 0;
7025 }
7026
7027 static int
7028 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
7029 {
7030 uint64_t ns, systime_cycles;
7031 struct ixgbe_adapter *adapter = dev->data->dev_private;
7032
7033 systime_cycles = ixgbe_read_systime_cyclecounter(dev);
7034 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
7035 *ts = rte_ns_to_timespec(ns);
7036
7037 return 0;
7038 }
7039
7040 static int
7041 ixgbe_timesync_enable(struct rte_eth_dev *dev)
7042 {
7043 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7044 uint32_t tsync_ctl;
7045 uint32_t tsauxc;
7046
7047 /* Stop the timesync system time. */
7048 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
7049 /* Reset the timesync system time value. */
7050 IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
7051 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
7052
7053 /* Enable system time for platforms where it isn't on by default. */
7054 tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
7055 tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
7056 IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
7057
7058 ixgbe_start_timecounters(dev);
7059
7060 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
7061 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
7062 (RTE_ETHER_TYPE_1588 |
7063 IXGBE_ETQF_FILTER_EN |
7064 IXGBE_ETQF_1588));
7065
7066 /* Enable timestamping of received PTP packets. */
7067 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7068 tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
7069 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
7070
7071 /* Enable timestamping of transmitted PTP packets. */
7072 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7073 tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
7074 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
7075
7076 IXGBE_WRITE_FLUSH(hw);
7077
7078 return 0;
7079 }
7080
7081 static int
7082 ixgbe_timesync_disable(struct rte_eth_dev *dev)
7083 {
7084 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
7085 uint32_t tsync_ctl;
7086
7087 /* Disable timestamping of transmitted PTP packets. */
7088 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
7089 tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
7090 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
7091
7092 /* Disable timestamping of received PTP packets. */
7093 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
7094 tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
7095 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
7096
7097 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
7098 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
7099
7100 /* Stop incrementing the System Time registers.
*/ 7101 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); 7102 7103 return 0; 7104 } 7105 7106 static int 7107 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 7108 struct timespec *timestamp, 7109 uint32_t flags __rte_unused) 7110 { 7111 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7112 struct ixgbe_adapter *adapter = dev->data->dev_private; 7113 uint32_t tsync_rxctl; 7114 uint64_t rx_tstamp_cycles; 7115 uint64_t ns; 7116 7117 tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 7118 if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0) 7119 return -EINVAL; 7120 7121 rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev); 7122 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 7123 *timestamp = rte_ns_to_timespec(ns); 7124 7125 return 0; 7126 } 7127 7128 static int 7129 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 7130 struct timespec *timestamp) 7131 { 7132 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7133 struct ixgbe_adapter *adapter = dev->data->dev_private; 7134 uint32_t tsync_txctl; 7135 uint64_t tx_tstamp_cycles; 7136 uint64_t ns; 7137 7138 tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 7139 if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0) 7140 return -EINVAL; 7141 7142 tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev); 7143 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 7144 *timestamp = rte_ns_to_timespec(ns); 7145 7146 return 0; 7147 } 7148 7149 static int 7150 ixgbe_get_reg_length(struct rte_eth_dev *dev) 7151 { 7152 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7153 int count = 0; 7154 int g_ind = 0; 7155 const struct reg_info *reg_group; 7156 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 7157 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 7158 7159 while ((reg_group = reg_set[g_ind++])) 7160 count += ixgbe_regs_group_count(reg_group); 7161 7162 return count; 7163 } 7164 7165 static int 7166 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) 7167 { 7168 int count = 0; 7169 int g_ind = 0; 7170 const struct reg_info *reg_group; 7171 7172 while ((reg_group = ixgbevf_regs[g_ind++])) 7173 count += ixgbe_regs_group_count(reg_group); 7174 7175 return count; 7176 } 7177 7178 static int 7179 ixgbe_get_regs(struct rte_eth_dev *dev, 7180 struct rte_dev_reg_info *regs) 7181 { 7182 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7183 uint32_t *data = regs->data; 7184 int g_ind = 0; 7185 int count = 0; 7186 const struct reg_info *reg_group; 7187 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 
7188 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 7189 7190 if (data == NULL) { 7191 regs->length = ixgbe_get_reg_length(dev); 7192 regs->width = sizeof(uint32_t); 7193 return 0; 7194 } 7195 7196 /* Support only full register dump */ 7197 if ((regs->length == 0) || 7198 (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { 7199 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 7200 hw->device_id; 7201 while ((reg_group = reg_set[g_ind++])) 7202 count += ixgbe_read_regs_group(dev, &data[count], 7203 reg_group); 7204 return 0; 7205 } 7206 7207 return -ENOTSUP; 7208 } 7209 7210 static int 7211 ixgbevf_get_regs(struct rte_eth_dev *dev, 7212 struct rte_dev_reg_info *regs) 7213 { 7214 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7215 uint32_t *data = regs->data; 7216 int g_ind = 0; 7217 int count = 0; 7218 const struct reg_info *reg_group; 7219 7220 if (data == NULL) { 7221 regs->length = ixgbevf_get_reg_length(dev); 7222 regs->width = sizeof(uint32_t); 7223 return 0; 7224 } 7225 7226 /* Support only full register dump */ 7227 if ((regs->length == 0) || 7228 (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { 7229 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 7230 hw->device_id; 7231 while ((reg_group = ixgbevf_regs[g_ind++])) 7232 count += ixgbe_read_regs_group(dev, &data[count], 7233 reg_group); 7234 return 0; 7235 } 7236 7237 return -ENOTSUP; 7238 } 7239 7240 static int 7241 ixgbe_get_eeprom_length(struct rte_eth_dev *dev) 7242 { 7243 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7244 7245 /* Return unit is byte count */ 7246 return hw->eeprom.word_size * 2; 7247 } 7248 7249 static int 7250 ixgbe_get_eeprom(struct rte_eth_dev *dev, 7251 struct rte_dev_eeprom_info *in_eeprom) 7252 { 7253 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7254 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7255 uint16_t *data = in_eeprom->data; 7256 int first, length; 7257 7258 first = in_eeprom->offset >> 1; 7259 length = in_eeprom->length >> 1; 7260 if ((first > hw->eeprom.word_size) || 7261 ((first + length) > hw->eeprom.word_size)) 7262 return -EINVAL; 7263 7264 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7265 7266 return eeprom->ops.read_buffer(hw, first, length, data); 7267 } 7268 7269 static int 7270 ixgbe_set_eeprom(struct rte_eth_dev *dev, 7271 struct rte_dev_eeprom_info *in_eeprom) 7272 { 7273 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7274 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7275 uint16_t *data = in_eeprom->data; 7276 int first, length; 7277 7278 first = in_eeprom->offset >> 1; 7279 length = in_eeprom->length >> 1; 7280 if ((first > hw->eeprom.word_size) || 7281 ((first + length) > hw->eeprom.word_size)) 7282 return -EINVAL; 7283 7284 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7285 7286 return eeprom->ops.write_buffer(hw, first, length, data); 7287 } 7288 7289 static int 7290 ixgbe_get_module_info(struct rte_eth_dev *dev, 7291 struct rte_eth_dev_module_info *modinfo) 7292 { 7293 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7294 uint32_t status; 7295 uint8_t sff8472_rev, addr_mode; 7296 bool page_swap = false; 7297 7298 /* Check whether we support SFF-8472 or not */ 7299 status = hw->phy.ops.read_i2c_eeprom(hw, 7300 IXGBE_SFF_SFF_8472_COMP, 7301 &sff8472_rev); 7302 if (status != 0) 7303 return -EIO; 7304 7305 /* addressing mode is not supported */ 7306 status = hw->phy.ops.read_i2c_eeprom(hw, 7307 
IXGBE_SFF_SFF_8472_SWAP, 7308 &addr_mode); 7309 if (status != 0) 7310 return -EIO; 7311 7312 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { 7313 PMD_DRV_LOG(ERR, 7314 "Address change required to access page 0xA2, " 7315 "but not supported. Please report the module " 7316 "type to the driver maintainers."); 7317 page_swap = true; 7318 } 7319 7320 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { 7321 /* We have a SFP, but it does not support SFF-8472 */ 7322 modinfo->type = RTE_ETH_MODULE_SFF_8079; 7323 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; 7324 } else { 7325 /* We have a SFP which supports a revision of SFF-8472. */ 7326 modinfo->type = RTE_ETH_MODULE_SFF_8472; 7327 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 7328 } 7329 7330 return 0; 7331 } 7332 7333 static int 7334 ixgbe_get_module_eeprom(struct rte_eth_dev *dev, 7335 struct rte_dev_eeprom_info *info) 7336 { 7337 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7338 uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID; 7339 uint8_t databyte = 0xFF; 7340 uint8_t *data = info->data; 7341 uint32_t i = 0; 7342 7343 for (i = info->offset; i < info->offset + info->length; i++) { 7344 if (i < RTE_ETH_MODULE_SFF_8079_LEN) 7345 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); 7346 else 7347 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); 7348 7349 if (status != 0) 7350 return -EIO; 7351 7352 data[i - info->offset] = databyte; 7353 } 7354 7355 return 0; 7356 } 7357 7358 uint16_t 7359 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { 7360 switch (mac_type) { 7361 case ixgbe_mac_X550: 7362 case ixgbe_mac_X550EM_x: 7363 case ixgbe_mac_X550EM_a: 7364 return ETH_RSS_RETA_SIZE_512; 7365 case ixgbe_mac_X550_vf: 7366 case ixgbe_mac_X550EM_x_vf: 7367 case ixgbe_mac_X550EM_a_vf: 7368 return ETH_RSS_RETA_SIZE_64; 7369 case ixgbe_mac_X540_vf: 7370 case ixgbe_mac_82599_vf: 7371 return 0; 7372 default: 7373 return ETH_RSS_RETA_SIZE_128; 7374 } 7375 } 7376 7377 uint32_t 7378 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { 7379 switch (mac_type) { 7380 case ixgbe_mac_X550: 7381 case ixgbe_mac_X550EM_x: 7382 case ixgbe_mac_X550EM_a: 7383 if (reta_idx < ETH_RSS_RETA_SIZE_128) 7384 return IXGBE_RETA(reta_idx >> 2); 7385 else 7386 return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2); 7387 case ixgbe_mac_X550_vf: 7388 case ixgbe_mac_X550EM_x_vf: 7389 case ixgbe_mac_X550EM_a_vf: 7390 return IXGBE_VFRETA(reta_idx >> 2); 7391 default: 7392 return IXGBE_RETA(reta_idx >> 2); 7393 } 7394 } 7395 7396 uint32_t 7397 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { 7398 switch (mac_type) { 7399 case ixgbe_mac_X550_vf: 7400 case ixgbe_mac_X550EM_x_vf: 7401 case ixgbe_mac_X550EM_a_vf: 7402 return IXGBE_VFMRQC; 7403 default: 7404 return IXGBE_MRQC; 7405 } 7406 } 7407 7408 uint32_t 7409 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { 7410 switch (mac_type) { 7411 case ixgbe_mac_X550_vf: 7412 case ixgbe_mac_X550EM_x_vf: 7413 case ixgbe_mac_X550EM_a_vf: 7414 return IXGBE_VFRSSRK(i); 7415 default: 7416 return IXGBE_RSSRK(i); 7417 } 7418 } 7419 7420 bool 7421 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { 7422 switch (mac_type) { 7423 case ixgbe_mac_82599_vf: 7424 case ixgbe_mac_X540_vf: 7425 return 0; 7426 default: 7427 return 1; 7428 } 7429 } 7430 7431 static int 7432 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 7433 struct rte_eth_dcb_info *dcb_info) 7434 { 7435 struct ixgbe_dcb_config *dcb_config = 7436 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 7437 struct 
ixgbe_dcb_tc_config *tc; 7438 struct rte_eth_dcb_tc_queue_mapping *tc_queue; 7439 uint8_t nb_tcs; 7440 uint8_t i, j; 7441 7442 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) 7443 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; 7444 else 7445 dcb_info->nb_tcs = 1; 7446 7447 tc_queue = &dcb_info->tc_queue; 7448 nb_tcs = dcb_info->nb_tcs; 7449 7450 if (dcb_config->vt_mode) { /* vt is enabled*/ 7451 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 7452 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; 7453 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) 7454 dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; 7455 if (RTE_ETH_DEV_SRIOV(dev).active > 0) { 7456 for (j = 0; j < nb_tcs; j++) { 7457 tc_queue->tc_rxq[0][j].base = j; 7458 tc_queue->tc_rxq[0][j].nb_queue = 1; 7459 tc_queue->tc_txq[0][j].base = j; 7460 tc_queue->tc_txq[0][j].nb_queue = 1; 7461 } 7462 } else { 7463 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { 7464 for (j = 0; j < nb_tcs; j++) { 7465 tc_queue->tc_rxq[i][j].base = 7466 i * nb_tcs + j; 7467 tc_queue->tc_rxq[i][j].nb_queue = 1; 7468 tc_queue->tc_txq[i][j].base = 7469 i * nb_tcs + j; 7470 tc_queue->tc_txq[i][j].nb_queue = 1; 7471 } 7472 } 7473 } 7474 } else { /* vt is disabled*/ 7475 struct rte_eth_dcb_rx_conf *rx_conf = 7476 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 7477 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) 7478 dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; 7479 if (dcb_info->nb_tcs == ETH_4_TCS) { 7480 for (i = 0; i < dcb_info->nb_tcs; i++) { 7481 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; 7482 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 7483 } 7484 dcb_info->tc_queue.tc_txq[0][0].base = 0; 7485 dcb_info->tc_queue.tc_txq[0][1].base = 64; 7486 dcb_info->tc_queue.tc_txq[0][2].base = 96; 7487 dcb_info->tc_queue.tc_txq[0][3].base = 112; 7488 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; 7489 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 7490 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 7491 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 7492 } else if (dcb_info->nb_tcs == ETH_8_TCS) { 7493 for (i = 0; i < dcb_info->nb_tcs; i++) { 7494 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; 7495 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 7496 } 7497 dcb_info->tc_queue.tc_txq[0][0].base = 0; 7498 dcb_info->tc_queue.tc_txq[0][1].base = 32; 7499 dcb_info->tc_queue.tc_txq[0][2].base = 64; 7500 dcb_info->tc_queue.tc_txq[0][3].base = 80; 7501 dcb_info->tc_queue.tc_txq[0][4].base = 96; 7502 dcb_info->tc_queue.tc_txq[0][5].base = 104; 7503 dcb_info->tc_queue.tc_txq[0][6].base = 112; 7504 dcb_info->tc_queue.tc_txq[0][7].base = 120; 7505 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; 7506 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 7507 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 7508 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 7509 dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; 7510 dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; 7511 dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; 7512 dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; 7513 } 7514 } 7515 for (i = 0; i < dcb_info->nb_tcs; i++) { 7516 tc = &dcb_config->tc_config[i]; 7517 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent; 7518 } 7519 return 0; 7520 } 7521 7522 /* Update e-tag ether type */ 7523 static int 7524 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, 7525 uint16_t ether_type) 7526 { 7527 uint32_t etag_etype; 7528 7529 if (hw->mac.type != ixgbe_mac_X550 && 7530 hw->mac.type != ixgbe_mac_X550EM_x && 7531 hw->mac.type != ixgbe_mac_X550EM_a) { 7532 return -ENOTSUP; 7533 } 
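/*
 * Read-modify-write of IXGBE_ETAG_ETYPE: clear only the 16-bit EtherType
 * field and program the new value, leaving the VALID bit and the other
 * bits of the register untouched.
 */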
7534 7535 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7536 etag_etype &= ~IXGBE_ETAG_ETYPE_MASK; 7537 etag_etype |= ether_type; 7538 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7539 IXGBE_WRITE_FLUSH(hw); 7540 7541 return 0; 7542 } 7543 7544 /* Enable e-tag tunnel */ 7545 static int 7546 ixgbe_e_tag_enable(struct ixgbe_hw *hw) 7547 { 7548 uint32_t etag_etype; 7549 7550 if (hw->mac.type != ixgbe_mac_X550 && 7551 hw->mac.type != ixgbe_mac_X550EM_x && 7552 hw->mac.type != ixgbe_mac_X550EM_a) { 7553 return -ENOTSUP; 7554 } 7555 7556 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7557 etag_etype |= IXGBE_ETAG_ETYPE_VALID; 7558 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7559 IXGBE_WRITE_FLUSH(hw); 7560 7561 return 0; 7562 } 7563 7564 static int 7565 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, 7566 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7567 { 7568 int ret = 0; 7569 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7570 uint32_t i, rar_entries; 7571 uint32_t rar_low, rar_high; 7572 7573 if (hw->mac.type != ixgbe_mac_X550 && 7574 hw->mac.type != ixgbe_mac_X550EM_x && 7575 hw->mac.type != ixgbe_mac_X550EM_a) { 7576 return -ENOTSUP; 7577 } 7578 7579 rar_entries = ixgbe_get_num_rx_addrs(hw); 7580 7581 for (i = 1; i < rar_entries; i++) { 7582 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 7583 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 7584 if ((rar_high & IXGBE_RAH_AV) && 7585 (rar_high & IXGBE_RAH_ADTYPE) && 7586 ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) == 7587 l2_tunnel->tunnel_id)) { 7588 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 7589 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 7590 7591 ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL); 7592 7593 return ret; 7594 } 7595 } 7596 7597 return ret; 7598 } 7599 7600 static int 7601 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, 7602 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7603 { 7604 int ret = 0; 7605 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7606 uint32_t i, rar_entries; 7607 uint32_t rar_low, rar_high; 7608 7609 if (hw->mac.type != ixgbe_mac_X550 && 7610 hw->mac.type != ixgbe_mac_X550EM_x && 7611 hw->mac.type != ixgbe_mac_X550EM_a) { 7612 return -ENOTSUP; 7613 } 7614 7615 /* One entry for one tunnel. Try to remove potential existing entry. */ 7616 ixgbe_e_tag_filter_del(dev, l2_tunnel); 7617 7618 rar_entries = ixgbe_get_num_rx_addrs(hw); 7619 7620 for (i = 1; i < rar_entries; i++) { 7621 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 7622 if (rar_high & IXGBE_RAH_AV) { 7623 continue; 7624 } else { 7625 ixgbe_set_vmdq(hw, i, l2_tunnel->pool); 7626 rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; 7627 rar_low = l2_tunnel->tunnel_id; 7628 7629 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); 7630 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); 7631 7632 return ret; 7633 } 7634 } 7635 7636 PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." 
7637 " Please remove a rule before adding a new one."); 7638 return -EINVAL; 7639 } 7640 7641 static inline struct ixgbe_l2_tn_filter * 7642 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info, 7643 struct ixgbe_l2_tn_key *key) 7644 { 7645 int ret; 7646 7647 ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); 7648 if (ret < 0) 7649 return NULL; 7650 7651 return l2_tn_info->hash_map[ret]; 7652 } 7653 7654 static inline int 7655 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7656 struct ixgbe_l2_tn_filter *l2_tn_filter) 7657 { 7658 int ret; 7659 7660 ret = rte_hash_add_key(l2_tn_info->hash_handle, 7661 &l2_tn_filter->key); 7662 7663 if (ret < 0) { 7664 PMD_DRV_LOG(ERR, 7665 "Failed to insert L2 tunnel filter" 7666 " to hash table %d!", 7667 ret); 7668 return ret; 7669 } 7670 7671 l2_tn_info->hash_map[ret] = l2_tn_filter; 7672 7673 TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 7674 7675 return 0; 7676 } 7677 7678 static inline int 7679 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7680 struct ixgbe_l2_tn_key *key) 7681 { 7682 int ret; 7683 struct ixgbe_l2_tn_filter *l2_tn_filter; 7684 7685 ret = rte_hash_del_key(l2_tn_info->hash_handle, key); 7686 7687 if (ret < 0) { 7688 PMD_DRV_LOG(ERR, 7689 "No such L2 tunnel filter to delete %d!", 7690 ret); 7691 return ret; 7692 } 7693 7694 l2_tn_filter = l2_tn_info->hash_map[ret]; 7695 l2_tn_info->hash_map[ret] = NULL; 7696 7697 TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 7698 rte_free(l2_tn_filter); 7699 7700 return 0; 7701 } 7702 7703 /* Add l2 tunnel filter */ 7704 int 7705 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, 7706 struct ixgbe_l2_tunnel_conf *l2_tunnel, 7707 bool restore) 7708 { 7709 int ret; 7710 struct ixgbe_l2_tn_info *l2_tn_info = 7711 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7712 struct ixgbe_l2_tn_key key; 7713 struct ixgbe_l2_tn_filter *node; 7714 7715 if (!restore) { 7716 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 7717 key.tn_id = l2_tunnel->tunnel_id; 7718 7719 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key); 7720 7721 if (node) { 7722 PMD_DRV_LOG(ERR, 7723 "The L2 tunnel filter already exists!"); 7724 return -EINVAL; 7725 } 7726 7727 node = rte_zmalloc("ixgbe_l2_tn", 7728 sizeof(struct ixgbe_l2_tn_filter), 7729 0); 7730 if (!node) 7731 return -ENOMEM; 7732 7733 rte_memcpy(&node->key, 7734 &key, 7735 sizeof(struct ixgbe_l2_tn_key)); 7736 node->pool = l2_tunnel->pool; 7737 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node); 7738 if (ret < 0) { 7739 rte_free(node); 7740 return ret; 7741 } 7742 } 7743 7744 switch (l2_tunnel->l2_tunnel_type) { 7745 case RTE_L2_TUNNEL_TYPE_E_TAG: 7746 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); 7747 break; 7748 default: 7749 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7750 ret = -EINVAL; 7751 break; 7752 } 7753 7754 if ((!restore) && (ret < 0)) 7755 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 7756 7757 return ret; 7758 } 7759 7760 /* Delete l2 tunnel filter */ 7761 int 7762 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, 7763 struct ixgbe_l2_tunnel_conf *l2_tunnel) 7764 { 7765 int ret; 7766 struct ixgbe_l2_tn_info *l2_tn_info = 7767 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7768 struct ixgbe_l2_tn_key key; 7769 7770 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 7771 key.tn_id = l2_tunnel->tunnel_id; 7772 ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 7773 if (ret < 0) 7774 return ret; 7775 7776 switch (l2_tunnel->l2_tunnel_type) { 7777 case 
RTE_L2_TUNNEL_TYPE_E_TAG: 7778 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel); 7779 break; 7780 default: 7781 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7782 ret = -EINVAL; 7783 break; 7784 } 7785 7786 return ret; 7787 } 7788 7789 static int 7790 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) 7791 { 7792 int ret = 0; 7793 uint32_t ctrl; 7794 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7795 7796 if (hw->mac.type != ixgbe_mac_X550 && 7797 hw->mac.type != ixgbe_mac_X550EM_x && 7798 hw->mac.type != ixgbe_mac_X550EM_a) { 7799 return -ENOTSUP; 7800 } 7801 7802 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 7803 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 7804 if (en) 7805 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG; 7806 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 7807 7808 return ret; 7809 } 7810 7811 static int 7812 ixgbe_update_vxlan_port(struct ixgbe_hw *hw, 7813 uint16_t port) 7814 { 7815 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port); 7816 IXGBE_WRITE_FLUSH(hw); 7817 7818 return 0; 7819 } 7820 7821 /* There's only one register for VxLAN UDP port. 7822 * So, we cannot add several ports. Will update it. 7823 */ 7824 static int 7825 ixgbe_add_vxlan_port(struct ixgbe_hw *hw, 7826 uint16_t port) 7827 { 7828 if (port == 0) { 7829 PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); 7830 return -EINVAL; 7831 } 7832 7833 return ixgbe_update_vxlan_port(hw, port); 7834 } 7835 7836 /* We cannot delete the VxLAN port. For there's a register for VxLAN 7837 * UDP port, it must have a value. 7838 * So, will reset it to the original value 0. 7839 */ 7840 static int 7841 ixgbe_del_vxlan_port(struct ixgbe_hw *hw, 7842 uint16_t port) 7843 { 7844 uint16_t cur_port; 7845 7846 cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL); 7847 7848 if (cur_port != port) { 7849 PMD_DRV_LOG(ERR, "Port %u does not exist.", port); 7850 return -EINVAL; 7851 } 7852 7853 return ixgbe_update_vxlan_port(hw, 0); 7854 } 7855 7856 /* Add UDP tunneling port */ 7857 static int 7858 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 7859 struct rte_eth_udp_tunnel *udp_tunnel) 7860 { 7861 int ret = 0; 7862 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7863 7864 if (hw->mac.type != ixgbe_mac_X550 && 7865 hw->mac.type != ixgbe_mac_X550EM_x && 7866 hw->mac.type != ixgbe_mac_X550EM_a) { 7867 return -ENOTSUP; 7868 } 7869 7870 if (udp_tunnel == NULL) 7871 return -EINVAL; 7872 7873 switch (udp_tunnel->prot_type) { 7874 case RTE_TUNNEL_TYPE_VXLAN: 7875 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port); 7876 break; 7877 7878 case RTE_TUNNEL_TYPE_GENEVE: 7879 case RTE_TUNNEL_TYPE_TEREDO: 7880 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 7881 ret = -EINVAL; 7882 break; 7883 7884 default: 7885 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7886 ret = -EINVAL; 7887 break; 7888 } 7889 7890 return ret; 7891 } 7892 7893 /* Remove UDP tunneling port */ 7894 static int 7895 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 7896 struct rte_eth_udp_tunnel *udp_tunnel) 7897 { 7898 int ret = 0; 7899 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7900 7901 if (hw->mac.type != ixgbe_mac_X550 && 7902 hw->mac.type != ixgbe_mac_X550EM_x && 7903 hw->mac.type != ixgbe_mac_X550EM_a) { 7904 return -ENOTSUP; 7905 } 7906 7907 if (udp_tunnel == NULL) 7908 return -EINVAL; 7909 7910 switch (udp_tunnel->prot_type) { 7911 case RTE_TUNNEL_TYPE_VXLAN: 7912 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port); 7913 break; 7914 case RTE_TUNNEL_TYPE_GENEVE: 7915 case RTE_TUNNEL_TYPE_TEREDO: 
7916 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 7917 ret = -EINVAL; 7918 break; 7919 default: 7920 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7921 ret = -EINVAL; 7922 break; 7923 } 7924 7925 return ret; 7926 } 7927 7928 static int 7929 ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev) 7930 { 7931 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7932 int ret; 7933 7934 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC)) { 7935 case IXGBE_SUCCESS: 7936 ret = 0; 7937 break; 7938 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7939 ret = -ENOTSUP; 7940 break; 7941 default: 7942 ret = -EAGAIN; 7943 break; 7944 } 7945 7946 return ret; 7947 } 7948 7949 static int 7950 ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev) 7951 { 7952 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7953 int ret; 7954 7955 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE)) { 7956 case IXGBE_SUCCESS: 7957 ret = 0; 7958 break; 7959 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7960 ret = -ENOTSUP; 7961 break; 7962 default: 7963 ret = -EAGAIN; 7964 break; 7965 } 7966 7967 return ret; 7968 } 7969 7970 static int 7971 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) 7972 { 7973 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7974 int ret; 7975 int mode = IXGBEVF_XCAST_MODE_ALLMULTI; 7976 7977 switch (hw->mac.ops.update_xcast_mode(hw, mode)) { 7978 case IXGBE_SUCCESS: 7979 ret = 0; 7980 break; 7981 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 7982 ret = -ENOTSUP; 7983 break; 7984 default: 7985 ret = -EAGAIN; 7986 break; 7987 } 7988 7989 return ret; 7990 } 7991 7992 static int 7993 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) 7994 { 7995 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7996 int ret; 7997 7998 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) { 7999 case IXGBE_SUCCESS: 8000 ret = 0; 8001 break; 8002 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 8003 ret = -ENOTSUP; 8004 break; 8005 default: 8006 ret = -EAGAIN; 8007 break; 8008 } 8009 8010 return ret; 8011 } 8012 8013 static void ixgbevf_mbx_process(struct rte_eth_dev *dev) 8014 { 8015 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8016 u32 in_msg = 0; 8017 8018 /* peek the message first */ 8019 in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM); 8020 8021 /* PF reset VF event */ 8022 if (in_msg == IXGBE_PF_CONTROL_MSG) { 8023 /* dummy mbx read to ack pf */ 8024 if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) 8025 return; 8026 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 8027 NULL); 8028 } 8029 } 8030 8031 static int 8032 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) 8033 { 8034 uint32_t eicr; 8035 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8036 struct ixgbe_interrupt *intr = 8037 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 8038 ixgbevf_intr_disable(dev); 8039 8040 /* read-on-clear nic registers here */ 8041 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); 8042 intr->flags = 0; 8043 8044 /* only one misc vector supported - mailbox */ 8045 eicr &= IXGBE_VTEICR_MASK; 8046 if (eicr == IXGBE_MISC_VEC_ID) 8047 intr->flags |= IXGBE_FLAG_MAILBOX; 8048 8049 return 0; 8050 } 8051 8052 static int 8053 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) 8054 { 8055 struct ixgbe_interrupt *intr = 8056 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 8057 8058 if (intr->flags & IXGBE_FLAG_MAILBOX) { 8059 ixgbevf_mbx_process(dev); 8060 intr->flags &= 
~IXGBE_FLAG_MAILBOX; 8061 } 8062 8063 ixgbevf_intr_enable(dev); 8064 8065 return 0; 8066 } 8067 8068 static void 8069 ixgbevf_dev_interrupt_handler(void *param) 8070 { 8071 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 8072 8073 ixgbevf_dev_interrupt_get_status(dev); 8074 ixgbevf_dev_interrupt_action(dev); 8075 } 8076 8077 /** 8078 * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path 8079 * @hw: pointer to hardware structure 8080 * 8081 * Stops the transmit data path and waits for the HW to internally empty 8082 * the Tx security block 8083 **/ 8084 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw) 8085 { 8086 #define IXGBE_MAX_SECTX_POLL 40 8087 8088 int i; 8089 int sectxreg; 8090 8091 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8092 sectxreg |= IXGBE_SECTXCTRL_TX_DIS; 8093 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); 8094 for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) { 8095 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); 8096 if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY) 8097 break; 8098 /* Use interrupt-safe sleep just in case */ 8099 usec_delay(1000); 8100 } 8101 8102 /* For informational purposes only */ 8103 if (i >= IXGBE_MAX_SECTX_POLL) 8104 PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security " 8105 "path fully disabled. Continuing with init."); 8106 8107 return IXGBE_SUCCESS; 8108 } 8109 8110 /** 8111 * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path 8112 * @hw: pointer to hardware structure 8113 * 8114 * Enables the transmit data path. 8115 **/ 8116 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw) 8117 { 8118 uint32_t sectxreg; 8119 8120 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8121 sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS; 8122 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); 8123 IXGBE_WRITE_FLUSH(hw); 8124 8125 return IXGBE_SUCCESS; 8126 } 8127 8128 /* restore n-tuple filter */ 8129 static inline void 8130 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev) 8131 { 8132 struct ixgbe_filter_info *filter_info = 8133 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8134 struct ixgbe_5tuple_filter *node; 8135 8136 TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) { 8137 ixgbe_inject_5tuple_filter(dev, node); 8138 } 8139 } 8140 8141 /* restore ethernet type filter */ 8142 static inline void 8143 ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev) 8144 { 8145 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8146 struct ixgbe_filter_info *filter_info = 8147 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8148 int i; 8149 8150 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 8151 if (filter_info->ethertype_mask & (1 << i)) { 8152 IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 8153 filter_info->ethertype_filters[i].etqf); 8154 IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 8155 filter_info->ethertype_filters[i].etqs); 8156 IXGBE_WRITE_FLUSH(hw); 8157 } 8158 } 8159 } 8160 8161 /* restore SYN filter */ 8162 static inline void 8163 ixgbe_syn_filter_restore(struct rte_eth_dev *dev) 8164 { 8165 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8166 struct ixgbe_filter_info *filter_info = 8167 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8168 uint32_t synqf; 8169 8170 synqf = filter_info->syn_info; 8171 8172 if (synqf & IXGBE_SYN_FILTER_ENABLE) { 8173 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 8174 IXGBE_WRITE_FLUSH(hw); 8175 } 8176 } 8177 8178 /* restore L2 tunnel filter */ 8179 static inline void 8180 ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev) 8181 { 8182 
struct ixgbe_l2_tn_info *l2_tn_info = 8183 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8184 struct ixgbe_l2_tn_filter *node; 8185 struct ixgbe_l2_tunnel_conf l2_tn_conf; 8186 8187 TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) { 8188 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type; 8189 l2_tn_conf.tunnel_id = node->key.tn_id; 8190 l2_tn_conf.pool = node->pool; 8191 (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE); 8192 } 8193 } 8194 8195 /* restore rss filter */ 8196 static inline void 8197 ixgbe_rss_filter_restore(struct rte_eth_dev *dev) 8198 { 8199 struct ixgbe_filter_info *filter_info = 8200 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8201 8202 if (filter_info->rss_info.conf.queue_num) 8203 ixgbe_config_rss_filter(dev, 8204 &filter_info->rss_info, TRUE); 8205 } 8206 8207 static int 8208 ixgbe_filter_restore(struct rte_eth_dev *dev) 8209 { 8210 ixgbe_ntuple_filter_restore(dev); 8211 ixgbe_ethertype_filter_restore(dev); 8212 ixgbe_syn_filter_restore(dev); 8213 ixgbe_fdir_filter_restore(dev); 8214 ixgbe_l2_tn_filter_restore(dev); 8215 ixgbe_rss_filter_restore(dev); 8216 8217 return 0; 8218 } 8219 8220 static void 8221 ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev) 8222 { 8223 struct ixgbe_l2_tn_info *l2_tn_info = 8224 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8225 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8226 8227 if (l2_tn_info->e_tag_en) 8228 (void)ixgbe_e_tag_enable(hw); 8229 8230 if (l2_tn_info->e_tag_fwd_en) 8231 (void)ixgbe_e_tag_forwarding_en_dis(dev, 1); 8232 8233 (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type); 8234 } 8235 8236 /* remove all the n-tuple filters */ 8237 void 8238 ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev) 8239 { 8240 struct ixgbe_filter_info *filter_info = 8241 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8242 struct ixgbe_5tuple_filter *p_5tuple; 8243 8244 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) 8245 ixgbe_remove_5tuple_filter(dev, p_5tuple); 8246 } 8247 8248 /* remove all the ether type filters */ 8249 void 8250 ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev) 8251 { 8252 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8253 struct ixgbe_filter_info *filter_info = 8254 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8255 int i; 8256 8257 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 8258 if (filter_info->ethertype_mask & (1 << i) && 8259 !filter_info->ethertype_filters[i].conf) { 8260 (void)ixgbe_ethertype_filter_remove(filter_info, 8261 (uint8_t)i); 8262 IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0); 8263 IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0); 8264 IXGBE_WRITE_FLUSH(hw); 8265 } 8266 } 8267 } 8268 8269 /* remove the SYN filter */ 8270 void 8271 ixgbe_clear_syn_filter(struct rte_eth_dev *dev) 8272 { 8273 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8274 struct ixgbe_filter_info *filter_info = 8275 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8276 8277 if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) { 8278 filter_info->syn_info = 0; 8279 8280 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0); 8281 IXGBE_WRITE_FLUSH(hw); 8282 } 8283 } 8284 8285 /* remove all the L2 tunnel filters */ 8286 int 8287 ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev) 8288 { 8289 struct ixgbe_l2_tn_info *l2_tn_info = 8290 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8291 struct ixgbe_l2_tn_filter *l2_tn_filter; 8292 struct ixgbe_l2_tunnel_conf 
l2_tn_conf; 8293 int ret = 0; 8294 8295 while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { 8296 l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type; 8297 l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id; 8298 l2_tn_conf.pool = l2_tn_filter->pool; 8299 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf); 8300 if (ret < 0) 8301 return ret; 8302 } 8303 8304 return 0; 8305 } 8306 8307 void 8308 ixgbe_dev_macsec_setting_save(struct rte_eth_dev *dev, 8309 struct ixgbe_macsec_setting *macsec_setting) 8310 { 8311 struct ixgbe_macsec_setting *macsec = 8312 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 8313 8314 macsec->offload_en = macsec_setting->offload_en; 8315 macsec->encrypt_en = macsec_setting->encrypt_en; 8316 macsec->replayprotect_en = macsec_setting->replayprotect_en; 8317 } 8318 8319 void 8320 ixgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev) 8321 { 8322 struct ixgbe_macsec_setting *macsec = 8323 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 8324 8325 macsec->offload_en = 0; 8326 macsec->encrypt_en = 0; 8327 macsec->replayprotect_en = 0; 8328 } 8329 8330 void 8331 ixgbe_dev_macsec_register_enable(struct rte_eth_dev *dev, 8332 struct ixgbe_macsec_setting *macsec_setting) 8333 { 8334 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8335 uint32_t ctrl; 8336 uint8_t en = macsec_setting->encrypt_en; 8337 uint8_t rp = macsec_setting->replayprotect_en; 8338 8339 /** 8340 * Workaround: 8341 * As no ixgbe_disable_sec_rx_path equivalent is 8342 * implemented for tx in the base code, and we are 8343 * not allowed to modify the base code in DPDK, so 8344 * just call the hand-written one directly for now. 8345 * The hardware support has been checked by 8346 * ixgbe_disable_sec_rx_path(). 8347 */ 8348 ixgbe_disable_sec_tx_path_generic(hw); 8349 8350 /* Enable Ethernet CRC (required by MACsec offload) */ 8351 ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0); 8352 ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP; 8353 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl); 8354 8355 /* Enable the TX and RX crypto engines */ 8356 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8357 ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS; 8358 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl); 8359 8360 ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 8361 ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS; 8362 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl); 8363 8364 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); 8365 ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK; 8366 ctrl |= 0x3; 8367 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl); 8368 8369 /* Enable SA lookup */ 8370 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL); 8371 ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK; 8372 ctrl |= en ? 
IXGBE_LSECTXCTRL_AUTH_ENCRYPT : 8373 IXGBE_LSECTXCTRL_AUTH; 8374 ctrl |= IXGBE_LSECTXCTRL_AISCI; 8375 ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK; 8376 ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK; 8377 IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl); 8378 8379 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL); 8380 ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK; 8381 ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT; 8382 ctrl &= ~IXGBE_LSECRXCTRL_PLSH; 8383 if (rp) 8384 ctrl |= IXGBE_LSECRXCTRL_RP; 8385 else 8386 ctrl &= ~IXGBE_LSECRXCTRL_RP; 8387 IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl); 8388 8389 /* Start the data paths */ 8390 ixgbe_enable_sec_rx_path(hw); 8391 /** 8392 * Workaround: 8393 * As no ixgbe_enable_sec_rx_path equivalent is 8394 * implemented for tx in the base code, and we are 8395 * not allowed to modify the base code in DPDK, so 8396 * just call the hand-written one directly for now. 8397 */ 8398 ixgbe_enable_sec_tx_path_generic(hw); 8399 } 8400 8401 void 8402 ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev) 8403 { 8404 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8405 uint32_t ctrl; 8406 8407 /** 8408 * Workaround: 8409 * As no ixgbe_disable_sec_rx_path equivalent is 8410 * implemented for tx in the base code, and we are 8411 * not allowed to modify the base code in DPDK, so 8412 * just call the hand-written one directly for now. 8413 * The hardware support has been checked by 8414 * ixgbe_disable_sec_rx_path(). 8415 */ 8416 ixgbe_disable_sec_tx_path_generic(hw); 8417 8418 /* Disable the TX and RX crypto engines */ 8419 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8420 ctrl |= IXGBE_SECTXCTRL_SECTX_DIS; 8421 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl); 8422 8423 ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 8424 ctrl |= IXGBE_SECRXCTRL_SECRX_DIS; 8425 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl); 8426 8427 /* Disable SA lookup */ 8428 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL); 8429 ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK; 8430 ctrl |= IXGBE_LSECTXCTRL_DISABLE; 8431 IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl); 8432 8433 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL); 8434 ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK; 8435 ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT; 8436 IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl); 8437 8438 /* Start the data paths */ 8439 ixgbe_enable_sec_rx_path(hw); 8440 /** 8441 * Workaround: 8442 * As no ixgbe_enable_sec_rx_path equivalent is 8443 * implemented for tx in the base code, and we are 8444 * not allowed to modify the base code in DPDK, so 8445 * just call the hand-written one directly for now. 8446 */ 8447 ixgbe_enable_sec_tx_path_generic(hw); 8448 } 8449 8450 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd); 8451 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map); 8452 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci"); 8453 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd); 8454 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map); 8455 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci"); 8456 RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf, 8457 IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>"); 8458 8459 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_init, init, NOTICE); 8460 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_driver, driver, NOTICE); 8461 8462 #ifdef RTE_ETHDEV_DEBUG_RX 8463 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_rx, rx, DEBUG); 8464 #endif 8465 #ifdef RTE_ETHDEV_DEBUG_TX 8466 RTE_LOG_REGISTER_SUFFIX(ixgbe_logtype_tx, tx, DEBUG); 8467 #endif 8468
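/*
 * Usage sketch (not part of the driver): the IEEE 1588 callbacks above --
 * ixgbe_timesync_enable(), ixgbe_timesync_read_rx_timestamp(),
 * ixgbe_timesync_adjust_time(), etc. -- are reached through the generic
 * ethdev API.  The hypothetical helper below assumes a port `port_id`
 * that is already configured and started, and that PTP frames are being
 * sent and received elsewhere in the application.
 */
#if 0
#include <stdio.h>
#include <time.h>

#include <rte_ethdev.h>

static void
example_ptp_poll(uint16_t port_id)
{
	struct timespec rx_ts, tx_ts, now;

	/* Programs TIMINCA/SYSTIM and the 1588 EtherType filter (see above). */
	if (rte_eth_timesync_enable(port_id) != 0)
		return;

	/* Returns 0 only when TSYNCRXCTL reports a valid latched timestamp. */
	if (rte_eth_timesync_read_rx_timestamp(port_id, &rx_ts, 0) == 0)
		printf("rx ts %ld.%09ld\n", (long)rx_ts.tv_sec, rx_ts.tv_nsec);

	/* Same for the Tx side (TSYNCTXCTL_VALID). */
	if (rte_eth_timesync_read_tx_timestamp(port_id, &tx_ts) == 0)
		printf("tx ts %ld.%09ld\n", (long)tx_ts.tv_sec, tx_ts.tv_nsec);

	/* Read the device clock and step it forward by 1 us. */
	if (rte_eth_timesync_read_time(port_id, &now) == 0)
		rte_eth_timesync_adjust_time(port_id, 1000);

	rte_eth_timesync_disable(port_id);
}
#endif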
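/*
 * Usage sketch for the diagnostic callbacks (ixgbe_get_regs(),
 * ixgbe_get_module_info()/ixgbe_get_module_eeprom()).  A hypothetical
 * helper assuming only a valid `port_id`: the register dump is requested
 * twice, first with data == NULL to learn the required size, then again
 * for the full dump (partial dumps return -ENOTSUP above).
 */
#if 0
#include <errno.h>
#include <stdlib.h>

#include <rte_ethdev.h>

static int
example_dump_diagnostics(uint16_t port_id)
{
	struct rte_dev_reg_info reg_info = { 0 };
	struct rte_eth_dev_module_info mod_info;
	struct rte_dev_eeprom_info mod_eeprom = { 0 };
	uint8_t eeprom_buf[512];	/* >= RTE_ETH_MODULE_SFF_8472_LEN */
	int ret;

	/* First pass only fills in reg_info.length and reg_info.width. */
	ret = rte_eth_dev_get_reg_info(port_id, &reg_info);
	if (ret != 0)
		return ret;

	reg_info.data = calloc(reg_info.length, reg_info.width);
	if (reg_info.data == NULL)
		return -ENOMEM;

	/* Second pass performs the full register dump. */
	ret = rte_eth_dev_get_reg_info(port_id, &reg_info);

	/* SFP/SFP+ module EEPROM: query the layout first, then read it. */
	if (ret == 0 && rte_eth_dev_get_module_info(port_id, &mod_info) == 0) {
		mod_eeprom.offset = 0;
		mod_eeprom.length = mod_info.eeprom_len;
		mod_eeprom.data = eeprom_buf;
		ret = rte_eth_dev_get_module_eeprom(port_id, &mod_eeprom);
	}

	free(reg_info.data);
	return ret;
}
#endif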
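/*
 * Usage sketch for the VxLAN UDP port callbacks above.  Because the
 * hardware exposes a single VXLANCTRL register, only one UDP port can be
 * programmed at a time; a hypothetical reconfiguration helper therefore
 * deletes the old port (resetting the register to 0) before adding the
 * new one.  `old_port` and `new_port` are illustrative parameters.
 */
#if 0
#include <errno.h>

#include <rte_ethdev.h>

static int
example_replace_vxlan_port(uint16_t port_id, uint16_t old_port,
			   uint16_t new_port)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = old_port,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};
	int ret;

	/* Ignore -EINVAL: old_port may simply not be programmed. */
	ret = rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
	if (ret != 0 && ret != -EINVAL)
		return ret;

	tunnel.udp_port = new_port;
	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
#endif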