1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2017 Intel Corporation 3 */ 4 5 #include <sys/queue.h> 6 #include <stdio.h> 7 #include <errno.h> 8 #include <stdint.h> 9 #include <string.h> 10 #include <unistd.h> 11 #include <stdarg.h> 12 #include <inttypes.h> 13 #include <netinet/in.h> 14 #include <rte_byteorder.h> 15 #include <rte_common.h> 16 #include <rte_cycles.h> 17 18 #include <rte_interrupts.h> 19 #include <rte_log.h> 20 #include <rte_debug.h> 21 #include <rte_pci.h> 22 #include <rte_bus_pci.h> 23 #include <rte_branch_prediction.h> 24 #include <rte_memory.h> 25 #include <rte_eal.h> 26 #include <rte_alarm.h> 27 #include <rte_ether.h> 28 #include <rte_ethdev_driver.h> 29 #include <rte_ethdev_pci.h> 30 #include <rte_malloc.h> 31 #include <rte_random.h> 32 #include <rte_dev.h> 33 #include <rte_hash_crc.h> 34 #ifdef RTE_LIBRTE_SECURITY 35 #include <rte_security_driver.h> 36 #endif 37 38 #include "ixgbe_logs.h" 39 #include "base/ixgbe_api.h" 40 #include "base/ixgbe_vf.h" 41 #include "base/ixgbe_common.h" 42 #include "ixgbe_ethdev.h" 43 #include "ixgbe_bypass.h" 44 #include "ixgbe_rxtx.h" 45 #include "base/ixgbe_type.h" 46 #include "base/ixgbe_phy.h" 47 #include "ixgbe_regs.h" 48 49 /* 50 * High threshold controlling when to start sending XOFF frames. Must be at 51 * least 8 bytes less than receive packet buffer size. This value is in units 52 * of 1024 bytes. 53 */ 54 #define IXGBE_FC_HI 0x80 55 56 /* 57 * Low threshold controlling when to start sending XON frames. This value is 58 * in units of 1024 bytes. 59 */ 60 #define IXGBE_FC_LO 0x40 61 62 /* Timer value included in XOFF frames. */ 63 #define IXGBE_FC_PAUSE 0x680 64 65 /*Default value of Max Rx Queue*/ 66 #define IXGBE_MAX_RX_QUEUE_NUM 128 67 68 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */ 69 #define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */ 70 #define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */ 71 72 #define IXGBE_MMW_SIZE_DEFAULT 0x4 73 #define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14 74 #define IXGBE_MAX_RING_DESC 4096 /* replicate define from rxtx */ 75 76 /* 77 * Default values for RX/TX configuration 78 */ 79 #define IXGBE_DEFAULT_RX_FREE_THRESH 32 80 #define IXGBE_DEFAULT_RX_PTHRESH 8 81 #define IXGBE_DEFAULT_RX_HTHRESH 8 82 #define IXGBE_DEFAULT_RX_WTHRESH 0 83 84 #define IXGBE_DEFAULT_TX_FREE_THRESH 32 85 #define IXGBE_DEFAULT_TX_PTHRESH 32 86 #define IXGBE_DEFAULT_TX_HTHRESH 0 87 #define IXGBE_DEFAULT_TX_WTHRESH 0 88 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32 89 90 /* Bit shift and mask */ 91 #define IXGBE_4_BIT_WIDTH (CHAR_BIT / 2) 92 #define IXGBE_4_BIT_MASK RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t) 93 #define IXGBE_8_BIT_WIDTH CHAR_BIT 94 #define IXGBE_8_BIT_MASK UINT8_MAX 95 96 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */ 97 98 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0])) 99 100 /* Additional timesync values. */ 101 #define NSEC_PER_SEC 1000000000L 102 #define IXGBE_INCVAL_10GB 0x66666666 103 #define IXGBE_INCVAL_1GB 0x40000000 104 #define IXGBE_INCVAL_100 0x50000000 105 #define IXGBE_INCVAL_SHIFT_10GB 28 106 #define IXGBE_INCVAL_SHIFT_1GB 24 107 #define IXGBE_INCVAL_SHIFT_100 21 108 #define IXGBE_INCVAL_SHIFT_82599 7 109 #define IXGBE_INCPER_SHIFT_82599 24 110 111 #define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL 112 113 #define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000 114 #define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000 115 #define IXGBE_ETAG_ETYPE 0x00005084 116 #define IXGBE_ETAG_ETYPE_MASK 0x0000ffff 117 #define IXGBE_ETAG_ETYPE_VALID 0x80000000 118 #define IXGBE_RAH_ADTYPE 0x40000000 119 #define IXGBE_RAL_ETAG_FILTER_MASK 0x00003fff 120 #define IXGBE_VMVIR_TAGA_MASK 0x18000000 121 #define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000 122 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */ 123 #define IXGBE_QDE_STRIP_TAG 0x00000004 124 #define IXGBE_VTEICR_MASK 0x07 125 126 #define IXGBE_EXVET_VET_EXT_SHIFT 16 127 #define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000 128 129 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params); 130 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); 131 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev); 132 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev); 133 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev); 134 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev); 135 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev); 136 static int ixgbe_dev_configure(struct rte_eth_dev *dev); 137 static int ixgbe_dev_start(struct rte_eth_dev *dev); 138 static void ixgbe_dev_stop(struct rte_eth_dev *dev); 139 static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev); 140 static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev); 141 static void ixgbe_dev_close(struct rte_eth_dev *dev); 142 static int ixgbe_dev_reset(struct rte_eth_dev *dev); 143 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); 144 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); 145 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); 146 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); 147 static int ixgbe_dev_link_update(struct rte_eth_dev *dev, 148 int wait_to_complete); 149 static int ixgbe_dev_stats_get(struct rte_eth_dev *dev, 150 struct rte_eth_stats *stats); 151 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev, 152 struct rte_eth_xstat *xstats, unsigned n); 153 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, 154 struct rte_eth_xstat *xstats, unsigned n); 155 static int 156 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 157 uint64_t *values, unsigned int n); 158 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev); 159 static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev); 160 static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev, 161 struct rte_eth_xstat_name *xstats_names, 162 unsigned int size); 163 static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev, 164 struct rte_eth_xstat_name *xstats_names, unsigned limit); 165 static int ixgbe_dev_xstats_get_names_by_id( 166 struct rte_eth_dev *dev, 167 struct rte_eth_xstat_name *xstats_names, 168 const uint64_t *ids, 169 unsigned int limit); 170 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, 171 uint16_t queue_id, 172 uint8_t stat_idx, 173 uint8_t is_rx); 174 static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, 175 size_t fw_size); 176 static void ixgbe_dev_info_get(struct rte_eth_dev *dev, 177 struct rte_eth_dev_info *dev_info); 178 static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev); 179 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev, 180 struct rte_eth_dev_info *dev_info); 181 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 182 183 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev, 184 uint16_t vlan_id, int on); 185 static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 186 enum rte_vlan_type vlan_type, 187 uint16_t tpid_id); 188 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, 189 uint16_t queue, bool on); 190 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, 191 int on); 192 static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, 193 int mask); 194 static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask); 195 static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); 196 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue); 197 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue); 198 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev); 199 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev); 200 201 static int ixgbe_dev_led_on(struct rte_eth_dev *dev); 202 static int ixgbe_dev_led_off(struct rte_eth_dev *dev); 203 static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, 204 struct rte_eth_fc_conf *fc_conf); 205 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, 206 struct rte_eth_fc_conf *fc_conf); 207 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, 208 struct rte_eth_pfc_conf *pfc_conf); 209 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 210 struct rte_eth_rss_reta_entry64 *reta_conf, 211 uint16_t reta_size); 212 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 213 struct rte_eth_rss_reta_entry64 *reta_conf, 214 uint16_t reta_size); 215 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev); 216 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on); 217 static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev); 218 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); 219 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); 220 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev); 221 static void ixgbe_dev_interrupt_handler(void *param); 222 static void ixgbe_dev_interrupt_delayed_handler(void *param); 223 static void ixgbe_dev_setup_link_alarm_handler(void *param); 224 225 static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 226 uint32_t index, uint32_t pool); 227 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); 228 static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, 229 struct ether_addr *mac_addr); 230 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config); 231 static bool is_device_supported(struct rte_eth_dev *dev, 232 struct rte_pci_driver *drv); 233 234 /* For Virtual Function support */ 235 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev); 236 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev); 237 static int ixgbevf_dev_configure(struct rte_eth_dev *dev); 238 static int ixgbevf_dev_start(struct rte_eth_dev *dev); 239 static int ixgbevf_dev_link_update(struct rte_eth_dev *dev, 240 int wait_to_complete); 241 static void ixgbevf_dev_stop(struct rte_eth_dev *dev); 242 static void ixgbevf_dev_close(struct rte_eth_dev *dev); 243 static int ixgbevf_dev_reset(struct rte_eth_dev *dev); 244 static void ixgbevf_intr_disable(struct rte_eth_dev *dev); 245 static void ixgbevf_intr_enable(struct rte_eth_dev *dev); 246 static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev, 247 struct rte_eth_stats *stats); 248 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); 249 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, 250 uint16_t vlan_id, int on); 251 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, 252 uint16_t queue, int on); 253 static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask); 254 static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask); 255 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on); 256 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, 257 uint16_t queue_id); 258 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, 259 uint16_t queue_id); 260 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 261 uint8_t queue, uint8_t msix_vector); 262 static void ixgbevf_configure_msix(struct rte_eth_dev *dev); 263 static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev); 264 static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev); 265 266 /* For Eth VMDQ APIs support */ 267 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct 268 ether_addr * mac_addr, uint8_t on); 269 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on); 270 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev, 271 struct rte_eth_mirror_conf *mirror_conf, 272 uint8_t rule_id, uint8_t on); 273 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, 274 uint8_t rule_id); 275 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, 276 uint16_t queue_id); 277 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, 278 uint16_t queue_id); 279 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 280 uint8_t queue, uint8_t msix_vector); 281 static void ixgbe_configure_msix(struct rte_eth_dev *dev); 282 283 static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev, 284 struct ether_addr *mac_addr, 285 uint32_t index, uint32_t pool); 286 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); 287 static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, 288 struct ether_addr *mac_addr); 289 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev, 290 struct rte_eth_syn_filter *filter); 291 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev, 292 enum rte_filter_op filter_op, 293 void *arg); 294 static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 295 struct ixgbe_5tuple_filter *filter); 296 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 297 struct ixgbe_5tuple_filter *filter); 298 static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, 299 enum rte_filter_op filter_op, 300 void *arg); 301 static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, 302 struct rte_eth_ntuple_filter *filter); 303 static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, 304 enum rte_filter_op filter_op, 305 void *arg); 306 static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, 307 struct rte_eth_ethertype_filter *filter); 308 static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, 309 enum rte_filter_type filter_type, 310 enum rte_filter_op filter_op, 311 void *arg); 312 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); 313 314 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 315 struct ether_addr *mc_addr_set, 316 uint32_t nb_mc_addr); 317 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 318 struct rte_eth_dcb_info *dcb_info); 319 320 static int ixgbe_get_reg_length(struct rte_eth_dev *dev); 321 static int ixgbe_get_regs(struct rte_eth_dev *dev, 322 struct rte_dev_reg_info *regs); 323 static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev); 324 static int ixgbe_get_eeprom(struct rte_eth_dev *dev, 325 struct rte_dev_eeprom_info *eeprom); 326 static int ixgbe_set_eeprom(struct rte_eth_dev *dev, 327 struct rte_dev_eeprom_info *eeprom); 328 329 static int ixgbe_get_module_info(struct rte_eth_dev *dev, 330 struct rte_eth_dev_module_info *modinfo); 331 static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev, 332 struct rte_dev_eeprom_info *info); 333 334 static int ixgbevf_get_reg_length(struct rte_eth_dev *dev); 335 static int ixgbevf_get_regs(struct rte_eth_dev *dev, 336 struct rte_dev_reg_info *regs); 337 338 static int ixgbe_timesync_enable(struct rte_eth_dev *dev); 339 static int ixgbe_timesync_disable(struct rte_eth_dev *dev); 340 static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 341 struct timespec *timestamp, 342 uint32_t flags); 343 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 344 struct timespec *timestamp); 345 static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); 346 static int ixgbe_timesync_read_time(struct rte_eth_dev *dev, 347 struct timespec *timestamp); 348 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev, 349 const struct timespec *timestamp); 350 static void ixgbevf_dev_interrupt_handler(void *param); 351 352 static int ixgbe_dev_l2_tunnel_eth_type_conf 353 (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel); 354 static int ixgbe_dev_l2_tunnel_offload_set 355 (struct rte_eth_dev *dev, 356 struct rte_eth_l2_tunnel_conf *l2_tunnel, 357 uint32_t mask, 358 uint8_t en); 359 static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, 360 enum rte_filter_op filter_op, 361 void *arg); 362 363 static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 364 struct rte_eth_udp_tunnel *udp_tunnel); 365 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 366 struct rte_eth_udp_tunnel *udp_tunnel); 367 static int ixgbe_filter_restore(struct rte_eth_dev *dev); 368 static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev); 369 370 /* 371 * Define VF Stats MACRO for Non "cleared on read" register 372 */ 373 #define UPDATE_VF_STAT(reg, last, cur) \ 374 { \ 375 uint32_t latest = IXGBE_READ_REG(hw, reg); \ 376 cur += (latest - last) & UINT_MAX; \ 377 last = latest; \ 378 } 379 380 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur) \ 381 { \ 382 u64 new_lsb = IXGBE_READ_REG(hw, lsb); \ 383 u64 new_msb = IXGBE_READ_REG(hw, msb); \ 384 u64 latest = ((new_msb << 32) | new_lsb); \ 385 cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \ 386 last = latest; \ 387 } 388 389 #define IXGBE_SET_HWSTRIP(h, q) do {\ 390 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 391 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 392 (h)->bitmap[idx] |= 1 << bit;\ 393 } while (0) 394 395 #define IXGBE_CLEAR_HWSTRIP(h, q) do {\ 396 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 397 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 398 (h)->bitmap[idx] &= ~(1 << bit);\ 399 } while (0) 400 401 #define IXGBE_GET_HWSTRIP(h, q, r) do {\ 402 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 403 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 404 (r) = (h)->bitmap[idx] >> bit & 1;\ 405 } while (0) 406 407 int ixgbe_logtype_init; 408 int ixgbe_logtype_driver; 409 410 /* 411 * The set of PCI devices this driver supports 412 */ 413 static const struct rte_pci_id pci_id_ixgbe_map[] = { 414 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) }, 415 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) }, 416 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) }, 417 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) }, 418 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) }, 419 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) }, 420 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) }, 421 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) }, 422 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) }, 423 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) }, 424 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) }, 425 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) }, 426 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) }, 427 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) }, 428 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) }, 429 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) }, 430 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) }, 431 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) }, 432 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) }, 433 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) }, 434 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) }, 435 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) }, 436 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) }, 437 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) }, 438 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) }, 439 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) }, 440 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) }, 441 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) }, 442 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) }, 443 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) }, 444 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) }, 445 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) }, 446 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) }, 447 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) }, 448 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) }, 449 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) }, 450 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) }, 451 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) }, 452 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) }, 453 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) }, 454 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) }, 455 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) }, 456 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) }, 457 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) }, 458 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) }, 459 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) }, 460 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) }, 461 #ifdef RTE_LIBRTE_IXGBE_BYPASS 462 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) }, 463 #endif 464 { .vendor_id = 0, /* sentinel */ }, 465 }; 466 467 /* 468 * The set of PCI devices this driver supports (for 82599 VF) 469 */ 470 static const struct rte_pci_id pci_id_ixgbevf_map[] = { 471 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) }, 472 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) }, 473 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) }, 474 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) }, 475 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) }, 476 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) }, 477 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) }, 478 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) }, 479 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) }, 480 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) }, 481 { .vendor_id = 0, /* sentinel */ }, 482 }; 483 484 static const struct rte_eth_desc_lim rx_desc_lim = { 485 .nb_max = IXGBE_MAX_RING_DESC, 486 .nb_min = IXGBE_MIN_RING_DESC, 487 .nb_align = IXGBE_RXD_ALIGN, 488 }; 489 490 static const struct rte_eth_desc_lim tx_desc_lim = { 491 .nb_max = IXGBE_MAX_RING_DESC, 492 .nb_min = IXGBE_MIN_RING_DESC, 493 .nb_align = IXGBE_TXD_ALIGN, 494 .nb_seg_max = IXGBE_TX_MAX_SEG, 495 .nb_mtu_seg_max = IXGBE_TX_MAX_SEG, 496 }; 497 498 static const struct eth_dev_ops ixgbe_eth_dev_ops = { 499 .dev_configure = ixgbe_dev_configure, 500 .dev_start = ixgbe_dev_start, 501 .dev_stop = ixgbe_dev_stop, 502 .dev_set_link_up = ixgbe_dev_set_link_up, 503 .dev_set_link_down = ixgbe_dev_set_link_down, 504 .dev_close = ixgbe_dev_close, 505 .dev_reset = ixgbe_dev_reset, 506 .promiscuous_enable = ixgbe_dev_promiscuous_enable, 507 .promiscuous_disable = ixgbe_dev_promiscuous_disable, 508 .allmulticast_enable = ixgbe_dev_allmulticast_enable, 509 .allmulticast_disable = ixgbe_dev_allmulticast_disable, 510 .link_update = ixgbe_dev_link_update, 511 .stats_get = ixgbe_dev_stats_get, 512 .xstats_get = ixgbe_dev_xstats_get, 513 .xstats_get_by_id = ixgbe_dev_xstats_get_by_id, 514 .stats_reset = ixgbe_dev_stats_reset, 515 .xstats_reset = ixgbe_dev_xstats_reset, 516 .xstats_get_names = ixgbe_dev_xstats_get_names, 517 .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id, 518 .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set, 519 .fw_version_get = ixgbe_fw_version_get, 520 .dev_infos_get = ixgbe_dev_info_get, 521 .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, 522 .mtu_set = ixgbe_dev_mtu_set, 523 .vlan_filter_set = ixgbe_vlan_filter_set, 524 .vlan_tpid_set = ixgbe_vlan_tpid_set, 525 .vlan_offload_set = ixgbe_vlan_offload_set, 526 .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set, 527 .rx_queue_start = ixgbe_dev_rx_queue_start, 528 .rx_queue_stop = ixgbe_dev_rx_queue_stop, 529 .tx_queue_start = ixgbe_dev_tx_queue_start, 530 .tx_queue_stop = ixgbe_dev_tx_queue_stop, 531 .rx_queue_setup = ixgbe_dev_rx_queue_setup, 532 .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable, 533 .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable, 534 .rx_queue_release = ixgbe_dev_rx_queue_release, 535 .rx_queue_count = ixgbe_dev_rx_queue_count, 536 .rx_descriptor_done = ixgbe_dev_rx_descriptor_done, 537 .rx_descriptor_status = ixgbe_dev_rx_descriptor_status, 538 .tx_descriptor_status = ixgbe_dev_tx_descriptor_status, 539 .tx_queue_setup = ixgbe_dev_tx_queue_setup, 540 .tx_queue_release = ixgbe_dev_tx_queue_release, 541 .dev_led_on = ixgbe_dev_led_on, 542 .dev_led_off = ixgbe_dev_led_off, 543 .flow_ctrl_get = ixgbe_flow_ctrl_get, 544 .flow_ctrl_set = ixgbe_flow_ctrl_set, 545 .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set, 546 .mac_addr_add = ixgbe_add_rar, 547 .mac_addr_remove = ixgbe_remove_rar, 548 .mac_addr_set = ixgbe_set_default_mac_addr, 549 .uc_hash_table_set = ixgbe_uc_hash_table_set, 550 .uc_all_hash_table_set = ixgbe_uc_all_hash_table_set, 551 .mirror_rule_set = ixgbe_mirror_rule_set, 552 .mirror_rule_reset = ixgbe_mirror_rule_reset, 553 .set_queue_rate_limit = ixgbe_set_queue_rate_limit, 554 .reta_update = ixgbe_dev_rss_reta_update, 555 .reta_query = ixgbe_dev_rss_reta_query, 556 .rss_hash_update = ixgbe_dev_rss_hash_update, 557 .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, 558 .filter_ctrl = ixgbe_dev_filter_ctrl, 559 .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, 560 .rxq_info_get = ixgbe_rxq_info_get, 561 .txq_info_get = ixgbe_txq_info_get, 562 .timesync_enable = ixgbe_timesync_enable, 563 .timesync_disable = ixgbe_timesync_disable, 564 .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp, 565 .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp, 566 .get_reg = ixgbe_get_regs, 567 .get_eeprom_length = ixgbe_get_eeprom_length, 568 .get_eeprom = ixgbe_get_eeprom, 569 .set_eeprom = ixgbe_set_eeprom, 570 .get_module_info = ixgbe_get_module_info, 571 .get_module_eeprom = ixgbe_get_module_eeprom, 572 .get_dcb_info = ixgbe_dev_get_dcb_info, 573 .timesync_adjust_time = ixgbe_timesync_adjust_time, 574 .timesync_read_time = ixgbe_timesync_read_time, 575 .timesync_write_time = ixgbe_timesync_write_time, 576 .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf, 577 .l2_tunnel_offload_set = ixgbe_dev_l2_tunnel_offload_set, 578 .udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add, 579 .udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del, 580 .tm_ops_get = ixgbe_tm_ops_get, 581 }; 582 583 /* 584 * dev_ops for virtual function, bare necessities for basic vf 585 * operation have been implemented 586 */ 587 static const struct eth_dev_ops ixgbevf_eth_dev_ops = { 588 .dev_configure = ixgbevf_dev_configure, 589 .dev_start = ixgbevf_dev_start, 590 .dev_stop = ixgbevf_dev_stop, 591 .link_update = ixgbevf_dev_link_update, 592 .stats_get = ixgbevf_dev_stats_get, 593 .xstats_get = ixgbevf_dev_xstats_get, 594 .stats_reset = ixgbevf_dev_stats_reset, 595 .xstats_reset = ixgbevf_dev_stats_reset, 596 .xstats_get_names = ixgbevf_dev_xstats_get_names, 597 .dev_close = ixgbevf_dev_close, 598 .dev_reset = ixgbevf_dev_reset, 599 .allmulticast_enable = ixgbevf_dev_allmulticast_enable, 600 .allmulticast_disable = ixgbevf_dev_allmulticast_disable, 601 .dev_infos_get = ixgbevf_dev_info_get, 602 .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, 603 .mtu_set = ixgbevf_dev_set_mtu, 604 .vlan_filter_set = ixgbevf_vlan_filter_set, 605 .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set, 606 .vlan_offload_set = ixgbevf_vlan_offload_set, 607 .rx_queue_setup = ixgbe_dev_rx_queue_setup, 608 .rx_queue_release = ixgbe_dev_rx_queue_release, 609 .rx_descriptor_done = ixgbe_dev_rx_descriptor_done, 610 .rx_descriptor_status = ixgbe_dev_rx_descriptor_status, 611 .tx_descriptor_status = ixgbe_dev_tx_descriptor_status, 612 .tx_queue_setup = ixgbe_dev_tx_queue_setup, 613 .tx_queue_release = ixgbe_dev_tx_queue_release, 614 .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable, 615 .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable, 616 .mac_addr_add = ixgbevf_add_mac_addr, 617 .mac_addr_remove = ixgbevf_remove_mac_addr, 618 .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, 619 .rxq_info_get = ixgbe_rxq_info_get, 620 .txq_info_get = ixgbe_txq_info_get, 621 .mac_addr_set = ixgbevf_set_default_mac_addr, 622 .get_reg = ixgbevf_get_regs, 623 .reta_update = ixgbe_dev_rss_reta_update, 624 .reta_query = ixgbe_dev_rss_reta_query, 625 .rss_hash_update = ixgbe_dev_rss_hash_update, 626 .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, 627 }; 628 629 /* store statistics names and its offset in stats structure */ 630 struct rte_ixgbe_xstats_name_off { 631 char name[RTE_ETH_XSTATS_NAME_SIZE]; 632 unsigned offset; 633 }; 634 635 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = { 636 {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)}, 637 {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)}, 638 {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)}, 639 {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)}, 640 {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)}, 641 {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)}, 642 {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)}, 643 {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)}, 644 {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)}, 645 {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)}, 646 {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)}, 647 {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)}, 648 {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)}, 649 {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)}, 650 {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, 651 prc1023)}, 652 {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, 653 prc1522)}, 654 {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)}, 655 {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)}, 656 {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)}, 657 {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)}, 658 {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)}, 659 {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)}, 660 {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)}, 661 {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)}, 662 {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)}, 663 {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)}, 664 {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)}, 665 {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)}, 666 {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)}, 667 {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)}, 668 {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)}, 669 {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)}, 670 {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, 671 ptc1023)}, 672 {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, 673 ptc1522)}, 674 {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)}, 675 {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)}, 676 {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)}, 677 {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)}, 678 679 {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats, 680 fdirustat_add)}, 681 {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats, 682 fdirustat_remove)}, 683 {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats, 684 fdirfstat_fadd)}, 685 {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats, 686 fdirfstat_fremove)}, 687 {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats, 688 fdirmatch)}, 689 {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats, 690 fdirmiss)}, 691 692 {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)}, 693 {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)}, 694 {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, 695 fclast)}, 696 {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)}, 697 {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)}, 698 {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)}, 699 {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)}, 700 {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats, 701 fcoe_noddp)}, 702 {"rx_fcoe_no_direct_data_placement_ext_buff", 703 offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)}, 704 705 {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, 706 lxontxc)}, 707 {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, 708 lxonrxc)}, 709 {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, 710 lxofftxc)}, 711 {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, 712 lxoffrxc)}, 713 {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)}, 714 }; 715 716 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \ 717 sizeof(rte_ixgbe_stats_strings[0])) 718 719 /* MACsec statistics */ 720 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = { 721 {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats, 722 out_pkts_untagged)}, 723 {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats, 724 out_pkts_encrypted)}, 725 {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats, 726 out_pkts_protected)}, 727 {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats, 728 out_octets_encrypted)}, 729 {"out_octets_protected", offsetof(struct ixgbe_macsec_stats, 730 out_octets_protected)}, 731 {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats, 732 in_pkts_untagged)}, 733 {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats, 734 in_pkts_badtag)}, 735 {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats, 736 in_pkts_nosci)}, 737 {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats, 738 in_pkts_unknownsci)}, 739 {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats, 740 in_octets_decrypted)}, 741 {"in_octets_validated", offsetof(struct ixgbe_macsec_stats, 742 in_octets_validated)}, 743 {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats, 744 in_pkts_unchecked)}, 745 {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats, 746 in_pkts_delayed)}, 747 {"in_pkts_late", offsetof(struct ixgbe_macsec_stats, 748 in_pkts_late)}, 749 {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats, 750 in_pkts_ok)}, 751 {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats, 752 in_pkts_invalid)}, 753 {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats, 754 in_pkts_notvalid)}, 755 {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats, 756 in_pkts_unusedsa)}, 757 {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats, 758 in_pkts_notusingsa)}, 759 }; 760 761 #define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \ 762 sizeof(rte_ixgbe_macsec_strings[0])) 763 764 /* Per-queue statistics */ 765 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = { 766 {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)}, 767 {"dropped", offsetof(struct ixgbe_hw_stats, mpc)}, 768 {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)}, 769 {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)}, 770 }; 771 772 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \ 773 sizeof(rte_ixgbe_rxq_strings[0])) 774 #define IXGBE_NB_RXQ_PRIO_VALUES 8 775 776 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = { 777 {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)}, 778 {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)}, 779 {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats, 780 pxon2offc)}, 781 }; 782 783 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \ 784 sizeof(rte_ixgbe_txq_strings[0])) 785 #define IXGBE_NB_TXQ_PRIO_VALUES 8 786 787 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = { 788 {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)}, 789 }; 790 791 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \ 792 sizeof(rte_ixgbevf_stats_strings[0])) 793 794 /* 795 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h. 796 */ 797 static inline int 798 ixgbe_is_sfp(struct ixgbe_hw *hw) 799 { 800 switch (hw->phy.type) { 801 case ixgbe_phy_sfp_avago: 802 case ixgbe_phy_sfp_ftl: 803 case ixgbe_phy_sfp_intel: 804 case ixgbe_phy_sfp_unknown: 805 case ixgbe_phy_sfp_passive_tyco: 806 case ixgbe_phy_sfp_passive_unknown: 807 return 1; 808 default: 809 return 0; 810 } 811 } 812 813 static inline int32_t 814 ixgbe_pf_reset_hw(struct ixgbe_hw *hw) 815 { 816 uint32_t ctrl_ext; 817 int32_t status; 818 819 status = ixgbe_reset_hw(hw); 820 821 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 822 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 823 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 824 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 825 IXGBE_WRITE_FLUSH(hw); 826 827 if (status == IXGBE_ERR_SFP_NOT_PRESENT) 828 status = IXGBE_SUCCESS; 829 return status; 830 } 831 832 static inline void 833 ixgbe_enable_intr(struct rte_eth_dev *dev) 834 { 835 struct ixgbe_interrupt *intr = 836 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 837 struct ixgbe_hw *hw = 838 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 839 840 IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask); 841 IXGBE_WRITE_FLUSH(hw); 842 } 843 844 /* 845 * This function is based on ixgbe_disable_intr() in base/ixgbe.h. 846 */ 847 static void 848 ixgbe_disable_intr(struct ixgbe_hw *hw) 849 { 850 PMD_INIT_FUNC_TRACE(); 851 852 if (hw->mac.type == ixgbe_mac_82598EB) { 853 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0); 854 } else { 855 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000); 856 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0); 857 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0); 858 } 859 IXGBE_WRITE_FLUSH(hw); 860 } 861 862 /* 863 * This function resets queue statistics mapping registers. 864 * From Niantic datasheet, Initialization of Statistics section: 865 * "...if software requires the queue counters, the RQSMR and TQSM registers 866 * must be re-programmed following a device reset. 867 */ 868 static void 869 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw) 870 { 871 uint32_t i; 872 873 for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) { 874 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0); 875 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0); 876 } 877 } 878 879 880 static int 881 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, 882 uint16_t queue_id, 883 uint8_t stat_idx, 884 uint8_t is_rx) 885 { 886 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8 887 #define NB_QMAP_FIELDS_PER_QSM_REG 4 888 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f 889 890 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 891 struct ixgbe_stat_mapping_registers *stat_mappings = 892 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private); 893 uint32_t qsmr_mask = 0; 894 uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK; 895 uint32_t q_map; 896 uint8_t n, offset; 897 898 if ((hw->mac.type != ixgbe_mac_82599EB) && 899 (hw->mac.type != ixgbe_mac_X540) && 900 (hw->mac.type != ixgbe_mac_X550) && 901 (hw->mac.type != ixgbe_mac_X550EM_x) && 902 (hw->mac.type != ixgbe_mac_X550EM_a)) 903 return -ENOSYS; 904 905 PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d", 906 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", 907 queue_id, stat_idx); 908 909 n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG); 910 if (n >= IXGBE_NB_STAT_MAPPING_REGS) { 911 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded"); 912 return -EIO; 913 } 914 offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG); 915 916 /* Now clear any previous stat_idx set */ 917 clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset); 918 if (!is_rx) 919 stat_mappings->tqsm[n] &= ~clearing_mask; 920 else 921 stat_mappings->rqsmr[n] &= ~clearing_mask; 922 923 q_map = (uint32_t)stat_idx; 924 q_map &= QMAP_FIELD_RESERVED_BITS_MASK; 925 qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset); 926 if (!is_rx) 927 stat_mappings->tqsm[n] |= qsmr_mask; 928 else 929 stat_mappings->rqsmr[n] |= qsmr_mask; 930 931 PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d", 932 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", 933 queue_id, stat_idx); 934 PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n, 935 is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]); 936 937 /* Now write the mapping in the appropriate register */ 938 if (is_rx) { 939 PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d", 940 stat_mappings->rqsmr[n], n); 941 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]); 942 } else { 943 PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d", 944 stat_mappings->tqsm[n], n); 945 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]); 946 } 947 return 0; 948 } 949 950 static void 951 ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev) 952 { 953 struct ixgbe_stat_mapping_registers *stat_mappings = 954 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private); 955 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 956 int i; 957 958 /* write whatever was in stat mapping table to the NIC */ 959 for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) { 960 /* rx */ 961 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]); 962 963 /* tx */ 964 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]); 965 } 966 } 967 968 static void 969 ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) 970 { 971 uint8_t i; 972 struct ixgbe_dcb_tc_config *tc; 973 uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS; 974 975 dcb_config->num_tcs.pg_tcs = dcb_max_tc; 976 dcb_config->num_tcs.pfc_tcs = dcb_max_tc; 977 for (i = 0; i < dcb_max_tc; i++) { 978 tc = &dcb_config->tc_config[i]; 979 tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i; 980 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 981 (uint8_t)(100/dcb_max_tc + (i & 1)); 982 tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i; 983 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 984 (uint8_t)(100/dcb_max_tc + (i & 1)); 985 tc->pfc = ixgbe_dcb_pfc_disabled; 986 } 987 988 /* Initialize default user to priority mapping, UPx->TC0 */ 989 tc = &dcb_config->tc_config[0]; 990 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; 991 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; 992 for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) { 993 dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100; 994 dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100; 995 } 996 dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal; 997 dcb_config->pfc_mode_enable = false; 998 dcb_config->vt_mode = true; 999 dcb_config->round_robin_enable = false; 1000 /* support all DCB capabilities in 82599 */ 1001 dcb_config->support.capabilities = 0xFF; 1002 1003 /*we only support 4 Tcs for X540, X550 */ 1004 if (hw->mac.type == ixgbe_mac_X540 || 1005 hw->mac.type == ixgbe_mac_X550 || 1006 hw->mac.type == ixgbe_mac_X550EM_x || 1007 hw->mac.type == ixgbe_mac_X550EM_a) { 1008 dcb_config->num_tcs.pg_tcs = 4; 1009 dcb_config->num_tcs.pfc_tcs = 4; 1010 } 1011 } 1012 1013 /* 1014 * Ensure that all locks are released before first NVM or PHY access 1015 */ 1016 static void 1017 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) 1018 { 1019 uint16_t mask; 1020 1021 /* 1022 * Phy lock should not fail in this early stage. If this is the case, 1023 * it is due to an improper exit of the application. 1024 * So force the release of the faulty lock. Release of common lock 1025 * is done automatically by swfw_sync function. 1026 */ 1027 mask = IXGBE_GSSR_PHY0_SM << hw->bus.func; 1028 if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { 1029 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func); 1030 } 1031 ixgbe_release_swfw_semaphore(hw, mask); 1032 1033 /* 1034 * These ones are more tricky since they are common to all ports; but 1035 * swfw_sync retries last long enough (1s) to be almost sure that if 1036 * lock can not be taken it is due to an improper lock of the 1037 * semaphore. 1038 */ 1039 mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM; 1040 if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { 1041 PMD_DRV_LOG(DEBUG, "SWFW common locks released"); 1042 } 1043 ixgbe_release_swfw_semaphore(hw, mask); 1044 } 1045 1046 /* 1047 * This function is based on code in ixgbe_attach() in base/ixgbe.c. 1048 * It returns 0 on success. 1049 */ 1050 static int 1051 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) 1052 { 1053 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1054 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1055 struct ixgbe_hw *hw = 1056 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1057 struct ixgbe_vfta *shadow_vfta = 1058 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1059 struct ixgbe_hwstrip *hwstrip = 1060 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1061 struct ixgbe_dcb_config *dcb_config = 1062 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private); 1063 struct ixgbe_filter_info *filter_info = 1064 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); 1065 struct ixgbe_bw_conf *bw_conf = 1066 IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private); 1067 uint32_t ctrl_ext; 1068 uint16_t csum; 1069 int diag, i; 1070 1071 PMD_INIT_FUNC_TRACE(); 1072 1073 eth_dev->dev_ops = &ixgbe_eth_dev_ops; 1074 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1075 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1076 eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts; 1077 1078 /* 1079 * For secondary processes, we don't initialise any further as primary 1080 * has already done this work. Only check we don't need a different 1081 * RX and TX function. 1082 */ 1083 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1084 struct ixgbe_tx_queue *txq; 1085 /* TX queue function in primary, set by last queue initialized 1086 * Tx queue may not initialized by primary process 1087 */ 1088 if (eth_dev->data->tx_queues) { 1089 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1]; 1090 ixgbe_set_tx_function(eth_dev, txq); 1091 } else { 1092 /* Use default TX function if we get here */ 1093 PMD_INIT_LOG(NOTICE, "No TX queues configured yet. " 1094 "Using default TX function."); 1095 } 1096 1097 ixgbe_set_rx_function(eth_dev); 1098 1099 return 0; 1100 } 1101 1102 rte_eth_copy_pci_info(eth_dev, pci_dev); 1103 1104 /* Vendor and Device ID need to be set before init of shared code */ 1105 hw->device_id = pci_dev->id.device_id; 1106 hw->vendor_id = pci_dev->id.vendor_id; 1107 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1108 hw->allow_unsupported_sfp = 1; 1109 1110 /* Initialize the shared code (base driver) */ 1111 #ifdef RTE_LIBRTE_IXGBE_BYPASS 1112 diag = ixgbe_bypass_init_shared_code(hw); 1113 #else 1114 diag = ixgbe_init_shared_code(hw); 1115 #endif /* RTE_LIBRTE_IXGBE_BYPASS */ 1116 1117 if (diag != IXGBE_SUCCESS) { 1118 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag); 1119 return -EIO; 1120 } 1121 1122 if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { 1123 PMD_INIT_LOG(ERR, "\nERROR: " 1124 "Firmware recovery mode detected. Limiting functionality.\n" 1125 "Refer to the Intel(R) Ethernet Adapters and Devices " 1126 "User Guide for details on firmware recovery mode."); 1127 return -EIO; 1128 } 1129 1130 /* pick up the PCI bus settings for reporting later */ 1131 ixgbe_get_bus_info(hw); 1132 1133 /* Unlock any pending hardware semaphore */ 1134 ixgbe_swfw_lock_reset(hw); 1135 1136 #ifdef RTE_LIBRTE_SECURITY 1137 /* Initialize security_ctx only for primary process*/ 1138 if (ixgbe_ipsec_ctx_create(eth_dev)) 1139 return -ENOMEM; 1140 #endif 1141 1142 /* Initialize DCB configuration*/ 1143 memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config)); 1144 ixgbe_dcb_init(hw, dcb_config); 1145 /* Get Hardware Flow Control setting */ 1146 hw->fc.requested_mode = ixgbe_fc_full; 1147 hw->fc.current_mode = ixgbe_fc_full; 1148 hw->fc.pause_time = IXGBE_FC_PAUSE; 1149 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 1150 hw->fc.low_water[i] = IXGBE_FC_LO; 1151 hw->fc.high_water[i] = IXGBE_FC_HI; 1152 } 1153 hw->fc.send_xon = 1; 1154 1155 /* Make sure we have a good EEPROM before we read from it */ 1156 diag = ixgbe_validate_eeprom_checksum(hw, &csum); 1157 if (diag != IXGBE_SUCCESS) { 1158 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag); 1159 return -EIO; 1160 } 1161 1162 #ifdef RTE_LIBRTE_IXGBE_BYPASS 1163 diag = ixgbe_bypass_init_hw(hw); 1164 #else 1165 diag = ixgbe_init_hw(hw); 1166 #endif /* RTE_LIBRTE_IXGBE_BYPASS */ 1167 1168 /* 1169 * Devices with copper phys will fail to initialise if ixgbe_init_hw() 1170 * is called too soon after the kernel driver unbinding/binding occurs. 1171 * The failure occurs in ixgbe_identify_phy_generic() for all devices, 1172 * but for non-copper devies, ixgbe_identify_sfp_module_generic() is 1173 * also called. See ixgbe_identify_phy_82599(). The reason for the 1174 * failure is not known, and only occuts when virtualisation features 1175 * are disabled in the bios. A delay of 100ms was found to be enough by 1176 * trial-and-error, and is doubled to be safe. 1177 */ 1178 if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { 1179 rte_delay_ms(200); 1180 diag = ixgbe_init_hw(hw); 1181 } 1182 1183 if (diag == IXGBE_ERR_SFP_NOT_PRESENT) 1184 diag = IXGBE_SUCCESS; 1185 1186 if (diag == IXGBE_ERR_EEPROM_VERSION) { 1187 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/" 1188 "LOM. Please be aware there may be issues associated " 1189 "with your hardware."); 1190 PMD_INIT_LOG(ERR, "If you are experiencing problems " 1191 "please contact your Intel or hardware representative " 1192 "who provided you with this hardware."); 1193 } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED) 1194 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); 1195 if (diag) { 1196 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag); 1197 return -EIO; 1198 } 1199 1200 /* Reset the hw statistics */ 1201 ixgbe_dev_stats_reset(eth_dev); 1202 1203 /* disable interrupt */ 1204 ixgbe_disable_intr(hw); 1205 1206 /* reset mappings for queue statistics hw counters*/ 1207 ixgbe_reset_qstat_mappings(hw); 1208 1209 /* Allocate memory for storing MAC addresses */ 1210 eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * 1211 hw->mac.num_rar_entries, 0); 1212 if (eth_dev->data->mac_addrs == NULL) { 1213 PMD_INIT_LOG(ERR, 1214 "Failed to allocate %u bytes needed to store " 1215 "MAC addresses", 1216 ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1217 return -ENOMEM; 1218 } 1219 /* Copy the permanent MAC address */ 1220 ether_addr_copy((struct ether_addr *) hw->mac.perm_addr, 1221 ð_dev->data->mac_addrs[0]); 1222 1223 /* Allocate memory for storing hash filter MAC addresses */ 1224 eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * 1225 IXGBE_VMDQ_NUM_UC_MAC, 0); 1226 if (eth_dev->data->hash_mac_addrs == NULL) { 1227 PMD_INIT_LOG(ERR, 1228 "Failed to allocate %d bytes needed to store MAC addresses", 1229 ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); 1230 return -ENOMEM; 1231 } 1232 1233 /* initialize the vfta */ 1234 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1235 1236 /* initialize the hw strip bitmap*/ 1237 memset(hwstrip, 0, sizeof(*hwstrip)); 1238 1239 /* initialize PF if max_vfs not zero */ 1240 ixgbe_pf_host_init(eth_dev); 1241 1242 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 1243 /* let hardware know driver is loaded */ 1244 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 1245 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 1246 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 1247 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 1248 IXGBE_WRITE_FLUSH(hw); 1249 1250 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 1251 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d", 1252 (int) hw->mac.type, (int) hw->phy.type, 1253 (int) hw->phy.sfp_type); 1254 else 1255 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d", 1256 (int) hw->mac.type, (int) hw->phy.type); 1257 1258 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", 1259 eth_dev->data->port_id, pci_dev->id.vendor_id, 1260 pci_dev->id.device_id); 1261 1262 rte_intr_callback_register(intr_handle, 1263 ixgbe_dev_interrupt_handler, eth_dev); 1264 1265 /* enable uio/vfio intr/eventfd mapping */ 1266 rte_intr_enable(intr_handle); 1267 1268 /* enable support intr */ 1269 ixgbe_enable_intr(eth_dev); 1270 1271 /* initialize filter info */ 1272 memset(filter_info, 0, 1273 sizeof(struct ixgbe_filter_info)); 1274 1275 /* initialize 5tuple filter list */ 1276 TAILQ_INIT(&filter_info->fivetuple_list); 1277 1278 /* initialize flow director filter list & hash */ 1279 ixgbe_fdir_filter_init(eth_dev); 1280 1281 /* initialize l2 tunnel filter list & hash */ 1282 ixgbe_l2_tn_filter_init(eth_dev); 1283 1284 /* initialize flow filter lists */ 1285 ixgbe_filterlist_init(); 1286 1287 /* initialize bandwidth configuration info */ 1288 memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf)); 1289 1290 /* initialize Traffic Manager configuration */ 1291 ixgbe_tm_conf_init(eth_dev); 1292 1293 return 0; 1294 } 1295 1296 static int 1297 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) 1298 { 1299 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1300 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1301 struct ixgbe_hw *hw; 1302 int retries = 0; 1303 int ret; 1304 1305 PMD_INIT_FUNC_TRACE(); 1306 1307 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1308 return 0; 1309 1310 hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1311 1312 if (hw->adapter_stopped == 0) 1313 ixgbe_dev_close(eth_dev); 1314 1315 eth_dev->dev_ops = NULL; 1316 eth_dev->rx_pkt_burst = NULL; 1317 eth_dev->tx_pkt_burst = NULL; 1318 1319 /* Unlock any pending hardware semaphore */ 1320 ixgbe_swfw_lock_reset(hw); 1321 1322 /* disable uio intr before callback unregister */ 1323 rte_intr_disable(intr_handle); 1324 1325 do { 1326 ret = rte_intr_callback_unregister(intr_handle, 1327 ixgbe_dev_interrupt_handler, eth_dev); 1328 if (ret >= 0) { 1329 break; 1330 } else if (ret != -EAGAIN) { 1331 PMD_INIT_LOG(ERR, 1332 "intr callback unregister failed: %d", 1333 ret); 1334 return ret; 1335 } 1336 rte_delay_ms(100); 1337 } while (retries++ < (10 + IXGBE_LINK_UP_TIME)); 1338 1339 /* cancel the delay handler before remove dev */ 1340 rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, eth_dev); 1341 1342 /* uninitialize PF if max_vfs not zero */ 1343 ixgbe_pf_host_uninit(eth_dev); 1344 1345 /* remove all the fdir filters & hash */ 1346 ixgbe_fdir_filter_uninit(eth_dev); 1347 1348 /* remove all the L2 tunnel filters & hash */ 1349 ixgbe_l2_tn_filter_uninit(eth_dev); 1350 1351 /* Remove all ntuple filters of the device */ 1352 ixgbe_ntuple_filter_uninit(eth_dev); 1353 1354 /* clear all the filters list */ 1355 ixgbe_filterlist_flush(); 1356 1357 /* Remove all Traffic Manager configuration */ 1358 ixgbe_tm_conf_uninit(eth_dev); 1359 1360 #ifdef RTE_LIBRTE_SECURITY 1361 rte_free(eth_dev->security_ctx); 1362 #endif 1363 1364 return 0; 1365 } 1366 1367 static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev) 1368 { 1369 struct ixgbe_filter_info *filter_info = 1370 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); 1371 struct ixgbe_5tuple_filter *p_5tuple; 1372 1373 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) { 1374 TAILQ_REMOVE(&filter_info->fivetuple_list, 1375 p_5tuple, 1376 entries); 1377 rte_free(p_5tuple); 1378 } 1379 memset(filter_info->fivetuple_mask, 0, 1380 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); 1381 1382 return 0; 1383 } 1384 1385 static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev) 1386 { 1387 struct ixgbe_hw_fdir_info *fdir_info = 1388 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private); 1389 struct ixgbe_fdir_filter *fdir_filter; 1390 1391 if (fdir_info->hash_map) 1392 rte_free(fdir_info->hash_map); 1393 if (fdir_info->hash_handle) 1394 rte_hash_free(fdir_info->hash_handle); 1395 1396 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) { 1397 TAILQ_REMOVE(&fdir_info->fdir_list, 1398 fdir_filter, 1399 entries); 1400 rte_free(fdir_filter); 1401 } 1402 1403 return 0; 1404 } 1405 1406 static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev) 1407 { 1408 struct ixgbe_l2_tn_info *l2_tn_info = 1409 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); 1410 struct ixgbe_l2_tn_filter *l2_tn_filter; 1411 1412 if (l2_tn_info->hash_map) 1413 rte_free(l2_tn_info->hash_map); 1414 if (l2_tn_info->hash_handle) 1415 rte_hash_free(l2_tn_info->hash_handle); 1416 1417 while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { 1418 TAILQ_REMOVE(&l2_tn_info->l2_tn_list, 1419 l2_tn_filter, 1420 entries); 1421 rte_free(l2_tn_filter); 1422 } 1423 1424 return 0; 1425 } 1426 1427 static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev) 1428 { 1429 struct ixgbe_hw_fdir_info *fdir_info = 1430 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private); 1431 char fdir_hash_name[RTE_HASH_NAMESIZE]; 1432 struct rte_hash_parameters fdir_hash_params = { 1433 .name = fdir_hash_name, 1434 .entries = IXGBE_MAX_FDIR_FILTER_NUM, 1435 .key_len = sizeof(union ixgbe_atr_input), 1436 .hash_func = rte_hash_crc, 1437 .hash_func_init_val = 0, 1438 .socket_id = rte_socket_id(), 1439 }; 1440 1441 TAILQ_INIT(&fdir_info->fdir_list); 1442 snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, 1443 "fdir_%s", eth_dev->device->name); 1444 fdir_info->hash_handle = rte_hash_create(&fdir_hash_params); 1445 if (!fdir_info->hash_handle) { 1446 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); 1447 return -EINVAL; 1448 } 1449 fdir_info->hash_map = rte_zmalloc("ixgbe", 1450 sizeof(struct ixgbe_fdir_filter *) * 1451 IXGBE_MAX_FDIR_FILTER_NUM, 1452 0); 1453 if (!fdir_info->hash_map) { 1454 PMD_INIT_LOG(ERR, 1455 "Failed to allocate memory for fdir hash map!"); 1456 return -ENOMEM; 1457 } 1458 fdir_info->mask_added = FALSE; 1459 1460 return 0; 1461 } 1462 1463 static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) 1464 { 1465 struct ixgbe_l2_tn_info *l2_tn_info = 1466 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); 1467 char l2_tn_hash_name[RTE_HASH_NAMESIZE]; 1468 struct rte_hash_parameters l2_tn_hash_params = { 1469 .name = l2_tn_hash_name, 1470 .entries = IXGBE_MAX_L2_TN_FILTER_NUM, 1471 .key_len = sizeof(struct ixgbe_l2_tn_key), 1472 .hash_func = rte_hash_crc, 1473 .hash_func_init_val = 0, 1474 .socket_id = rte_socket_id(), 1475 }; 1476 1477 TAILQ_INIT(&l2_tn_info->l2_tn_list); 1478 snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE, 1479 "l2_tn_%s", eth_dev->device->name); 1480 l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params); 1481 if (!l2_tn_info->hash_handle) { 1482 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!"); 1483 return -EINVAL; 1484 } 1485 l2_tn_info->hash_map = rte_zmalloc("ixgbe", 1486 sizeof(struct ixgbe_l2_tn_filter *) * 1487 IXGBE_MAX_L2_TN_FILTER_NUM, 1488 0); 1489 if (!l2_tn_info->hash_map) { 1490 PMD_INIT_LOG(ERR, 1491 "Failed to allocate memory for L2 TN hash map!"); 1492 return -ENOMEM; 1493 } 1494 l2_tn_info->e_tag_en = FALSE; 1495 l2_tn_info->e_tag_fwd_en = FALSE; 1496 l2_tn_info->e_tag_ether_type = ETHER_TYPE_ETAG; 1497 1498 return 0; 1499 } 1500 /* 1501 * Negotiate mailbox API version with the PF. 1502 * After reset API version is always set to the basic one (ixgbe_mbox_api_10). 1503 * Then we try to negotiate starting with the most recent one. 1504 * If all negotiation attempts fail, then we will proceed with 1505 * the default one (ixgbe_mbox_api_10). 1506 */ 1507 static void 1508 ixgbevf_negotiate_api(struct ixgbe_hw *hw) 1509 { 1510 int32_t i; 1511 1512 /* start with highest supported, proceed down */ 1513 static const enum ixgbe_pfvf_api_rev sup_ver[] = { 1514 ixgbe_mbox_api_12, 1515 ixgbe_mbox_api_11, 1516 ixgbe_mbox_api_10, 1517 }; 1518 1519 for (i = 0; 1520 i != RTE_DIM(sup_ver) && 1521 ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0; 1522 i++) 1523 ; 1524 } 1525 1526 static void 1527 generate_random_mac_addr(struct ether_addr *mac_addr) 1528 { 1529 uint64_t random; 1530 1531 /* Set Organizationally Unique Identifier (OUI) prefix. */ 1532 mac_addr->addr_bytes[0] = 0x00; 1533 mac_addr->addr_bytes[1] = 0x09; 1534 mac_addr->addr_bytes[2] = 0xC0; 1535 /* Force indication of locally assigned MAC address. */ 1536 mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR; 1537 /* Generate the last 3 bytes of the MAC address with a random number. */ 1538 random = rte_rand(); 1539 memcpy(&mac_addr->addr_bytes[3], &random, 3); 1540 } 1541 1542 /* 1543 * Virtual Function device init 1544 */ 1545 static int 1546 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) 1547 { 1548 int diag; 1549 uint32_t tc, tcs; 1550 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1551 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1552 struct ixgbe_hw *hw = 1553 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1554 struct ixgbe_vfta *shadow_vfta = 1555 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1556 struct ixgbe_hwstrip *hwstrip = 1557 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1558 struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; 1559 1560 PMD_INIT_FUNC_TRACE(); 1561 1562 eth_dev->dev_ops = &ixgbevf_eth_dev_ops; 1563 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1564 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1565 1566 /* for secondary processes, we don't initialise any further as primary 1567 * has already done this work. Only check we don't need a different 1568 * RX function 1569 */ 1570 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1571 struct ixgbe_tx_queue *txq; 1572 /* TX queue function in primary, set by last queue initialized 1573 * Tx queue may not initialized by primary process 1574 */ 1575 if (eth_dev->data->tx_queues) { 1576 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1]; 1577 ixgbe_set_tx_function(eth_dev, txq); 1578 } else { 1579 /* Use default TX function if we get here */ 1580 PMD_INIT_LOG(NOTICE, 1581 "No TX queues configured yet. Using default TX function."); 1582 } 1583 1584 ixgbe_set_rx_function(eth_dev); 1585 1586 return 0; 1587 } 1588 1589 rte_eth_copy_pci_info(eth_dev, pci_dev); 1590 1591 hw->device_id = pci_dev->id.device_id; 1592 hw->vendor_id = pci_dev->id.vendor_id; 1593 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1594 1595 /* initialize the vfta */ 1596 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1597 1598 /* initialize the hw strip bitmap*/ 1599 memset(hwstrip, 0, sizeof(*hwstrip)); 1600 1601 /* Initialize the shared code (base driver) */ 1602 diag = ixgbe_init_shared_code(hw); 1603 if (diag != IXGBE_SUCCESS) { 1604 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag); 1605 return -EIO; 1606 } 1607 1608 /* init_mailbox_params */ 1609 hw->mbx.ops.init_params(hw); 1610 1611 /* Reset the hw statistics */ 1612 ixgbevf_dev_stats_reset(eth_dev); 1613 1614 /* Disable the interrupts for VF */ 1615 ixgbevf_intr_disable(eth_dev); 1616 1617 hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ 1618 diag = hw->mac.ops.reset_hw(hw); 1619 1620 /* 1621 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when 1622 * the underlying PF driver has not assigned a MAC address to the VF. 1623 * In this case, assign a random MAC address. 1624 */ 1625 if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) { 1626 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1627 /* 1628 * This error code will be propagated to the app by 1629 * rte_eth_dev_reset, so use a public error code rather than 1630 * the internal-only IXGBE_ERR_RESET_FAILED 1631 */ 1632 return -EAGAIN; 1633 } 1634 1635 /* negotiate mailbox API version to use with the PF. */ 1636 ixgbevf_negotiate_api(hw); 1637 1638 /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */ 1639 ixgbevf_get_queues(hw, &tcs, &tc); 1640 1641 /* Allocate memory for storing MAC addresses */ 1642 eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN * 1643 hw->mac.num_rar_entries, 0); 1644 if (eth_dev->data->mac_addrs == NULL) { 1645 PMD_INIT_LOG(ERR, 1646 "Failed to allocate %u bytes needed to store " 1647 "MAC addresses", 1648 ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1649 return -ENOMEM; 1650 } 1651 1652 /* Generate a random MAC address, if none was assigned by PF. */ 1653 if (is_zero_ether_addr(perm_addr)) { 1654 generate_random_mac_addr(perm_addr); 1655 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1); 1656 if (diag) { 1657 rte_free(eth_dev->data->mac_addrs); 1658 eth_dev->data->mac_addrs = NULL; 1659 return diag; 1660 } 1661 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF"); 1662 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address " 1663 "%02x:%02x:%02x:%02x:%02x:%02x", 1664 perm_addr->addr_bytes[0], 1665 perm_addr->addr_bytes[1], 1666 perm_addr->addr_bytes[2], 1667 perm_addr->addr_bytes[3], 1668 perm_addr->addr_bytes[4], 1669 perm_addr->addr_bytes[5]); 1670 } 1671 1672 /* Copy the permanent MAC address */ 1673 ether_addr_copy(perm_addr, ð_dev->data->mac_addrs[0]); 1674 1675 /* reset the hardware with the new settings */ 1676 diag = hw->mac.ops.start_hw(hw); 1677 switch (diag) { 1678 case 0: 1679 break; 1680 1681 default: 1682 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1683 return -EIO; 1684 } 1685 1686 rte_intr_callback_register(intr_handle, 1687 ixgbevf_dev_interrupt_handler, eth_dev); 1688 rte_intr_enable(intr_handle); 1689 ixgbevf_intr_enable(eth_dev); 1690 1691 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", 1692 eth_dev->data->port_id, pci_dev->id.vendor_id, 1693 pci_dev->id.device_id, "ixgbe_mac_82599_vf"); 1694 1695 return 0; 1696 } 1697 1698 /* Virtual Function device uninit */ 1699 1700 static int 1701 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) 1702 { 1703 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1704 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1705 struct ixgbe_hw *hw; 1706 1707 PMD_INIT_FUNC_TRACE(); 1708 1709 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1710 return 0; 1711 1712 hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1713 1714 if (hw->adapter_stopped == 0) 1715 ixgbevf_dev_close(eth_dev); 1716 1717 eth_dev->dev_ops = NULL; 1718 eth_dev->rx_pkt_burst = NULL; 1719 eth_dev->tx_pkt_burst = NULL; 1720 1721 /* Disable the interrupts for VF */ 1722 ixgbevf_intr_disable(eth_dev); 1723 1724 rte_intr_disable(intr_handle); 1725 rte_intr_callback_unregister(intr_handle, 1726 ixgbevf_dev_interrupt_handler, eth_dev); 1727 1728 return 0; 1729 } 1730 1731 static int 1732 eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1733 struct rte_pci_device *pci_dev) 1734 { 1735 char name[RTE_ETH_NAME_MAX_LEN]; 1736 struct rte_eth_dev *pf_ethdev; 1737 struct rte_eth_devargs eth_da; 1738 int i, retval; 1739 1740 if (pci_dev->device.devargs) { 1741 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, 1742 ð_da); 1743 if (retval) 1744 return retval; 1745 } else 1746 memset(ð_da, 0, sizeof(eth_da)); 1747 1748 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, 1749 sizeof(struct ixgbe_adapter), 1750 eth_dev_pci_specific_init, pci_dev, 1751 eth_ixgbe_dev_init, NULL); 1752 1753 if (retval || eth_da.nb_representor_ports < 1) 1754 return retval; 1755 1756 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1757 if (pf_ethdev == NULL) 1758 return -ENODEV; 1759 1760 /* probe VF representor ports */ 1761 for (i = 0; i < eth_da.nb_representor_ports; i++) { 1762 struct ixgbe_vf_info *vfinfo; 1763 struct ixgbe_vf_representor representor; 1764 1765 vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA( 1766 pf_ethdev->data->dev_private); 1767 if (vfinfo == NULL) { 1768 PMD_DRV_LOG(ERR, 1769 "no virtual functions supported by PF"); 1770 break; 1771 } 1772 1773 representor.vf_id = eth_da.representor_ports[i]; 1774 representor.switch_domain_id = vfinfo->switch_domain_id; 1775 representor.pf_ethdev = pf_ethdev; 1776 1777 /* representor port net_bdf_port */ 1778 snprintf(name, sizeof(name), "net_%s_representor_%d", 1779 pci_dev->device.name, 1780 eth_da.representor_ports[i]); 1781 1782 retval = rte_eth_dev_create(&pci_dev->device, name, 1783 sizeof(struct ixgbe_vf_representor), NULL, NULL, 1784 ixgbe_vf_representor_init, &representor); 1785 1786 if (retval) 1787 PMD_DRV_LOG(ERR, "failed to create ixgbe vf " 1788 "representor %s.", name); 1789 } 1790 1791 return 0; 1792 } 1793 1794 static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev) 1795 { 1796 struct rte_eth_dev *ethdev; 1797 1798 ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1799 if (!ethdev) 1800 return -ENODEV; 1801 1802 if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) 1803 return rte_eth_dev_destroy(ethdev, ixgbe_vf_representor_uninit); 1804 else 1805 return rte_eth_dev_destroy(ethdev, eth_ixgbe_dev_uninit); 1806 } 1807 1808 static struct rte_pci_driver rte_ixgbe_pmd = { 1809 .id_table = pci_id_ixgbe_map, 1810 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 1811 RTE_PCI_DRV_IOVA_AS_VA, 1812 .probe = eth_ixgbe_pci_probe, 1813 .remove = eth_ixgbe_pci_remove, 1814 }; 1815 1816 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1817 struct rte_pci_device *pci_dev) 1818 { 1819 return rte_eth_dev_pci_generic_probe(pci_dev, 1820 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init); 1821 } 1822 1823 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev) 1824 { 1825 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit); 1826 } 1827 1828 /* 1829 * virtual function driver struct 1830 */ 1831 static struct rte_pci_driver rte_ixgbevf_pmd = { 1832 .id_table = pci_id_ixgbevf_map, 1833 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA, 1834 .probe = eth_ixgbevf_pci_probe, 1835 .remove = eth_ixgbevf_pci_remove, 1836 }; 1837 1838 static int 1839 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1840 { 1841 struct ixgbe_hw *hw = 1842 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1843 struct ixgbe_vfta *shadow_vfta = 1844 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1845 uint32_t vfta; 1846 uint32_t vid_idx; 1847 uint32_t vid_bit; 1848 1849 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 1850 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 1851 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx)); 1852 if (on) 1853 vfta |= vid_bit; 1854 else 1855 vfta &= ~vid_bit; 1856 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta); 1857 1858 /* update local VFTA copy */ 1859 shadow_vfta->vfta[vid_idx] = vfta; 1860 1861 return 0; 1862 } 1863 1864 static void 1865 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 1866 { 1867 if (on) 1868 ixgbe_vlan_hw_strip_enable(dev, queue); 1869 else 1870 ixgbe_vlan_hw_strip_disable(dev, queue); 1871 } 1872 1873 static int 1874 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 1875 enum rte_vlan_type vlan_type, 1876 uint16_t tpid) 1877 { 1878 struct ixgbe_hw *hw = 1879 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1880 int ret = 0; 1881 uint32_t reg; 1882 uint32_t qinq; 1883 1884 qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1885 qinq &= IXGBE_DMATXCTL_GDV; 1886 1887 switch (vlan_type) { 1888 case ETH_VLAN_TYPE_INNER: 1889 if (qinq) { 1890 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1891 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1892 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1893 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1894 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1895 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1896 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1897 } else { 1898 ret = -ENOTSUP; 1899 PMD_DRV_LOG(ERR, "Inner type is not supported" 1900 " by single VLAN"); 1901 } 1902 break; 1903 case ETH_VLAN_TYPE_OUTER: 1904 if (qinq) { 1905 /* Only the high 16-bits is valid */ 1906 IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid << 1907 IXGBE_EXVET_VET_EXT_SHIFT); 1908 } else { 1909 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1910 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1911 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1912 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1913 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1914 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1915 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1916 } 1917 1918 break; 1919 default: 1920 ret = -EINVAL; 1921 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type); 1922 break; 1923 } 1924 1925 return ret; 1926 } 1927 1928 void 1929 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev) 1930 { 1931 struct ixgbe_hw *hw = 1932 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1933 uint32_t vlnctrl; 1934 1935 PMD_INIT_FUNC_TRACE(); 1936 1937 /* Filter Table Disable */ 1938 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1939 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 1940 1941 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1942 } 1943 1944 void 1945 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) 1946 { 1947 struct ixgbe_hw *hw = 1948 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1949 struct ixgbe_vfta *shadow_vfta = 1950 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1951 uint32_t vlnctrl; 1952 uint16_t i; 1953 1954 PMD_INIT_FUNC_TRACE(); 1955 1956 /* Filter Table Enable */ 1957 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1958 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 1959 vlnctrl |= IXGBE_VLNCTRL_VFE; 1960 1961 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1962 1963 /* write whatever is in local vfta copy */ 1964 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1965 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]); 1966 } 1967 1968 static void 1969 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) 1970 { 1971 struct ixgbe_hwstrip *hwstrip = 1972 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); 1973 struct ixgbe_rx_queue *rxq; 1974 1975 if (queue >= IXGBE_MAX_RX_QUEUE_NUM) 1976 return; 1977 1978 if (on) 1979 IXGBE_SET_HWSTRIP(hwstrip, queue); 1980 else 1981 IXGBE_CLEAR_HWSTRIP(hwstrip, queue); 1982 1983 if (queue >= dev->data->nb_rx_queues) 1984 return; 1985 1986 rxq = dev->data->rx_queues[queue]; 1987 1988 if (on) { 1989 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; 1990 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 1991 } else { 1992 rxq->vlan_flags = PKT_RX_VLAN; 1993 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 1994 } 1995 } 1996 1997 static void 1998 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) 1999 { 2000 struct ixgbe_hw *hw = 2001 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2002 uint32_t ctrl; 2003 2004 PMD_INIT_FUNC_TRACE(); 2005 2006 if (hw->mac.type == ixgbe_mac_82598EB) { 2007 /* No queue level support */ 2008 PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); 2009 return; 2010 } 2011 2012 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 2013 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 2014 ctrl &= ~IXGBE_RXDCTL_VME; 2015 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 2016 2017 /* record those setting for HW strip per queue */ 2018 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); 2019 } 2020 2021 static void 2022 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) 2023 { 2024 struct ixgbe_hw *hw = 2025 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2026 uint32_t ctrl; 2027 2028 PMD_INIT_FUNC_TRACE(); 2029 2030 if (hw->mac.type == ixgbe_mac_82598EB) { 2031 /* No queue level supported */ 2032 PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); 2033 return; 2034 } 2035 2036 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 2037 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 2038 ctrl |= IXGBE_RXDCTL_VME; 2039 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 2040 2041 /* record those setting for HW strip per queue */ 2042 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); 2043 } 2044 2045 static void 2046 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev) 2047 { 2048 struct ixgbe_hw *hw = 2049 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2050 uint32_t ctrl; 2051 2052 PMD_INIT_FUNC_TRACE(); 2053 2054 /* DMATXCTRL: Geric Double VLAN Disable */ 2055 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2056 ctrl &= ~IXGBE_DMATXCTL_GDV; 2057 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); 2058 2059 /* CTRL_EXT: Global Double VLAN Disable */ 2060 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 2061 ctrl &= ~IXGBE_EXTENDED_VLAN; 2062 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); 2063 2064 } 2065 2066 static void 2067 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) 2068 { 2069 struct ixgbe_hw *hw = 2070 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2071 uint32_t ctrl; 2072 2073 PMD_INIT_FUNC_TRACE(); 2074 2075 /* DMATXCTRL: Geric Double VLAN Enable */ 2076 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2077 ctrl |= IXGBE_DMATXCTL_GDV; 2078 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); 2079 2080 /* CTRL_EXT: Global Double VLAN Enable */ 2081 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 2082 ctrl |= IXGBE_EXTENDED_VLAN; 2083 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); 2084 2085 /* Clear pooling mode of PFVTCTL. It's required by X550. */ 2086 if (hw->mac.type == ixgbe_mac_X550 || 2087 hw->mac.type == ixgbe_mac_X550EM_x || 2088 hw->mac.type == ixgbe_mac_X550EM_a) { 2089 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 2090 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 2091 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 2092 } 2093 2094 /* 2095 * VET EXT field in the EXVET register = 0x8100 by default 2096 * So no need to change. Same to VT field of DMATXCTL register 2097 */ 2098 } 2099 2100 void 2101 ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) 2102 { 2103 struct ixgbe_hw *hw = 2104 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2105 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; 2106 uint32_t ctrl; 2107 uint16_t i; 2108 struct ixgbe_rx_queue *rxq; 2109 bool on; 2110 2111 PMD_INIT_FUNC_TRACE(); 2112 2113 if (hw->mac.type == ixgbe_mac_82598EB) { 2114 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { 2115 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2116 ctrl |= IXGBE_VLNCTRL_VME; 2117 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2118 } else { 2119 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2120 ctrl &= ~IXGBE_VLNCTRL_VME; 2121 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2122 } 2123 } else { 2124 /* 2125 * Other 10G NIC, the VLAN strip can be setup 2126 * per queue in RXDCTL 2127 */ 2128 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2129 rxq = dev->data->rx_queues[i]; 2130 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); 2131 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { 2132 ctrl |= IXGBE_RXDCTL_VME; 2133 on = TRUE; 2134 } else { 2135 ctrl &= ~IXGBE_RXDCTL_VME; 2136 on = FALSE; 2137 } 2138 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); 2139 2140 /* record those setting for HW strip per queue */ 2141 ixgbe_vlan_hw_strip_bitmap_set(dev, i, on); 2142 } 2143 } 2144 } 2145 2146 static void 2147 ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) 2148 { 2149 uint16_t i; 2150 struct rte_eth_rxmode *rxmode; 2151 struct ixgbe_rx_queue *rxq; 2152 2153 if (mask & ETH_VLAN_STRIP_MASK) { 2154 rxmode = &dev->data->dev_conf.rxmode; 2155 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 2156 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2157 rxq = dev->data->rx_queues[i]; 2158 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 2159 } 2160 else 2161 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2162 rxq = dev->data->rx_queues[i]; 2163 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 2164 } 2165 } 2166 } 2167 2168 static int 2169 ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) 2170 { 2171 struct rte_eth_rxmode *rxmode; 2172 rxmode = &dev->data->dev_conf.rxmode; 2173 2174 if (mask & ETH_VLAN_STRIP_MASK) { 2175 ixgbe_vlan_hw_strip_config(dev); 2176 } 2177 2178 if (mask & ETH_VLAN_FILTER_MASK) { 2179 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 2180 ixgbe_vlan_hw_filter_enable(dev); 2181 else 2182 ixgbe_vlan_hw_filter_disable(dev); 2183 } 2184 2185 if (mask & ETH_VLAN_EXTEND_MASK) { 2186 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 2187 ixgbe_vlan_hw_extend_enable(dev); 2188 else 2189 ixgbe_vlan_hw_extend_disable(dev); 2190 } 2191 2192 return 0; 2193 } 2194 2195 static int 2196 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2197 { 2198 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 2199 2200 ixgbe_vlan_offload_config(dev, mask); 2201 2202 return 0; 2203 } 2204 2205 static void 2206 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) 2207 { 2208 struct ixgbe_hw *hw = 2209 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2210 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ 2211 uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2212 2213 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ 2214 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); 2215 } 2216 2217 static int 2218 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) 2219 { 2220 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2221 2222 switch (nb_rx_q) { 2223 case 1: 2224 case 2: 2225 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS; 2226 break; 2227 case 4: 2228 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS; 2229 break; 2230 default: 2231 return -EINVAL; 2232 } 2233 2234 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 2235 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2236 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = 2237 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2238 return 0; 2239 } 2240 2241 static int 2242 ixgbe_check_mq_mode(struct rte_eth_dev *dev) 2243 { 2244 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 2245 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2246 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2247 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2248 2249 if (RTE_ETH_DEV_SRIOV(dev).active != 0) { 2250 /* check multi-queue mode */ 2251 switch (dev_conf->rxmode.mq_mode) { 2252 case ETH_MQ_RX_VMDQ_DCB: 2253 PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV"); 2254 break; 2255 case ETH_MQ_RX_VMDQ_DCB_RSS: 2256 /* DCB/RSS VMDQ in SRIOV mode, not implement yet */ 2257 PMD_INIT_LOG(ERR, "SRIOV active," 2258 " unsupported mq_mode rx %d.", 2259 dev_conf->rxmode.mq_mode); 2260 return -EINVAL; 2261 case ETH_MQ_RX_RSS: 2262 case ETH_MQ_RX_VMDQ_RSS: 2263 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS; 2264 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) 2265 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) { 2266 PMD_INIT_LOG(ERR, "SRIOV is active," 2267 " invalid queue number" 2268 " for VMDQ RSS, allowed" 2269 " value are 1, 2 or 4."); 2270 return -EINVAL; 2271 } 2272 break; 2273 case ETH_MQ_RX_VMDQ_ONLY: 2274 case ETH_MQ_RX_NONE: 2275 /* if nothing mq mode configure, use default scheme */ 2276 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; 2277 break; 2278 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/ 2279 /* SRIOV only works in VMDq enable mode */ 2280 PMD_INIT_LOG(ERR, "SRIOV is active," 2281 " wrong mq_mode rx %d.", 2282 dev_conf->rxmode.mq_mode); 2283 return -EINVAL; 2284 } 2285 2286 switch (dev_conf->txmode.mq_mode) { 2287 case ETH_MQ_TX_VMDQ_DCB: 2288 PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV"); 2289 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 2290 break; 2291 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */ 2292 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY; 2293 break; 2294 } 2295 2296 /* check valid queue number */ 2297 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || 2298 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { 2299 PMD_INIT_LOG(ERR, "SRIOV is active," 2300 " nb_rx_q=%d nb_tx_q=%d queue number" 2301 " must be less than or equal to %d.", 2302 nb_rx_q, nb_tx_q, 2303 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); 2304 return -EINVAL; 2305 } 2306 } else { 2307 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { 2308 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" 2309 " not supported."); 2310 return -EINVAL; 2311 } 2312 /* check configuration for vmdb+dcb mode */ 2313 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) { 2314 const struct rte_eth_vmdq_dcb_conf *conf; 2315 2316 if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2317 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.", 2318 IXGBE_VMDQ_DCB_NB_QUEUES); 2319 return -EINVAL; 2320 } 2321 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf; 2322 if (!(conf->nb_queue_pools == ETH_16_POOLS || 2323 conf->nb_queue_pools == ETH_32_POOLS)) { 2324 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2325 " nb_queue_pools must be %d or %d.", 2326 ETH_16_POOLS, ETH_32_POOLS); 2327 return -EINVAL; 2328 } 2329 } 2330 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { 2331 const struct rte_eth_vmdq_dcb_tx_conf *conf; 2332 2333 if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2334 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d", 2335 IXGBE_VMDQ_DCB_NB_QUEUES); 2336 return -EINVAL; 2337 } 2338 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2339 if (!(conf->nb_queue_pools == ETH_16_POOLS || 2340 conf->nb_queue_pools == ETH_32_POOLS)) { 2341 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2342 " nb_queue_pools != %d and" 2343 " nb_queue_pools != %d.", 2344 ETH_16_POOLS, ETH_32_POOLS); 2345 return -EINVAL; 2346 } 2347 } 2348 2349 /* For DCB mode check our configuration before we go further */ 2350 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { 2351 const struct rte_eth_dcb_rx_conf *conf; 2352 2353 conf = &dev_conf->rx_adv_conf.dcb_rx_conf; 2354 if (!(conf->nb_tcs == ETH_4_TCS || 2355 conf->nb_tcs == ETH_8_TCS)) { 2356 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2357 " and nb_tcs != %d.", 2358 ETH_4_TCS, ETH_8_TCS); 2359 return -EINVAL; 2360 } 2361 } 2362 2363 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { 2364 const struct rte_eth_dcb_tx_conf *conf; 2365 2366 conf = &dev_conf->tx_adv_conf.dcb_tx_conf; 2367 if (!(conf->nb_tcs == ETH_4_TCS || 2368 conf->nb_tcs == ETH_8_TCS)) { 2369 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2370 " and nb_tcs != %d.", 2371 ETH_4_TCS, ETH_8_TCS); 2372 return -EINVAL; 2373 } 2374 } 2375 2376 /* 2377 * When DCB/VT is off, maximum number of queues changes, 2378 * except for 82598EB, which remains constant. 2379 */ 2380 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && 2381 hw->mac.type != ixgbe_mac_82598EB) { 2382 if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) { 2383 PMD_INIT_LOG(ERR, 2384 "Neither VT nor DCB are enabled, " 2385 "nb_tx_q > %d.", 2386 IXGBE_NONE_MODE_TX_NB_QUEUES); 2387 return -EINVAL; 2388 } 2389 } 2390 } 2391 return 0; 2392 } 2393 2394 static int 2395 ixgbe_dev_configure(struct rte_eth_dev *dev) 2396 { 2397 struct ixgbe_interrupt *intr = 2398 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2399 struct ixgbe_adapter *adapter = 2400 (struct ixgbe_adapter *)dev->data->dev_private; 2401 int ret; 2402 2403 PMD_INIT_FUNC_TRACE(); 2404 /* multipe queue mode checking */ 2405 ret = ixgbe_check_mq_mode(dev); 2406 if (ret != 0) { 2407 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.", 2408 ret); 2409 return ret; 2410 } 2411 2412 /* set flag to update link status after init */ 2413 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2414 2415 /* 2416 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 2417 * allocation or vector Rx preconditions we will reset it. 2418 */ 2419 adapter->rx_bulk_alloc_allowed = true; 2420 adapter->rx_vec_allowed = true; 2421 2422 return 0; 2423 } 2424 2425 static void 2426 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) 2427 { 2428 struct ixgbe_hw *hw = 2429 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2430 struct ixgbe_interrupt *intr = 2431 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2432 uint32_t gpie; 2433 2434 /* only set up it on X550EM_X */ 2435 if (hw->mac.type == ixgbe_mac_X550EM_x) { 2436 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 2437 gpie |= IXGBE_SDP0_GPIEN_X550EM_x; 2438 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2439 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 2440 intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x; 2441 } 2442 } 2443 2444 int 2445 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, 2446 uint16_t tx_rate, uint64_t q_msk) 2447 { 2448 struct ixgbe_hw *hw; 2449 struct ixgbe_vf_info *vfinfo; 2450 struct rte_eth_link link; 2451 uint8_t nb_q_per_pool; 2452 uint32_t queue_stride; 2453 uint32_t queue_idx, idx = 0, vf_idx; 2454 uint32_t queue_end; 2455 uint16_t total_rate = 0; 2456 struct rte_pci_device *pci_dev; 2457 2458 pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2459 rte_eth_link_get_nowait(dev->data->port_id, &link); 2460 2461 if (vf >= pci_dev->max_vfs) 2462 return -EINVAL; 2463 2464 if (tx_rate > link.link_speed) 2465 return -EINVAL; 2466 2467 if (q_msk == 0) 2468 return 0; 2469 2470 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2471 vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); 2472 nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2473 queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2474 queue_idx = vf * queue_stride; 2475 queue_end = queue_idx + nb_q_per_pool - 1; 2476 if (queue_end >= hw->mac.max_tx_queues) 2477 return -EINVAL; 2478 2479 if (vfinfo) { 2480 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) { 2481 if (vf_idx == vf) 2482 continue; 2483 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); 2484 idx++) 2485 total_rate += vfinfo[vf_idx].tx_rate[idx]; 2486 } 2487 } else { 2488 return -EINVAL; 2489 } 2490 2491 /* Store tx_rate for this vf. */ 2492 for (idx = 0; idx < nb_q_per_pool; idx++) { 2493 if (((uint64_t)0x1 << idx) & q_msk) { 2494 if (vfinfo[vf].tx_rate[idx] != tx_rate) 2495 vfinfo[vf].tx_rate[idx] = tx_rate; 2496 total_rate += tx_rate; 2497 } 2498 } 2499 2500 if (total_rate > dev->data->dev_link.link_speed) { 2501 /* Reset stored TX rate of the VF if it causes exceed 2502 * link speed. 2503 */ 2504 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); 2505 return -EINVAL; 2506 } 2507 2508 /* Set RTTBCNRC of each queue/pool for vf X */ 2509 for (; queue_idx <= queue_end; queue_idx++) { 2510 if (0x1 & q_msk) 2511 ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); 2512 q_msk = q_msk >> 1; 2513 } 2514 2515 return 0; 2516 } 2517 2518 /* 2519 * Configure device link speed and setup link. 2520 * It returns 0 on success. 2521 */ 2522 static int 2523 ixgbe_dev_start(struct rte_eth_dev *dev) 2524 { 2525 struct ixgbe_hw *hw = 2526 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2527 struct ixgbe_vf_info *vfinfo = 2528 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2529 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2530 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2531 uint32_t intr_vector = 0; 2532 int err, link_up = 0, negotiate = 0; 2533 uint32_t speed = 0; 2534 uint32_t allowed_speeds = 0; 2535 int mask = 0; 2536 int status; 2537 uint16_t vf, idx; 2538 uint32_t *link_speeds; 2539 struct ixgbe_tm_conf *tm_conf = 2540 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2541 2542 PMD_INIT_FUNC_TRACE(); 2543 2544 /* IXGBE devices don't support: 2545 * - half duplex (checked afterwards for valid speeds) 2546 * - fixed speed: TODO implement 2547 */ 2548 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { 2549 PMD_INIT_LOG(ERR, 2550 "Invalid link_speeds for port %u, fix speed not supported", 2551 dev->data->port_id); 2552 return -EINVAL; 2553 } 2554 2555 /* Stop the link setup handler before resetting the HW. */ 2556 rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev); 2557 2558 /* disable uio/vfio intr/eventfd mapping */ 2559 rte_intr_disable(intr_handle); 2560 2561 /* stop adapter */ 2562 hw->adapter_stopped = 0; 2563 ixgbe_stop_adapter(hw); 2564 2565 /* reinitialize adapter 2566 * this calls reset and start 2567 */ 2568 status = ixgbe_pf_reset_hw(hw); 2569 if (status != 0) 2570 return -1; 2571 hw->mac.ops.start_hw(hw); 2572 hw->mac.get_link_status = true; 2573 2574 /* configure PF module if SRIOV enabled */ 2575 ixgbe_pf_host_configure(dev); 2576 2577 ixgbe_dev_phy_intr_setup(dev); 2578 2579 /* check and configure queue intr-vector mapping */ 2580 if ((rte_intr_cap_multiple(intr_handle) || 2581 !RTE_ETH_DEV_SRIOV(dev).active) && 2582 dev->data->dev_conf.intr_conf.rxq != 0) { 2583 intr_vector = dev->data->nb_rx_queues; 2584 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) { 2585 PMD_INIT_LOG(ERR, "At most %d intr queues supported", 2586 IXGBE_MAX_INTR_QUEUE_NUM); 2587 return -ENOTSUP; 2588 } 2589 if (rte_intr_efd_enable(intr_handle, intr_vector)) 2590 return -1; 2591 } 2592 2593 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 2594 intr_handle->intr_vec = 2595 rte_zmalloc("intr_vec", 2596 dev->data->nb_rx_queues * sizeof(int), 0); 2597 if (intr_handle->intr_vec == NULL) { 2598 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 2599 " intr_vec", dev->data->nb_rx_queues); 2600 return -ENOMEM; 2601 } 2602 } 2603 2604 /* confiugre msix for sleep until rx interrupt */ 2605 ixgbe_configure_msix(dev); 2606 2607 /* initialize transmission unit */ 2608 ixgbe_dev_tx_init(dev); 2609 2610 /* This can fail when allocating mbufs for descriptor rings */ 2611 err = ixgbe_dev_rx_init(dev); 2612 if (err) { 2613 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 2614 goto error; 2615 } 2616 2617 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | 2618 ETH_VLAN_EXTEND_MASK; 2619 err = ixgbe_vlan_offload_config(dev, mask); 2620 if (err) { 2621 PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); 2622 goto error; 2623 } 2624 2625 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { 2626 /* Enable vlan filtering for VMDq */ 2627 ixgbe_vmdq_vlan_hw_filter_enable(dev); 2628 } 2629 2630 /* Configure DCB hw */ 2631 ixgbe_configure_dcb(dev); 2632 2633 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { 2634 err = ixgbe_fdir_configure(dev); 2635 if (err) 2636 goto error; 2637 } 2638 2639 /* Restore vf rate limit */ 2640 if (vfinfo != NULL) { 2641 for (vf = 0; vf < pci_dev->max_vfs; vf++) 2642 for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) 2643 if (vfinfo[vf].tx_rate[idx] != 0) 2644 ixgbe_set_vf_rate_limit( 2645 dev, vf, 2646 vfinfo[vf].tx_rate[idx], 2647 1 << idx); 2648 } 2649 2650 ixgbe_restore_statistics_mapping(dev); 2651 2652 err = ixgbe_dev_rxtx_start(dev); 2653 if (err < 0) { 2654 PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); 2655 goto error; 2656 } 2657 2658 /* Skip link setup if loopback mode is enabled for 82599. */ 2659 if (hw->mac.type == ixgbe_mac_82599EB && 2660 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX) 2661 goto skip_link_setup; 2662 2663 if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { 2664 err = hw->mac.ops.setup_sfp(hw); 2665 if (err) 2666 goto error; 2667 } 2668 2669 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2670 /* Turn on the copper */ 2671 ixgbe_set_phy_power(hw, true); 2672 } else { 2673 /* Turn on the laser */ 2674 ixgbe_enable_tx_laser(hw); 2675 } 2676 2677 err = ixgbe_check_link(hw, &speed, &link_up, 0); 2678 if (err) 2679 goto error; 2680 dev->data->dev_link.link_status = link_up; 2681 2682 err = ixgbe_get_link_capabilities(hw, &speed, &negotiate); 2683 if (err) 2684 goto error; 2685 2686 switch (hw->mac.type) { 2687 case ixgbe_mac_X550: 2688 case ixgbe_mac_X550EM_x: 2689 case ixgbe_mac_X550EM_a: 2690 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | 2691 ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G | 2692 ETH_LINK_SPEED_10G; 2693 break; 2694 default: 2695 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | 2696 ETH_LINK_SPEED_10G; 2697 } 2698 2699 link_speeds = &dev->data->dev_conf.link_speeds; 2700 if (*link_speeds & ~allowed_speeds) { 2701 PMD_INIT_LOG(ERR, "Invalid link setting"); 2702 goto error; 2703 } 2704 2705 speed = 0x0; 2706 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) { 2707 switch (hw->mac.type) { 2708 case ixgbe_mac_82598EB: 2709 speed = IXGBE_LINK_SPEED_82598_AUTONEG; 2710 break; 2711 case ixgbe_mac_82599EB: 2712 case ixgbe_mac_X540: 2713 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2714 break; 2715 case ixgbe_mac_X550: 2716 case ixgbe_mac_X550EM_x: 2717 case ixgbe_mac_X550EM_a: 2718 speed = IXGBE_LINK_SPEED_X550_AUTONEG; 2719 break; 2720 default: 2721 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2722 } 2723 } else { 2724 if (*link_speeds & ETH_LINK_SPEED_10G) 2725 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2726 if (*link_speeds & ETH_LINK_SPEED_5G) 2727 speed |= IXGBE_LINK_SPEED_5GB_FULL; 2728 if (*link_speeds & ETH_LINK_SPEED_2_5G) 2729 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 2730 if (*link_speeds & ETH_LINK_SPEED_1G) 2731 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2732 if (*link_speeds & ETH_LINK_SPEED_100M) 2733 speed |= IXGBE_LINK_SPEED_100_FULL; 2734 } 2735 2736 err = ixgbe_setup_link(hw, speed, link_up); 2737 if (err) 2738 goto error; 2739 2740 skip_link_setup: 2741 2742 if (rte_intr_allow_others(intr_handle)) { 2743 /* check if lsc interrupt is enabled */ 2744 if (dev->data->dev_conf.intr_conf.lsc != 0) 2745 ixgbe_dev_lsc_interrupt_setup(dev, TRUE); 2746 else 2747 ixgbe_dev_lsc_interrupt_setup(dev, FALSE); 2748 ixgbe_dev_macsec_interrupt_setup(dev); 2749 } else { 2750 rte_intr_callback_unregister(intr_handle, 2751 ixgbe_dev_interrupt_handler, dev); 2752 if (dev->data->dev_conf.intr_conf.lsc != 0) 2753 PMD_INIT_LOG(INFO, "lsc won't enable because of" 2754 " no intr multiplex"); 2755 } 2756 2757 /* check if rxq interrupt is enabled */ 2758 if (dev->data->dev_conf.intr_conf.rxq != 0 && 2759 rte_intr_dp_is_en(intr_handle)) 2760 ixgbe_dev_rxq_interrupt_setup(dev); 2761 2762 /* enable uio/vfio intr/eventfd mapping */ 2763 rte_intr_enable(intr_handle); 2764 2765 /* resume enabled intr since hw reset */ 2766 ixgbe_enable_intr(dev); 2767 ixgbe_l2_tunnel_conf(dev); 2768 ixgbe_filter_restore(dev); 2769 2770 if (tm_conf->root && !tm_conf->committed) 2771 PMD_DRV_LOG(WARNING, 2772 "please call hierarchy_commit() " 2773 "before starting the port"); 2774 2775 /* 2776 * Update link status right before return, because it may 2777 * start link configuration process in a separate thread. 2778 */ 2779 ixgbe_dev_link_update(dev, 0); 2780 2781 return 0; 2782 2783 error: 2784 PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err); 2785 ixgbe_dev_clear_queues(dev); 2786 return -EIO; 2787 } 2788 2789 /* 2790 * Stop device: disable rx and tx functions to allow for reconfiguring. 2791 */ 2792 static void 2793 ixgbe_dev_stop(struct rte_eth_dev *dev) 2794 { 2795 struct rte_eth_link link; 2796 struct ixgbe_adapter *adapter = 2797 (struct ixgbe_adapter *)dev->data->dev_private; 2798 struct ixgbe_hw *hw = 2799 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2800 struct ixgbe_vf_info *vfinfo = 2801 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2802 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2803 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2804 int vf; 2805 struct ixgbe_tm_conf *tm_conf = 2806 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2807 2808 PMD_INIT_FUNC_TRACE(); 2809 2810 rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev); 2811 2812 /* disable interrupts */ 2813 ixgbe_disable_intr(hw); 2814 2815 /* reset the NIC */ 2816 ixgbe_pf_reset_hw(hw); 2817 hw->adapter_stopped = 0; 2818 2819 /* stop adapter */ 2820 ixgbe_stop_adapter(hw); 2821 2822 for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) 2823 vfinfo[vf].clear_to_send = false; 2824 2825 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2826 /* Turn off the copper */ 2827 ixgbe_set_phy_power(hw, false); 2828 } else { 2829 /* Turn off the laser */ 2830 ixgbe_disable_tx_laser(hw); 2831 } 2832 2833 ixgbe_dev_clear_queues(dev); 2834 2835 /* Clear stored conf */ 2836 dev->data->scattered_rx = 0; 2837 dev->data->lro = 0; 2838 2839 /* Clear recorded link status */ 2840 memset(&link, 0, sizeof(link)); 2841 rte_eth_linkstatus_set(dev, &link); 2842 2843 if (!rte_intr_allow_others(intr_handle)) 2844 /* resume to the default handler */ 2845 rte_intr_callback_register(intr_handle, 2846 ixgbe_dev_interrupt_handler, 2847 (void *)dev); 2848 2849 /* Clean datapath event and queue/vec mapping */ 2850 rte_intr_efd_disable(intr_handle); 2851 if (intr_handle->intr_vec != NULL) { 2852 rte_free(intr_handle->intr_vec); 2853 intr_handle->intr_vec = NULL; 2854 } 2855 2856 /* reset hierarchy commit */ 2857 tm_conf->committed = false; 2858 2859 adapter->rss_reta_updated = 0; 2860 } 2861 2862 /* 2863 * Set device link up: enable tx. 2864 */ 2865 static int 2866 ixgbe_dev_set_link_up(struct rte_eth_dev *dev) 2867 { 2868 struct ixgbe_hw *hw = 2869 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2870 if (hw->mac.type == ixgbe_mac_82599EB) { 2871 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2872 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2873 /* Not suported in bypass mode */ 2874 PMD_INIT_LOG(ERR, "Set link up is not supported " 2875 "by device id 0x%x", hw->device_id); 2876 return -ENOTSUP; 2877 } 2878 #endif 2879 } 2880 2881 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2882 /* Turn on the copper */ 2883 ixgbe_set_phy_power(hw, true); 2884 } else { 2885 /* Turn on the laser */ 2886 ixgbe_enable_tx_laser(hw); 2887 } 2888 2889 return 0; 2890 } 2891 2892 /* 2893 * Set device link down: disable tx. 2894 */ 2895 static int 2896 ixgbe_dev_set_link_down(struct rte_eth_dev *dev) 2897 { 2898 struct ixgbe_hw *hw = 2899 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2900 if (hw->mac.type == ixgbe_mac_82599EB) { 2901 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2902 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2903 /* Not suported in bypass mode */ 2904 PMD_INIT_LOG(ERR, "Set link down is not supported " 2905 "by device id 0x%x", hw->device_id); 2906 return -ENOTSUP; 2907 } 2908 #endif 2909 } 2910 2911 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2912 /* Turn off the copper */ 2913 ixgbe_set_phy_power(hw, false); 2914 } else { 2915 /* Turn off the laser */ 2916 ixgbe_disable_tx_laser(hw); 2917 } 2918 2919 return 0; 2920 } 2921 2922 /* 2923 * Reset and stop device. 2924 */ 2925 static void 2926 ixgbe_dev_close(struct rte_eth_dev *dev) 2927 { 2928 struct ixgbe_hw *hw = 2929 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2930 2931 PMD_INIT_FUNC_TRACE(); 2932 2933 ixgbe_pf_reset_hw(hw); 2934 2935 ixgbe_dev_stop(dev); 2936 hw->adapter_stopped = 1; 2937 2938 ixgbe_dev_free_queues(dev); 2939 2940 ixgbe_disable_pcie_master(hw); 2941 2942 /* reprogram the RAR[0] in case user changed it. */ 2943 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 2944 } 2945 2946 /* 2947 * Reset PF device. 2948 */ 2949 static int 2950 ixgbe_dev_reset(struct rte_eth_dev *dev) 2951 { 2952 int ret; 2953 2954 /* When a DPDK PMD PF begin to reset PF port, it should notify all 2955 * its VF to make them align with it. The detailed notification 2956 * mechanism is PMD specific. As to ixgbe PF, it is rather complex. 2957 * To avoid unexpected behavior in VF, currently reset of PF with 2958 * SR-IOV activation is not supported. It might be supported later. 2959 */ 2960 if (dev->data->sriov.active) 2961 return -ENOTSUP; 2962 2963 ret = eth_ixgbe_dev_uninit(dev); 2964 if (ret) 2965 return ret; 2966 2967 ret = eth_ixgbe_dev_init(dev, NULL); 2968 2969 return ret; 2970 } 2971 2972 static void 2973 ixgbe_read_stats_registers(struct ixgbe_hw *hw, 2974 struct ixgbe_hw_stats *hw_stats, 2975 struct ixgbe_macsec_stats *macsec_stats, 2976 uint64_t *total_missed_rx, uint64_t *total_qbrc, 2977 uint64_t *total_qprc, uint64_t *total_qprdc) 2978 { 2979 uint32_t bprc, lxon, lxoff, total; 2980 uint32_t delta_gprc = 0; 2981 unsigned i; 2982 /* Workaround for RX byte count not including CRC bytes when CRC 2983 * strip is enabled. CRC bytes are removed from counters when crc_strip 2984 * is disabled. 2985 */ 2986 int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & 2987 IXGBE_HLREG0_RXCRCSTRP); 2988 2989 hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 2990 hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 2991 hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 2992 hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 2993 2994 for (i = 0; i < 8; i++) { 2995 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 2996 2997 /* global total per queue */ 2998 hw_stats->mpc[i] += mp; 2999 /* Running comprehensive total for stats display */ 3000 *total_missed_rx += hw_stats->mpc[i]; 3001 if (hw->mac.type == ixgbe_mac_82598EB) { 3002 hw_stats->rnbc[i] += 3003 IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 3004 hw_stats->pxonrxc[i] += 3005 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 3006 hw_stats->pxoffrxc[i] += 3007 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 3008 } else { 3009 hw_stats->pxonrxc[i] += 3010 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 3011 hw_stats->pxoffrxc[i] += 3012 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 3013 hw_stats->pxon2offc[i] += 3014 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 3015 } 3016 hw_stats->pxontxc[i] += 3017 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 3018 hw_stats->pxofftxc[i] += 3019 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 3020 } 3021 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3022 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 3023 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 3024 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 3025 3026 delta_gprc += delta_qprc; 3027 3028 hw_stats->qprc[i] += delta_qprc; 3029 hw_stats->qptc[i] += delta_qptc; 3030 3031 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 3032 hw_stats->qbrc[i] += 3033 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); 3034 if (crc_strip == 0) 3035 hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN; 3036 3037 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 3038 hw_stats->qbtc[i] += 3039 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32); 3040 3041 hw_stats->qprdc[i] += delta_qprdc; 3042 *total_qprdc += hw_stats->qprdc[i]; 3043 3044 *total_qprc += hw_stats->qprc[i]; 3045 *total_qbrc += hw_stats->qbrc[i]; 3046 } 3047 hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); 3048 hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); 3049 hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 3050 3051 /* 3052 * An errata states that gprc actually counts good + missed packets: 3053 * Workaround to set gprc to summated queue packet receives 3054 */ 3055 hw_stats->gprc = *total_qprc; 3056 3057 if (hw->mac.type != ixgbe_mac_82598EB) { 3058 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 3059 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 3060 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 3061 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); 3062 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 3063 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 3064 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 3065 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 3066 } else { 3067 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 3068 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 3069 /* 82598 only has a counter in the high register */ 3070 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 3071 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 3072 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 3073 } 3074 uint64_t old_tpr = hw_stats->tpr; 3075 3076 hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 3077 hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 3078 3079 if (crc_strip == 0) 3080 hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN; 3081 3082 uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); 3083 hw_stats->gptc += delta_gptc; 3084 hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN; 3085 hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN; 3086 3087 /* 3088 * Workaround: mprc hardware is incorrectly counting 3089 * broadcasts, so for now we subtract those. 3090 */ 3091 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 3092 hw_stats->bprc += bprc; 3093 hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 3094 if (hw->mac.type == ixgbe_mac_82598EB) 3095 hw_stats->mprc -= bprc; 3096 3097 hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 3098 hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 3099 hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 3100 hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 3101 hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 3102 hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 3103 3104 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 3105 hw_stats->lxontxc += lxon; 3106 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 3107 hw_stats->lxofftxc += lxoff; 3108 total = lxon + lxoff; 3109 3110 hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 3111 hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 3112 hw_stats->gptc -= total; 3113 hw_stats->mptc -= total; 3114 hw_stats->ptc64 -= total; 3115 hw_stats->gotc -= total * ETHER_MIN_LEN; 3116 3117 hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 3118 hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 3119 hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 3120 hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 3121 hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 3122 hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 3123 hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 3124 hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 3125 hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 3126 hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 3127 hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 3128 hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 3129 hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 3130 hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); 3131 hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 3132 hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 3133 /* Only read FCOE on 82599 */ 3134 if (hw->mac.type != ixgbe_mac_82598EB) { 3135 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 3136 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 3137 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 3138 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 3139 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 3140 } 3141 3142 /* Flow Director Stats registers */ 3143 if (hw->mac.type != ixgbe_mac_82598EB) { 3144 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 3145 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 3146 hw_stats->fdirustat_add += IXGBE_READ_REG(hw, 3147 IXGBE_FDIRUSTAT) & 0xFFFF; 3148 hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw, 3149 IXGBE_FDIRUSTAT) >> 16) & 0xFFFF; 3150 hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw, 3151 IXGBE_FDIRFSTAT) & 0xFFFF; 3152 hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw, 3153 IXGBE_FDIRFSTAT) >> 16) & 0xFFFF; 3154 } 3155 /* MACsec Stats registers */ 3156 macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT); 3157 macsec_stats->out_pkts_encrypted += 3158 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE); 3159 macsec_stats->out_pkts_protected += 3160 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP); 3161 macsec_stats->out_octets_encrypted += 3162 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE); 3163 macsec_stats->out_octets_protected += 3164 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP); 3165 macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT); 3166 macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD); 3167 macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI); 3168 macsec_stats->in_pkts_unknownsci += 3169 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI); 3170 macsec_stats->in_octets_decrypted += 3171 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD); 3172 macsec_stats->in_octets_validated += 3173 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV); 3174 macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH); 3175 macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY); 3176 macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE); 3177 for (i = 0; i < 2; i++) { 3178 macsec_stats->in_pkts_ok += 3179 IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i)); 3180 macsec_stats->in_pkts_invalid += 3181 IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i)); 3182 macsec_stats->in_pkts_notvalid += 3183 IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i)); 3184 } 3185 macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA); 3186 macsec_stats->in_pkts_notusingsa += 3187 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA); 3188 } 3189 3190 /* 3191 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c 3192 */ 3193 static int 3194 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3195 { 3196 struct ixgbe_hw *hw = 3197 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3198 struct ixgbe_hw_stats *hw_stats = 3199 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3200 struct ixgbe_macsec_stats *macsec_stats = 3201 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3202 dev->data->dev_private); 3203 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3204 unsigned i; 3205 3206 total_missed_rx = 0; 3207 total_qbrc = 0; 3208 total_qprc = 0; 3209 total_qprdc = 0; 3210 3211 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3212 &total_qbrc, &total_qprc, &total_qprdc); 3213 3214 if (stats == NULL) 3215 return -EINVAL; 3216 3217 /* Fill out the rte_eth_stats statistics structure */ 3218 stats->ipackets = total_qprc; 3219 stats->ibytes = total_qbrc; 3220 stats->opackets = hw_stats->gptc; 3221 stats->obytes = hw_stats->gotc; 3222 3223 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3224 stats->q_ipackets[i] = hw_stats->qprc[i]; 3225 stats->q_opackets[i] = hw_stats->qptc[i]; 3226 stats->q_ibytes[i] = hw_stats->qbrc[i]; 3227 stats->q_obytes[i] = hw_stats->qbtc[i]; 3228 stats->q_errors[i] = hw_stats->qprdc[i]; 3229 } 3230 3231 /* Rx Errors */ 3232 stats->imissed = total_missed_rx; 3233 stats->ierrors = hw_stats->crcerrs + 3234 hw_stats->mspdc + 3235 hw_stats->rlec + 3236 hw_stats->ruc + 3237 hw_stats->roc + 3238 hw_stats->illerrc + 3239 hw_stats->errbc + 3240 hw_stats->rfc + 3241 hw_stats->fccrc + 3242 hw_stats->fclast; 3243 3244 /* Tx Errors */ 3245 stats->oerrors = 0; 3246 return 0; 3247 } 3248 3249 static void 3250 ixgbe_dev_stats_reset(struct rte_eth_dev *dev) 3251 { 3252 struct ixgbe_hw_stats *stats = 3253 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3254 3255 /* HW registers are cleared on read */ 3256 ixgbe_dev_stats_get(dev, NULL); 3257 3258 /* Reset software totals */ 3259 memset(stats, 0, sizeof(*stats)); 3260 } 3261 3262 /* This function calculates the number of xstats based on the current config */ 3263 static unsigned 3264 ixgbe_xstats_calc_num(void) { 3265 return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS + 3266 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + 3267 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); 3268 } 3269 3270 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3271 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size) 3272 { 3273 const unsigned cnt_stats = ixgbe_xstats_calc_num(); 3274 unsigned stat, i, count; 3275 3276 if (xstats_names != NULL) { 3277 count = 0; 3278 3279 /* Note: limit >= cnt_stats checked upstream 3280 * in rte_eth_xstats_names() 3281 */ 3282 3283 /* Extended stats from ixgbe_hw_stats */ 3284 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3285 snprintf(xstats_names[count].name, 3286 sizeof(xstats_names[count].name), 3287 "%s", 3288 rte_ixgbe_stats_strings[i].name); 3289 count++; 3290 } 3291 3292 /* MACsec Stats */ 3293 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3294 snprintf(xstats_names[count].name, 3295 sizeof(xstats_names[count].name), 3296 "%s", 3297 rte_ixgbe_macsec_strings[i].name); 3298 count++; 3299 } 3300 3301 /* RX Priority Stats */ 3302 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3303 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3304 snprintf(xstats_names[count].name, 3305 sizeof(xstats_names[count].name), 3306 "rx_priority%u_%s", i, 3307 rte_ixgbe_rxq_strings[stat].name); 3308 count++; 3309 } 3310 } 3311 3312 /* TX Priority Stats */ 3313 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3314 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3315 snprintf(xstats_names[count].name, 3316 sizeof(xstats_names[count].name), 3317 "tx_priority%u_%s", i, 3318 rte_ixgbe_txq_strings[stat].name); 3319 count++; 3320 } 3321 } 3322 } 3323 return cnt_stats; 3324 } 3325 3326 static int ixgbe_dev_xstats_get_names_by_id( 3327 struct rte_eth_dev *dev, 3328 struct rte_eth_xstat_name *xstats_names, 3329 const uint64_t *ids, 3330 unsigned int limit) 3331 { 3332 if (!ids) { 3333 const unsigned int cnt_stats = ixgbe_xstats_calc_num(); 3334 unsigned int stat, i, count; 3335 3336 if (xstats_names != NULL) { 3337 count = 0; 3338 3339 /* Note: limit >= cnt_stats checked upstream 3340 * in rte_eth_xstats_names() 3341 */ 3342 3343 /* Extended stats from ixgbe_hw_stats */ 3344 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3345 snprintf(xstats_names[count].name, 3346 sizeof(xstats_names[count].name), 3347 "%s", 3348 rte_ixgbe_stats_strings[i].name); 3349 count++; 3350 } 3351 3352 /* MACsec Stats */ 3353 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3354 snprintf(xstats_names[count].name, 3355 sizeof(xstats_names[count].name), 3356 "%s", 3357 rte_ixgbe_macsec_strings[i].name); 3358 count++; 3359 } 3360 3361 /* RX Priority Stats */ 3362 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3363 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3364 snprintf(xstats_names[count].name, 3365 sizeof(xstats_names[count].name), 3366 "rx_priority%u_%s", i, 3367 rte_ixgbe_rxq_strings[stat].name); 3368 count++; 3369 } 3370 } 3371 3372 /* TX Priority Stats */ 3373 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3374 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3375 snprintf(xstats_names[count].name, 3376 sizeof(xstats_names[count].name), 3377 "tx_priority%u_%s", i, 3378 rte_ixgbe_txq_strings[stat].name); 3379 count++; 3380 } 3381 } 3382 } 3383 return cnt_stats; 3384 } 3385 3386 uint16_t i; 3387 uint16_t size = ixgbe_xstats_calc_num(); 3388 struct rte_eth_xstat_name xstats_names_copy[size]; 3389 3390 ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL, 3391 size); 3392 3393 for (i = 0; i < limit; i++) { 3394 if (ids[i] >= size) { 3395 PMD_INIT_LOG(ERR, "id value isn't valid"); 3396 return -1; 3397 } 3398 strcpy(xstats_names[i].name, 3399 xstats_names_copy[ids[i]].name); 3400 } 3401 return limit; 3402 } 3403 3404 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3405 struct rte_eth_xstat_name *xstats_names, unsigned limit) 3406 { 3407 unsigned i; 3408 3409 if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) 3410 return -ENOMEM; 3411 3412 if (xstats_names != NULL) 3413 for (i = 0; i < IXGBEVF_NB_XSTATS; i++) 3414 snprintf(xstats_names[i].name, 3415 sizeof(xstats_names[i].name), 3416 "%s", rte_ixgbevf_stats_strings[i].name); 3417 return IXGBEVF_NB_XSTATS; 3418 } 3419 3420 static int 3421 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3422 unsigned n) 3423 { 3424 struct ixgbe_hw *hw = 3425 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3426 struct ixgbe_hw_stats *hw_stats = 3427 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3428 struct ixgbe_macsec_stats *macsec_stats = 3429 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3430 dev->data->dev_private); 3431 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3432 unsigned i, stat, count = 0; 3433 3434 count = ixgbe_xstats_calc_num(); 3435 3436 if (n < count) 3437 return count; 3438 3439 total_missed_rx = 0; 3440 total_qbrc = 0; 3441 total_qprc = 0; 3442 total_qprdc = 0; 3443 3444 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3445 &total_qbrc, &total_qprc, &total_qprdc); 3446 3447 /* If this is a reset xstats is NULL, and we have cleared the 3448 * registers by reading them. 3449 */ 3450 if (!xstats) 3451 return 0; 3452 3453 /* Extended stats from ixgbe_hw_stats */ 3454 count = 0; 3455 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3456 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3457 rte_ixgbe_stats_strings[i].offset); 3458 xstats[count].id = count; 3459 count++; 3460 } 3461 3462 /* MACsec Stats */ 3463 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3464 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) + 3465 rte_ixgbe_macsec_strings[i].offset); 3466 xstats[count].id = count; 3467 count++; 3468 } 3469 3470 /* RX Priority Stats */ 3471 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3472 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3473 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3474 rte_ixgbe_rxq_strings[stat].offset + 3475 (sizeof(uint64_t) * i)); 3476 xstats[count].id = count; 3477 count++; 3478 } 3479 } 3480 3481 /* TX Priority Stats */ 3482 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3483 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3484 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3485 rte_ixgbe_txq_strings[stat].offset + 3486 (sizeof(uint64_t) * i)); 3487 xstats[count].id = count; 3488 count++; 3489 } 3490 } 3491 return count; 3492 } 3493 3494 static int 3495 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 3496 uint64_t *values, unsigned int n) 3497 { 3498 if (!ids) { 3499 struct ixgbe_hw *hw = 3500 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3501 struct ixgbe_hw_stats *hw_stats = 3502 IXGBE_DEV_PRIVATE_TO_STATS( 3503 dev->data->dev_private); 3504 struct ixgbe_macsec_stats *macsec_stats = 3505 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3506 dev->data->dev_private); 3507 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3508 unsigned int i, stat, count = 0; 3509 3510 count = ixgbe_xstats_calc_num(); 3511 3512 if (!ids && n < count) 3513 return count; 3514 3515 total_missed_rx = 0; 3516 total_qbrc = 0; 3517 total_qprc = 0; 3518 total_qprdc = 0; 3519 3520 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, 3521 &total_missed_rx, &total_qbrc, &total_qprc, 3522 &total_qprdc); 3523 3524 /* If this is a reset xstats is NULL, and we have cleared the 3525 * registers by reading them. 3526 */ 3527 if (!ids && !values) 3528 return 0; 3529 3530 /* Extended stats from ixgbe_hw_stats */ 3531 count = 0; 3532 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3533 values[count] = *(uint64_t *)(((char *)hw_stats) + 3534 rte_ixgbe_stats_strings[i].offset); 3535 count++; 3536 } 3537 3538 /* MACsec Stats */ 3539 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3540 values[count] = *(uint64_t *)(((char *)macsec_stats) + 3541 rte_ixgbe_macsec_strings[i].offset); 3542 count++; 3543 } 3544 3545 /* RX Priority Stats */ 3546 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3547 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3548 values[count] = 3549 *(uint64_t *)(((char *)hw_stats) + 3550 rte_ixgbe_rxq_strings[stat].offset + 3551 (sizeof(uint64_t) * i)); 3552 count++; 3553 } 3554 } 3555 3556 /* TX Priority Stats */ 3557 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3558 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3559 values[count] = 3560 *(uint64_t *)(((char *)hw_stats) + 3561 rte_ixgbe_txq_strings[stat].offset + 3562 (sizeof(uint64_t) * i)); 3563 count++; 3564 } 3565 } 3566 return count; 3567 } 3568 3569 uint16_t i; 3570 uint16_t size = ixgbe_xstats_calc_num(); 3571 uint64_t values_copy[size]; 3572 3573 ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size); 3574 3575 for (i = 0; i < n; i++) { 3576 if (ids[i] >= size) { 3577 PMD_INIT_LOG(ERR, "id value isn't valid"); 3578 return -1; 3579 } 3580 values[i] = values_copy[ids[i]]; 3581 } 3582 return n; 3583 } 3584 3585 static void 3586 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) 3587 { 3588 struct ixgbe_hw_stats *stats = 3589 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3590 struct ixgbe_macsec_stats *macsec_stats = 3591 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3592 dev->data->dev_private); 3593 3594 unsigned count = ixgbe_xstats_calc_num(); 3595 3596 /* HW registers are cleared on read */ 3597 ixgbe_dev_xstats_get(dev, NULL, count); 3598 3599 /* Reset software totals */ 3600 memset(stats, 0, sizeof(*stats)); 3601 memset(macsec_stats, 0, sizeof(*macsec_stats)); 3602 } 3603 3604 static void 3605 ixgbevf_update_stats(struct rte_eth_dev *dev) 3606 { 3607 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3608 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3609 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3610 3611 /* Good Rx packet, include VF loopback */ 3612 UPDATE_VF_STAT(IXGBE_VFGPRC, 3613 hw_stats->last_vfgprc, hw_stats->vfgprc); 3614 3615 /* Good Rx octets, include VF loopback */ 3616 UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 3617 hw_stats->last_vfgorc, hw_stats->vfgorc); 3618 3619 /* Good Tx packet, include VF loopback */ 3620 UPDATE_VF_STAT(IXGBE_VFGPTC, 3621 hw_stats->last_vfgptc, hw_stats->vfgptc); 3622 3623 /* Good Tx octets, include VF loopback */ 3624 UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 3625 hw_stats->last_vfgotc, hw_stats->vfgotc); 3626 3627 /* Rx Multicst Packet */ 3628 UPDATE_VF_STAT(IXGBE_VFMPRC, 3629 hw_stats->last_vfmprc, hw_stats->vfmprc); 3630 } 3631 3632 static int 3633 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3634 unsigned n) 3635 { 3636 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3637 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3638 unsigned i; 3639 3640 if (n < IXGBEVF_NB_XSTATS) 3641 return IXGBEVF_NB_XSTATS; 3642 3643 ixgbevf_update_stats(dev); 3644 3645 if (!xstats) 3646 return 0; 3647 3648 /* Extended stats */ 3649 for (i = 0; i < IXGBEVF_NB_XSTATS; i++) { 3650 xstats[i].id = i; 3651 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 3652 rte_ixgbevf_stats_strings[i].offset); 3653 } 3654 3655 return IXGBEVF_NB_XSTATS; 3656 } 3657 3658 static int 3659 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3660 { 3661 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3662 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3663 3664 ixgbevf_update_stats(dev); 3665 3666 if (stats == NULL) 3667 return -EINVAL; 3668 3669 stats->ipackets = hw_stats->vfgprc; 3670 stats->ibytes = hw_stats->vfgorc; 3671 stats->opackets = hw_stats->vfgptc; 3672 stats->obytes = hw_stats->vfgotc; 3673 return 0; 3674 } 3675 3676 static void 3677 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) 3678 { 3679 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3680 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3681 3682 /* Sync HW register to the last stats */ 3683 ixgbevf_dev_stats_get(dev, NULL); 3684 3685 /* reset HW current stats*/ 3686 hw_stats->vfgprc = 0; 3687 hw_stats->vfgorc = 0; 3688 hw_stats->vfgptc = 0; 3689 hw_stats->vfgotc = 0; 3690 } 3691 3692 static int 3693 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 3694 { 3695 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3696 u16 eeprom_verh, eeprom_verl; 3697 u32 etrack_id; 3698 int ret; 3699 3700 ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); 3701 ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); 3702 3703 etrack_id = (eeprom_verh << 16) | eeprom_verl; 3704 ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); 3705 3706 ret += 1; /* add the size of '\0' */ 3707 if (fw_size < (u32)ret) 3708 return ret; 3709 else 3710 return 0; 3711 } 3712 3713 static void 3714 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 3715 { 3716 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3717 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3718 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 3719 3720 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3721 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3722 if (RTE_ETH_DEV_SRIOV(dev).active == 0) { 3723 /* 3724 * When DCB/VT is off, maximum number of queues changes, 3725 * except for 82598EB, which remains constant. 3726 */ 3727 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && 3728 hw->mac.type != ixgbe_mac_82598EB) 3729 dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES; 3730 } 3731 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ 3732 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ 3733 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3734 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3735 dev_info->max_vfs = pci_dev->max_vfs; 3736 if (hw->mac.type == ixgbe_mac_82598EB) 3737 dev_info->max_vmdq_pools = ETH_16_POOLS; 3738 else 3739 dev_info->max_vmdq_pools = ETH_64_POOLS; 3740 dev_info->vmdq_queue_num = dev_info->max_rx_queues; 3741 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 3742 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 3743 dev_info->rx_queue_offload_capa); 3744 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 3745 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 3746 3747 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3748 .rx_thresh = { 3749 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3750 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3751 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3752 }, 3753 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3754 .rx_drop_en = 0, 3755 .offloads = 0, 3756 }; 3757 3758 dev_info->default_txconf = (struct rte_eth_txconf) { 3759 .tx_thresh = { 3760 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 3761 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 3762 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 3763 }, 3764 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 3765 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 3766 .offloads = 0, 3767 }; 3768 3769 dev_info->rx_desc_lim = rx_desc_lim; 3770 dev_info->tx_desc_lim = tx_desc_lim; 3771 3772 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3773 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3774 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3775 3776 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; 3777 if (hw->mac.type == ixgbe_mac_X540 || 3778 hw->mac.type == ixgbe_mac_X540_vf || 3779 hw->mac.type == ixgbe_mac_X550 || 3780 hw->mac.type == ixgbe_mac_X550_vf) { 3781 dev_info->speed_capa |= ETH_LINK_SPEED_100M; 3782 } 3783 if (hw->mac.type == ixgbe_mac_X550) { 3784 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G; 3785 dev_info->speed_capa |= ETH_LINK_SPEED_5G; 3786 } 3787 3788 /* Driver-preferred Rx/Tx parameters */ 3789 dev_info->default_rxportconf.burst_size = 32; 3790 dev_info->default_txportconf.burst_size = 32; 3791 dev_info->default_rxportconf.nb_queues = 1; 3792 dev_info->default_txportconf.nb_queues = 1; 3793 dev_info->default_rxportconf.ring_size = 256; 3794 dev_info->default_txportconf.ring_size = 256; 3795 } 3796 3797 static const uint32_t * 3798 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) 3799 { 3800 static const uint32_t ptypes[] = { 3801 /* For non-vec functions, 3802 * refers to ixgbe_rxd_pkt_info_to_pkt_type(); 3803 * for vec functions, 3804 * refers to _recv_raw_pkts_vec(). 3805 */ 3806 RTE_PTYPE_L2_ETHER, 3807 RTE_PTYPE_L3_IPV4, 3808 RTE_PTYPE_L3_IPV4_EXT, 3809 RTE_PTYPE_L3_IPV6, 3810 RTE_PTYPE_L3_IPV6_EXT, 3811 RTE_PTYPE_L4_SCTP, 3812 RTE_PTYPE_L4_TCP, 3813 RTE_PTYPE_L4_UDP, 3814 RTE_PTYPE_TUNNEL_IP, 3815 RTE_PTYPE_INNER_L3_IPV6, 3816 RTE_PTYPE_INNER_L3_IPV6_EXT, 3817 RTE_PTYPE_INNER_L4_TCP, 3818 RTE_PTYPE_INNER_L4_UDP, 3819 RTE_PTYPE_UNKNOWN 3820 }; 3821 3822 if (dev->rx_pkt_burst == ixgbe_recv_pkts || 3823 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || 3824 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || 3825 dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) 3826 return ptypes; 3827 3828 #if defined(RTE_ARCH_X86) 3829 if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec || 3830 dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec) 3831 return ptypes; 3832 #endif 3833 return NULL; 3834 } 3835 3836 static void 3837 ixgbevf_dev_info_get(struct rte_eth_dev *dev, 3838 struct rte_eth_dev_info *dev_info) 3839 { 3840 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3841 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3842 3843 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3844 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3845 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ 3846 dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */ 3847 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3848 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3849 dev_info->max_vfs = pci_dev->max_vfs; 3850 if (hw->mac.type == ixgbe_mac_82598EB) 3851 dev_info->max_vmdq_pools = ETH_16_POOLS; 3852 else 3853 dev_info->max_vmdq_pools = ETH_64_POOLS; 3854 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 3855 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 3856 dev_info->rx_queue_offload_capa); 3857 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 3858 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 3859 3860 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3861 .rx_thresh = { 3862 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3863 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3864 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3865 }, 3866 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3867 .rx_drop_en = 0, 3868 .offloads = 0, 3869 }; 3870 3871 dev_info->default_txconf = (struct rte_eth_txconf) { 3872 .tx_thresh = { 3873 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 3874 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 3875 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 3876 }, 3877 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 3878 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 3879 .offloads = 0, 3880 }; 3881 3882 dev_info->rx_desc_lim = rx_desc_lim; 3883 dev_info->tx_desc_lim = tx_desc_lim; 3884 } 3885 3886 static int 3887 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 3888 int *link_up, int wait_to_complete) 3889 { 3890 struct ixgbe_mbx_info *mbx = &hw->mbx; 3891 struct ixgbe_mac_info *mac = &hw->mac; 3892 uint32_t links_reg, in_msg; 3893 int ret_val = 0; 3894 3895 /* If we were hit with a reset drop the link */ 3896 if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) 3897 mac->get_link_status = true; 3898 3899 if (!mac->get_link_status) 3900 goto out; 3901 3902 /* if link status is down no point in checking to see if pf is up */ 3903 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 3904 if (!(links_reg & IXGBE_LINKS_UP)) 3905 goto out; 3906 3907 /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs 3908 * before the link status is correct 3909 */ 3910 if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) { 3911 int i; 3912 3913 for (i = 0; i < 5; i++) { 3914 rte_delay_us(100); 3915 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 3916 3917 if (!(links_reg & IXGBE_LINKS_UP)) 3918 goto out; 3919 } 3920 } 3921 3922 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 3923 case IXGBE_LINKS_SPEED_10G_82599: 3924 *speed = IXGBE_LINK_SPEED_10GB_FULL; 3925 if (hw->mac.type >= ixgbe_mac_X550) { 3926 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 3927 *speed = IXGBE_LINK_SPEED_2_5GB_FULL; 3928 } 3929 break; 3930 case IXGBE_LINKS_SPEED_1G_82599: 3931 *speed = IXGBE_LINK_SPEED_1GB_FULL; 3932 break; 3933 case IXGBE_LINKS_SPEED_100_82599: 3934 *speed = IXGBE_LINK_SPEED_100_FULL; 3935 if (hw->mac.type == ixgbe_mac_X550) { 3936 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 3937 *speed = IXGBE_LINK_SPEED_5GB_FULL; 3938 } 3939 break; 3940 case IXGBE_LINKS_SPEED_10_X550EM_A: 3941 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3942 /* Since Reserved in older MAC's */ 3943 if (hw->mac.type >= ixgbe_mac_X550) 3944 *speed = IXGBE_LINK_SPEED_10_FULL; 3945 break; 3946 default: 3947 *speed = IXGBE_LINK_SPEED_UNKNOWN; 3948 } 3949 3950 /* if the read failed it could just be a mailbox collision, best wait 3951 * until we are called again and don't report an error 3952 */ 3953 if (mbx->ops.read(hw, &in_msg, 1, 0)) 3954 goto out; 3955 3956 if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { 3957 /* msg is not CTS and is NACK we must have lost CTS status */ 3958 if (in_msg & IXGBE_VT_MSGTYPE_NACK) 3959 mac->get_link_status = false; 3960 goto out; 3961 } 3962 3963 /* the pf is talking, if we timed out in the past we reinit */ 3964 if (!mbx->timeout) { 3965 ret_val = -1; 3966 goto out; 3967 } 3968 3969 /* if we passed all the tests above then the link is up and we no 3970 * longer need to check for link 3971 */ 3972 mac->get_link_status = false; 3973 3974 out: 3975 *link_up = !mac->get_link_status; 3976 return ret_val; 3977 } 3978 3979 static void 3980 ixgbe_dev_setup_link_alarm_handler(void *param) 3981 { 3982 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 3983 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3984 struct ixgbe_interrupt *intr = 3985 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 3986 u32 speed; 3987 bool autoneg = false; 3988 3989 speed = hw->phy.autoneg_advertised; 3990 if (!speed) 3991 ixgbe_get_link_capabilities(hw, &speed, &autoneg); 3992 3993 ixgbe_setup_link(hw, speed, true); 3994 3995 intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 3996 } 3997 3998 /* return 0 means link status changed, -1 means not changed */ 3999 int 4000 ixgbe_dev_link_update_share(struct rte_eth_dev *dev, 4001 int wait_to_complete, int vf) 4002 { 4003 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4004 struct rte_eth_link link; 4005 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 4006 struct ixgbe_interrupt *intr = 4007 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4008 int link_up; 4009 int diag; 4010 int wait = 1; 4011 4012 memset(&link, 0, sizeof(link)); 4013 link.link_status = ETH_LINK_DOWN; 4014 link.link_speed = ETH_SPEED_NUM_NONE; 4015 link.link_duplex = ETH_LINK_HALF_DUPLEX; 4016 link.link_autoneg = ETH_LINK_AUTONEG; 4017 4018 hw->mac.get_link_status = true; 4019 4020 if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) 4021 return rte_eth_linkstatus_set(dev, &link); 4022 4023 /* check if it needs to wait to complete, if lsc interrupt is enabled */ 4024 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) 4025 wait = 0; 4026 4027 if (vf) 4028 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); 4029 else 4030 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait); 4031 4032 if (diag != 0) { 4033 link.link_speed = ETH_SPEED_NUM_100M; 4034 link.link_duplex = ETH_LINK_FULL_DUPLEX; 4035 return rte_eth_linkstatus_set(dev, &link); 4036 } 4037 4038 if (link_up == 0) { 4039 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { 4040 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; 4041 rte_eal_alarm_set(10, 4042 ixgbe_dev_setup_link_alarm_handler, dev); 4043 } 4044 return rte_eth_linkstatus_set(dev, &link); 4045 } 4046 4047 link.link_status = ETH_LINK_UP; 4048 link.link_duplex = ETH_LINK_FULL_DUPLEX; 4049 4050 switch (link_speed) { 4051 default: 4052 case IXGBE_LINK_SPEED_UNKNOWN: 4053 link.link_duplex = ETH_LINK_FULL_DUPLEX; 4054 link.link_speed = ETH_SPEED_NUM_100M; 4055 break; 4056 4057 case IXGBE_LINK_SPEED_100_FULL: 4058 link.link_speed = ETH_SPEED_NUM_100M; 4059 break; 4060 4061 case IXGBE_LINK_SPEED_1GB_FULL: 4062 link.link_speed = ETH_SPEED_NUM_1G; 4063 break; 4064 4065 case IXGBE_LINK_SPEED_2_5GB_FULL: 4066 link.link_speed = ETH_SPEED_NUM_2_5G; 4067 break; 4068 4069 case IXGBE_LINK_SPEED_5GB_FULL: 4070 link.link_speed = ETH_SPEED_NUM_5G; 4071 break; 4072 4073 case IXGBE_LINK_SPEED_10GB_FULL: 4074 link.link_speed = ETH_SPEED_NUM_10G; 4075 break; 4076 } 4077 4078 return rte_eth_linkstatus_set(dev, &link); 4079 } 4080 4081 static int 4082 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4083 { 4084 return ixgbe_dev_link_update_share(dev, wait_to_complete, 0); 4085 } 4086 4087 static int 4088 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4089 { 4090 return ixgbe_dev_link_update_share(dev, wait_to_complete, 1); 4091 } 4092 4093 static void 4094 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 4095 { 4096 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4097 uint32_t fctrl; 4098 4099 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4100 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4101 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4102 } 4103 4104 static void 4105 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 4106 { 4107 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4108 uint32_t fctrl; 4109 4110 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4111 fctrl &= (~IXGBE_FCTRL_UPE); 4112 if (dev->data->all_multicast == 1) 4113 fctrl |= IXGBE_FCTRL_MPE; 4114 else 4115 fctrl &= (~IXGBE_FCTRL_MPE); 4116 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4117 } 4118 4119 static void 4120 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) 4121 { 4122 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4123 uint32_t fctrl; 4124 4125 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4126 fctrl |= IXGBE_FCTRL_MPE; 4127 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4128 } 4129 4130 static void 4131 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) 4132 { 4133 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4134 uint32_t fctrl; 4135 4136 if (dev->data->promiscuous == 1) 4137 return; /* must remain in all_multicast mode */ 4138 4139 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4140 fctrl &= (~IXGBE_FCTRL_MPE); 4141 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4142 } 4143 4144 /** 4145 * It clears the interrupt causes and enables the interrupt. 4146 * It will be called once only during nic initialized. 4147 * 4148 * @param dev 4149 * Pointer to struct rte_eth_dev. 4150 * @param on 4151 * Enable or Disable. 4152 * 4153 * @return 4154 * - On success, zero. 4155 * - On failure, a negative value. 4156 */ 4157 static int 4158 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) 4159 { 4160 struct ixgbe_interrupt *intr = 4161 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4162 4163 ixgbe_dev_link_status_print(dev); 4164 if (on) 4165 intr->mask |= IXGBE_EICR_LSC; 4166 else 4167 intr->mask &= ~IXGBE_EICR_LSC; 4168 4169 return 0; 4170 } 4171 4172 /** 4173 * It clears the interrupt causes and enables the interrupt. 4174 * It will be called once only during nic initialized. 4175 * 4176 * @param dev 4177 * Pointer to struct rte_eth_dev. 4178 * 4179 * @return 4180 * - On success, zero. 4181 * - On failure, a negative value. 4182 */ 4183 static int 4184 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) 4185 { 4186 struct ixgbe_interrupt *intr = 4187 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4188 4189 intr->mask |= IXGBE_EICR_RTX_QUEUE; 4190 4191 return 0; 4192 } 4193 4194 /** 4195 * It clears the interrupt causes and enables the interrupt. 4196 * It will be called once only during nic initialized. 4197 * 4198 * @param dev 4199 * Pointer to struct rte_eth_dev. 4200 * 4201 * @return 4202 * - On success, zero. 4203 * - On failure, a negative value. 4204 */ 4205 static int 4206 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev) 4207 { 4208 struct ixgbe_interrupt *intr = 4209 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4210 4211 intr->mask |= IXGBE_EICR_LINKSEC; 4212 4213 return 0; 4214 } 4215 4216 /* 4217 * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update. 4218 * 4219 * @param dev 4220 * Pointer to struct rte_eth_dev. 4221 * 4222 * @return 4223 * - On success, zero. 4224 * - On failure, a negative value. 4225 */ 4226 static int 4227 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) 4228 { 4229 uint32_t eicr; 4230 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4231 struct ixgbe_interrupt *intr = 4232 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4233 4234 /* clear all cause mask */ 4235 ixgbe_disable_intr(hw); 4236 4237 /* read-on-clear nic registers here */ 4238 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4239 PMD_DRV_LOG(DEBUG, "eicr %x", eicr); 4240 4241 intr->flags = 0; 4242 4243 /* set flag for async link update */ 4244 if (eicr & IXGBE_EICR_LSC) 4245 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 4246 4247 if (eicr & IXGBE_EICR_MAILBOX) 4248 intr->flags |= IXGBE_FLAG_MAILBOX; 4249 4250 if (eicr & IXGBE_EICR_LINKSEC) 4251 intr->flags |= IXGBE_FLAG_MACSEC; 4252 4253 if (hw->mac.type == ixgbe_mac_X550EM_x && 4254 hw->phy.type == ixgbe_phy_x550em_ext_t && 4255 (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x)) 4256 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT; 4257 4258 return 0; 4259 } 4260 4261 /** 4262 * It gets and then prints the link status. 4263 * 4264 * @param dev 4265 * Pointer to struct rte_eth_dev. 4266 * 4267 * @return 4268 * - On success, zero. 4269 * - On failure, a negative value. 4270 */ 4271 static void 4272 ixgbe_dev_link_status_print(struct rte_eth_dev *dev) 4273 { 4274 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4275 struct rte_eth_link link; 4276 4277 rte_eth_linkstatus_get(dev, &link); 4278 4279 if (link.link_status) { 4280 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", 4281 (int)(dev->data->port_id), 4282 (unsigned)link.link_speed, 4283 link.link_duplex == ETH_LINK_FULL_DUPLEX ? 4284 "full-duplex" : "half-duplex"); 4285 } else { 4286 PMD_INIT_LOG(INFO, " Port %d: Link Down", 4287 (int)(dev->data->port_id)); 4288 } 4289 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, 4290 pci_dev->addr.domain, 4291 pci_dev->addr.bus, 4292 pci_dev->addr.devid, 4293 pci_dev->addr.function); 4294 } 4295 4296 /* 4297 * It executes link_update after knowing an interrupt occurred. 4298 * 4299 * @param dev 4300 * Pointer to struct rte_eth_dev. 4301 * 4302 * @return 4303 * - On success, zero. 4304 * - On failure, a negative value. 4305 */ 4306 static int 4307 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) 4308 { 4309 struct ixgbe_interrupt *intr = 4310 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4311 int64_t timeout; 4312 struct ixgbe_hw *hw = 4313 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4314 4315 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); 4316 4317 if (intr->flags & IXGBE_FLAG_MAILBOX) { 4318 ixgbe_pf_mbx_process(dev); 4319 intr->flags &= ~IXGBE_FLAG_MAILBOX; 4320 } 4321 4322 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 4323 ixgbe_handle_lasi(hw); 4324 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 4325 } 4326 4327 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 4328 struct rte_eth_link link; 4329 4330 /* get the link status before link update, for predicting later */ 4331 rte_eth_linkstatus_get(dev, &link); 4332 4333 ixgbe_dev_link_update(dev, 0); 4334 4335 /* likely to up */ 4336 if (!link.link_status) 4337 /* handle it 1 sec later, wait it being stable */ 4338 timeout = IXGBE_LINK_UP_CHECK_TIMEOUT; 4339 /* likely to down */ 4340 else 4341 /* handle it 4 sec later, wait it being stable */ 4342 timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; 4343 4344 ixgbe_dev_link_status_print(dev); 4345 if (rte_eal_alarm_set(timeout * 1000, 4346 ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) 4347 PMD_DRV_LOG(ERR, "Error setting alarm"); 4348 else { 4349 /* remember original mask */ 4350 intr->mask_original = intr->mask; 4351 /* only disable lsc interrupt */ 4352 intr->mask &= ~IXGBE_EIMS_LSC; 4353 } 4354 } 4355 4356 PMD_DRV_LOG(DEBUG, "enable intr immediately"); 4357 ixgbe_enable_intr(dev); 4358 4359 return 0; 4360 } 4361 4362 /** 4363 * Interrupt handler which shall be registered for alarm callback for delayed 4364 * handling specific interrupt to wait for the stable nic state. As the 4365 * NIC interrupt state is not stable for ixgbe after link is just down, 4366 * it needs to wait 4 seconds to get the stable status. 4367 * 4368 * @param handle 4369 * Pointer to interrupt handle. 4370 * @param param 4371 * The address of parameter (struct rte_eth_dev *) regsitered before. 4372 * 4373 * @return 4374 * void 4375 */ 4376 static void 4377 ixgbe_dev_interrupt_delayed_handler(void *param) 4378 { 4379 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4380 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4381 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 4382 struct ixgbe_interrupt *intr = 4383 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4384 struct ixgbe_hw *hw = 4385 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4386 uint32_t eicr; 4387 4388 ixgbe_disable_intr(hw); 4389 4390 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4391 if (eicr & IXGBE_EICR_MAILBOX) 4392 ixgbe_pf_mbx_process(dev); 4393 4394 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 4395 ixgbe_handle_lasi(hw); 4396 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 4397 } 4398 4399 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 4400 ixgbe_dev_link_update(dev, 0); 4401 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 4402 ixgbe_dev_link_status_print(dev); 4403 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, 4404 NULL); 4405 } 4406 4407 if (intr->flags & IXGBE_FLAG_MACSEC) { 4408 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, 4409 NULL); 4410 intr->flags &= ~IXGBE_FLAG_MACSEC; 4411 } 4412 4413 /* restore original mask */ 4414 intr->mask = intr->mask_original; 4415 intr->mask_original = 0; 4416 4417 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); 4418 ixgbe_enable_intr(dev); 4419 rte_intr_enable(intr_handle); 4420 } 4421 4422 /** 4423 * Interrupt handler triggered by NIC for handling 4424 * specific interrupt. 4425 * 4426 * @param handle 4427 * Pointer to interrupt handle. 4428 * @param param 4429 * The address of parameter (struct rte_eth_dev *) regsitered before. 4430 * 4431 * @return 4432 * void 4433 */ 4434 static void 4435 ixgbe_dev_interrupt_handler(void *param) 4436 { 4437 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4438 4439 ixgbe_dev_interrupt_get_status(dev); 4440 ixgbe_dev_interrupt_action(dev); 4441 } 4442 4443 static int 4444 ixgbe_dev_led_on(struct rte_eth_dev *dev) 4445 { 4446 struct ixgbe_hw *hw; 4447 4448 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4449 return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 4450 } 4451 4452 static int 4453 ixgbe_dev_led_off(struct rte_eth_dev *dev) 4454 { 4455 struct ixgbe_hw *hw; 4456 4457 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4458 return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 4459 } 4460 4461 static int 4462 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 4463 { 4464 struct ixgbe_hw *hw; 4465 uint32_t mflcn_reg; 4466 uint32_t fccfg_reg; 4467 int rx_pause; 4468 int tx_pause; 4469 4470 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4471 4472 fc_conf->pause_time = hw->fc.pause_time; 4473 fc_conf->high_water = hw->fc.high_water[0]; 4474 fc_conf->low_water = hw->fc.low_water[0]; 4475 fc_conf->send_xon = hw->fc.send_xon; 4476 fc_conf->autoneg = !hw->fc.disable_fc_autoneg; 4477 4478 /* 4479 * Return rx_pause status according to actual setting of 4480 * MFLCN register. 4481 */ 4482 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4483 if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE)) 4484 rx_pause = 1; 4485 else 4486 rx_pause = 0; 4487 4488 /* 4489 * Return tx_pause status according to actual setting of 4490 * FCCFG register. 4491 */ 4492 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4493 if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY)) 4494 tx_pause = 1; 4495 else 4496 tx_pause = 0; 4497 4498 if (rx_pause && tx_pause) 4499 fc_conf->mode = RTE_FC_FULL; 4500 else if (rx_pause) 4501 fc_conf->mode = RTE_FC_RX_PAUSE; 4502 else if (tx_pause) 4503 fc_conf->mode = RTE_FC_TX_PAUSE; 4504 else 4505 fc_conf->mode = RTE_FC_NONE; 4506 4507 return 0; 4508 } 4509 4510 static int 4511 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 4512 { 4513 struct ixgbe_hw *hw; 4514 int err; 4515 uint32_t rx_buf_size; 4516 uint32_t max_high_water; 4517 uint32_t mflcn; 4518 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4519 ixgbe_fc_none, 4520 ixgbe_fc_rx_pause, 4521 ixgbe_fc_tx_pause, 4522 ixgbe_fc_full 4523 }; 4524 4525 PMD_INIT_FUNC_TRACE(); 4526 4527 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4528 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); 4529 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4530 4531 /* 4532 * At least reserve one Ethernet frame for watermark 4533 * high_water/low_water in kilo bytes for ixgbe 4534 */ 4535 max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 4536 if ((fc_conf->high_water > max_high_water) || 4537 (fc_conf->high_water < fc_conf->low_water)) { 4538 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 4539 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 4540 return -EINVAL; 4541 } 4542 4543 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode]; 4544 hw->fc.pause_time = fc_conf->pause_time; 4545 hw->fc.high_water[0] = fc_conf->high_water; 4546 hw->fc.low_water[0] = fc_conf->low_water; 4547 hw->fc.send_xon = fc_conf->send_xon; 4548 hw->fc.disable_fc_autoneg = !fc_conf->autoneg; 4549 4550 err = ixgbe_fc_enable(hw); 4551 4552 /* Not negotiated is not an error case */ 4553 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) { 4554 4555 /* check if we want to forward MAC frames - driver doesn't have native 4556 * capability to do that, so we'll write the registers ourselves */ 4557 4558 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4559 4560 /* set or clear MFLCN.PMCF bit depending on configuration */ 4561 if (fc_conf->mac_ctrl_frame_fwd != 0) 4562 mflcn |= IXGBE_MFLCN_PMCF; 4563 else 4564 mflcn &= ~IXGBE_MFLCN_PMCF; 4565 4566 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn); 4567 IXGBE_WRITE_FLUSH(hw); 4568 4569 return 0; 4570 } 4571 4572 PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err); 4573 return -EIO; 4574 } 4575 4576 /** 4577 * ixgbe_pfc_enable_generic - Enable flow control 4578 * @hw: pointer to hardware structure 4579 * @tc_num: traffic class number 4580 * Enable flow control according to the current settings. 4581 */ 4582 static int 4583 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num) 4584 { 4585 int ret_val = 0; 4586 uint32_t mflcn_reg, fccfg_reg; 4587 uint32_t reg; 4588 uint32_t fcrtl, fcrth; 4589 uint8_t i; 4590 uint8_t nb_rx_en; 4591 4592 /* Validate the water mark configuration */ 4593 if (!hw->fc.pause_time) { 4594 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4595 goto out; 4596 } 4597 4598 /* Low water mark of zero causes XOFF floods */ 4599 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 4600 /* High/Low water can not be 0 */ 4601 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) { 4602 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 4603 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4604 goto out; 4605 } 4606 4607 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { 4608 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 4609 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4610 goto out; 4611 } 4612 } 4613 /* Negotiate the fc mode to use */ 4614 ixgbe_fc_autoneg(hw); 4615 4616 /* Disable any previous flow control settings */ 4617 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4618 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE); 4619 4620 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4621 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 4622 4623 switch (hw->fc.current_mode) { 4624 case ixgbe_fc_none: 4625 /* 4626 * If the count of enabled RX Priority Flow control >1, 4627 * and the TX pause can not be disabled 4628 */ 4629 nb_rx_en = 0; 4630 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 4631 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 4632 if (reg & IXGBE_FCRTH_FCEN) 4633 nb_rx_en++; 4634 } 4635 if (nb_rx_en > 1) 4636 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4637 break; 4638 case ixgbe_fc_rx_pause: 4639 /* 4640 * Rx Flow control is enabled and Tx Flow control is 4641 * disabled by software override. Since there really 4642 * isn't a way to advertise that we are capable of RX 4643 * Pause ONLY, we will advertise that we support both 4644 * symmetric and asymmetric Rx PAUSE. Later, we will 4645 * disable the adapter's ability to send PAUSE frames. 4646 */ 4647 mflcn_reg |= IXGBE_MFLCN_RPFCE; 4648 /* 4649 * If the count of enabled RX Priority Flow control >1, 4650 * and the TX pause can not be disabled 4651 */ 4652 nb_rx_en = 0; 4653 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 4654 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 4655 if (reg & IXGBE_FCRTH_FCEN) 4656 nb_rx_en++; 4657 } 4658 if (nb_rx_en > 1) 4659 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4660 break; 4661 case ixgbe_fc_tx_pause: 4662 /* 4663 * Tx Flow control is enabled, and Rx Flow control is 4664 * disabled by software override. 4665 */ 4666 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4667 break; 4668 case ixgbe_fc_full: 4669 /* Flow control (both Rx and Tx) is enabled by SW override. */ 4670 mflcn_reg |= IXGBE_MFLCN_RPFCE; 4671 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4672 break; 4673 default: 4674 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); 4675 ret_val = IXGBE_ERR_CONFIG; 4676 goto out; 4677 } 4678 4679 /* Set 802.3x based flow control settings. */ 4680 mflcn_reg |= IXGBE_MFLCN_DPF; 4681 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 4682 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 4683 4684 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 4685 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 4686 hw->fc.high_water[tc_num]) { 4687 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE; 4688 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl); 4689 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN; 4690 } else { 4691 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0); 4692 /* 4693 * In order to prevent Tx hangs when the internal Tx 4694 * switch is enabled we must set the high water mark 4695 * to the maximum FCRTH value. This allows the Tx 4696 * switch to function even under heavy Rx workloads. 4697 */ 4698 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32; 4699 } 4700 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth); 4701 4702 /* Configure pause time (2 TCs per register) */ 4703 reg = hw->fc.pause_time * 0x00010001; 4704 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) 4705 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 4706 4707 /* Configure flow control refresh threshold value */ 4708 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 4709 4710 out: 4711 return ret_val; 4712 } 4713 4714 static int 4715 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) 4716 { 4717 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4718 int32_t ret_val = IXGBE_NOT_IMPLEMENTED; 4719 4720 if (hw->mac.type != ixgbe_mac_82598EB) { 4721 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); 4722 } 4723 return ret_val; 4724 } 4725 4726 static int 4727 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) 4728 { 4729 int err; 4730 uint32_t rx_buf_size; 4731 uint32_t max_high_water; 4732 uint8_t tc_num; 4733 uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; 4734 struct ixgbe_hw *hw = 4735 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4736 struct ixgbe_dcb_config *dcb_config = 4737 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 4738 4739 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4740 ixgbe_fc_none, 4741 ixgbe_fc_rx_pause, 4742 ixgbe_fc_tx_pause, 4743 ixgbe_fc_full 4744 }; 4745 4746 PMD_INIT_FUNC_TRACE(); 4747 4748 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); 4749 tc_num = map[pfc_conf->priority]; 4750 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)); 4751 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4752 /* 4753 * At least reserve one Ethernet frame for watermark 4754 * high_water/low_water in kilo bytes for ixgbe 4755 */ 4756 max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 4757 if ((pfc_conf->fc.high_water > max_high_water) || 4758 (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { 4759 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 4760 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 4761 return -EINVAL; 4762 } 4763 4764 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode]; 4765 hw->fc.pause_time = pfc_conf->fc.pause_time; 4766 hw->fc.send_xon = pfc_conf->fc.send_xon; 4767 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; 4768 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; 4769 4770 err = ixgbe_dcb_pfc_enable(dev, tc_num); 4771 4772 /* Not negotiated is not an error case */ 4773 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) 4774 return 0; 4775 4776 PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err); 4777 return -EIO; 4778 } 4779 4780 static int 4781 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 4782 struct rte_eth_rss_reta_entry64 *reta_conf, 4783 uint16_t reta_size) 4784 { 4785 uint16_t i, sp_reta_size; 4786 uint8_t j, mask; 4787 uint32_t reta, r; 4788 uint16_t idx, shift; 4789 struct ixgbe_adapter *adapter = 4790 (struct ixgbe_adapter *)dev->data->dev_private; 4791 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4792 uint32_t reta_reg; 4793 4794 PMD_INIT_FUNC_TRACE(); 4795 4796 if (!ixgbe_rss_update_sp(hw->mac.type)) { 4797 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this " 4798 "NIC."); 4799 return -ENOTSUP; 4800 } 4801 4802 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 4803 if (reta_size != sp_reta_size) { 4804 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 4805 "(%d) doesn't match the number hardware can supported " 4806 "(%d)", reta_size, sp_reta_size); 4807 return -EINVAL; 4808 } 4809 4810 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 4811 idx = i / RTE_RETA_GROUP_SIZE; 4812 shift = i % RTE_RETA_GROUP_SIZE; 4813 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 4814 IXGBE_4_BIT_MASK); 4815 if (!mask) 4816 continue; 4817 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 4818 if (mask == IXGBE_4_BIT_MASK) 4819 r = 0; 4820 else 4821 r = IXGBE_READ_REG(hw, reta_reg); 4822 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) { 4823 if (mask & (0x1 << j)) 4824 reta |= reta_conf[idx].reta[shift + j] << 4825 (CHAR_BIT * j); 4826 else 4827 reta |= r & (IXGBE_8_BIT_MASK << 4828 (CHAR_BIT * j)); 4829 } 4830 IXGBE_WRITE_REG(hw, reta_reg, reta); 4831 } 4832 adapter->rss_reta_updated = 1; 4833 4834 return 0; 4835 } 4836 4837 static int 4838 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 4839 struct rte_eth_rss_reta_entry64 *reta_conf, 4840 uint16_t reta_size) 4841 { 4842 uint16_t i, sp_reta_size; 4843 uint8_t j, mask; 4844 uint32_t reta; 4845 uint16_t idx, shift; 4846 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4847 uint32_t reta_reg; 4848 4849 PMD_INIT_FUNC_TRACE(); 4850 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 4851 if (reta_size != sp_reta_size) { 4852 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 4853 "(%d) doesn't match the number hardware can supported " 4854 "(%d)", reta_size, sp_reta_size); 4855 return -EINVAL; 4856 } 4857 4858 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 4859 idx = i / RTE_RETA_GROUP_SIZE; 4860 shift = i % RTE_RETA_GROUP_SIZE; 4861 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 4862 IXGBE_4_BIT_MASK); 4863 if (!mask) 4864 continue; 4865 4866 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 4867 reta = IXGBE_READ_REG(hw, reta_reg); 4868 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { 4869 if (mask & (0x1 << j)) 4870 reta_conf[idx].reta[shift + j] = 4871 ((reta >> (CHAR_BIT * j)) & 4872 IXGBE_8_BIT_MASK); 4873 } 4874 } 4875 4876 return 0; 4877 } 4878 4879 static int 4880 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 4881 uint32_t index, uint32_t pool) 4882 { 4883 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4884 uint32_t enable_addr = 1; 4885 4886 return ixgbe_set_rar(hw, index, mac_addr->addr_bytes, 4887 pool, enable_addr); 4888 } 4889 4890 static void 4891 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) 4892 { 4893 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4894 4895 ixgbe_clear_rar(hw, index); 4896 } 4897 4898 static int 4899 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) 4900 { 4901 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4902 4903 ixgbe_remove_rar(dev, 0); 4904 ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs); 4905 4906 return 0; 4907 } 4908 4909 static bool 4910 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 4911 { 4912 if (strcmp(dev->device->driver->name, drv->driver.name)) 4913 return false; 4914 4915 return true; 4916 } 4917 4918 bool 4919 is_ixgbe_supported(struct rte_eth_dev *dev) 4920 { 4921 return is_device_supported(dev, &rte_ixgbe_pmd); 4922 } 4923 4924 static int 4925 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 4926 { 4927 uint32_t hlreg0; 4928 uint32_t maxfrs; 4929 struct ixgbe_hw *hw; 4930 struct rte_eth_dev_info dev_info; 4931 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 4932 struct rte_eth_dev_data *dev_data = dev->data; 4933 4934 ixgbe_dev_info_get(dev, &dev_info); 4935 4936 /* check that mtu is within the allowed range */ 4937 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) 4938 return -EINVAL; 4939 4940 /* If device is started, refuse mtu that requires the support of 4941 * scattered packets when this feature has not been enabled before. 4942 */ 4943 if (dev_data->dev_started && !dev_data->scattered_rx && 4944 (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > 4945 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 4946 PMD_INIT_LOG(ERR, "Stop port first."); 4947 return -EINVAL; 4948 } 4949 4950 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4951 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 4952 4953 /* switch to jumbo mode if needed */ 4954 if (frame_size > ETHER_MAX_LEN) { 4955 dev->data->dev_conf.rxmode.offloads |= 4956 DEV_RX_OFFLOAD_JUMBO_FRAME; 4957 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 4958 } else { 4959 dev->data->dev_conf.rxmode.offloads &= 4960 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 4961 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; 4962 } 4963 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 4964 4965 /* update max frame size */ 4966 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 4967 4968 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); 4969 maxfrs &= 0x0000FFFF; 4970 maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16); 4971 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); 4972 4973 return 0; 4974 } 4975 4976 /* 4977 * Virtual Function operations 4978 */ 4979 static void 4980 ixgbevf_intr_disable(struct rte_eth_dev *dev) 4981 { 4982 struct ixgbe_interrupt *intr = 4983 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4984 struct ixgbe_hw *hw = 4985 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4986 4987 PMD_INIT_FUNC_TRACE(); 4988 4989 /* Clear interrupt mask to stop from interrupts being generated */ 4990 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); 4991 4992 IXGBE_WRITE_FLUSH(hw); 4993 4994 /* Clear mask value. */ 4995 intr->mask = 0; 4996 } 4997 4998 static void 4999 ixgbevf_intr_enable(struct rte_eth_dev *dev) 5000 { 5001 struct ixgbe_interrupt *intr = 5002 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5003 struct ixgbe_hw *hw = 5004 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5005 5006 PMD_INIT_FUNC_TRACE(); 5007 5008 /* VF enable interrupt autoclean */ 5009 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK); 5010 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK); 5011 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); 5012 5013 IXGBE_WRITE_FLUSH(hw); 5014 5015 /* Save IXGBE_VTEIMS value to mask. */ 5016 intr->mask = IXGBE_VF_IRQ_ENABLE_MASK; 5017 } 5018 5019 static int 5020 ixgbevf_dev_configure(struct rte_eth_dev *dev) 5021 { 5022 struct rte_eth_conf *conf = &dev->data->dev_conf; 5023 struct ixgbe_adapter *adapter = 5024 (struct ixgbe_adapter *)dev->data->dev_private; 5025 5026 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 5027 dev->data->port_id); 5028 5029 /* 5030 * VF has no ability to enable/disable HW CRC 5031 * Keep the persistent behavior the same as Host PF 5032 */ 5033 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC 5034 if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { 5035 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 5036 conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC; 5037 } 5038 #else 5039 if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) { 5040 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 5041 conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC; 5042 } 5043 #endif 5044 5045 /* 5046 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 5047 * allocation or vector Rx preconditions we will reset it. 5048 */ 5049 adapter->rx_bulk_alloc_allowed = true; 5050 adapter->rx_vec_allowed = true; 5051 5052 return 0; 5053 } 5054 5055 static int 5056 ixgbevf_dev_start(struct rte_eth_dev *dev) 5057 { 5058 struct ixgbe_hw *hw = 5059 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5060 uint32_t intr_vector = 0; 5061 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5062 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5063 5064 int err, mask = 0; 5065 5066 PMD_INIT_FUNC_TRACE(); 5067 5068 /* Stop the link setup handler before resetting the HW. */ 5069 rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev); 5070 5071 err = hw->mac.ops.reset_hw(hw); 5072 if (err) { 5073 PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err); 5074 return err; 5075 } 5076 hw->mac.get_link_status = true; 5077 5078 /* negotiate mailbox API version to use with the PF. */ 5079 ixgbevf_negotiate_api(hw); 5080 5081 ixgbevf_dev_tx_init(dev); 5082 5083 /* This can fail when allocating mbufs for descriptor rings */ 5084 err = ixgbevf_dev_rx_init(dev); 5085 if (err) { 5086 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err); 5087 ixgbe_dev_clear_queues(dev); 5088 return err; 5089 } 5090 5091 /* Set vfta */ 5092 ixgbevf_set_vfta_all(dev, 1); 5093 5094 /* Set HW strip */ 5095 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | 5096 ETH_VLAN_EXTEND_MASK; 5097 err = ixgbevf_vlan_offload_config(dev, mask); 5098 if (err) { 5099 PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err); 5100 ixgbe_dev_clear_queues(dev); 5101 return err; 5102 } 5103 5104 ixgbevf_dev_rxtx_start(dev); 5105 5106 /* check and configure queue intr-vector mapping */ 5107 if (rte_intr_cap_multiple(intr_handle) && 5108 dev->data->dev_conf.intr_conf.rxq) { 5109 /* According to datasheet, only vector 0/1/2 can be used, 5110 * now only one vector is used for Rx queue 5111 */ 5112 intr_vector = 1; 5113 if (rte_intr_efd_enable(intr_handle, intr_vector)) 5114 return -1; 5115 } 5116 5117 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 5118 intr_handle->intr_vec = 5119 rte_zmalloc("intr_vec", 5120 dev->data->nb_rx_queues * sizeof(int), 0); 5121 if (intr_handle->intr_vec == NULL) { 5122 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 5123 " intr_vec", dev->data->nb_rx_queues); 5124 return -ENOMEM; 5125 } 5126 } 5127 ixgbevf_configure_msix(dev); 5128 5129 /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt 5130 * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ). 5131 * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( ) 5132 * is not cleared, it will fail when following rte_intr_enable( ) tries 5133 * to map Rx queue interrupt to other VFIO vectors. 5134 * So clear uio/vfio intr/evevnfd first to avoid failure. 5135 */ 5136 rte_intr_disable(intr_handle); 5137 5138 rte_intr_enable(intr_handle); 5139 5140 /* Re-enable interrupt for VF */ 5141 ixgbevf_intr_enable(dev); 5142 5143 /* 5144 * Update link status right before return, because it may 5145 * start link configuration process in a separate thread. 5146 */ 5147 ixgbevf_dev_link_update(dev, 0); 5148 5149 return 0; 5150 } 5151 5152 static void 5153 ixgbevf_dev_stop(struct rte_eth_dev *dev) 5154 { 5155 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5156 struct ixgbe_adapter *adapter = 5157 (struct ixgbe_adapter *)dev->data->dev_private; 5158 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5159 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5160 5161 PMD_INIT_FUNC_TRACE(); 5162 5163 rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev); 5164 5165 ixgbevf_intr_disable(dev); 5166 5167 hw->adapter_stopped = 1; 5168 ixgbe_stop_adapter(hw); 5169 5170 /* 5171 * Clear what we set, but we still keep shadow_vfta to 5172 * restore after device starts 5173 */ 5174 ixgbevf_set_vfta_all(dev, 0); 5175 5176 /* Clear stored conf */ 5177 dev->data->scattered_rx = 0; 5178 5179 ixgbe_dev_clear_queues(dev); 5180 5181 /* Clean datapath event and queue/vec mapping */ 5182 rte_intr_efd_disable(intr_handle); 5183 if (intr_handle->intr_vec != NULL) { 5184 rte_free(intr_handle->intr_vec); 5185 intr_handle->intr_vec = NULL; 5186 } 5187 5188 adapter->rss_reta_updated = 0; 5189 } 5190 5191 static void 5192 ixgbevf_dev_close(struct rte_eth_dev *dev) 5193 { 5194 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5195 5196 PMD_INIT_FUNC_TRACE(); 5197 5198 ixgbe_reset_hw(hw); 5199 5200 ixgbevf_dev_stop(dev); 5201 5202 ixgbe_dev_free_queues(dev); 5203 5204 /** 5205 * Remove the VF MAC address ro ensure 5206 * that the VF traffic goes to the PF 5207 * after stop, close and detach of the VF 5208 **/ 5209 ixgbevf_remove_mac_addr(dev, 0); 5210 } 5211 5212 /* 5213 * Reset VF device 5214 */ 5215 static int 5216 ixgbevf_dev_reset(struct rte_eth_dev *dev) 5217 { 5218 int ret; 5219 5220 ret = eth_ixgbevf_dev_uninit(dev); 5221 if (ret) 5222 return ret; 5223 5224 ret = eth_ixgbevf_dev_init(dev); 5225 5226 return ret; 5227 } 5228 5229 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) 5230 { 5231 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5232 struct ixgbe_vfta *shadow_vfta = 5233 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5234 int i = 0, j = 0, vfta = 0, mask = 1; 5235 5236 for (i = 0; i < IXGBE_VFTA_SIZE; i++) { 5237 vfta = shadow_vfta->vfta[i]; 5238 if (vfta) { 5239 mask = 1; 5240 for (j = 0; j < 32; j++) { 5241 if (vfta & mask) 5242 ixgbe_set_vfta(hw, (i<<5)+j, 0, 5243 on, false); 5244 mask <<= 1; 5245 } 5246 } 5247 } 5248 5249 } 5250 5251 static int 5252 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 5253 { 5254 struct ixgbe_hw *hw = 5255 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5256 struct ixgbe_vfta *shadow_vfta = 5257 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5258 uint32_t vid_idx = 0; 5259 uint32_t vid_bit = 0; 5260 int ret = 0; 5261 5262 PMD_INIT_FUNC_TRACE(); 5263 5264 /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ 5265 ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); 5266 if (ret) { 5267 PMD_INIT_LOG(ERR, "Unable to set VF vlan"); 5268 return ret; 5269 } 5270 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 5271 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 5272 5273 /* Save what we set and retore it after device reset */ 5274 if (on) 5275 shadow_vfta->vfta[vid_idx] |= vid_bit; 5276 else 5277 shadow_vfta->vfta[vid_idx] &= ~vid_bit; 5278 5279 return 0; 5280 } 5281 5282 static void 5283 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 5284 { 5285 struct ixgbe_hw *hw = 5286 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5287 uint32_t ctrl; 5288 5289 PMD_INIT_FUNC_TRACE(); 5290 5291 if (queue >= hw->mac.max_rx_queues) 5292 return; 5293 5294 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 5295 if (on) 5296 ctrl |= IXGBE_RXDCTL_VME; 5297 else 5298 ctrl &= ~IXGBE_RXDCTL_VME; 5299 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 5300 5301 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); 5302 } 5303 5304 static int 5305 ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask) 5306 { 5307 struct ixgbe_rx_queue *rxq; 5308 uint16_t i; 5309 int on = 0; 5310 5311 /* VF function only support hw strip feature, others are not support */ 5312 if (mask & ETH_VLAN_STRIP_MASK) { 5313 for (i = 0; i < dev->data->nb_rx_queues; i++) { 5314 rxq = dev->data->rx_queues[i]; 5315 on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); 5316 ixgbevf_vlan_strip_queue_set(dev, i, on); 5317 } 5318 } 5319 5320 return 0; 5321 } 5322 5323 static int 5324 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) 5325 { 5326 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 5327 5328 ixgbevf_vlan_offload_config(dev, mask); 5329 5330 return 0; 5331 } 5332 5333 int 5334 ixgbe_vt_check(struct ixgbe_hw *hw) 5335 { 5336 uint32_t reg_val; 5337 5338 /* if Virtualization Technology is enabled */ 5339 reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 5340 if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { 5341 PMD_INIT_LOG(ERR, "VT must be enabled for this setting"); 5342 return -1; 5343 } 5344 5345 return 0; 5346 } 5347 5348 static uint32_t 5349 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr) 5350 { 5351 uint32_t vector = 0; 5352 5353 switch (hw->mac.mc_filter_type) { 5354 case 0: /* use bits [47:36] of the address */ 5355 vector = ((uc_addr->addr_bytes[4] >> 4) | 5356 (((uint16_t)uc_addr->addr_bytes[5]) << 4)); 5357 break; 5358 case 1: /* use bits [46:35] of the address */ 5359 vector = ((uc_addr->addr_bytes[4] >> 3) | 5360 (((uint16_t)uc_addr->addr_bytes[5]) << 5)); 5361 break; 5362 case 2: /* use bits [45:34] of the address */ 5363 vector = ((uc_addr->addr_bytes[4] >> 2) | 5364 (((uint16_t)uc_addr->addr_bytes[5]) << 6)); 5365 break; 5366 case 3: /* use bits [43:32] of the address */ 5367 vector = ((uc_addr->addr_bytes[4]) | 5368 (((uint16_t)uc_addr->addr_bytes[5]) << 8)); 5369 break; 5370 default: /* Invalid mc_filter_type */ 5371 break; 5372 } 5373 5374 /* vector can only be 12-bits or boundary will be exceeded */ 5375 vector &= 0xFFF; 5376 return vector; 5377 } 5378 5379 static int 5380 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 5381 uint8_t on) 5382 { 5383 uint32_t vector; 5384 uint32_t uta_idx; 5385 uint32_t reg_val; 5386 uint32_t uta_shift; 5387 uint32_t rc; 5388 const uint32_t ixgbe_uta_idx_mask = 0x7F; 5389 const uint32_t ixgbe_uta_bit_shift = 5; 5390 const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1; 5391 const uint32_t bit1 = 0x1; 5392 5393 struct ixgbe_hw *hw = 5394 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5395 struct ixgbe_uta_info *uta_info = 5396 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5397 5398 /* The UTA table only exists on 82599 hardware and newer */ 5399 if (hw->mac.type < ixgbe_mac_82599EB) 5400 return -ENOTSUP; 5401 5402 vector = ixgbe_uta_vector(hw, mac_addr); 5403 uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; 5404 uta_shift = vector & ixgbe_uta_bit_mask; 5405 5406 rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0); 5407 if (rc == on) 5408 return 0; 5409 5410 reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx)); 5411 if (on) { 5412 uta_info->uta_in_use++; 5413 reg_val |= (bit1 << uta_shift); 5414 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift); 5415 } else { 5416 uta_info->uta_in_use--; 5417 reg_val &= ~(bit1 << uta_shift); 5418 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift); 5419 } 5420 5421 IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val); 5422 5423 if (uta_info->uta_in_use > 0) 5424 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 5425 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 5426 else 5427 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 5428 5429 return 0; 5430 } 5431 5432 static int 5433 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) 5434 { 5435 int i; 5436 struct ixgbe_hw *hw = 5437 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5438 struct ixgbe_uta_info *uta_info = 5439 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5440 5441 /* The UTA table only exists on 82599 hardware and newer */ 5442 if (hw->mac.type < ixgbe_mac_82599EB) 5443 return -ENOTSUP; 5444 5445 if (on) { 5446 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5447 uta_info->uta_shadow[i] = ~0; 5448 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); 5449 } 5450 } else { 5451 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5452 uta_info->uta_shadow[i] = 0; 5453 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 5454 } 5455 } 5456 return 0; 5457 5458 } 5459 5460 uint32_t 5461 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) 5462 { 5463 uint32_t new_val = orig_val; 5464 5465 if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) 5466 new_val |= IXGBE_VMOLR_AUPE; 5467 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC) 5468 new_val |= IXGBE_VMOLR_ROMPE; 5469 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC) 5470 new_val |= IXGBE_VMOLR_ROPE; 5471 if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST) 5472 new_val |= IXGBE_VMOLR_BAM; 5473 if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST) 5474 new_val |= IXGBE_VMOLR_MPE; 5475 5476 return new_val; 5477 } 5478 5479 #define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */ 5480 #define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */ 5481 #define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */ 5482 #define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. */ 5483 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \ 5484 ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \ 5485 ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN)) 5486 5487 static int 5488 ixgbe_mirror_rule_set(struct rte_eth_dev *dev, 5489 struct rte_eth_mirror_conf *mirror_conf, 5490 uint8_t rule_id, uint8_t on) 5491 { 5492 uint32_t mr_ctl, vlvf; 5493 uint32_t mp_lsb = 0; 5494 uint32_t mv_msb = 0; 5495 uint32_t mv_lsb = 0; 5496 uint32_t mp_msb = 0; 5497 uint8_t i = 0; 5498 int reg_index = 0; 5499 uint64_t vlan_mask = 0; 5500 5501 const uint8_t pool_mask_offset = 32; 5502 const uint8_t vlan_mask_offset = 32; 5503 const uint8_t dst_pool_offset = 8; 5504 const uint8_t rule_mr_offset = 4; 5505 const uint8_t mirror_rule_mask = 0x0F; 5506 5507 struct ixgbe_mirror_info *mr_info = 5508 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); 5509 struct ixgbe_hw *hw = 5510 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5511 uint8_t mirror_type = 0; 5512 5513 if (ixgbe_vt_check(hw) < 0) 5514 return -ENOTSUP; 5515 5516 if (rule_id >= IXGBE_MAX_MIRROR_RULES) 5517 return -EINVAL; 5518 5519 if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) { 5520 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.", 5521 mirror_conf->rule_type); 5522 return -EINVAL; 5523 } 5524 5525 if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { 5526 mirror_type |= IXGBE_MRCTL_VLME; 5527 /* Check if vlan id is valid and find conresponding VLAN ID 5528 * index in VLVF 5529 */ 5530 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) { 5531 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { 5532 /* search vlan id related pool vlan filter 5533 * index 5534 */ 5535 reg_index = ixgbe_find_vlvf_slot( 5536 hw, 5537 mirror_conf->vlan.vlan_id[i], 5538 false); 5539 if (reg_index < 0) 5540 return -EINVAL; 5541 vlvf = IXGBE_READ_REG(hw, 5542 IXGBE_VLVF(reg_index)); 5543 if ((vlvf & IXGBE_VLVF_VIEN) && 5544 ((vlvf & IXGBE_VLVF_VLANID_MASK) == 5545 mirror_conf->vlan.vlan_id[i])) 5546 vlan_mask |= (1ULL << reg_index); 5547 else 5548 return -EINVAL; 5549 } 5550 } 5551 5552 if (on) { 5553 mv_lsb = vlan_mask & 0xFFFFFFFF; 5554 mv_msb = vlan_mask >> vlan_mask_offset; 5555 5556 mr_info->mr_conf[rule_id].vlan.vlan_mask = 5557 mirror_conf->vlan.vlan_mask; 5558 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) { 5559 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) 5560 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 5561 mirror_conf->vlan.vlan_id[i]; 5562 } 5563 } else { 5564 mv_lsb = 0; 5565 mv_msb = 0; 5566 mr_info->mr_conf[rule_id].vlan.vlan_mask = 0; 5567 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) 5568 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0; 5569 } 5570 } 5571 5572 /** 5573 * if enable pool mirror, write related pool mask register,if disable 5574 * pool mirror, clear PFMRVM register 5575 */ 5576 if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { 5577 mirror_type |= IXGBE_MRCTL_VPME; 5578 if (on) { 5579 mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF; 5580 mp_msb = mirror_conf->pool_mask >> pool_mask_offset; 5581 mr_info->mr_conf[rule_id].pool_mask = 5582 mirror_conf->pool_mask; 5583 5584 } else { 5585 mp_lsb = 0; 5586 mp_msb = 0; 5587 mr_info->mr_conf[rule_id].pool_mask = 0; 5588 } 5589 } 5590 if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT) 5591 mirror_type |= IXGBE_MRCTL_UPME; 5592 if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT) 5593 mirror_type |= IXGBE_MRCTL_DPME; 5594 5595 /* read mirror control register and recalculate it */ 5596 mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id)); 5597 5598 if (on) { 5599 mr_ctl |= mirror_type; 5600 mr_ctl &= mirror_rule_mask; 5601 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset; 5602 } else { 5603 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask); 5604 } 5605 5606 mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type; 5607 mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool; 5608 5609 /* write mirrror control register */ 5610 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); 5611 5612 /* write pool mirrror control register */ 5613 if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { 5614 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); 5615 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), 5616 mp_msb); 5617 } 5618 /* write VLAN mirrror control register */ 5619 if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { 5620 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); 5621 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), 5622 mv_msb); 5623 } 5624 5625 return 0; 5626 } 5627 5628 static int 5629 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) 5630 { 5631 int mr_ctl = 0; 5632 uint32_t lsb_val = 0; 5633 uint32_t msb_val = 0; 5634 const uint8_t rule_mr_offset = 4; 5635 5636 struct ixgbe_hw *hw = 5637 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5638 struct ixgbe_mirror_info *mr_info = 5639 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); 5640 5641 if (ixgbe_vt_check(hw) < 0) 5642 return -ENOTSUP; 5643 5644 if (rule_id >= IXGBE_MAX_MIRROR_RULES) 5645 return -EINVAL; 5646 5647 memset(&mr_info->mr_conf[rule_id], 0, 5648 sizeof(struct rte_eth_mirror_conf)); 5649 5650 /* clear PFVMCTL register */ 5651 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); 5652 5653 /* clear pool mask register */ 5654 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val); 5655 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val); 5656 5657 /* clear vlan mask register */ 5658 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val); 5659 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val); 5660 5661 return 0; 5662 } 5663 5664 static int 5665 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5666 { 5667 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5668 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5669 struct ixgbe_interrupt *intr = 5670 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5671 struct ixgbe_hw *hw = 5672 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5673 uint32_t vec = IXGBE_MISC_VEC_ID; 5674 5675 if (rte_intr_allow_others(intr_handle)) 5676 vec = IXGBE_RX_VEC_START; 5677 intr->mask |= (1 << vec); 5678 RTE_SET_USED(queue_id); 5679 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5680 5681 rte_intr_enable(intr_handle); 5682 5683 return 0; 5684 } 5685 5686 static int 5687 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5688 { 5689 struct ixgbe_interrupt *intr = 5690 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5691 struct ixgbe_hw *hw = 5692 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5693 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5694 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5695 uint32_t vec = IXGBE_MISC_VEC_ID; 5696 5697 if (rte_intr_allow_others(intr_handle)) 5698 vec = IXGBE_RX_VEC_START; 5699 intr->mask &= ~(1 << vec); 5700 RTE_SET_USED(queue_id); 5701 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5702 5703 return 0; 5704 } 5705 5706 static int 5707 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5708 { 5709 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5710 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5711 uint32_t mask; 5712 struct ixgbe_hw *hw = 5713 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5714 struct ixgbe_interrupt *intr = 5715 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5716 5717 if (queue_id < 16) { 5718 ixgbe_disable_intr(hw); 5719 intr->mask |= (1 << queue_id); 5720 ixgbe_enable_intr(dev); 5721 } else if (queue_id < 32) { 5722 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 5723 mask &= (1 << queue_id); 5724 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 5725 } else if (queue_id < 64) { 5726 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 5727 mask &= (1 << (queue_id - 32)); 5728 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 5729 } 5730 rte_intr_enable(intr_handle); 5731 5732 return 0; 5733 } 5734 5735 static int 5736 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5737 { 5738 uint32_t mask; 5739 struct ixgbe_hw *hw = 5740 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5741 struct ixgbe_interrupt *intr = 5742 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5743 5744 if (queue_id < 16) { 5745 ixgbe_disable_intr(hw); 5746 intr->mask &= ~(1 << queue_id); 5747 ixgbe_enable_intr(dev); 5748 } else if (queue_id < 32) { 5749 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 5750 mask &= ~(1 << queue_id); 5751 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 5752 } else if (queue_id < 64) { 5753 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 5754 mask &= ~(1 << (queue_id - 32)); 5755 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 5756 } 5757 5758 return 0; 5759 } 5760 5761 static void 5762 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 5763 uint8_t queue, uint8_t msix_vector) 5764 { 5765 uint32_t tmp, idx; 5766 5767 if (direction == -1) { 5768 /* other causes */ 5769 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5770 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 5771 tmp &= ~0xFF; 5772 tmp |= msix_vector; 5773 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); 5774 } else { 5775 /* rx or tx cause */ 5776 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5777 idx = ((16 * (queue & 1)) + (8 * direction)); 5778 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 5779 tmp &= ~(0xFF << idx); 5780 tmp |= (msix_vector << idx); 5781 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); 5782 } 5783 } 5784 5785 /** 5786 * set the IVAR registers, mapping interrupt causes to vectors 5787 * @param hw 5788 * pointer to ixgbe_hw struct 5789 * @direction 5790 * 0 for Rx, 1 for Tx, -1 for other causes 5791 * @queue 5792 * queue to map the corresponding interrupt to 5793 * @msix_vector 5794 * the vector to map to the corresponding queue 5795 */ 5796 static void 5797 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 5798 uint8_t queue, uint8_t msix_vector) 5799 { 5800 uint32_t tmp, idx; 5801 5802 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5803 if (hw->mac.type == ixgbe_mac_82598EB) { 5804 if (direction == -1) 5805 direction = 0; 5806 idx = (((direction * 64) + queue) >> 2) & 0x1F; 5807 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); 5808 tmp &= ~(0xFF << (8 * (queue & 0x3))); 5809 tmp |= (msix_vector << (8 * (queue & 0x3))); 5810 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); 5811 } else if ((hw->mac.type == ixgbe_mac_82599EB) || 5812 (hw->mac.type == ixgbe_mac_X540) || 5813 (hw->mac.type == ixgbe_mac_X550)) { 5814 if (direction == -1) { 5815 /* other causes */ 5816 idx = ((queue & 1) * 8); 5817 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5818 tmp &= ~(0xFF << idx); 5819 tmp |= (msix_vector << idx); 5820 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); 5821 } else { 5822 /* rx or tx causes */ 5823 idx = ((16 * (queue & 1)) + (8 * direction)); 5824 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); 5825 tmp &= ~(0xFF << idx); 5826 tmp |= (msix_vector << idx); 5827 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); 5828 } 5829 } 5830 } 5831 5832 static void 5833 ixgbevf_configure_msix(struct rte_eth_dev *dev) 5834 { 5835 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5836 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5837 struct ixgbe_hw *hw = 5838 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5839 uint32_t q_idx; 5840 uint32_t vector_idx = IXGBE_MISC_VEC_ID; 5841 uint32_t base = IXGBE_MISC_VEC_ID; 5842 5843 /* Configure VF other cause ivar */ 5844 ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); 5845 5846 /* won't configure msix register if no mapping is done 5847 * between intr vector and event fd. 5848 */ 5849 if (!rte_intr_dp_is_en(intr_handle)) 5850 return; 5851 5852 if (rte_intr_allow_others(intr_handle)) { 5853 base = IXGBE_RX_VEC_START; 5854 vector_idx = IXGBE_RX_VEC_START; 5855 } 5856 5857 /* Configure all RX queues of VF */ 5858 for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { 5859 /* Force all queue use vector 0, 5860 * as IXGBE_VF_MAXMSIVECOTR = 1 5861 */ 5862 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); 5863 intr_handle->intr_vec[q_idx] = vector_idx; 5864 if (vector_idx < base + intr_handle->nb_efd - 1) 5865 vector_idx++; 5866 } 5867 5868 /* As RX queue setting above show, all queues use the vector 0. 5869 * Set only the ITR value of IXGBE_MISC_VEC_ID. 5870 */ 5871 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID), 5872 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 5873 | IXGBE_EITR_CNT_WDIS); 5874 } 5875 5876 /** 5877 * Sets up the hardware to properly generate MSI-X interrupts 5878 * @hw 5879 * board private structure 5880 */ 5881 static void 5882 ixgbe_configure_msix(struct rte_eth_dev *dev) 5883 { 5884 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5885 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5886 struct ixgbe_hw *hw = 5887 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5888 uint32_t queue_id, base = IXGBE_MISC_VEC_ID; 5889 uint32_t vec = IXGBE_MISC_VEC_ID; 5890 uint32_t mask; 5891 uint32_t gpie; 5892 5893 /* won't configure msix register if no mapping is done 5894 * between intr vector and event fd 5895 * but if misx has been enabled already, need to configure 5896 * auto clean, auto mask and throttling. 5897 */ 5898 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 5899 if (!rte_intr_dp_is_en(intr_handle) && 5900 !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT))) 5901 return; 5902 5903 if (rte_intr_allow_others(intr_handle)) 5904 vec = base = IXGBE_RX_VEC_START; 5905 5906 /* setup GPIE for MSI-x mode */ 5907 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 5908 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | 5909 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME; 5910 /* auto clearing and auto setting corresponding bits in EIMS 5911 * when MSI-X interrupt is triggered 5912 */ 5913 if (hw->mac.type == ixgbe_mac_82598EB) { 5914 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 5915 } else { 5916 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 5917 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 5918 } 5919 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 5920 5921 /* Populate the IVAR table and set the ITR values to the 5922 * corresponding register. 5923 */ 5924 if (rte_intr_dp_is_en(intr_handle)) { 5925 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; 5926 queue_id++) { 5927 /* by default, 1:1 mapping */ 5928 ixgbe_set_ivar_map(hw, 0, queue_id, vec); 5929 intr_handle->intr_vec[queue_id] = vec; 5930 if (vec < base + intr_handle->nb_efd - 1) 5931 vec++; 5932 } 5933 5934 switch (hw->mac.type) { 5935 case ixgbe_mac_82598EB: 5936 ixgbe_set_ivar_map(hw, -1, 5937 IXGBE_IVAR_OTHER_CAUSES_INDEX, 5938 IXGBE_MISC_VEC_ID); 5939 break; 5940 case ixgbe_mac_82599EB: 5941 case ixgbe_mac_X540: 5942 case ixgbe_mac_X550: 5943 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); 5944 break; 5945 default: 5946 break; 5947 } 5948 } 5949 IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), 5950 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 5951 | IXGBE_EITR_CNT_WDIS); 5952 5953 /* set up to autoclear timer, and the vectors */ 5954 mask = IXGBE_EIMS_ENABLE_MASK; 5955 mask &= ~(IXGBE_EIMS_OTHER | 5956 IXGBE_EIMS_MAILBOX | 5957 IXGBE_EIMS_LSC); 5958 5959 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 5960 } 5961 5962 int 5963 ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, 5964 uint16_t queue_idx, uint16_t tx_rate) 5965 { 5966 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5967 struct rte_eth_rxmode *rxmode; 5968 uint32_t rf_dec, rf_int; 5969 uint32_t bcnrc_val; 5970 uint16_t link_speed = dev->data->dev_link.link_speed; 5971 5972 if (queue_idx >= hw->mac.max_tx_queues) 5973 return -EINVAL; 5974 5975 if (tx_rate != 0) { 5976 /* Calculate the rate factor values to set */ 5977 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; 5978 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; 5979 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; 5980 5981 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; 5982 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & 5983 IXGBE_RTTBCNRC_RF_INT_MASK_M); 5984 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); 5985 } else { 5986 bcnrc_val = 0; 5987 } 5988 5989 rxmode = &dev->data->dev_conf.rxmode; 5990 /* 5991 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM 5992 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise 5993 * set as 0x4. 5994 */ 5995 if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) && 5996 (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE)) 5997 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 5998 IXGBE_MMW_SIZE_JUMBO_FRAME); 5999 else 6000 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 6001 IXGBE_MMW_SIZE_DEFAULT); 6002 6003 /* Set RTTBCNRC of queue X */ 6004 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); 6005 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); 6006 IXGBE_WRITE_FLUSH(hw); 6007 6008 return 0; 6009 } 6010 6011 static int 6012 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 6013 __attribute__((unused)) uint32_t index, 6014 __attribute__((unused)) uint32_t pool) 6015 { 6016 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6017 int diag; 6018 6019 /* 6020 * On a 82599 VF, adding again the same MAC addr is not an idempotent 6021 * operation. Trap this case to avoid exhausting the [very limited] 6022 * set of PF resources used to store VF MAC addresses. 6023 */ 6024 if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) 6025 return -1; 6026 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6027 if (diag != 0) 6028 PMD_DRV_LOG(ERR, "Unable to add MAC address " 6029 "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d", 6030 mac_addr->addr_bytes[0], 6031 mac_addr->addr_bytes[1], 6032 mac_addr->addr_bytes[2], 6033 mac_addr->addr_bytes[3], 6034 mac_addr->addr_bytes[4], 6035 mac_addr->addr_bytes[5], 6036 diag); 6037 return diag; 6038 } 6039 6040 static void 6041 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) 6042 { 6043 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6044 struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; 6045 struct ether_addr *mac_addr; 6046 uint32_t i; 6047 int diag; 6048 6049 /* 6050 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does 6051 * not support the deletion of a given MAC address. 6052 * Instead, it imposes to delete all MAC addresses, then to add again 6053 * all MAC addresses with the exception of the one to be deleted. 6054 */ 6055 (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); 6056 6057 /* 6058 * Add again all MAC addresses, with the exception of the deleted one 6059 * and of the permanent MAC address. 6060 */ 6061 for (i = 0, mac_addr = dev->data->mac_addrs; 6062 i < hw->mac.num_rar_entries; i++, mac_addr++) { 6063 /* Skip the deleted MAC address */ 6064 if (i == index) 6065 continue; 6066 /* Skip NULL MAC addresses */ 6067 if (is_zero_ether_addr(mac_addr)) 6068 continue; 6069 /* Skip the permanent MAC address */ 6070 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) 6071 continue; 6072 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6073 if (diag != 0) 6074 PMD_DRV_LOG(ERR, 6075 "Adding again MAC address " 6076 "%02x:%02x:%02x:%02x:%02x:%02x failed " 6077 "diag=%d", 6078 mac_addr->addr_bytes[0], 6079 mac_addr->addr_bytes[1], 6080 mac_addr->addr_bytes[2], 6081 mac_addr->addr_bytes[3], 6082 mac_addr->addr_bytes[4], 6083 mac_addr->addr_bytes[5], 6084 diag); 6085 } 6086 } 6087 6088 static int 6089 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) 6090 { 6091 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6092 6093 hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); 6094 6095 return 0; 6096 } 6097 6098 int 6099 ixgbe_syn_filter_set(struct rte_eth_dev *dev, 6100 struct rte_eth_syn_filter *filter, 6101 bool add) 6102 { 6103 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6104 struct ixgbe_filter_info *filter_info = 6105 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6106 uint32_t syn_info; 6107 uint32_t synqf; 6108 6109 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6110 return -EINVAL; 6111 6112 syn_info = filter_info->syn_info; 6113 6114 if (add) { 6115 if (syn_info & IXGBE_SYN_FILTER_ENABLE) 6116 return -EINVAL; 6117 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & 6118 IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); 6119 6120 if (filter->hig_pri) 6121 synqf |= IXGBE_SYN_FILTER_SYNQFP; 6122 else 6123 synqf &= ~IXGBE_SYN_FILTER_SYNQFP; 6124 } else { 6125 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); 6126 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE)) 6127 return -ENOENT; 6128 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); 6129 } 6130 6131 filter_info->syn_info = synqf; 6132 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 6133 IXGBE_WRITE_FLUSH(hw); 6134 return 0; 6135 } 6136 6137 static int 6138 ixgbe_syn_filter_get(struct rte_eth_dev *dev, 6139 struct rte_eth_syn_filter *filter) 6140 { 6141 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6142 uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); 6143 6144 if (synqf & IXGBE_SYN_FILTER_ENABLE) { 6145 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0; 6146 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1); 6147 return 0; 6148 } 6149 return -ENOENT; 6150 } 6151 6152 static int 6153 ixgbe_syn_filter_handle(struct rte_eth_dev *dev, 6154 enum rte_filter_op filter_op, 6155 void *arg) 6156 { 6157 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6158 int ret; 6159 6160 MAC_TYPE_FILTER_SUP(hw->mac.type); 6161 6162 if (filter_op == RTE_ETH_FILTER_NOP) 6163 return 0; 6164 6165 if (arg == NULL) { 6166 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", 6167 filter_op); 6168 return -EINVAL; 6169 } 6170 6171 switch (filter_op) { 6172 case RTE_ETH_FILTER_ADD: 6173 ret = ixgbe_syn_filter_set(dev, 6174 (struct rte_eth_syn_filter *)arg, 6175 TRUE); 6176 break; 6177 case RTE_ETH_FILTER_DELETE: 6178 ret = ixgbe_syn_filter_set(dev, 6179 (struct rte_eth_syn_filter *)arg, 6180 FALSE); 6181 break; 6182 case RTE_ETH_FILTER_GET: 6183 ret = ixgbe_syn_filter_get(dev, 6184 (struct rte_eth_syn_filter *)arg); 6185 break; 6186 default: 6187 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); 6188 ret = -EINVAL; 6189 break; 6190 } 6191 6192 return ret; 6193 } 6194 6195 6196 static inline enum ixgbe_5tuple_protocol 6197 convert_protocol_type(uint8_t protocol_value) 6198 { 6199 if (protocol_value == IPPROTO_TCP) 6200 return IXGBE_FILTER_PROTOCOL_TCP; 6201 else if (protocol_value == IPPROTO_UDP) 6202 return IXGBE_FILTER_PROTOCOL_UDP; 6203 else if (protocol_value == IPPROTO_SCTP) 6204 return IXGBE_FILTER_PROTOCOL_SCTP; 6205 else 6206 return IXGBE_FILTER_PROTOCOL_NONE; 6207 } 6208 6209 /* inject a 5-tuple filter to HW */ 6210 static inline void 6211 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, 6212 struct ixgbe_5tuple_filter *filter) 6213 { 6214 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6215 int i; 6216 uint32_t ftqf, sdpqf; 6217 uint32_t l34timir = 0; 6218 uint8_t mask = 0xff; 6219 6220 i = filter->index; 6221 6222 sdpqf = (uint32_t)(filter->filter_info.dst_port << 6223 IXGBE_SDPQF_DSTPORT_SHIFT); 6224 sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); 6225 6226 ftqf = (uint32_t)(filter->filter_info.proto & 6227 IXGBE_FTQF_PROTOCOL_MASK); 6228 ftqf |= (uint32_t)((filter->filter_info.priority & 6229 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); 6230 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */ 6231 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; 6232 if (filter->filter_info.dst_ip_mask == 0) 6233 mask &= IXGBE_FTQF_DEST_ADDR_MASK; 6234 if (filter->filter_info.src_port_mask == 0) 6235 mask &= IXGBE_FTQF_SOURCE_PORT_MASK; 6236 if (filter->filter_info.dst_port_mask == 0) 6237 mask &= IXGBE_FTQF_DEST_PORT_MASK; 6238 if (filter->filter_info.proto_mask == 0) 6239 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; 6240 ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; 6241 ftqf |= IXGBE_FTQF_POOL_MASK_EN; 6242 ftqf |= IXGBE_FTQF_QUEUE_ENABLE; 6243 6244 IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); 6245 IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); 6246 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); 6247 IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); 6248 6249 l34timir |= IXGBE_L34T_IMIR_RESERVE; 6250 l34timir |= (uint32_t)(filter->queue << 6251 IXGBE_L34T_IMIR_QUEUE_SHIFT); 6252 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); 6253 } 6254 6255 /* 6256 * add a 5tuple filter 6257 * 6258 * @param 6259 * dev: Pointer to struct rte_eth_dev. 6260 * index: the index the filter allocates. 6261 * filter: ponter to the filter that will be added. 6262 * rx_queue: the queue id the filter assigned to. 6263 * 6264 * @return 6265 * - On success, zero. 6266 * - On failure, a negative value. 6267 */ 6268 static int 6269 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 6270 struct ixgbe_5tuple_filter *filter) 6271 { 6272 struct ixgbe_filter_info *filter_info = 6273 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6274 int i, idx, shift; 6275 6276 /* 6277 * look for an unused 5tuple filter index, 6278 * and insert the filter to list. 6279 */ 6280 for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { 6281 idx = i / (sizeof(uint32_t) * NBBY); 6282 shift = i % (sizeof(uint32_t) * NBBY); 6283 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { 6284 filter_info->fivetuple_mask[idx] |= 1 << shift; 6285 filter->index = i; 6286 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 6287 filter, 6288 entries); 6289 break; 6290 } 6291 } 6292 if (i >= IXGBE_MAX_FTQF_FILTERS) { 6293 PMD_DRV_LOG(ERR, "5tuple filters are full."); 6294 return -ENOSYS; 6295 } 6296 6297 ixgbe_inject_5tuple_filter(dev, filter); 6298 6299 return 0; 6300 } 6301 6302 /* 6303 * remove a 5tuple filter 6304 * 6305 * @param 6306 * dev: Pointer to struct rte_eth_dev. 6307 * filter: the pointer of the filter will be removed. 6308 */ 6309 static void 6310 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 6311 struct ixgbe_5tuple_filter *filter) 6312 { 6313 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6314 struct ixgbe_filter_info *filter_info = 6315 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6316 uint16_t index = filter->index; 6317 6318 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= 6319 ~(1 << (index % (sizeof(uint32_t) * NBBY))); 6320 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); 6321 rte_free(filter); 6322 6323 IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); 6324 IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); 6325 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); 6326 IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); 6327 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); 6328 } 6329 6330 static int 6331 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 6332 { 6333 struct ixgbe_hw *hw; 6334 uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 6335 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode; 6336 6337 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6338 6339 if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN)) 6340 return -EINVAL; 6341 6342 /* refuse mtu that requires the support of scattered packets when this 6343 * feature has not been enabled before. 6344 */ 6345 if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) && 6346 (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > 6347 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) 6348 return -EINVAL; 6349 6350 /* 6351 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU 6352 * request of the version 2.0 of the mailbox API. 6353 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 6354 * of the mailbox API. 6355 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers 6356 * prior to 3.11.33 which contains the following change: 6357 * "ixgbe: Enable jumbo frames support w/ SR-IOV" 6358 */ 6359 ixgbevf_rlpml_set_vf(hw, max_frame); 6360 6361 /* update max frame size */ 6362 dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame; 6363 return 0; 6364 } 6365 6366 static inline struct ixgbe_5tuple_filter * 6367 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, 6368 struct ixgbe_5tuple_filter_info *key) 6369 { 6370 struct ixgbe_5tuple_filter *it; 6371 6372 TAILQ_FOREACH(it, filter_list, entries) { 6373 if (memcmp(key, &it->filter_info, 6374 sizeof(struct ixgbe_5tuple_filter_info)) == 0) { 6375 return it; 6376 } 6377 } 6378 return NULL; 6379 } 6380 6381 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/ 6382 static inline int 6383 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, 6384 struct ixgbe_5tuple_filter_info *filter_info) 6385 { 6386 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || 6387 filter->priority > IXGBE_5TUPLE_MAX_PRI || 6388 filter->priority < IXGBE_5TUPLE_MIN_PRI) 6389 return -EINVAL; 6390 6391 switch (filter->dst_ip_mask) { 6392 case UINT32_MAX: 6393 filter_info->dst_ip_mask = 0; 6394 filter_info->dst_ip = filter->dst_ip; 6395 break; 6396 case 0: 6397 filter_info->dst_ip_mask = 1; 6398 break; 6399 default: 6400 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 6401 return -EINVAL; 6402 } 6403 6404 switch (filter->src_ip_mask) { 6405 case UINT32_MAX: 6406 filter_info->src_ip_mask = 0; 6407 filter_info->src_ip = filter->src_ip; 6408 break; 6409 case 0: 6410 filter_info->src_ip_mask = 1; 6411 break; 6412 default: 6413 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 6414 return -EINVAL; 6415 } 6416 6417 switch (filter->dst_port_mask) { 6418 case UINT16_MAX: 6419 filter_info->dst_port_mask = 0; 6420 filter_info->dst_port = filter->dst_port; 6421 break; 6422 case 0: 6423 filter_info->dst_port_mask = 1; 6424 break; 6425 default: 6426 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 6427 return -EINVAL; 6428 } 6429 6430 switch (filter->src_port_mask) { 6431 case UINT16_MAX: 6432 filter_info->src_port_mask = 0; 6433 filter_info->src_port = filter->src_port; 6434 break; 6435 case 0: 6436 filter_info->src_port_mask = 1; 6437 break; 6438 default: 6439 PMD_DRV_LOG(ERR, "invalid src_port mask."); 6440 return -EINVAL; 6441 } 6442 6443 switch (filter->proto_mask) { 6444 case UINT8_MAX: 6445 filter_info->proto_mask = 0; 6446 filter_info->proto = 6447 convert_protocol_type(filter->proto); 6448 break; 6449 case 0: 6450 filter_info->proto_mask = 1; 6451 break; 6452 default: 6453 PMD_DRV_LOG(ERR, "invalid protocol mask."); 6454 return -EINVAL; 6455 } 6456 6457 filter_info->priority = (uint8_t)filter->priority; 6458 return 0; 6459 } 6460 6461 /* 6462 * add or delete a ntuple filter 6463 * 6464 * @param 6465 * dev: Pointer to struct rte_eth_dev. 6466 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 6467 * add: if true, add filter, if false, remove filter 6468 * 6469 * @return 6470 * - On success, zero. 6471 * - On failure, a negative value. 6472 */ 6473 int 6474 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, 6475 struct rte_eth_ntuple_filter *ntuple_filter, 6476 bool add) 6477 { 6478 struct ixgbe_filter_info *filter_info = 6479 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6480 struct ixgbe_5tuple_filter_info filter_5tuple; 6481 struct ixgbe_5tuple_filter *filter; 6482 int ret; 6483 6484 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { 6485 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 6486 return -EINVAL; 6487 } 6488 6489 memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); 6490 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); 6491 if (ret < 0) 6492 return ret; 6493 6494 filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, 6495 &filter_5tuple); 6496 if (filter != NULL && add) { 6497 PMD_DRV_LOG(ERR, "filter exists."); 6498 return -EEXIST; 6499 } 6500 if (filter == NULL && !add) { 6501 PMD_DRV_LOG(ERR, "filter doesn't exist."); 6502 return -ENOENT; 6503 } 6504 6505 if (add) { 6506 filter = rte_zmalloc("ixgbe_5tuple_filter", 6507 sizeof(struct ixgbe_5tuple_filter), 0); 6508 if (filter == NULL) 6509 return -ENOMEM; 6510 rte_memcpy(&filter->filter_info, 6511 &filter_5tuple, 6512 sizeof(struct ixgbe_5tuple_filter_info)); 6513 filter->queue = ntuple_filter->queue; 6514 ret = ixgbe_add_5tuple_filter(dev, filter); 6515 if (ret < 0) { 6516 rte_free(filter); 6517 return ret; 6518 } 6519 } else 6520 ixgbe_remove_5tuple_filter(dev, filter); 6521 6522 return 0; 6523 } 6524 6525 /* 6526 * get a ntuple filter 6527 * 6528 * @param 6529 * dev: Pointer to struct rte_eth_dev. 6530 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 6531 * 6532 * @return 6533 * - On success, zero. 6534 * - On failure, a negative value. 6535 */ 6536 static int 6537 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, 6538 struct rte_eth_ntuple_filter *ntuple_filter) 6539 { 6540 struct ixgbe_filter_info *filter_info = 6541 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6542 struct ixgbe_5tuple_filter_info filter_5tuple; 6543 struct ixgbe_5tuple_filter *filter; 6544 int ret; 6545 6546 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { 6547 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 6548 return -EINVAL; 6549 } 6550 6551 memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); 6552 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); 6553 if (ret < 0) 6554 return ret; 6555 6556 filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, 6557 &filter_5tuple); 6558 if (filter == NULL) { 6559 PMD_DRV_LOG(ERR, "filter doesn't exist."); 6560 return -ENOENT; 6561 } 6562 ntuple_filter->queue = filter->queue; 6563 return 0; 6564 } 6565 6566 /* 6567 * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter. 6568 * @dev: pointer to rte_eth_dev structure 6569 * @filter_op:operation will be taken. 6570 * @arg: a pointer to specific structure corresponding to the filter_op 6571 * 6572 * @return 6573 * - On success, zero. 6574 * - On failure, a negative value. 6575 */ 6576 static int 6577 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, 6578 enum rte_filter_op filter_op, 6579 void *arg) 6580 { 6581 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6582 int ret; 6583 6584 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); 6585 6586 if (filter_op == RTE_ETH_FILTER_NOP) 6587 return 0; 6588 6589 if (arg == NULL) { 6590 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 6591 filter_op); 6592 return -EINVAL; 6593 } 6594 6595 switch (filter_op) { 6596 case RTE_ETH_FILTER_ADD: 6597 ret = ixgbe_add_del_ntuple_filter(dev, 6598 (struct rte_eth_ntuple_filter *)arg, 6599 TRUE); 6600 break; 6601 case RTE_ETH_FILTER_DELETE: 6602 ret = ixgbe_add_del_ntuple_filter(dev, 6603 (struct rte_eth_ntuple_filter *)arg, 6604 FALSE); 6605 break; 6606 case RTE_ETH_FILTER_GET: 6607 ret = ixgbe_get_ntuple_filter(dev, 6608 (struct rte_eth_ntuple_filter *)arg); 6609 break; 6610 default: 6611 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 6612 ret = -EINVAL; 6613 break; 6614 } 6615 return ret; 6616 } 6617 6618 int 6619 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, 6620 struct rte_eth_ethertype_filter *filter, 6621 bool add) 6622 { 6623 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6624 struct ixgbe_filter_info *filter_info = 6625 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6626 uint32_t etqf = 0; 6627 uint32_t etqs = 0; 6628 int ret; 6629 struct ixgbe_ethertype_filter ethertype_filter; 6630 6631 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6632 return -EINVAL; 6633 6634 if (filter->ether_type == ETHER_TYPE_IPv4 || 6635 filter->ether_type == ETHER_TYPE_IPv6) { 6636 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 6637 " ethertype filter.", filter->ether_type); 6638 return -EINVAL; 6639 } 6640 6641 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 6642 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 6643 return -EINVAL; 6644 } 6645 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 6646 PMD_DRV_LOG(ERR, "drop option is unsupported."); 6647 return -EINVAL; 6648 } 6649 6650 ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); 6651 if (ret >= 0 && add) { 6652 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 6653 filter->ether_type); 6654 return -EEXIST; 6655 } 6656 if (ret < 0 && !add) { 6657 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 6658 filter->ether_type); 6659 return -ENOENT; 6660 } 6661 6662 if (add) { 6663 etqf = IXGBE_ETQF_FILTER_EN; 6664 etqf |= (uint32_t)filter->ether_type; 6665 etqs |= (uint32_t)((filter->queue << 6666 IXGBE_ETQS_RX_QUEUE_SHIFT) & 6667 IXGBE_ETQS_RX_QUEUE); 6668 etqs |= IXGBE_ETQS_QUEUE_EN; 6669 6670 ethertype_filter.ethertype = filter->ether_type; 6671 ethertype_filter.etqf = etqf; 6672 ethertype_filter.etqs = etqs; 6673 ethertype_filter.conf = FALSE; 6674 ret = ixgbe_ethertype_filter_insert(filter_info, 6675 ðertype_filter); 6676 if (ret < 0) { 6677 PMD_DRV_LOG(ERR, "ethertype filters are full."); 6678 return -ENOSPC; 6679 } 6680 } else { 6681 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret); 6682 if (ret < 0) 6683 return -ENOSYS; 6684 } 6685 IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); 6686 IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); 6687 IXGBE_WRITE_FLUSH(hw); 6688 6689 return 0; 6690 } 6691 6692 static int 6693 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, 6694 struct rte_eth_ethertype_filter *filter) 6695 { 6696 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6697 struct ixgbe_filter_info *filter_info = 6698 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6699 uint32_t etqf, etqs; 6700 int ret; 6701 6702 ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); 6703 if (ret < 0) { 6704 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 6705 filter->ether_type); 6706 return -ENOENT; 6707 } 6708 6709 etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret)); 6710 if (etqf & IXGBE_ETQF_FILTER_EN) { 6711 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret)); 6712 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE; 6713 filter->flags = 0; 6714 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> 6715 IXGBE_ETQS_RX_QUEUE_SHIFT; 6716 return 0; 6717 } 6718 return -ENOENT; 6719 } 6720 6721 /* 6722 * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter. 6723 * @dev: pointer to rte_eth_dev structure 6724 * @filter_op:operation will be taken. 6725 * @arg: a pointer to specific structure corresponding to the filter_op 6726 */ 6727 static int 6728 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, 6729 enum rte_filter_op filter_op, 6730 void *arg) 6731 { 6732 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6733 int ret; 6734 6735 MAC_TYPE_FILTER_SUP(hw->mac.type); 6736 6737 if (filter_op == RTE_ETH_FILTER_NOP) 6738 return 0; 6739 6740 if (arg == NULL) { 6741 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 6742 filter_op); 6743 return -EINVAL; 6744 } 6745 6746 switch (filter_op) { 6747 case RTE_ETH_FILTER_ADD: 6748 ret = ixgbe_add_del_ethertype_filter(dev, 6749 (struct rte_eth_ethertype_filter *)arg, 6750 TRUE); 6751 break; 6752 case RTE_ETH_FILTER_DELETE: 6753 ret = ixgbe_add_del_ethertype_filter(dev, 6754 (struct rte_eth_ethertype_filter *)arg, 6755 FALSE); 6756 break; 6757 case RTE_ETH_FILTER_GET: 6758 ret = ixgbe_get_ethertype_filter(dev, 6759 (struct rte_eth_ethertype_filter *)arg); 6760 break; 6761 default: 6762 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 6763 ret = -EINVAL; 6764 break; 6765 } 6766 return ret; 6767 } 6768 6769 static int 6770 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, 6771 enum rte_filter_type filter_type, 6772 enum rte_filter_op filter_op, 6773 void *arg) 6774 { 6775 int ret = 0; 6776 6777 switch (filter_type) { 6778 case RTE_ETH_FILTER_NTUPLE: 6779 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg); 6780 break; 6781 case RTE_ETH_FILTER_ETHERTYPE: 6782 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg); 6783 break; 6784 case RTE_ETH_FILTER_SYN: 6785 ret = ixgbe_syn_filter_handle(dev, filter_op, arg); 6786 break; 6787 case RTE_ETH_FILTER_FDIR: 6788 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg); 6789 break; 6790 case RTE_ETH_FILTER_L2_TUNNEL: 6791 ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg); 6792 break; 6793 case RTE_ETH_FILTER_GENERIC: 6794 if (filter_op != RTE_ETH_FILTER_GET) 6795 return -EINVAL; 6796 *(const void **)arg = &ixgbe_flow_ops; 6797 break; 6798 default: 6799 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", 6800 filter_type); 6801 ret = -EINVAL; 6802 break; 6803 } 6804 6805 return ret; 6806 } 6807 6808 static u8 * 6809 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw, 6810 u8 **mc_addr_ptr, u32 *vmdq) 6811 { 6812 u8 *mc_addr; 6813 6814 *vmdq = 0; 6815 mc_addr = *mc_addr_ptr; 6816 *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr)); 6817 return mc_addr; 6818 } 6819 6820 static int 6821 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 6822 struct ether_addr *mc_addr_set, 6823 uint32_t nb_mc_addr) 6824 { 6825 struct ixgbe_hw *hw; 6826 u8 *mc_addr_list; 6827 6828 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6829 mc_addr_list = (u8 *)mc_addr_set; 6830 return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, 6831 ixgbe_dev_addr_list_itr, TRUE); 6832 } 6833 6834 static uint64_t 6835 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) 6836 { 6837 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6838 uint64_t systime_cycles; 6839 6840 switch (hw->mac.type) { 6841 case ixgbe_mac_X550: 6842 case ixgbe_mac_X550EM_x: 6843 case ixgbe_mac_X550EM_a: 6844 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ 6845 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6846 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6847 * NSEC_PER_SEC; 6848 break; 6849 default: 6850 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6851 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6852 << 32; 6853 } 6854 6855 return systime_cycles; 6856 } 6857 6858 static uint64_t 6859 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6860 { 6861 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6862 uint64_t rx_tstamp_cycles; 6863 6864 switch (hw->mac.type) { 6865 case ixgbe_mac_X550: 6866 case ixgbe_mac_X550EM_x: 6867 case ixgbe_mac_X550EM_a: 6868 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6869 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6870 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6871 * NSEC_PER_SEC; 6872 break; 6873 default: 6874 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6875 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6876 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6877 << 32; 6878 } 6879 6880 return rx_tstamp_cycles; 6881 } 6882 6883 static uint64_t 6884 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6885 { 6886 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6887 uint64_t tx_tstamp_cycles; 6888 6889 switch (hw->mac.type) { 6890 case ixgbe_mac_X550: 6891 case ixgbe_mac_X550EM_x: 6892 case ixgbe_mac_X550EM_a: 6893 /* TXSTMPL stores ns and TXSTMPH stores seconds. */ 6894 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6895 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6896 * NSEC_PER_SEC; 6897 break; 6898 default: 6899 /* TXSTMPL stores ns and TXSTMPH stores seconds. */ 6900 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6901 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6902 << 32; 6903 } 6904 6905 return tx_tstamp_cycles; 6906 } 6907 6908 static void 6909 ixgbe_start_timecounters(struct rte_eth_dev *dev) 6910 { 6911 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6912 struct ixgbe_adapter *adapter = 6913 (struct ixgbe_adapter *)dev->data->dev_private; 6914 struct rte_eth_link link; 6915 uint32_t incval = 0; 6916 uint32_t shift = 0; 6917 6918 /* Get current link speed. */ 6919 ixgbe_dev_link_update(dev, 1); 6920 rte_eth_linkstatus_get(dev, &link); 6921 6922 switch (link.link_speed) { 6923 case ETH_SPEED_NUM_100M: 6924 incval = IXGBE_INCVAL_100; 6925 shift = IXGBE_INCVAL_SHIFT_100; 6926 break; 6927 case ETH_SPEED_NUM_1G: 6928 incval = IXGBE_INCVAL_1GB; 6929 shift = IXGBE_INCVAL_SHIFT_1GB; 6930 break; 6931 case ETH_SPEED_NUM_10G: 6932 default: 6933 incval = IXGBE_INCVAL_10GB; 6934 shift = IXGBE_INCVAL_SHIFT_10GB; 6935 break; 6936 } 6937 6938 switch (hw->mac.type) { 6939 case ixgbe_mac_X550: 6940 case ixgbe_mac_X550EM_x: 6941 case ixgbe_mac_X550EM_a: 6942 /* Independent of link speed. */ 6943 incval = 1; 6944 /* Cycles read will be interpreted as ns. */ 6945 shift = 0; 6946 /* Fall-through */ 6947 case ixgbe_mac_X540: 6948 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); 6949 break; 6950 case ixgbe_mac_82599EB: 6951 incval >>= IXGBE_INCVAL_SHIFT_82599; 6952 shift -= IXGBE_INCVAL_SHIFT_82599; 6953 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 6954 (1 << IXGBE_INCPER_SHIFT_82599) | incval); 6955 break; 6956 default: 6957 /* Not supported. */ 6958 return; 6959 } 6960 6961 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 6962 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6963 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6964 6965 adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6966 adapter->systime_tc.cc_shift = shift; 6967 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 6968 6969 adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6970 adapter->rx_tstamp_tc.cc_shift = shift; 6971 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6972 6973 adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6974 adapter->tx_tstamp_tc.cc_shift = shift; 6975 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6976 } 6977 6978 static int 6979 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 6980 { 6981 struct ixgbe_adapter *adapter = 6982 (struct ixgbe_adapter *)dev->data->dev_private; 6983 6984 adapter->systime_tc.nsec += delta; 6985 adapter->rx_tstamp_tc.nsec += delta; 6986 adapter->tx_tstamp_tc.nsec += delta; 6987 6988 return 0; 6989 } 6990 6991 static int 6992 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 6993 { 6994 uint64_t ns; 6995 struct ixgbe_adapter *adapter = 6996 (struct ixgbe_adapter *)dev->data->dev_private; 6997 6998 ns = rte_timespec_to_ns(ts); 6999 /* Set the timecounters to a new value. */ 7000 adapter->systime_tc.nsec = ns; 7001 adapter->rx_tstamp_tc.nsec = ns; 7002 adapter->tx_tstamp_tc.nsec = ns; 7003 7004 return 0; 7005 } 7006 7007 static int 7008 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 7009 { 7010 uint64_t ns, systime_cycles; 7011 struct ixgbe_adapter *adapter = 7012 (struct ixgbe_adapter *)dev->data->dev_private; 7013 7014 systime_cycles = ixgbe_read_systime_cyclecounter(dev); 7015 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 7016 *ts = rte_ns_to_timespec(ns); 7017 7018 return 0; 7019 } 7020 7021 static int 7022 ixgbe_timesync_enable(struct rte_eth_dev *dev) 7023 { 7024 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7025 uint32_t tsync_ctl; 7026 uint32_t tsauxc; 7027 7028 /* Stop the timesync system time. */ 7029 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0); 7030 /* Reset the timesync system time value. */ 7031 IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0); 7032 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0); 7033 7034 /* Enable system time for platforms where it isn't on by default. */ 7035 tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); 7036 tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME; 7037 IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); 7038 7039 ixgbe_start_timecounters(dev); 7040 7041 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 7042 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 7043 (ETHER_TYPE_1588 | 7044 IXGBE_ETQF_FILTER_EN | 7045 IXGBE_ETQF_1588)); 7046 7047 /* Enable timestamping of received PTP packets. */ 7048 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 7049 tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED; 7050 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); 7051 7052 /* Enable timestamping of transmitted PTP packets. */ 7053 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 7054 tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED; 7055 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); 7056 7057 IXGBE_WRITE_FLUSH(hw); 7058 7059 return 0; 7060 } 7061 7062 static int 7063 ixgbe_timesync_disable(struct rte_eth_dev *dev) 7064 { 7065 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7066 uint32_t tsync_ctl; 7067 7068 /* Disable timestamping of transmitted PTP packets. */ 7069 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 7070 tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED; 7071 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); 7072 7073 /* Disable timestamping of received PTP packets. */ 7074 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 7075 tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED; 7076 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); 7077 7078 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 7079 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); 7080 7081 /* Stop incrementating the System Time registers. */ 7082 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); 7083 7084 return 0; 7085 } 7086 7087 static int 7088 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 7089 struct timespec *timestamp, 7090 uint32_t flags __rte_unused) 7091 { 7092 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7093 struct ixgbe_adapter *adapter = 7094 (struct ixgbe_adapter *)dev->data->dev_private; 7095 uint32_t tsync_rxctl; 7096 uint64_t rx_tstamp_cycles; 7097 uint64_t ns; 7098 7099 tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 7100 if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0) 7101 return -EINVAL; 7102 7103 rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev); 7104 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 7105 *timestamp = rte_ns_to_timespec(ns); 7106 7107 return 0; 7108 } 7109 7110 static int 7111 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 7112 struct timespec *timestamp) 7113 { 7114 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7115 struct ixgbe_adapter *adapter = 7116 (struct ixgbe_adapter *)dev->data->dev_private; 7117 uint32_t tsync_txctl; 7118 uint64_t tx_tstamp_cycles; 7119 uint64_t ns; 7120 7121 tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 7122 if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0) 7123 return -EINVAL; 7124 7125 tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev); 7126 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 7127 *timestamp = rte_ns_to_timespec(ns); 7128 7129 return 0; 7130 } 7131 7132 static int 7133 ixgbe_get_reg_length(struct rte_eth_dev *dev) 7134 { 7135 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7136 int count = 0; 7137 int g_ind = 0; 7138 const struct reg_info *reg_group; 7139 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 7140 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 7141 7142 while ((reg_group = reg_set[g_ind++])) 7143 count += ixgbe_regs_group_count(reg_group); 7144 7145 return count; 7146 } 7147 7148 static int 7149 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) 7150 { 7151 int count = 0; 7152 int g_ind = 0; 7153 const struct reg_info *reg_group; 7154 7155 while ((reg_group = ixgbevf_regs[g_ind++])) 7156 count += ixgbe_regs_group_count(reg_group); 7157 7158 return count; 7159 } 7160 7161 static int 7162 ixgbe_get_regs(struct rte_eth_dev *dev, 7163 struct rte_dev_reg_info *regs) 7164 { 7165 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7166 uint32_t *data = regs->data; 7167 int g_ind = 0; 7168 int count = 0; 7169 const struct reg_info *reg_group; 7170 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 7171 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 7172 7173 if (data == NULL) { 7174 regs->length = ixgbe_get_reg_length(dev); 7175 regs->width = sizeof(uint32_t); 7176 return 0; 7177 } 7178 7179 /* Support only full register dump */ 7180 if ((regs->length == 0) || 7181 (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { 7182 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 7183 hw->device_id; 7184 while ((reg_group = reg_set[g_ind++])) 7185 count += ixgbe_read_regs_group(dev, &data[count], 7186 reg_group); 7187 return 0; 7188 } 7189 7190 return -ENOTSUP; 7191 } 7192 7193 static int 7194 ixgbevf_get_regs(struct rte_eth_dev *dev, 7195 struct rte_dev_reg_info *regs) 7196 { 7197 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7198 uint32_t *data = regs->data; 7199 int g_ind = 0; 7200 int count = 0; 7201 const struct reg_info *reg_group; 7202 7203 if (data == NULL) { 7204 regs->length = ixgbevf_get_reg_length(dev); 7205 regs->width = sizeof(uint32_t); 7206 return 0; 7207 } 7208 7209 /* Support only full register dump */ 7210 if ((regs->length == 0) || 7211 (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { 7212 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 7213 hw->device_id; 7214 while ((reg_group = ixgbevf_regs[g_ind++])) 7215 count += ixgbe_read_regs_group(dev, &data[count], 7216 reg_group); 7217 return 0; 7218 } 7219 7220 return -ENOTSUP; 7221 } 7222 7223 static int 7224 ixgbe_get_eeprom_length(struct rte_eth_dev *dev) 7225 { 7226 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7227 7228 /* Return unit is byte count */ 7229 return hw->eeprom.word_size * 2; 7230 } 7231 7232 static int 7233 ixgbe_get_eeprom(struct rte_eth_dev *dev, 7234 struct rte_dev_eeprom_info *in_eeprom) 7235 { 7236 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7237 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7238 uint16_t *data = in_eeprom->data; 7239 int first, length; 7240 7241 first = in_eeprom->offset >> 1; 7242 length = in_eeprom->length >> 1; 7243 if ((first > hw->eeprom.word_size) || 7244 ((first + length) > hw->eeprom.word_size)) 7245 return -EINVAL; 7246 7247 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7248 7249 return eeprom->ops.read_buffer(hw, first, length, data); 7250 } 7251 7252 static int 7253 ixgbe_set_eeprom(struct rte_eth_dev *dev, 7254 struct rte_dev_eeprom_info *in_eeprom) 7255 { 7256 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7257 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7258 uint16_t *data = in_eeprom->data; 7259 int first, length; 7260 7261 first = in_eeprom->offset >> 1; 7262 length = in_eeprom->length >> 1; 7263 if ((first > hw->eeprom.word_size) || 7264 ((first + length) > hw->eeprom.word_size)) 7265 return -EINVAL; 7266 7267 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7268 7269 return eeprom->ops.write_buffer(hw, first, length, data); 7270 } 7271 7272 static int 7273 ixgbe_get_module_info(struct rte_eth_dev *dev, 7274 struct rte_eth_dev_module_info *modinfo) 7275 { 7276 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7277 uint32_t status; 7278 uint8_t sff8472_rev, addr_mode; 7279 bool page_swap = false; 7280 7281 /* Check whether we support SFF-8472 or not */ 7282 status = hw->phy.ops.read_i2c_eeprom(hw, 7283 IXGBE_SFF_SFF_8472_COMP, 7284 &sff8472_rev); 7285 if (status != 0) 7286 return -EIO; 7287 7288 /* addressing mode is not supported */ 7289 status = hw->phy.ops.read_i2c_eeprom(hw, 7290 IXGBE_SFF_SFF_8472_SWAP, 7291 &addr_mode); 7292 if (status != 0) 7293 return -EIO; 7294 7295 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { 7296 PMD_DRV_LOG(ERR, 7297 "Address change required to access page 0xA2, " 7298 "but not supported. Please report the module " 7299 "type to the driver maintainers."); 7300 page_swap = true; 7301 } 7302 7303 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { 7304 /* We have a SFP, but it does not support SFF-8472 */ 7305 modinfo->type = RTE_ETH_MODULE_SFF_8079; 7306 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; 7307 } else { 7308 /* We have a SFP which supports a revision of SFF-8472. */ 7309 modinfo->type = RTE_ETH_MODULE_SFF_8472; 7310 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 7311 } 7312 7313 return 0; 7314 } 7315 7316 static int 7317 ixgbe_get_module_eeprom(struct rte_eth_dev *dev, 7318 struct rte_dev_eeprom_info *info) 7319 { 7320 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7321 uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID; 7322 uint8_t databyte = 0xFF; 7323 uint8_t *data = info->data; 7324 uint32_t i = 0; 7325 7326 if (info->length == 0) 7327 return -EINVAL; 7328 7329 for (i = info->offset; i < info->offset + info->length; i++) { 7330 if (i < RTE_ETH_MODULE_SFF_8079_LEN) 7331 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); 7332 else 7333 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); 7334 7335 if (status != 0) 7336 return -EIO; 7337 7338 data[i - info->offset] = databyte; 7339 } 7340 7341 return 0; 7342 } 7343 7344 uint16_t 7345 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { 7346 switch (mac_type) { 7347 case ixgbe_mac_X550: 7348 case ixgbe_mac_X550EM_x: 7349 case ixgbe_mac_X550EM_a: 7350 return ETH_RSS_RETA_SIZE_512; 7351 case ixgbe_mac_X550_vf: 7352 case ixgbe_mac_X550EM_x_vf: 7353 case ixgbe_mac_X550EM_a_vf: 7354 return ETH_RSS_RETA_SIZE_64; 7355 default: 7356 return ETH_RSS_RETA_SIZE_128; 7357 } 7358 } 7359 7360 uint32_t 7361 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { 7362 switch (mac_type) { 7363 case ixgbe_mac_X550: 7364 case ixgbe_mac_X550EM_x: 7365 case ixgbe_mac_X550EM_a: 7366 if (reta_idx < ETH_RSS_RETA_SIZE_128) 7367 return IXGBE_RETA(reta_idx >> 2); 7368 else 7369 return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2); 7370 case ixgbe_mac_X550_vf: 7371 case ixgbe_mac_X550EM_x_vf: 7372 case ixgbe_mac_X550EM_a_vf: 7373 return IXGBE_VFRETA(reta_idx >> 2); 7374 default: 7375 return IXGBE_RETA(reta_idx >> 2); 7376 } 7377 } 7378 7379 uint32_t 7380 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { 7381 switch (mac_type) { 7382 case ixgbe_mac_X550_vf: 7383 case ixgbe_mac_X550EM_x_vf: 7384 case ixgbe_mac_X550EM_a_vf: 7385 return IXGBE_VFMRQC; 7386 default: 7387 return IXGBE_MRQC; 7388 } 7389 } 7390 7391 uint32_t 7392 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { 7393 switch (mac_type) { 7394 case ixgbe_mac_X550_vf: 7395 case ixgbe_mac_X550EM_x_vf: 7396 case ixgbe_mac_X550EM_a_vf: 7397 return IXGBE_VFRSSRK(i); 7398 default: 7399 return IXGBE_RSSRK(i); 7400 } 7401 } 7402 7403 bool 7404 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { 7405 switch (mac_type) { 7406 case ixgbe_mac_82599_vf: 7407 case ixgbe_mac_X540_vf: 7408 return 0; 7409 default: 7410 return 1; 7411 } 7412 } 7413 7414 static int 7415 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 7416 struct rte_eth_dcb_info *dcb_info) 7417 { 7418 struct ixgbe_dcb_config *dcb_config = 7419 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 7420 struct ixgbe_dcb_tc_config *tc; 7421 struct rte_eth_dcb_tc_queue_mapping *tc_queue; 7422 uint8_t nb_tcs; 7423 uint8_t i, j; 7424 7425 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) 7426 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; 7427 else 7428 dcb_info->nb_tcs = 1; 7429 7430 tc_queue = &dcb_info->tc_queue; 7431 nb_tcs = dcb_info->nb_tcs; 7432 7433 if (dcb_config->vt_mode) { /* vt is enabled*/ 7434 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 7435 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; 7436 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) 7437 dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; 7438 if (RTE_ETH_DEV_SRIOV(dev).active > 0) { 7439 for (j = 0; j < nb_tcs; j++) { 7440 tc_queue->tc_rxq[0][j].base = j; 7441 tc_queue->tc_rxq[0][j].nb_queue = 1; 7442 tc_queue->tc_txq[0][j].base = j; 7443 tc_queue->tc_txq[0][j].nb_queue = 1; 7444 } 7445 } else { 7446 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { 7447 for (j = 0; j < nb_tcs; j++) { 7448 tc_queue->tc_rxq[i][j].base = 7449 i * nb_tcs + j; 7450 tc_queue->tc_rxq[i][j].nb_queue = 1; 7451 tc_queue->tc_txq[i][j].base = 7452 i * nb_tcs + j; 7453 tc_queue->tc_txq[i][j].nb_queue = 1; 7454 } 7455 } 7456 } 7457 } else { /* vt is disabled*/ 7458 struct rte_eth_dcb_rx_conf *rx_conf = 7459 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 7460 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) 7461 dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; 7462 if (dcb_info->nb_tcs == ETH_4_TCS) { 7463 for (i = 0; i < dcb_info->nb_tcs; i++) { 7464 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; 7465 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 7466 } 7467 dcb_info->tc_queue.tc_txq[0][0].base = 0; 7468 dcb_info->tc_queue.tc_txq[0][1].base = 64; 7469 dcb_info->tc_queue.tc_txq[0][2].base = 96; 7470 dcb_info->tc_queue.tc_txq[0][3].base = 112; 7471 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; 7472 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 7473 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 7474 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 7475 } else if (dcb_info->nb_tcs == ETH_8_TCS) { 7476 for (i = 0; i < dcb_info->nb_tcs; i++) { 7477 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; 7478 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 7479 } 7480 dcb_info->tc_queue.tc_txq[0][0].base = 0; 7481 dcb_info->tc_queue.tc_txq[0][1].base = 32; 7482 dcb_info->tc_queue.tc_txq[0][2].base = 64; 7483 dcb_info->tc_queue.tc_txq[0][3].base = 80; 7484 dcb_info->tc_queue.tc_txq[0][4].base = 96; 7485 dcb_info->tc_queue.tc_txq[0][5].base = 104; 7486 dcb_info->tc_queue.tc_txq[0][6].base = 112; 7487 dcb_info->tc_queue.tc_txq[0][7].base = 120; 7488 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; 7489 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 7490 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 7491 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 7492 dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; 7493 dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; 7494 dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; 7495 dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; 7496 } 7497 } 7498 for (i = 0; i < dcb_info->nb_tcs; i++) { 7499 tc = &dcb_config->tc_config[i]; 7500 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent; 7501 } 7502 return 0; 7503 } 7504 7505 /* Update e-tag ether type */ 7506 static int 7507 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, 7508 uint16_t ether_type) 7509 { 7510 uint32_t etag_etype; 7511 7512 if (hw->mac.type != ixgbe_mac_X550 && 7513 hw->mac.type != ixgbe_mac_X550EM_x && 7514 hw->mac.type != ixgbe_mac_X550EM_a) { 7515 return -ENOTSUP; 7516 } 7517 7518 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7519 etag_etype &= ~IXGBE_ETAG_ETYPE_MASK; 7520 etag_etype |= ether_type; 7521 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7522 IXGBE_WRITE_FLUSH(hw); 7523 7524 return 0; 7525 } 7526 7527 /* Config l2 tunnel ether type */ 7528 static int 7529 ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev, 7530 struct rte_eth_l2_tunnel_conf *l2_tunnel) 7531 { 7532 int ret = 0; 7533 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7534 struct ixgbe_l2_tn_info *l2_tn_info = 7535 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7536 7537 if (l2_tunnel == NULL) 7538 return -EINVAL; 7539 7540 switch (l2_tunnel->l2_tunnel_type) { 7541 case RTE_L2_TUNNEL_TYPE_E_TAG: 7542 l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type; 7543 ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type); 7544 break; 7545 default: 7546 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7547 ret = -EINVAL; 7548 break; 7549 } 7550 7551 return ret; 7552 } 7553 7554 /* Enable e-tag tunnel */ 7555 static int 7556 ixgbe_e_tag_enable(struct ixgbe_hw *hw) 7557 { 7558 uint32_t etag_etype; 7559 7560 if (hw->mac.type != ixgbe_mac_X550 && 7561 hw->mac.type != ixgbe_mac_X550EM_x && 7562 hw->mac.type != ixgbe_mac_X550EM_a) { 7563 return -ENOTSUP; 7564 } 7565 7566 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7567 etag_etype |= IXGBE_ETAG_ETYPE_VALID; 7568 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7569 IXGBE_WRITE_FLUSH(hw); 7570 7571 return 0; 7572 } 7573 7574 /* Enable l2 tunnel */ 7575 static int 7576 ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev, 7577 enum rte_eth_tunnel_type l2_tunnel_type) 7578 { 7579 int ret = 0; 7580 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7581 struct ixgbe_l2_tn_info *l2_tn_info = 7582 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7583 7584 switch (l2_tunnel_type) { 7585 case RTE_L2_TUNNEL_TYPE_E_TAG: 7586 l2_tn_info->e_tag_en = TRUE; 7587 ret = ixgbe_e_tag_enable(hw); 7588 break; 7589 default: 7590 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7591 ret = -EINVAL; 7592 break; 7593 } 7594 7595 return ret; 7596 } 7597 7598 /* Disable e-tag tunnel */ 7599 static int 7600 ixgbe_e_tag_disable(struct ixgbe_hw *hw) 7601 { 7602 uint32_t etag_etype; 7603 7604 if (hw->mac.type != ixgbe_mac_X550 && 7605 hw->mac.type != ixgbe_mac_X550EM_x && 7606 hw->mac.type != ixgbe_mac_X550EM_a) { 7607 return -ENOTSUP; 7608 } 7609 7610 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7611 etag_etype &= ~IXGBE_ETAG_ETYPE_VALID; 7612 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7613 IXGBE_WRITE_FLUSH(hw); 7614 7615 return 0; 7616 } 7617 7618 /* Disable l2 tunnel */ 7619 static int 7620 ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev, 7621 enum rte_eth_tunnel_type l2_tunnel_type) 7622 { 7623 int ret = 0; 7624 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7625 struct ixgbe_l2_tn_info *l2_tn_info = 7626 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7627 7628 switch (l2_tunnel_type) { 7629 case RTE_L2_TUNNEL_TYPE_E_TAG: 7630 l2_tn_info->e_tag_en = FALSE; 7631 ret = ixgbe_e_tag_disable(hw); 7632 break; 7633 default: 7634 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7635 ret = -EINVAL; 7636 break; 7637 } 7638 7639 return ret; 7640 } 7641 7642 static int 7643 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, 7644 struct rte_eth_l2_tunnel_conf *l2_tunnel) 7645 { 7646 int ret = 0; 7647 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7648 uint32_t i, rar_entries; 7649 uint32_t rar_low, rar_high; 7650 7651 if (hw->mac.type != ixgbe_mac_X550 && 7652 hw->mac.type != ixgbe_mac_X550EM_x && 7653 hw->mac.type != ixgbe_mac_X550EM_a) { 7654 return -ENOTSUP; 7655 } 7656 7657 rar_entries = ixgbe_get_num_rx_addrs(hw); 7658 7659 for (i = 1; i < rar_entries; i++) { 7660 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 7661 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 7662 if ((rar_high & IXGBE_RAH_AV) && 7663 (rar_high & IXGBE_RAH_ADTYPE) && 7664 ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) == 7665 l2_tunnel->tunnel_id)) { 7666 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 7667 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 7668 7669 ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL); 7670 7671 return ret; 7672 } 7673 } 7674 7675 return ret; 7676 } 7677 7678 static int 7679 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, 7680 struct rte_eth_l2_tunnel_conf *l2_tunnel) 7681 { 7682 int ret = 0; 7683 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7684 uint32_t i, rar_entries; 7685 uint32_t rar_low, rar_high; 7686 7687 if (hw->mac.type != ixgbe_mac_X550 && 7688 hw->mac.type != ixgbe_mac_X550EM_x && 7689 hw->mac.type != ixgbe_mac_X550EM_a) { 7690 return -ENOTSUP; 7691 } 7692 7693 /* One entry for one tunnel. Try to remove potential existing entry. */ 7694 ixgbe_e_tag_filter_del(dev, l2_tunnel); 7695 7696 rar_entries = ixgbe_get_num_rx_addrs(hw); 7697 7698 for (i = 1; i < rar_entries; i++) { 7699 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 7700 if (rar_high & IXGBE_RAH_AV) { 7701 continue; 7702 } else { 7703 ixgbe_set_vmdq(hw, i, l2_tunnel->pool); 7704 rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; 7705 rar_low = l2_tunnel->tunnel_id; 7706 7707 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); 7708 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); 7709 7710 return ret; 7711 } 7712 } 7713 7714 PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." 7715 " Please remove a rule before adding a new one."); 7716 return -EINVAL; 7717 } 7718 7719 static inline struct ixgbe_l2_tn_filter * 7720 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info, 7721 struct ixgbe_l2_tn_key *key) 7722 { 7723 int ret; 7724 7725 ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); 7726 if (ret < 0) 7727 return NULL; 7728 7729 return l2_tn_info->hash_map[ret]; 7730 } 7731 7732 static inline int 7733 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7734 struct ixgbe_l2_tn_filter *l2_tn_filter) 7735 { 7736 int ret; 7737 7738 ret = rte_hash_add_key(l2_tn_info->hash_handle, 7739 &l2_tn_filter->key); 7740 7741 if (ret < 0) { 7742 PMD_DRV_LOG(ERR, 7743 "Failed to insert L2 tunnel filter" 7744 " to hash table %d!", 7745 ret); 7746 return ret; 7747 } 7748 7749 l2_tn_info->hash_map[ret] = l2_tn_filter; 7750 7751 TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 7752 7753 return 0; 7754 } 7755 7756 static inline int 7757 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7758 struct ixgbe_l2_tn_key *key) 7759 { 7760 int ret; 7761 struct ixgbe_l2_tn_filter *l2_tn_filter; 7762 7763 ret = rte_hash_del_key(l2_tn_info->hash_handle, key); 7764 7765 if (ret < 0) { 7766 PMD_DRV_LOG(ERR, 7767 "No such L2 tunnel filter to delete %d!", 7768 ret); 7769 return ret; 7770 } 7771 7772 l2_tn_filter = l2_tn_info->hash_map[ret]; 7773 l2_tn_info->hash_map[ret] = NULL; 7774 7775 TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 7776 rte_free(l2_tn_filter); 7777 7778 return 0; 7779 } 7780 7781 /* Add l2 tunnel filter */ 7782 int 7783 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, 7784 struct rte_eth_l2_tunnel_conf *l2_tunnel, 7785 bool restore) 7786 { 7787 int ret; 7788 struct ixgbe_l2_tn_info *l2_tn_info = 7789 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7790 struct ixgbe_l2_tn_key key; 7791 struct ixgbe_l2_tn_filter *node; 7792 7793 if (!restore) { 7794 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 7795 key.tn_id = l2_tunnel->tunnel_id; 7796 7797 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key); 7798 7799 if (node) { 7800 PMD_DRV_LOG(ERR, 7801 "The L2 tunnel filter already exists!"); 7802 return -EINVAL; 7803 } 7804 7805 node = rte_zmalloc("ixgbe_l2_tn", 7806 sizeof(struct ixgbe_l2_tn_filter), 7807 0); 7808 if (!node) 7809 return -ENOMEM; 7810 7811 rte_memcpy(&node->key, 7812 &key, 7813 sizeof(struct ixgbe_l2_tn_key)); 7814 node->pool = l2_tunnel->pool; 7815 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node); 7816 if (ret < 0) { 7817 rte_free(node); 7818 return ret; 7819 } 7820 } 7821 7822 switch (l2_tunnel->l2_tunnel_type) { 7823 case RTE_L2_TUNNEL_TYPE_E_TAG: 7824 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); 7825 break; 7826 default: 7827 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7828 ret = -EINVAL; 7829 break; 7830 } 7831 7832 if ((!restore) && (ret < 0)) 7833 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 7834 7835 return ret; 7836 } 7837 7838 /* Delete l2 tunnel filter */ 7839 int 7840 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, 7841 struct rte_eth_l2_tunnel_conf *l2_tunnel) 7842 { 7843 int ret; 7844 struct ixgbe_l2_tn_info *l2_tn_info = 7845 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7846 struct ixgbe_l2_tn_key key; 7847 7848 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 7849 key.tn_id = l2_tunnel->tunnel_id; 7850 ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 7851 if (ret < 0) 7852 return ret; 7853 7854 switch (l2_tunnel->l2_tunnel_type) { 7855 case RTE_L2_TUNNEL_TYPE_E_TAG: 7856 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel); 7857 break; 7858 default: 7859 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7860 ret = -EINVAL; 7861 break; 7862 } 7863 7864 return ret; 7865 } 7866 7867 /** 7868 * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter. 7869 * @dev: pointer to rte_eth_dev structure 7870 * @filter_op:operation will be taken. 7871 * @arg: a pointer to specific structure corresponding to the filter_op 7872 */ 7873 static int 7874 ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, 7875 enum rte_filter_op filter_op, 7876 void *arg) 7877 { 7878 int ret; 7879 7880 if (filter_op == RTE_ETH_FILTER_NOP) 7881 return 0; 7882 7883 if (arg == NULL) { 7884 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 7885 filter_op); 7886 return -EINVAL; 7887 } 7888 7889 switch (filter_op) { 7890 case RTE_ETH_FILTER_ADD: 7891 ret = ixgbe_dev_l2_tunnel_filter_add 7892 (dev, 7893 (struct rte_eth_l2_tunnel_conf *)arg, 7894 FALSE); 7895 break; 7896 case RTE_ETH_FILTER_DELETE: 7897 ret = ixgbe_dev_l2_tunnel_filter_del 7898 (dev, 7899 (struct rte_eth_l2_tunnel_conf *)arg); 7900 break; 7901 default: 7902 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 7903 ret = -EINVAL; 7904 break; 7905 } 7906 return ret; 7907 } 7908 7909 static int 7910 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) 7911 { 7912 int ret = 0; 7913 uint32_t ctrl; 7914 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7915 7916 if (hw->mac.type != ixgbe_mac_X550 && 7917 hw->mac.type != ixgbe_mac_X550EM_x && 7918 hw->mac.type != ixgbe_mac_X550EM_a) { 7919 return -ENOTSUP; 7920 } 7921 7922 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 7923 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 7924 if (en) 7925 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG; 7926 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 7927 7928 return ret; 7929 } 7930 7931 /* Enable l2 tunnel forwarding */ 7932 static int 7933 ixgbe_dev_l2_tunnel_forwarding_enable 7934 (struct rte_eth_dev *dev, 7935 enum rte_eth_tunnel_type l2_tunnel_type) 7936 { 7937 struct ixgbe_l2_tn_info *l2_tn_info = 7938 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7939 int ret = 0; 7940 7941 switch (l2_tunnel_type) { 7942 case RTE_L2_TUNNEL_TYPE_E_TAG: 7943 l2_tn_info->e_tag_fwd_en = TRUE; 7944 ret = ixgbe_e_tag_forwarding_en_dis(dev, 1); 7945 break; 7946 default: 7947 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7948 ret = -EINVAL; 7949 break; 7950 } 7951 7952 return ret; 7953 } 7954 7955 /* Disable l2 tunnel forwarding */ 7956 static int 7957 ixgbe_dev_l2_tunnel_forwarding_disable 7958 (struct rte_eth_dev *dev, 7959 enum rte_eth_tunnel_type l2_tunnel_type) 7960 { 7961 struct ixgbe_l2_tn_info *l2_tn_info = 7962 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7963 int ret = 0; 7964 7965 switch (l2_tunnel_type) { 7966 case RTE_L2_TUNNEL_TYPE_E_TAG: 7967 l2_tn_info->e_tag_fwd_en = FALSE; 7968 ret = ixgbe_e_tag_forwarding_en_dis(dev, 0); 7969 break; 7970 default: 7971 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7972 ret = -EINVAL; 7973 break; 7974 } 7975 7976 return ret; 7977 } 7978 7979 static int 7980 ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev, 7981 struct rte_eth_l2_tunnel_conf *l2_tunnel, 7982 bool en) 7983 { 7984 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 7985 int ret = 0; 7986 uint32_t vmtir, vmvir; 7987 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7988 7989 if (l2_tunnel->vf_id >= pci_dev->max_vfs) { 7990 PMD_DRV_LOG(ERR, 7991 "VF id %u should be less than %u", 7992 l2_tunnel->vf_id, 7993 pci_dev->max_vfs); 7994 return -EINVAL; 7995 } 7996 7997 if (hw->mac.type != ixgbe_mac_X550 && 7998 hw->mac.type != ixgbe_mac_X550EM_x && 7999 hw->mac.type != ixgbe_mac_X550EM_a) { 8000 return -ENOTSUP; 8001 } 8002 8003 if (en) 8004 vmtir = l2_tunnel->tunnel_id; 8005 else 8006 vmtir = 0; 8007 8008 IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir); 8009 8010 vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id)); 8011 vmvir &= ~IXGBE_VMVIR_TAGA_MASK; 8012 if (en) 8013 vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT; 8014 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir); 8015 8016 return ret; 8017 } 8018 8019 /* Enable l2 tunnel tag insertion */ 8020 static int 8021 ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev, 8022 struct rte_eth_l2_tunnel_conf *l2_tunnel) 8023 { 8024 int ret = 0; 8025 8026 switch (l2_tunnel->l2_tunnel_type) { 8027 case RTE_L2_TUNNEL_TYPE_E_TAG: 8028 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1); 8029 break; 8030 default: 8031 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 8032 ret = -EINVAL; 8033 break; 8034 } 8035 8036 return ret; 8037 } 8038 8039 /* Disable l2 tunnel tag insertion */ 8040 static int 8041 ixgbe_dev_l2_tunnel_insertion_disable 8042 (struct rte_eth_dev *dev, 8043 struct rte_eth_l2_tunnel_conf *l2_tunnel) 8044 { 8045 int ret = 0; 8046 8047 switch (l2_tunnel->l2_tunnel_type) { 8048 case RTE_L2_TUNNEL_TYPE_E_TAG: 8049 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0); 8050 break; 8051 default: 8052 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 8053 ret = -EINVAL; 8054 break; 8055 } 8056 8057 return ret; 8058 } 8059 8060 static int 8061 ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev, 8062 bool en) 8063 { 8064 int ret = 0; 8065 uint32_t qde; 8066 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8067 8068 if (hw->mac.type != ixgbe_mac_X550 && 8069 hw->mac.type != ixgbe_mac_X550EM_x && 8070 hw->mac.type != ixgbe_mac_X550EM_a) { 8071 return -ENOTSUP; 8072 } 8073 8074 qde = IXGBE_READ_REG(hw, IXGBE_QDE); 8075 if (en) 8076 qde |= IXGBE_QDE_STRIP_TAG; 8077 else 8078 qde &= ~IXGBE_QDE_STRIP_TAG; 8079 qde &= ~IXGBE_QDE_READ; 8080 qde |= IXGBE_QDE_WRITE; 8081 IXGBE_WRITE_REG(hw, IXGBE_QDE, qde); 8082 8083 return ret; 8084 } 8085 8086 /* Enable l2 tunnel tag stripping */ 8087 static int 8088 ixgbe_dev_l2_tunnel_stripping_enable 8089 (struct rte_eth_dev *dev, 8090 enum rte_eth_tunnel_type l2_tunnel_type) 8091 { 8092 int ret = 0; 8093 8094 switch (l2_tunnel_type) { 8095 case RTE_L2_TUNNEL_TYPE_E_TAG: 8096 ret = ixgbe_e_tag_stripping_en_dis(dev, 1); 8097 break; 8098 default: 8099 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 8100 ret = -EINVAL; 8101 break; 8102 } 8103 8104 return ret; 8105 } 8106 8107 /* Disable l2 tunnel tag stripping */ 8108 static int 8109 ixgbe_dev_l2_tunnel_stripping_disable 8110 (struct rte_eth_dev *dev, 8111 enum rte_eth_tunnel_type l2_tunnel_type) 8112 { 8113 int ret = 0; 8114 8115 switch (l2_tunnel_type) { 8116 case RTE_L2_TUNNEL_TYPE_E_TAG: 8117 ret = ixgbe_e_tag_stripping_en_dis(dev, 0); 8118 break; 8119 default: 8120 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 8121 ret = -EINVAL; 8122 break; 8123 } 8124 8125 return ret; 8126 } 8127 8128 /* Enable/disable l2 tunnel offload functions */ 8129 static int 8130 ixgbe_dev_l2_tunnel_offload_set 8131 (struct rte_eth_dev *dev, 8132 struct rte_eth_l2_tunnel_conf *l2_tunnel, 8133 uint32_t mask, 8134 uint8_t en) 8135 { 8136 int ret = 0; 8137 8138 if (l2_tunnel == NULL) 8139 return -EINVAL; 8140 8141 ret = -EINVAL; 8142 if (mask & ETH_L2_TUNNEL_ENABLE_MASK) { 8143 if (en) 8144 ret = ixgbe_dev_l2_tunnel_enable( 8145 dev, 8146 l2_tunnel->l2_tunnel_type); 8147 else 8148 ret = ixgbe_dev_l2_tunnel_disable( 8149 dev, 8150 l2_tunnel->l2_tunnel_type); 8151 } 8152 8153 if (mask & ETH_L2_TUNNEL_INSERTION_MASK) { 8154 if (en) 8155 ret = ixgbe_dev_l2_tunnel_insertion_enable( 8156 dev, 8157 l2_tunnel); 8158 else 8159 ret = ixgbe_dev_l2_tunnel_insertion_disable( 8160 dev, 8161 l2_tunnel); 8162 } 8163 8164 if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) { 8165 if (en) 8166 ret = ixgbe_dev_l2_tunnel_stripping_enable( 8167 dev, 8168 l2_tunnel->l2_tunnel_type); 8169 else 8170 ret = ixgbe_dev_l2_tunnel_stripping_disable( 8171 dev, 8172 l2_tunnel->l2_tunnel_type); 8173 } 8174 8175 if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) { 8176 if (en) 8177 ret = ixgbe_dev_l2_tunnel_forwarding_enable( 8178 dev, 8179 l2_tunnel->l2_tunnel_type); 8180 else 8181 ret = ixgbe_dev_l2_tunnel_forwarding_disable( 8182 dev, 8183 l2_tunnel->l2_tunnel_type); 8184 } 8185 8186 return ret; 8187 } 8188 8189 static int 8190 ixgbe_update_vxlan_port(struct ixgbe_hw *hw, 8191 uint16_t port) 8192 { 8193 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port); 8194 IXGBE_WRITE_FLUSH(hw); 8195 8196 return 0; 8197 } 8198 8199 /* There's only one register for VxLAN UDP port. 8200 * So, we cannot add several ports. Will update it. 8201 */ 8202 static int 8203 ixgbe_add_vxlan_port(struct ixgbe_hw *hw, 8204 uint16_t port) 8205 { 8206 if (port == 0) { 8207 PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); 8208 return -EINVAL; 8209 } 8210 8211 return ixgbe_update_vxlan_port(hw, port); 8212 } 8213 8214 /* We cannot delete the VxLAN port. For there's a register for VxLAN 8215 * UDP port, it must have a value. 8216 * So, will reset it to the original value 0. 8217 */ 8218 static int 8219 ixgbe_del_vxlan_port(struct ixgbe_hw *hw, 8220 uint16_t port) 8221 { 8222 uint16_t cur_port; 8223 8224 cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL); 8225 8226 if (cur_port != port) { 8227 PMD_DRV_LOG(ERR, "Port %u does not exist.", port); 8228 return -EINVAL; 8229 } 8230 8231 return ixgbe_update_vxlan_port(hw, 0); 8232 } 8233 8234 /* Add UDP tunneling port */ 8235 static int 8236 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 8237 struct rte_eth_udp_tunnel *udp_tunnel) 8238 { 8239 int ret = 0; 8240 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8241 8242 if (hw->mac.type != ixgbe_mac_X550 && 8243 hw->mac.type != ixgbe_mac_X550EM_x && 8244 hw->mac.type != ixgbe_mac_X550EM_a) { 8245 return -ENOTSUP; 8246 } 8247 8248 if (udp_tunnel == NULL) 8249 return -EINVAL; 8250 8251 switch (udp_tunnel->prot_type) { 8252 case RTE_TUNNEL_TYPE_VXLAN: 8253 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port); 8254 break; 8255 8256 case RTE_TUNNEL_TYPE_GENEVE: 8257 case RTE_TUNNEL_TYPE_TEREDO: 8258 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 8259 ret = -EINVAL; 8260 break; 8261 8262 default: 8263 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 8264 ret = -EINVAL; 8265 break; 8266 } 8267 8268 return ret; 8269 } 8270 8271 /* Remove UDP tunneling port */ 8272 static int 8273 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 8274 struct rte_eth_udp_tunnel *udp_tunnel) 8275 { 8276 int ret = 0; 8277 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8278 8279 if (hw->mac.type != ixgbe_mac_X550 && 8280 hw->mac.type != ixgbe_mac_X550EM_x && 8281 hw->mac.type != ixgbe_mac_X550EM_a) { 8282 return -ENOTSUP; 8283 } 8284 8285 if (udp_tunnel == NULL) 8286 return -EINVAL; 8287 8288 switch (udp_tunnel->prot_type) { 8289 case RTE_TUNNEL_TYPE_VXLAN: 8290 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port); 8291 break; 8292 case RTE_TUNNEL_TYPE_GENEVE: 8293 case RTE_TUNNEL_TYPE_TEREDO: 8294 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 8295 ret = -EINVAL; 8296 break; 8297 default: 8298 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 8299 ret = -EINVAL; 8300 break; 8301 } 8302 8303 return ret; 8304 } 8305 8306 static void 8307 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) 8308 { 8309 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8310 8311 hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI); 8312 } 8313 8314 static void 8315 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) 8316 { 8317 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8318 8319 hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI); 8320 } 8321 8322 static void ixgbevf_mbx_process(struct rte_eth_dev *dev) 8323 { 8324 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8325 u32 in_msg = 0; 8326 8327 /* peek the message first */ 8328 in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM); 8329 8330 /* PF reset VF event */ 8331 if (in_msg == IXGBE_PF_CONTROL_MSG) { 8332 /* dummy mbx read to ack pf */ 8333 if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) 8334 return; 8335 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 8336 NULL); 8337 } 8338 } 8339 8340 static int 8341 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) 8342 { 8343 uint32_t eicr; 8344 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8345 struct ixgbe_interrupt *intr = 8346 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 8347 ixgbevf_intr_disable(dev); 8348 8349 /* read-on-clear nic registers here */ 8350 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); 8351 intr->flags = 0; 8352 8353 /* only one misc vector supported - mailbox */ 8354 eicr &= IXGBE_VTEICR_MASK; 8355 if (eicr == IXGBE_MISC_VEC_ID) 8356 intr->flags |= IXGBE_FLAG_MAILBOX; 8357 8358 return 0; 8359 } 8360 8361 static int 8362 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) 8363 { 8364 struct ixgbe_interrupt *intr = 8365 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 8366 8367 if (intr->flags & IXGBE_FLAG_MAILBOX) { 8368 ixgbevf_mbx_process(dev); 8369 intr->flags &= ~IXGBE_FLAG_MAILBOX; 8370 } 8371 8372 ixgbevf_intr_enable(dev); 8373 8374 return 0; 8375 } 8376 8377 static void 8378 ixgbevf_dev_interrupt_handler(void *param) 8379 { 8380 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 8381 8382 ixgbevf_dev_interrupt_get_status(dev); 8383 ixgbevf_dev_interrupt_action(dev); 8384 } 8385 8386 /** 8387 * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path 8388 * @hw: pointer to hardware structure 8389 * 8390 * Stops the transmit data path and waits for the HW to internally empty 8391 * the Tx security block 8392 **/ 8393 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw) 8394 { 8395 #define IXGBE_MAX_SECTX_POLL 40 8396 8397 int i; 8398 int sectxreg; 8399 8400 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8401 sectxreg |= IXGBE_SECTXCTRL_TX_DIS; 8402 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); 8403 for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) { 8404 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); 8405 if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY) 8406 break; 8407 /* Use interrupt-safe sleep just in case */ 8408 usec_delay(1000); 8409 } 8410 8411 /* For informational purposes only */ 8412 if (i >= IXGBE_MAX_SECTX_POLL) 8413 PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security " 8414 "path fully disabled. Continuing with init."); 8415 8416 return IXGBE_SUCCESS; 8417 } 8418 8419 /** 8420 * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path 8421 * @hw: pointer to hardware structure 8422 * 8423 * Enables the transmit data path. 8424 **/ 8425 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw) 8426 { 8427 uint32_t sectxreg; 8428 8429 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8430 sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS; 8431 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); 8432 IXGBE_WRITE_FLUSH(hw); 8433 8434 return IXGBE_SUCCESS; 8435 } 8436 8437 /* restore n-tuple filter */ 8438 static inline void 8439 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev) 8440 { 8441 struct ixgbe_filter_info *filter_info = 8442 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8443 struct ixgbe_5tuple_filter *node; 8444 8445 TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) { 8446 ixgbe_inject_5tuple_filter(dev, node); 8447 } 8448 } 8449 8450 /* restore ethernet type filter */ 8451 static inline void 8452 ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev) 8453 { 8454 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8455 struct ixgbe_filter_info *filter_info = 8456 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8457 int i; 8458 8459 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 8460 if (filter_info->ethertype_mask & (1 << i)) { 8461 IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 8462 filter_info->ethertype_filters[i].etqf); 8463 IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 8464 filter_info->ethertype_filters[i].etqs); 8465 IXGBE_WRITE_FLUSH(hw); 8466 } 8467 } 8468 } 8469 8470 /* restore SYN filter */ 8471 static inline void 8472 ixgbe_syn_filter_restore(struct rte_eth_dev *dev) 8473 { 8474 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8475 struct ixgbe_filter_info *filter_info = 8476 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8477 uint32_t synqf; 8478 8479 synqf = filter_info->syn_info; 8480 8481 if (synqf & IXGBE_SYN_FILTER_ENABLE) { 8482 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 8483 IXGBE_WRITE_FLUSH(hw); 8484 } 8485 } 8486 8487 /* restore L2 tunnel filter */ 8488 static inline void 8489 ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev) 8490 { 8491 struct ixgbe_l2_tn_info *l2_tn_info = 8492 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8493 struct ixgbe_l2_tn_filter *node; 8494 struct rte_eth_l2_tunnel_conf l2_tn_conf; 8495 8496 TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) { 8497 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type; 8498 l2_tn_conf.tunnel_id = node->key.tn_id; 8499 l2_tn_conf.pool = node->pool; 8500 (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE); 8501 } 8502 } 8503 8504 /* restore rss filter */ 8505 static inline void 8506 ixgbe_rss_filter_restore(struct rte_eth_dev *dev) 8507 { 8508 struct ixgbe_filter_info *filter_info = 8509 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8510 8511 if (filter_info->rss_info.conf.queue_num) 8512 ixgbe_config_rss_filter(dev, 8513 &filter_info->rss_info, TRUE); 8514 } 8515 8516 static int 8517 ixgbe_filter_restore(struct rte_eth_dev *dev) 8518 { 8519 ixgbe_ntuple_filter_restore(dev); 8520 ixgbe_ethertype_filter_restore(dev); 8521 ixgbe_syn_filter_restore(dev); 8522 ixgbe_fdir_filter_restore(dev); 8523 ixgbe_l2_tn_filter_restore(dev); 8524 ixgbe_rss_filter_restore(dev); 8525 8526 return 0; 8527 } 8528 8529 static void 8530 ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev) 8531 { 8532 struct ixgbe_l2_tn_info *l2_tn_info = 8533 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8534 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8535 8536 if (l2_tn_info->e_tag_en) 8537 (void)ixgbe_e_tag_enable(hw); 8538 8539 if (l2_tn_info->e_tag_fwd_en) 8540 (void)ixgbe_e_tag_forwarding_en_dis(dev, 1); 8541 8542 (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type); 8543 } 8544 8545 /* remove all the n-tuple filters */ 8546 void 8547 ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev) 8548 { 8549 struct ixgbe_filter_info *filter_info = 8550 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8551 struct ixgbe_5tuple_filter *p_5tuple; 8552 8553 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) 8554 ixgbe_remove_5tuple_filter(dev, p_5tuple); 8555 } 8556 8557 /* remove all the ether type filters */ 8558 void 8559 ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev) 8560 { 8561 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8562 struct ixgbe_filter_info *filter_info = 8563 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8564 int i; 8565 8566 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 8567 if (filter_info->ethertype_mask & (1 << i) && 8568 !filter_info->ethertype_filters[i].conf) { 8569 (void)ixgbe_ethertype_filter_remove(filter_info, 8570 (uint8_t)i); 8571 IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0); 8572 IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0); 8573 IXGBE_WRITE_FLUSH(hw); 8574 } 8575 } 8576 } 8577 8578 /* remove the SYN filter */ 8579 void 8580 ixgbe_clear_syn_filter(struct rte_eth_dev *dev) 8581 { 8582 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8583 struct ixgbe_filter_info *filter_info = 8584 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8585 8586 if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) { 8587 filter_info->syn_info = 0; 8588 8589 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0); 8590 IXGBE_WRITE_FLUSH(hw); 8591 } 8592 } 8593 8594 /* remove all the L2 tunnel filters */ 8595 int 8596 ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev) 8597 { 8598 struct ixgbe_l2_tn_info *l2_tn_info = 8599 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8600 struct ixgbe_l2_tn_filter *l2_tn_filter; 8601 struct rte_eth_l2_tunnel_conf l2_tn_conf; 8602 int ret = 0; 8603 8604 while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { 8605 l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type; 8606 l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id; 8607 l2_tn_conf.pool = l2_tn_filter->pool; 8608 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf); 8609 if (ret < 0) 8610 return ret; 8611 } 8612 8613 return 0; 8614 } 8615 8616 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd); 8617 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map); 8618 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci"); 8619 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd); 8620 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map); 8621 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci"); 8622 8623 RTE_INIT(ixgbe_init_log) 8624 { 8625 ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init"); 8626 if (ixgbe_logtype_init >= 0) 8627 rte_log_set_level(ixgbe_logtype_init, RTE_LOG_NOTICE); 8628 ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver"); 8629 if (ixgbe_logtype_driver >= 0) 8630 rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE); 8631 } 8632