/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_string_fns.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_kvargs.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"

/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
#define IXGBE_FC_HI 0x80

/*
 * Low threshold controlling when to start sending XON frames. This value is
 * in units of 1024 bytes.
 */
#define IXGBE_FC_LO 0x40

/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680

/* Default value of Max Rx Queue */
#define IXGBE_MAX_RX_QUEUE_NUM 128

#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

#define IXGBE_MMW_SIZE_DEFAULT     0x4
#define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14
#define IXGBE_MAX_RING_DESC        4096 /* replicate define from rxtx */

/*
 * Default values for RX/TX configuration
 */
#define IXGBE_DEFAULT_RX_FREE_THRESH  32
#define IXGBE_DEFAULT_RX_PTHRESH      8
#define IXGBE_DEFAULT_RX_HTHRESH      8
#define IXGBE_DEFAULT_RX_WTHRESH      0

#define IXGBE_DEFAULT_TX_FREE_THRESH  32
#define IXGBE_DEFAULT_TX_PTHRESH      32
#define IXGBE_DEFAULT_TX_HTHRESH      0
#define IXGBE_DEFAULT_TX_WTHRESH      0
#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32

/* Bit shift and mask */
#define IXGBE_4_BIT_WIDTH (CHAR_BIT / 2)
#define IXGBE_4_BIT_MASK  RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
#define IXGBE_8_BIT_WIDTH CHAR_BIT
#define IXGBE_8_BIT_MASK  UINT8_MAX

#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
/* Additional timesync values. */
#define NSEC_PER_SEC             1000000000L
#define IXGBE_INCVAL_10GB        0x66666666
#define IXGBE_INCVAL_1GB         0x40000000
#define IXGBE_INCVAL_100         0x50000000
#define IXGBE_INCVAL_SHIFT_10GB  28
#define IXGBE_INCVAL_SHIFT_1GB   24
#define IXGBE_INCVAL_SHIFT_100   21
#define IXGBE_INCVAL_SHIFT_82599 7
#define IXGBE_INCPER_SHIFT_82599 24

#define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL

#define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000
#define IXGBE_ETAG_ETYPE               0x00005084
#define IXGBE_ETAG_ETYPE_MASK          0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID         0x80000000
#define IXGBE_RAH_ADTYPE               0x40000000
#define IXGBE_RAL_ETAG_FILTER_MASK     0x00003fff
#define IXGBE_VMVIR_TAGA_MASK          0x18000000
#define IXGBE_VMVIR_TAGA_ETAG_INSERT   0x08000000
#define IXGBE_VMTIR(_i)                (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG            0x00000004
#define IXGBE_VTEICR_MASK              0x07

#define IXGBE_EXVET_VET_EXT_SHIFT 16
#define IXGBE_DMATXCTL_VT_MASK    0xFFFF0000

#define IXGBEVF_DEVARG_PFLINK_FULLCHK "pflink_fullchk"

static const char * const ixgbevf_valid_arguments[] = {
	IXGBEVF_DEVARG_PFLINK_FULLCHK,
	NULL
};

static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
static void ixgbe_dev_close(struct rte_eth_dev *dev);
static int ixgbe_dev_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats, unsigned n);
static int
ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int n);
static int ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);
static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names, unsigned limit);
static int ixgbe_dev_xstats_get_names_by_id(
		struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		const uint64_t *ids,
		unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
		uint16_t queue_id,
		uint8_t stat_idx,
		uint8_t is_rx);
static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
		size_t fw_size);
static int ixgbe_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int ixgbevf_dev_info_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type,
		uint16_t tpid_id);
static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
		uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
		int on);
static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
		int mask);
static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);

static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_fc_conf *fc_conf);
static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
		struct rte_eth_pfc_conf *pfc_conf);
static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
static void *ixgbe_dev_setup_link_thread_handler(void *param);
static void ixgbe_dev_cancel_link_thread(struct rte_eth_dev *dev);

static int ixgbe_add_rar(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
		struct rte_pci_driver *drv);

/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static int ixgbevf_dev_reset(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats);
static int ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
		uint16_t queue, int on);
static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
		uint16_t queue_id);
static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
		uint16_t queue_id);
static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
		uint8_t queue, uint8_t msix_vector);
static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
static int ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);

/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
		struct rte_eth_mirror_conf *mirror_conf,
		uint8_t rule_id, uint8_t on);
static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
		uint8_t rule_id);
static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
		uint16_t queue_id);
static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
		uint16_t queue_id);
static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
		uint8_t queue, uint8_t msix_vector);
static void ixgbe_configure_msix(struct rte_eth_dev *dev);

static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
		struct rte_eth_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
		enum rte_filter_op filter_op,
		void *arg);
static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
		struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
		struct ixgbe_5tuple_filter *filter);
static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
		enum rte_filter_op filter_op,
		void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
		struct rte_eth_ntuple_filter *filter);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
		enum rte_filter_op filter_op,
		void *arg);
static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
		struct rte_eth_ethertype_filter *filter);
static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type filter_type,
		enum rte_filter_op filter_op,
		void *arg);
static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);

static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);
static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
		struct rte_eth_dcb_info *dcb_info);

static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
static int ixgbe_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);
static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *eeprom);

static int ixgbe_get_module_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_module_info *modinfo);
static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
		struct rte_dev_eeprom_info *info);

static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
		struct rte_dev_reg_info *regs);

static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
		struct timespec *timestamp,
		uint32_t flags);
static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
		struct timespec *timestamp);
static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
		struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
		const struct timespec *timestamp);
static void ixgbevf_dev_interrupt_handler(void *param);

static int ixgbe_dev_l2_tunnel_eth_type_conf
	(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
static int ixgbe_dev_l2_tunnel_offload_set
	(struct rte_eth_dev *dev,
	 struct rte_eth_l2_tunnel_conf *l2_tunnel,
	 uint32_t mask,
	 uint8_t en);
static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
		enum rte_filter_op filter_op,
		void *arg);

static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
		struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_filter_restore(struct rte_eth_dev *dev);
static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
static int ixgbe_wait_for_link_up(struct ixgbe_hw *hw);

/*
 * Define VF Stats MACRO for Non "cleared on read" register
 */
#define UPDATE_VF_STAT(reg, last, cur)                     \
{                                                          \
	uint32_t latest = IXGBE_READ_REG(hw, reg);         \
	cur += (latest - last) & UINT_MAX;                 \
	last = latest;                                     \
}

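/*
 * 36-bit VF counters are split across an LSB and an MSB register; the
 * macro below takes the delta modulo 2^36 so the accumulated value stays
 * correct across a hardware counter rollover.
 */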
#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                 \
{                                                                 \
	u64 new_lsb = IXGBE_READ_REG(hw, lsb);                    \
	u64 new_msb = IXGBE_READ_REG(hw, msb);                    \
	u64 latest = ((new_msb << 32) | new_lsb);                 \
	cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL;  \
	last = latest;                                            \
}

#define IXGBE_SET_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] |= 1 << bit;\
	} while (0)

#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(h)->bitmap[idx] &= ~(1 << bit);\
	} while (0)

#define IXGBE_GET_HWSTRIP(h, q, r) do {\
		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
		(r) = (h)->bitmap[idx] >> bit & 1;\
	} while (0)

int ixgbe_logtype_init;
int ixgbe_logtype_driver;

#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
int ixgbe_logtype_rx;
#endif
#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
int ixgbe_logtype_tx;
#endif
#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
int ixgbe_logtype_tx_free;
#endif

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_ixgbe_map[] = {
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI) },
#ifdef RTE_LIBRTE_IXGBE_BYPASS
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
#endif
	{ .vendor_id = 0, /* sentinel */ },
};

/*
 * The set of PCI devices this driver supports (for 82599 VF)
 */
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_TXD_ALIGN,
	.nb_seg_max = IXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops ixgbe_eth_dev_ops = {
	.dev_configure = ixgbe_dev_configure,
	.dev_start = ixgbe_dev_start,
	.dev_stop = ixgbe_dev_stop,
	.dev_set_link_up = ixgbe_dev_set_link_up,
	.dev_set_link_down = ixgbe_dev_set_link_down,
	.dev_close = ixgbe_dev_close,
	.dev_reset = ixgbe_dev_reset,
	.promiscuous_enable = ixgbe_dev_promiscuous_enable,
	.promiscuous_disable = ixgbe_dev_promiscuous_disable,
	.allmulticast_enable = ixgbe_dev_allmulticast_enable,
	.allmulticast_disable = ixgbe_dev_allmulticast_disable,
	.link_update = ixgbe_dev_link_update,
	.stats_get = ixgbe_dev_stats_get,
	.xstats_get = ixgbe_dev_xstats_get,
	.xstats_get_by_id = ixgbe_dev_xstats_get_by_id,
	.stats_reset = ixgbe_dev_stats_reset,
	.xstats_reset = ixgbe_dev_xstats_reset,
	.xstats_get_names = ixgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
	.fw_version_get = ixgbe_fw_version_get,
	.dev_infos_get = ixgbe_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set = ixgbe_dev_mtu_set,
	.vlan_filter_set = ixgbe_vlan_filter_set,
	.vlan_tpid_set = ixgbe_vlan_tpid_set,
	.vlan_offload_set = ixgbe_vlan_offload_set,
	.vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
	.rx_queue_start = ixgbe_dev_rx_queue_start,
	.rx_queue_stop = ixgbe_dev_rx_queue_stop,
	.tx_queue_start = ixgbe_dev_tx_queue_start,
	.tx_queue_stop = ixgbe_dev_tx_queue_stop,
	.rx_queue_setup = ixgbe_dev_rx_queue_setup,
	.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
	.rx_queue_release = ixgbe_dev_rx_queue_release,
	.rx_queue_count = ixgbe_dev_rx_queue_count,
	.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
	.rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
	.tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
	.tx_queue_setup = ixgbe_dev_tx_queue_setup,
	.tx_queue_release = ixgbe_dev_tx_queue_release,
	.dev_led_on = ixgbe_dev_led_on,
	.dev_led_off = ixgbe_dev_led_off,
	.flow_ctrl_get = ixgbe_flow_ctrl_get,
	.flow_ctrl_set = ixgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
	.mac_addr_add = ixgbe_add_rar,
	.mac_addr_remove = ixgbe_remove_rar,
	.mac_addr_set = ixgbe_set_default_mac_addr,
	.uc_hash_table_set = ixgbe_uc_hash_table_set,
	.uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
	.mirror_rule_set = ixgbe_mirror_rule_set,
	.mirror_rule_reset = ixgbe_mirror_rule_reset,
	.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
	.reta_update = ixgbe_dev_rss_reta_update,
	.reta_query = ixgbe_dev_rss_reta_query,
	.rss_hash_update = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
	.filter_ctrl = ixgbe_dev_filter_ctrl,
	.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get = ixgbe_rxq_info_get,
	.txq_info_get = ixgbe_txq_info_get,
	.timesync_enable = ixgbe_timesync_enable,
	.timesync_disable = ixgbe_timesync_disable,
	.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
	.get_reg = ixgbe_get_regs,
	.get_eeprom_length = ixgbe_get_eeprom_length,
	.get_eeprom = ixgbe_get_eeprom,
	.set_eeprom = ixgbe_set_eeprom,
	.get_module_info = ixgbe_get_module_info,
	.get_module_eeprom = ixgbe_get_module_eeprom,
	.get_dcb_info = ixgbe_dev_get_dcb_info,
	.timesync_adjust_time = ixgbe_timesync_adjust_time,
	.timesync_read_time = ixgbe_timesync_read_time,
	.timesync_write_time = ixgbe_timesync_write_time,
	.l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
	.l2_tunnel_offload_set = ixgbe_dev_l2_tunnel_offload_set,
	.udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del,
	.tm_ops_get = ixgbe_tm_ops_get,
};

/*
 * dev_ops for virtual function, bare necessities for basic vf
 * operation have been implemented
 */
static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
	.dev_configure = ixgbevf_dev_configure,
	.dev_start = ixgbevf_dev_start,
	.dev_stop = ixgbevf_dev_stop,
	.link_update = ixgbevf_dev_link_update,
	.stats_get = ixgbevf_dev_stats_get,
	.xstats_get = ixgbevf_dev_xstats_get,
	.stats_reset = ixgbevf_dev_stats_reset,
	.xstats_reset = ixgbevf_dev_stats_reset,
	.xstats_get_names = ixgbevf_dev_xstats_get_names,
	.dev_close = ixgbevf_dev_close,
	.dev_reset = ixgbevf_dev_reset,
	.promiscuous_enable = ixgbevf_dev_promiscuous_enable,
	.promiscuous_disable = ixgbevf_dev_promiscuous_disable,
	.allmulticast_enable = ixgbevf_dev_allmulticast_enable,
	.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
	.dev_infos_get = ixgbevf_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set = ixgbevf_dev_set_mtu,
	.vlan_filter_set = ixgbevf_vlan_filter_set,
	.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
	.vlan_offload_set = ixgbevf_vlan_offload_set,
	.rx_queue_setup = ixgbe_dev_rx_queue_setup,
	.rx_queue_release = ixgbe_dev_rx_queue_release,
	.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
	.rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
	.tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
	.tx_queue_setup = ixgbe_dev_tx_queue_setup,
	.tx_queue_release = ixgbe_dev_tx_queue_release,
	.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
	.mac_addr_add = ixgbevf_add_mac_addr,
	.mac_addr_remove = ixgbevf_remove_mac_addr,
	.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
	.rxq_info_get = ixgbe_rxq_info_get,
	.txq_info_get = ixgbe_txq_info_get,
	.mac_addr_set = ixgbevf_set_default_mac_addr,
	.get_reg = ixgbevf_get_regs,
	.reta_update = ixgbe_dev_rss_reta_update,
	.reta_query = ixgbe_dev_rss_reta_query,
	.rss_hash_update = ixgbe_dev_rss_hash_update,
	.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
};

/* store statistics names and their offsets in the stats structure */
struct rte_ixgbe_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
	{"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
	{"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
	{"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
	{"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
	{"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
	{"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
	{"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
	{"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
	{"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
	{"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, prc1522)},
	{"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
	{"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
	{"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
	{"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
	{"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
	{"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
	{"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
	{"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
	{"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
	{"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
	{"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, ptc1522)},
	{"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
	{"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
	{"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},

	{"flow_director_added_filters", offsetof(struct ixgbe_hw_stats, fdirustat_add)},
	{"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats, fdirustat_remove)},
	{"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats, fdirfstat_fadd)},
	{"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats, fdirfstat_fremove)},
	{"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats, fdirmatch)},
	{"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats, fdirmiss)},

	{"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
	{"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
	{"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, fclast)},
	{"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
	{"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
	{"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
	{"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
	{"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats, fcoe_noddp)},
	{"rx_fcoe_no_direct_data_placement_ext_buff",
		offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},

	{"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
	{"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
	{"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
	{"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
	{"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
};

#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
			   sizeof(rte_ixgbe_stats_strings[0]))

/* MACsec statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats, out_pkts_untagged)},
	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats, out_pkts_encrypted)},
	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats, out_pkts_protected)},
	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats, out_octets_encrypted)},
	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats, out_octets_protected)},
	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats, in_pkts_untagged)},
	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats, in_pkts_badtag)},
	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats, in_pkts_nosci)},
	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats, in_pkts_unknownsci)},
	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats, in_octets_decrypted)},
	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats, in_octets_validated)},
	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats, in_pkts_unchecked)},
	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats, in_pkts_delayed)},
	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats, in_pkts_late)},
	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats, in_pkts_ok)},
	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats, in_pkts_invalid)},
	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats, in_pkts_notvalid)},
	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats, in_pkts_unusedsa)},
	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats, in_pkts_notusingsa)},
};

#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
			       sizeof(rte_ixgbe_macsec_strings[0]))

/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
	{"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
};

#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
				 sizeof(rte_ixgbe_rxq_strings[0]))
#define IXGBE_NB_RXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
	{"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats, pxon2offc)},
};

#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
				 sizeof(rte_ixgbe_txq_strings[0]))
#define IXGBE_NB_TXQ_PRIO_VALUES 8

static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
};

#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
			   sizeof(rte_ixgbevf_stats_strings[0]))
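
/*
 * The *_strings[] tables above map extended statistics names to the offset
 * of the matching counter inside the hardware statistics structures; the
 * xstats_get handlers walk these tables to build their results.
 */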

/*
 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
 */
static inline int
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
	switch (hw->phy.type) {
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
		return 1;
	default:
		return 0;
	}
}

static inline int32_t
ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
{
	uint32_t ctrl_ext;
	int32_t status;

	status = ixgbe_reset_hw(hw);

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	if (status == IXGBE_ERR_SFP_NOT_PRESENT)
		status = IXGBE_SUCCESS;
	return status;
}

static inline void
ixgbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
	IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function is based on ixgbe_disable_intr() in base/ixgbe.h.
 */
static void
ixgbe_disable_intr(struct ixgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(hw);
}

/*
 * This function resets queue statistics mapping registers.
 * From Niantic datasheet, Initialization of Statistics section:
 * "...if software requires the queue counters, the RQSMR and TQSM registers
 * must be re-programmed following a device reset."
 */
static void
ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
{
	uint32_t i;

	for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
	}
}


static int
ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
				  uint16_t queue_id,
				  uint8_t stat_idx,
				  uint8_t is_rx)
{
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
	uint32_t qsmr_mask = 0;
	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
	uint32_t q_map;
	uint8_t n, offset;

	if ((hw->mac.type != ixgbe_mac_82599EB) &&
	    (hw->mac.type != ixgbe_mac_X540) &&
	    (hw->mac.type != ixgbe_mac_X550) &&
	    (hw->mac.type != ixgbe_mac_X550EM_x) &&
	    (hw->mac.type != ixgbe_mac_X550EM_a))
		return -ENOSYS;

	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);

	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
	if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
		return -EIO;
	}
	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);

	/* Now clear any previous stat_idx set */
	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] &= ~clearing_mask;
	else
		stat_mappings->rqsmr[n] &= ~clearing_mask;

	q_map = (uint32_t)stat_idx;
	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
	if (!is_rx)
		stat_mappings->tqsm[n] |= qsmr_mask;
	else
		stat_mappings->rqsmr[n] |= qsmr_mask;

	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
		     queue_id, stat_idx);
	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

	/* Now write the mapping in the appropriate register */
	if (is_rx) {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
			     stat_mappings->rqsmr[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
	} else {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
			     stat_mappings->tqsm[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
	}
	return 0;
}

static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int i;

	/* write whatever was in stat mapping table to the NIC */
	for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) {
		/* rx */
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]);

		/* tx */
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]);
	}
}

static void
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
	uint8_t i;
	struct ixgbe_dcb_tc_config *tc;
	uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;

	dcb_config->num_tcs.pg_tcs = dcb_max_tc;
	dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
	for (i = 0; i < dcb_max_tc; i++) {
		tc = &dcb_config->tc_config[i];
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
			(uint8_t)(100/dcb_max_tc + (i & 1));
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
		tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
			(uint8_t)(100/dcb_max_tc + (i & 1));
		tc->pfc = ixgbe_dcb_pfc_disabled;
	}

	/* Initialize default user to priority mapping, UPx->TC0 */
	tc = &dcb_config->tc_config[0];
	tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
	for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
		dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
		dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
	}
	dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
	dcb_config->pfc_mode_enable = false;
	dcb_config->vt_mode = true;
	dcb_config->round_robin_enable = false;
	/* support all DCB capabilities in 82599 */
	dcb_config->support.capabilities = 0xFF;

	/* we only support 4 TCs for X540, X550 */
	if (hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x ||
	    hw->mac.type == ixgbe_mac_X550EM_a) {
		dcb_config->num_tcs.pg_tcs = 4;
		dcb_config->num_tcs.pfc_tcs = 4;
	}
}

/*
 * Ensure that all locks are released before first NVM or PHY access
 */
static void
ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
{
	uint16_t mask;

	/*
	 * Phy lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock. Release of common lock
	 * is done automatically by swfw_sync function.
	 */
	mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
		PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
	}
	ixgbe_release_swfw_semaphore(hw, mask);

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
	if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");
	}
	ixgbe_release_swfw_semaphore(hw, mask);
}

/*
 * This function is based on code in ixgbe_attach() in base/ixgbe.c.
 * It returns 0 on success.
 */
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct ixgbe_adapter *ad = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
	struct ixgbe_dcb_config *dcb_config =
		IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct ixgbe_bw_conf *bw_conf =
		IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
	uint32_t ctrl_ext;
	uint16_t csum;
	int diag, i;

	PMD_INIT_FUNC_TRACE();

	ixgbe_dev_macsec_setting_reset(eth_dev);

	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ixgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized;
		 * Tx queue may not have been initialized by the primary process
		 */
		if (eth_dev->data->tx_queues) {
			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
			ixgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
				     "Using default TX function.");
		}

		ixgbe_set_rx_function(eth_dev);

		return 0;
	}

	rte_atomic32_clear(&ad->link_thread_running);
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Initialize the shared code (base driver) */
#ifdef RTE_LIBRTE_IXGBE_BYPASS
	diag = ixgbe_bypass_init_shared_code(hw);
#else
	diag = ixgbe_init_shared_code(hw);
#endif /* RTE_LIBRTE_IXGBE_BYPASS */

	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
		PMD_INIT_LOG(ERR, "\nERROR: "
			     "Firmware recovery mode detected. Limiting functionality.\n"
			     "Refer to the Intel(R) Ethernet Adapters and Devices "
			     "User Guide for details on firmware recovery mode.");
		return -EIO;
	}

	/* pick up the PCI bus settings for reporting later */
	ixgbe_get_bus_info(hw);

	/* Unlock any pending hardware semaphore */
	ixgbe_swfw_lock_reset(hw);

#ifdef RTE_LIBRTE_SECURITY
	/* Initialize security_ctx only for primary process */
	if (ixgbe_ipsec_ctx_create(eth_dev))
		return -ENOMEM;
#endif

	/* Initialize DCB configuration */
	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
	ixgbe_dcb_init(hw, dcb_config);
	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_none;
	hw->fc.current_mode = ixgbe_fc_none;
	hw->fc.pause_time = IXGBE_FC_PAUSE;
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		hw->fc.low_water[i] = IXGBE_FC_LO;
		hw->fc.high_water[i] = IXGBE_FC_HI;
	}
	hw->fc.send_xon = 1;

	/* Make sure we have a good EEPROM before we read from it */
	diag = ixgbe_validate_eeprom_checksum(hw, &csum);
	if (diag != IXGBE_SUCCESS) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
		return -EIO;
	}

#ifdef RTE_LIBRTE_IXGBE_BYPASS
	diag = ixgbe_bypass_init_hw(hw);
#else
	diag = ixgbe_init_hw(hw);
	hw->mac.autotry_restart = false;
#endif /* RTE_LIBRTE_IXGBE_BYPASS */

	/*
	 * Devices with copper phys will fail to initialise if ixgbe_init_hw()
	 * is called too soon after the kernel driver unbinding/binding occurs.
	 * The failure occurs in ixgbe_identify_phy_generic() for all devices,
	 * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
	 * also called. See ixgbe_identify_phy_82599(). The reason for the
	 * failure is not known, and only occurs when virtualisation features
	 * are disabled in the BIOS. A delay of 100ms was found to be enough by
	 * trial-and-error, and is doubled to be safe.
	 */
	if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
		rte_delay_ms(200);
		diag = ixgbe_init_hw(hw);
	}

	if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
		diag = IXGBE_SUCCESS;

	if (diag == IXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM. Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your Intel or hardware representative "
			     "who provided you with this hardware.");
	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
	if (diag) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
		return -EIO;
	}

	/* Reset the hw statistics */
	ixgbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	ixgbe_disable_intr(hw);

	/* reset mappings for queue statistics hw counters */
	ixgbe_reset_qstat_mappings(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc(
		"ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
		return -ENOMEM;
	}

	/* Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* initialize the vfta */
	memset(shadow_vfta, 0, sizeof(*shadow_vfta));

	/* initialize the hw strip bitmap */
	memset(hwstrip, 0, sizeof(*hwstrip));

	/* initialize PF if max_vfs not zero */
	ixgbe_pf_host_init(eth_dev);

	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	/* let hardware know driver is loaded */
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int) hw->mac.type, (int) hw->phy.type,
			     (int) hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int) hw->mac.type, (int) hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ixgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ixgbe_enable_intr(eth_dev);

	ixgbe_dev_set_link_down(eth_dev);

	/* initialize filter info */
	memset(filter_info, 0,
	       sizeof(struct ixgbe_filter_info));

	/* initialize 5tuple filter list */
	TAILQ_INIT(&filter_info->fivetuple_list);

	/* initialize flow director filter list & hash */
	ixgbe_fdir_filter_init(eth_dev);

	/* initialize l2 tunnel filter list & hash */
	ixgbe_l2_tn_filter_init(eth_dev);

	/* initialize flow filter lists */
	ixgbe_filterlist_init();

	/* initialize bandwidth configuration info */
	memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));

	/* initialize Traffic Manager configuration */
	ixgbe_tm_conf_init(eth_dev);

	return 0;
}

static int
eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ixgbe_dev_close(eth_dev);

	return 0;
}

static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	struct ixgbe_5tuple_filter *p_5tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
		TAILQ_REMOVE(&filter_info->fivetuple_list,
			     p_5tuple,
			     entries);
		rte_free(p_5tuple);
	}
	memset(filter_info->fivetuple_mask, 0,
	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);

	return 0;
}

static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
	struct ixgbe_fdir_filter *fdir_filter;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_handle)
		rte_hash_free(fdir_info->hash_handle);

	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
		TAILQ_REMOVE(&fdir_info->fdir_list,
			     fdir_filter,
			     entries);
		rte_free(fdir_filter);
	}

	return 0;
}

static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_l2_tn_info *l2_tn_info =
		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
	struct ixgbe_l2_tn_filter *l2_tn_filter;

	if (l2_tn_info->hash_map)
		rte_free(l2_tn_info->hash_map);
	if (l2_tn_info->hash_handle)
		rte_hash_free(l2_tn_info->hash_handle);

	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
			     l2_tn_filter,
			     entries);
		rte_free(l2_tn_filter);
	}

	return 0;
}

static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = IXGBE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(union ixgbe_atr_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	TAILQ_INIT(&fdir_info->fdir_list);
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", eth_dev->device->name);
	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_handle) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("ixgbe",
					  sizeof(struct ixgbe_fdir_filter *) *
					  IXGBE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		return -ENOMEM;
	}
	fdir_info->mask_added = FALSE;

	return 0;
}

static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
rte_eth_dev *eth_dev) 1445 { 1446 struct ixgbe_l2_tn_info *l2_tn_info = 1447 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); 1448 char l2_tn_hash_name[RTE_HASH_NAMESIZE]; 1449 struct rte_hash_parameters l2_tn_hash_params = { 1450 .name = l2_tn_hash_name, 1451 .entries = IXGBE_MAX_L2_TN_FILTER_NUM, 1452 .key_len = sizeof(struct ixgbe_l2_tn_key), 1453 .hash_func = rte_hash_crc, 1454 .hash_func_init_val = 0, 1455 .socket_id = rte_socket_id(), 1456 }; 1457 1458 TAILQ_INIT(&l2_tn_info->l2_tn_list); 1459 snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE, 1460 "l2_tn_%s", eth_dev->device->name); 1461 l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params); 1462 if (!l2_tn_info->hash_handle) { 1463 PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!"); 1464 return -EINVAL; 1465 } 1466 l2_tn_info->hash_map = rte_zmalloc("ixgbe", 1467 sizeof(struct ixgbe_l2_tn_filter *) * 1468 IXGBE_MAX_L2_TN_FILTER_NUM, 1469 0); 1470 if (!l2_tn_info->hash_map) { 1471 PMD_INIT_LOG(ERR, 1472 "Failed to allocate memory for L2 TN hash map!"); 1473 return -ENOMEM; 1474 } 1475 l2_tn_info->e_tag_en = FALSE; 1476 l2_tn_info->e_tag_fwd_en = FALSE; 1477 l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG; 1478 1479 return 0; 1480 } 1481 /* 1482 * Negotiate mailbox API version with the PF. 1483 * After reset API version is always set to the basic one (ixgbe_mbox_api_10). 1484 * Then we try to negotiate starting with the most recent one. 1485 * If all negotiation attempts fail, then we will proceed with 1486 * the default one (ixgbe_mbox_api_10). 1487 */ 1488 static void 1489 ixgbevf_negotiate_api(struct ixgbe_hw *hw) 1490 { 1491 int32_t i; 1492 1493 /* start with highest supported, proceed down */ 1494 static const enum ixgbe_pfvf_api_rev sup_ver[] = { 1495 ixgbe_mbox_api_13, 1496 ixgbe_mbox_api_12, 1497 ixgbe_mbox_api_11, 1498 ixgbe_mbox_api_10, 1499 }; 1500 1501 for (i = 0; 1502 i != RTE_DIM(sup_ver) && 1503 ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0; 1504 i++) 1505 ; 1506 } 1507 1508 static void 1509 generate_random_mac_addr(struct rte_ether_addr *mac_addr) 1510 { 1511 uint64_t random; 1512 1513 /* Set Organizationally Unique Identifier (OUI) prefix. */ 1514 mac_addr->addr_bytes[0] = 0x00; 1515 mac_addr->addr_bytes[1] = 0x09; 1516 mac_addr->addr_bytes[2] = 0xC0; 1517 /* Force indication of locally assigned MAC address. */ 1518 mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR; 1519 /* Generate the last 3 bytes of the MAC address with a random number. 
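 * Note: rte_rand() returns 64 random bits and the memcpy() below copies only
 * the first three bytes of that value (in memory order) into
 * addr_bytes[3..5], so the generated address keeps the OUI prefix set above,
 * with the locally administered bit forced on, and ends in a pseudo-random
 * 24-bit tail.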
*/ 1520 random = rte_rand(); 1521 memcpy(&mac_addr->addr_bytes[3], &random, 3); 1522 } 1523 1524 static int 1525 devarg_handle_int(__rte_unused const char *key, const char *value, 1526 void *extra_args) 1527 { 1528 uint16_t *n = extra_args; 1529 1530 if (value == NULL || extra_args == NULL) 1531 return -EINVAL; 1532 1533 *n = (uint16_t)strtoul(value, NULL, 0); 1534 if (*n == USHRT_MAX && errno == ERANGE) 1535 return -1; 1536 1537 return 0; 1538 } 1539 1540 static void 1541 ixgbevf_parse_devargs(struct ixgbe_adapter *adapter, 1542 struct rte_devargs *devargs) 1543 { 1544 struct rte_kvargs *kvlist; 1545 uint16_t pflink_fullchk; 1546 1547 if (devargs == NULL) 1548 return; 1549 1550 kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments); 1551 if (kvlist == NULL) 1552 return; 1553 1554 if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 && 1555 rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK, 1556 devarg_handle_int, &pflink_fullchk) == 0 && 1557 pflink_fullchk == 1) 1558 adapter->pflink_fullchk = 1; 1559 1560 rte_kvargs_free(kvlist); 1561 } 1562 1563 /* 1564 * Virtual Function device init 1565 */ 1566 static int 1567 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) 1568 { 1569 int diag; 1570 uint32_t tc, tcs; 1571 struct ixgbe_adapter *ad = eth_dev->data->dev_private; 1572 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1573 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 1574 struct ixgbe_hw *hw = 1575 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1576 struct ixgbe_vfta *shadow_vfta = 1577 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1578 struct ixgbe_hwstrip *hwstrip = 1579 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1580 struct rte_ether_addr *perm_addr = 1581 (struct rte_ether_addr *)hw->mac.perm_addr; 1582 1583 PMD_INIT_FUNC_TRACE(); 1584 1585 eth_dev->dev_ops = &ixgbevf_eth_dev_ops; 1586 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1587 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1588 1589 /* for secondary processes, we don't initialise any further as primary 1590 * has already done this work. Only check we don't need a different 1591 * RX function 1592 */ 1593 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1594 struct ixgbe_tx_queue *txq; 1595 /* TX queue function in primary, set by last queue initialized 1596 * Tx queue may not initialized by primary process 1597 */ 1598 if (eth_dev->data->tx_queues) { 1599 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1]; 1600 ixgbe_set_tx_function(eth_dev, txq); 1601 } else { 1602 /* Use default TX function if we get here */ 1603 PMD_INIT_LOG(NOTICE, 1604 "No TX queues configured yet. 
Using default TX function."); 1605 } 1606 1607 ixgbe_set_rx_function(eth_dev); 1608 1609 return 0; 1610 } 1611 1612 rte_atomic32_clear(&ad->link_thread_running); 1613 ixgbevf_parse_devargs(eth_dev->data->dev_private, 1614 pci_dev->device.devargs); 1615 1616 rte_eth_copy_pci_info(eth_dev, pci_dev); 1617 1618 hw->device_id = pci_dev->id.device_id; 1619 hw->vendor_id = pci_dev->id.vendor_id; 1620 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1621 1622 /* initialize the vfta */ 1623 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1624 1625 /* initialize the hw strip bitmap*/ 1626 memset(hwstrip, 0, sizeof(*hwstrip)); 1627 1628 /* Initialize the shared code (base driver) */ 1629 diag = ixgbe_init_shared_code(hw); 1630 if (diag != IXGBE_SUCCESS) { 1631 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag); 1632 return -EIO; 1633 } 1634 1635 /* init_mailbox_params */ 1636 hw->mbx.ops.init_params(hw); 1637 1638 /* Reset the hw statistics */ 1639 ixgbevf_dev_stats_reset(eth_dev); 1640 1641 /* Disable the interrupts for VF */ 1642 ixgbevf_intr_disable(eth_dev); 1643 1644 hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ 1645 diag = hw->mac.ops.reset_hw(hw); 1646 1647 /* 1648 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when 1649 * the underlying PF driver has not assigned a MAC address to the VF. 1650 * In this case, assign a random MAC address. 1651 */ 1652 if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) { 1653 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1654 /* 1655 * This error code will be propagated to the app by 1656 * rte_eth_dev_reset, so use a public error code rather than 1657 * the internal-only IXGBE_ERR_RESET_FAILED 1658 */ 1659 return -EAGAIN; 1660 } 1661 1662 /* negotiate mailbox API version to use with the PF. */ 1663 ixgbevf_negotiate_api(hw); 1664 1665 /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */ 1666 ixgbevf_get_queues(hw, &tcs, &tc); 1667 1668 /* Allocate memory for storing MAC addresses */ 1669 eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN * 1670 hw->mac.num_rar_entries, 0); 1671 if (eth_dev->data->mac_addrs == NULL) { 1672 PMD_INIT_LOG(ERR, 1673 "Failed to allocate %u bytes needed to store " 1674 "MAC addresses", 1675 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1676 return -ENOMEM; 1677 } 1678 1679 /* Pass the information to the rte_eth_dev_close() that it should also 1680 * release the private port resources. 1681 */ 1682 eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 1683 1684 /* Generate a random MAC address, if none was assigned by PF. 
*/ 1685 if (rte_is_zero_ether_addr(perm_addr)) { 1686 generate_random_mac_addr(perm_addr); 1687 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1); 1688 if (diag) { 1689 rte_free(eth_dev->data->mac_addrs); 1690 eth_dev->data->mac_addrs = NULL; 1691 return diag; 1692 } 1693 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF"); 1694 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address " 1695 "%02x:%02x:%02x:%02x:%02x:%02x", 1696 perm_addr->addr_bytes[0], 1697 perm_addr->addr_bytes[1], 1698 perm_addr->addr_bytes[2], 1699 perm_addr->addr_bytes[3], 1700 perm_addr->addr_bytes[4], 1701 perm_addr->addr_bytes[5]); 1702 } 1703 1704 /* Copy the permanent MAC address */ 1705 rte_ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]); 1706 1707 /* reset the hardware with the new settings */ 1708 diag = hw->mac.ops.start_hw(hw); 1709 switch (diag) { 1710 case 0: 1711 break; 1712 1713 default: 1714 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1715 return -EIO; 1716 } 1717 1718 rte_intr_callback_register(intr_handle, 1719 ixgbevf_dev_interrupt_handler, eth_dev); 1720 rte_intr_enable(intr_handle); 1721 ixgbevf_intr_enable(eth_dev); 1722 1723 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", 1724 eth_dev->data->port_id, pci_dev->id.vendor_id, 1725 pci_dev->id.device_id, "ixgbe_mac_82599_vf"); 1726 1727 return 0; 1728 } 1729 1730 /* Virtual Function device uninit */ 1731 1732 static int 1733 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) 1734 { 1735 PMD_INIT_FUNC_TRACE(); 1736 1737 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1738 return 0; 1739 1740 ixgbevf_dev_close(eth_dev); 1741 1742 return 0; 1743 } 1744 1745 static int 1746 eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1747 struct rte_pci_device *pci_dev) 1748 { 1749 char name[RTE_ETH_NAME_MAX_LEN]; 1750 struct rte_eth_dev *pf_ethdev; 1751 struct rte_eth_devargs eth_da; 1752 int i, retval; 1753 1754 if (pci_dev->device.devargs) { 1755 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, 1756 &eth_da); 1757 if (retval) 1758 return retval; 1759 } else 1760 memset(&eth_da, 0, sizeof(eth_da)); 1761 1762 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, 1763 sizeof(struct ixgbe_adapter), 1764 eth_dev_pci_specific_init, pci_dev, 1765 eth_ixgbe_dev_init, NULL); 1766 1767 if (retval || eth_da.nb_representor_ports < 1) 1768 return retval; 1769 1770 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1771 if (pf_ethdev == NULL) 1772 return -ENODEV; 1773 1774 /* probe VF representor ports */ 1775 for (i = 0; i < eth_da.nb_representor_ports; i++) { 1776 struct ixgbe_vf_info *vfinfo; 1777 struct ixgbe_vf_representor representor; 1778 1779 vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA( 1780 pf_ethdev->data->dev_private); 1781 if (vfinfo == NULL) { 1782 PMD_DRV_LOG(ERR, 1783 "no virtual functions supported by PF"); 1784 break; 1785 } 1786 1787 representor.vf_id = eth_da.representor_ports[i]; 1788 representor.switch_domain_id = vfinfo->switch_domain_id; 1789 representor.pf_ethdev = pf_ethdev; 1790 1791 /* representor port net_bdf_port */ 1792 snprintf(name, sizeof(name), "net_%s_representor_%d", 1793 pci_dev->device.name, 1794 eth_da.representor_ports[i]); 1795 1796 retval = rte_eth_dev_create(&pci_dev->device, name, 1797 sizeof(struct ixgbe_vf_representor), NULL, NULL, 1798 ixgbe_vf_representor_init, &representor); 1799 1800 if (retval) 1801 PMD_DRV_LOG(ERR, "failed to create ixgbe vf " 1802 "representor %s.", name); 1803 } 1804 1805 return 0; 1806 } 1807 1808
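/*
 * Illustrative note (not part of the upstream flow): when the PF is probed
 * with a "representor" devarg, e.g. "representor=[0-3]", the loop above
 * creates one extra ethdev per listed VF, named
 * "net_<pci_name>_representor_<vf_id>". A minimal sketch of how an
 * application could tell such a port apart from a regular data port, using
 * the same flag that eth_ixgbe_pci_remove() checks below:
 *
 *     uint16_t port_id;
 *
 *     RTE_ETH_FOREACH_DEV(port_id) {
 *             if (rte_eth_devices[port_id].data->dev_flags &
 *                             RTE_ETH_DEV_REPRESENTOR)
 *                     printf("port %u is a VF representor\n", port_id);
 *     }
 *
 * The exact devarg syntax is interpreted by rte_eth_devargs_parse() and may
 * differ between DPDK releases.
 */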
static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev) 1809 { 1810 struct rte_eth_dev *ethdev; 1811 1812 ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1813 if (!ethdev) 1814 return 0; 1815 1816 if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) 1817 return rte_eth_dev_pci_generic_remove(pci_dev, 1818 ixgbe_vf_representor_uninit); 1819 else 1820 return rte_eth_dev_pci_generic_remove(pci_dev, 1821 eth_ixgbe_dev_uninit); 1822 } 1823 1824 static struct rte_pci_driver rte_ixgbe_pmd = { 1825 .id_table = pci_id_ixgbe_map, 1826 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 1827 .probe = eth_ixgbe_pci_probe, 1828 .remove = eth_ixgbe_pci_remove, 1829 }; 1830 1831 static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1832 struct rte_pci_device *pci_dev) 1833 { 1834 return rte_eth_dev_pci_generic_probe(pci_dev, 1835 sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init); 1836 } 1837 1838 static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev) 1839 { 1840 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit); 1841 } 1842 1843 /* 1844 * virtual function driver struct 1845 */ 1846 static struct rte_pci_driver rte_ixgbevf_pmd = { 1847 .id_table = pci_id_ixgbevf_map, 1848 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 1849 .probe = eth_ixgbevf_pci_probe, 1850 .remove = eth_ixgbevf_pci_remove, 1851 }; 1852 1853 static int 1854 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1855 { 1856 struct ixgbe_hw *hw = 1857 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1858 struct ixgbe_vfta *shadow_vfta = 1859 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1860 uint32_t vfta; 1861 uint32_t vid_idx; 1862 uint32_t vid_bit; 1863 1864 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 1865 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 1866 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx)); 1867 if (on) 1868 vfta |= vid_bit; 1869 else 1870 vfta &= ~vid_bit; 1871 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta); 1872 1873 /* update local VFTA copy */ 1874 shadow_vfta->vfta[vid_idx] = vfta; 1875 1876 return 0; 1877 } 1878 1879 static void 1880 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 1881 { 1882 if (on) 1883 ixgbe_vlan_hw_strip_enable(dev, queue); 1884 else 1885 ixgbe_vlan_hw_strip_disable(dev, queue); 1886 } 1887 1888 static int 1889 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 1890 enum rte_vlan_type vlan_type, 1891 uint16_t tpid) 1892 { 1893 struct ixgbe_hw *hw = 1894 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1895 int ret = 0; 1896 uint32_t reg; 1897 uint32_t qinq; 1898 1899 qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1900 qinq &= IXGBE_DMATXCTL_GDV; 1901 1902 switch (vlan_type) { 1903 case ETH_VLAN_TYPE_INNER: 1904 if (qinq) { 1905 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1906 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1907 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1908 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1909 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1910 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1911 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1912 } else { 1913 ret = -ENOTSUP; 1914 PMD_DRV_LOG(ERR, "Inner type is not supported" 1915 " by single VLAN"); 1916 } 1917 break; 1918 case ETH_VLAN_TYPE_OUTER: 1919 if (qinq) { 1920 /* Only the high 16-bits is valid */ 1921 IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid << 1922 IXGBE_EXVET_VET_EXT_SHIFT); 1923 } else { 1924 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1925 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 
1926 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1927 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1928 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1929 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1930 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1931 } 1932 1933 break; 1934 default: 1935 ret = -EINVAL; 1936 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type); 1937 break; 1938 } 1939 1940 return ret; 1941 } 1942 1943 void 1944 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev) 1945 { 1946 struct ixgbe_hw *hw = 1947 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1948 uint32_t vlnctrl; 1949 1950 PMD_INIT_FUNC_TRACE(); 1951 1952 /* Filter Table Disable */ 1953 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1954 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 1955 1956 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1957 } 1958 1959 void 1960 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) 1961 { 1962 struct ixgbe_hw *hw = 1963 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1964 struct ixgbe_vfta *shadow_vfta = 1965 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1966 uint32_t vlnctrl; 1967 uint16_t i; 1968 1969 PMD_INIT_FUNC_TRACE(); 1970 1971 /* Filter Table Enable */ 1972 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1973 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 1974 vlnctrl |= IXGBE_VLNCTRL_VFE; 1975 1976 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1977 1978 /* write whatever is in local vfta copy */ 1979 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1980 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]); 1981 } 1982 1983 static void 1984 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) 1985 { 1986 struct ixgbe_hwstrip *hwstrip = 1987 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); 1988 struct ixgbe_rx_queue *rxq; 1989 1990 if (queue >= IXGBE_MAX_RX_QUEUE_NUM) 1991 return; 1992 1993 if (on) 1994 IXGBE_SET_HWSTRIP(hwstrip, queue); 1995 else 1996 IXGBE_CLEAR_HWSTRIP(hwstrip, queue); 1997 1998 if (queue >= dev->data->nb_rx_queues) 1999 return; 2000 2001 rxq = dev->data->rx_queues[queue]; 2002 2003 if (on) { 2004 rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; 2005 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 2006 } else { 2007 rxq->vlan_flags = PKT_RX_VLAN; 2008 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 2009 } 2010 } 2011 2012 static void 2013 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) 2014 { 2015 struct ixgbe_hw *hw = 2016 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2017 uint32_t ctrl; 2018 2019 PMD_INIT_FUNC_TRACE(); 2020 2021 if (hw->mac.type == ixgbe_mac_82598EB) { 2022 /* No queue level support */ 2023 PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); 2024 return; 2025 } 2026 2027 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 2028 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 2029 ctrl &= ~IXGBE_RXDCTL_VME; 2030 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 2031 2032 /* record those setting for HW strip per queue */ 2033 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); 2034 } 2035 2036 static void 2037 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) 2038 { 2039 struct ixgbe_hw *hw = 2040 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2041 uint32_t ctrl; 2042 2043 PMD_INIT_FUNC_TRACE(); 2044 2045 if (hw->mac.type == ixgbe_mac_82598EB) { 2046 /* No queue level supported */ 2047 PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); 2048 return; 2049 } 2050 2051 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 2052 ctrl = 
IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 2053 ctrl |= IXGBE_RXDCTL_VME; 2054 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 2055 2056 /* record those setting for HW strip per queue */ 2057 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); 2058 } 2059 2060 static void 2061 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev) 2062 { 2063 struct ixgbe_hw *hw = 2064 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2065 uint32_t ctrl; 2066 2067 PMD_INIT_FUNC_TRACE(); 2068 2069 /* DMATXCTRL: Generic Double VLAN Disable */ 2070 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2071 ctrl &= ~IXGBE_DMATXCTL_GDV; 2072 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); 2073 2074 /* CTRL_EXT: Global Double VLAN Disable */ 2075 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 2076 ctrl &= ~IXGBE_EXTENDED_VLAN; 2077 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); 2078 2079 } 2080 2081 static void 2082 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) 2083 { 2084 struct ixgbe_hw *hw = 2085 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2086 uint32_t ctrl; 2087 2088 PMD_INIT_FUNC_TRACE(); 2089 2090 /* DMATXCTRL: Generic Double VLAN Enable */ 2091 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2092 ctrl |= IXGBE_DMATXCTL_GDV; 2093 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); 2094 2095 /* CTRL_EXT: Global Double VLAN Enable */ 2096 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 2097 ctrl |= IXGBE_EXTENDED_VLAN; 2098 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); 2099 2100 /* Clear pooling mode of PFVTCTL. It's required by X550. */ 2101 if (hw->mac.type == ixgbe_mac_X550 || 2102 hw->mac.type == ixgbe_mac_X550EM_x || 2103 hw->mac.type == ixgbe_mac_X550EM_a) { 2104 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 2105 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 2106 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 2107 } 2108 2109 /* 2110 * VET EXT field in the EXVET register = 0x8100 by default 2111 * So no need to change.
Same to VT field of DMATXCTL register 2112 */ 2113 } 2114 2115 void 2116 ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) 2117 { 2118 struct ixgbe_hw *hw = 2119 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2120 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; 2121 uint32_t ctrl; 2122 uint16_t i; 2123 struct ixgbe_rx_queue *rxq; 2124 bool on; 2125 2126 PMD_INIT_FUNC_TRACE(); 2127 2128 if (hw->mac.type == ixgbe_mac_82598EB) { 2129 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { 2130 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2131 ctrl |= IXGBE_VLNCTRL_VME; 2132 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2133 } else { 2134 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2135 ctrl &= ~IXGBE_VLNCTRL_VME; 2136 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 2137 } 2138 } else { 2139 /* 2140 * Other 10G NIC, the VLAN strip can be setup 2141 * per queue in RXDCTL 2142 */ 2143 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2144 rxq = dev->data->rx_queues[i]; 2145 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); 2146 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { 2147 ctrl |= IXGBE_RXDCTL_VME; 2148 on = TRUE; 2149 } else { 2150 ctrl &= ~IXGBE_RXDCTL_VME; 2151 on = FALSE; 2152 } 2153 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); 2154 2155 /* record those setting for HW strip per queue */ 2156 ixgbe_vlan_hw_strip_bitmap_set(dev, i, on); 2157 } 2158 } 2159 } 2160 2161 static void 2162 ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) 2163 { 2164 uint16_t i; 2165 struct rte_eth_rxmode *rxmode; 2166 struct ixgbe_rx_queue *rxq; 2167 2168 if (mask & ETH_VLAN_STRIP_MASK) { 2169 rxmode = &dev->data->dev_conf.rxmode; 2170 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 2171 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2172 rxq = dev->data->rx_queues[i]; 2173 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; 2174 } 2175 else 2176 for (i = 0; i < dev->data->nb_rx_queues; i++) { 2177 rxq = dev->data->rx_queues[i]; 2178 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 2179 } 2180 } 2181 } 2182 2183 static int 2184 ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) 2185 { 2186 struct rte_eth_rxmode *rxmode; 2187 rxmode = &dev->data->dev_conf.rxmode; 2188 2189 if (mask & ETH_VLAN_STRIP_MASK) { 2190 ixgbe_vlan_hw_strip_config(dev); 2191 } 2192 2193 if (mask & ETH_VLAN_FILTER_MASK) { 2194 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) 2195 ixgbe_vlan_hw_filter_enable(dev); 2196 else 2197 ixgbe_vlan_hw_filter_disable(dev); 2198 } 2199 2200 if (mask & ETH_VLAN_EXTEND_MASK) { 2201 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) 2202 ixgbe_vlan_hw_extend_enable(dev); 2203 else 2204 ixgbe_vlan_hw_extend_disable(dev); 2205 } 2206 2207 return 0; 2208 } 2209 2210 static int 2211 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) 2212 { 2213 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 2214 2215 ixgbe_vlan_offload_config(dev, mask); 2216 2217 return 0; 2218 } 2219 2220 static void 2221 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) 2222 { 2223 struct ixgbe_hw *hw = 2224 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2225 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ 2226 uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2227 2228 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ 2229 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); 2230 } 2231 2232 static int 2233 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) 2234 { 2235 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2236 
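/*
 * Worked example (illustrative): IXGBE_MAX_RX_QUEUE_NUM is 128, so asking
 * for 4 RX queues per VF selects ETH_32_POOLS below, which yields
 * nb_q_per_pool = 128 / 32 = 4 and, with max_vfs == 31,
 * def_pool_q_idx = 31 * 4 = 124, i.e. the PF's default pool starts right
 * after the queues reserved for the VFs.
 */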
2237 switch (nb_rx_q) { 2238 case 1: 2239 case 2: 2240 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS; 2241 break; 2242 case 4: 2243 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS; 2244 break; 2245 default: 2246 return -EINVAL; 2247 } 2248 2249 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 2250 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2251 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = 2252 pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2253 return 0; 2254 } 2255 2256 static int 2257 ixgbe_check_mq_mode(struct rte_eth_dev *dev) 2258 { 2259 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 2260 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2261 uint16_t nb_rx_q = dev->data->nb_rx_queues; 2262 uint16_t nb_tx_q = dev->data->nb_tx_queues; 2263 2264 if (RTE_ETH_DEV_SRIOV(dev).active != 0) { 2265 /* check multi-queue mode */ 2266 switch (dev_conf->rxmode.mq_mode) { 2267 case ETH_MQ_RX_VMDQ_DCB: 2268 PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV"); 2269 break; 2270 case ETH_MQ_RX_VMDQ_DCB_RSS: 2271 /* DCB/RSS VMDQ in SRIOV mode, not implement yet */ 2272 PMD_INIT_LOG(ERR, "SRIOV active," 2273 " unsupported mq_mode rx %d.", 2274 dev_conf->rxmode.mq_mode); 2275 return -EINVAL; 2276 case ETH_MQ_RX_RSS: 2277 case ETH_MQ_RX_VMDQ_RSS: 2278 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS; 2279 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) 2280 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) { 2281 PMD_INIT_LOG(ERR, "SRIOV is active," 2282 " invalid queue number" 2283 " for VMDQ RSS, allowed" 2284 " value are 1, 2 or 4."); 2285 return -EINVAL; 2286 } 2287 break; 2288 case ETH_MQ_RX_VMDQ_ONLY: 2289 case ETH_MQ_RX_NONE: 2290 /* if nothing mq mode configure, use default scheme */ 2291 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; 2292 break; 2293 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/ 2294 /* SRIOV only works in VMDq enable mode */ 2295 PMD_INIT_LOG(ERR, "SRIOV is active," 2296 " wrong mq_mode rx %d.", 2297 dev_conf->rxmode.mq_mode); 2298 return -EINVAL; 2299 } 2300 2301 switch (dev_conf->txmode.mq_mode) { 2302 case ETH_MQ_TX_VMDQ_DCB: 2303 PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV"); 2304 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; 2305 break; 2306 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */ 2307 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY; 2308 break; 2309 } 2310 2311 /* check valid queue number */ 2312 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || 2313 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { 2314 PMD_INIT_LOG(ERR, "SRIOV is active," 2315 " nb_rx_q=%d nb_tx_q=%d queue number" 2316 " must be less than or equal to %d.", 2317 nb_rx_q, nb_tx_q, 2318 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); 2319 return -EINVAL; 2320 } 2321 } else { 2322 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { 2323 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" 2324 " not supported."); 2325 return -EINVAL; 2326 } 2327 /* check configuration for vmdb+dcb mode */ 2328 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) { 2329 const struct rte_eth_vmdq_dcb_conf *conf; 2330 2331 if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2332 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.", 2333 IXGBE_VMDQ_DCB_NB_QUEUES); 2334 return -EINVAL; 2335 } 2336 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf; 2337 if (!(conf->nb_queue_pools == ETH_16_POOLS || 2338 conf->nb_queue_pools == ETH_32_POOLS)) { 2339 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2340 " nb_queue_pools must be %d or %d.", 
2341 ETH_16_POOLS, ETH_32_POOLS); 2342 return -EINVAL; 2343 } 2344 } 2345 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { 2346 const struct rte_eth_vmdq_dcb_tx_conf *conf; 2347 2348 if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2349 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d", 2350 IXGBE_VMDQ_DCB_NB_QUEUES); 2351 return -EINVAL; 2352 } 2353 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2354 if (!(conf->nb_queue_pools == ETH_16_POOLS || 2355 conf->nb_queue_pools == ETH_32_POOLS)) { 2356 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2357 " nb_queue_pools != %d and" 2358 " nb_queue_pools != %d.", 2359 ETH_16_POOLS, ETH_32_POOLS); 2360 return -EINVAL; 2361 } 2362 } 2363 2364 /* For DCB mode check our configuration before we go further */ 2365 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { 2366 const struct rte_eth_dcb_rx_conf *conf; 2367 2368 conf = &dev_conf->rx_adv_conf.dcb_rx_conf; 2369 if (!(conf->nb_tcs == ETH_4_TCS || 2370 conf->nb_tcs == ETH_8_TCS)) { 2371 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2372 " and nb_tcs != %d.", 2373 ETH_4_TCS, ETH_8_TCS); 2374 return -EINVAL; 2375 } 2376 } 2377 2378 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { 2379 const struct rte_eth_dcb_tx_conf *conf; 2380 2381 conf = &dev_conf->tx_adv_conf.dcb_tx_conf; 2382 if (!(conf->nb_tcs == ETH_4_TCS || 2383 conf->nb_tcs == ETH_8_TCS)) { 2384 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2385 " and nb_tcs != %d.", 2386 ETH_4_TCS, ETH_8_TCS); 2387 return -EINVAL; 2388 } 2389 } 2390 2391 /* 2392 * When DCB/VT is off, maximum number of queues changes, 2393 * except for 82598EB, which remains constant. 2394 */ 2395 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && 2396 hw->mac.type != ixgbe_mac_82598EB) { 2397 if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) { 2398 PMD_INIT_LOG(ERR, 2399 "Neither VT nor DCB are enabled, " 2400 "nb_tx_q > %d.", 2401 IXGBE_NONE_MODE_TX_NB_QUEUES); 2402 return -EINVAL; 2403 } 2404 } 2405 } 2406 return 0; 2407 } 2408 2409 static int 2410 ixgbe_dev_configure(struct rte_eth_dev *dev) 2411 { 2412 struct ixgbe_interrupt *intr = 2413 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2414 struct ixgbe_adapter *adapter = dev->data->dev_private; 2415 int ret; 2416 2417 PMD_INIT_FUNC_TRACE(); 2418 2419 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 2420 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 2421 2422 /* multipe queue mode checking */ 2423 ret = ixgbe_check_mq_mode(dev); 2424 if (ret != 0) { 2425 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.", 2426 ret); 2427 return ret; 2428 } 2429 2430 /* set flag to update link status after init */ 2431 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2432 2433 /* 2434 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 2435 * allocation or vector Rx preconditions we will reset it. 
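 * These flags start out optimistic here; the Rx queue setup and Rx function
 * selection code clears them later, per port, if any queue's configuration
 * rules out the bulk-allocation or vector receive paths.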
2436 */ 2437 adapter->rx_bulk_alloc_allowed = true; 2438 adapter->rx_vec_allowed = true; 2439 2440 return 0; 2441 } 2442 2443 static void 2444 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) 2445 { 2446 struct ixgbe_hw *hw = 2447 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2448 struct ixgbe_interrupt *intr = 2449 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2450 uint32_t gpie; 2451 2452 /* only set up it on X550EM_X */ 2453 if (hw->mac.type == ixgbe_mac_X550EM_x) { 2454 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 2455 gpie |= IXGBE_SDP0_GPIEN_X550EM_x; 2456 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2457 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 2458 intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x; 2459 } 2460 } 2461 2462 int 2463 ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, 2464 uint16_t tx_rate, uint64_t q_msk) 2465 { 2466 struct ixgbe_hw *hw; 2467 struct ixgbe_vf_info *vfinfo; 2468 struct rte_eth_link link; 2469 uint8_t nb_q_per_pool; 2470 uint32_t queue_stride; 2471 uint32_t queue_idx, idx = 0, vf_idx; 2472 uint32_t queue_end; 2473 uint16_t total_rate = 0; 2474 struct rte_pci_device *pci_dev; 2475 int ret; 2476 2477 pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2478 ret = rte_eth_link_get_nowait(dev->data->port_id, &link); 2479 if (ret < 0) 2480 return ret; 2481 2482 if (vf >= pci_dev->max_vfs) 2483 return -EINVAL; 2484 2485 if (tx_rate > link.link_speed) 2486 return -EINVAL; 2487 2488 if (q_msk == 0) 2489 return 0; 2490 2491 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2492 vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); 2493 nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 2494 queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 2495 queue_idx = vf * queue_stride; 2496 queue_end = queue_idx + nb_q_per_pool - 1; 2497 if (queue_end >= hw->mac.max_tx_queues) 2498 return -EINVAL; 2499 2500 if (vfinfo) { 2501 for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) { 2502 if (vf_idx == vf) 2503 continue; 2504 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); 2505 idx++) 2506 total_rate += vfinfo[vf_idx].tx_rate[idx]; 2507 } 2508 } else { 2509 return -EINVAL; 2510 } 2511 2512 /* Store tx_rate for this vf. */ 2513 for (idx = 0; idx < nb_q_per_pool; idx++) { 2514 if (((uint64_t)0x1 << idx) & q_msk) { 2515 if (vfinfo[vf].tx_rate[idx] != tx_rate) 2516 vfinfo[vf].tx_rate[idx] = tx_rate; 2517 total_rate += tx_rate; 2518 } 2519 } 2520 2521 if (total_rate > dev->data->dev_link.link_speed) { 2522 /* Reset stored TX rate of the VF if it causes exceed 2523 * link speed. 
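 * Illustrative example: with q_msk == 0x3 and tx_rate == 1000, the loop
 * above books 1000 Mbps for each of the two selected queues; if the running
 * total (which already includes the rates stored for the other VFs) then
 * exceeds the current link speed, this VF's stored rates are cleared again
 * and -EINVAL is returned.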
2524 */ 2525 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); 2526 return -EINVAL; 2527 } 2528 2529 /* Set RTTBCNRC of each queue/pool for vf X */ 2530 for (; queue_idx <= queue_end; queue_idx++) { 2531 if (0x1 & q_msk) 2532 ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); 2533 q_msk = q_msk >> 1; 2534 } 2535 2536 return 0; 2537 } 2538 2539 static int 2540 ixgbe_flow_ctrl_enable(struct rte_eth_dev *dev, struct ixgbe_hw *hw) 2541 { 2542 struct ixgbe_adapter *adapter = dev->data->dev_private; 2543 int err; 2544 uint32_t mflcn; 2545 2546 err = ixgbe_fc_enable(hw); 2547 2548 /* Not negotiated is not an error case */ 2549 if (err == IXGBE_SUCCESS || err == IXGBE_ERR_FC_NOT_NEGOTIATED) { 2550 /* 2551 *check if we want to forward MAC frames - driver doesn't 2552 *have native capability to do that, 2553 *so we'll write the registers ourselves 2554 */ 2555 2556 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2557 2558 /* set or clear MFLCN.PMCF bit depending on configuration */ 2559 if (adapter->mac_ctrl_frame_fwd != 0) 2560 mflcn |= IXGBE_MFLCN_PMCF; 2561 else 2562 mflcn &= ~IXGBE_MFLCN_PMCF; 2563 2564 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn); 2565 IXGBE_WRITE_FLUSH(hw); 2566 2567 return 0; 2568 } 2569 return err; 2570 } 2571 2572 /* 2573 * Configure device link speed and setup link. 2574 * It returns 0 on success. 2575 */ 2576 static int 2577 ixgbe_dev_start(struct rte_eth_dev *dev) 2578 { 2579 struct ixgbe_hw *hw = 2580 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2581 struct ixgbe_vf_info *vfinfo = 2582 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2583 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2584 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2585 uint32_t intr_vector = 0; 2586 int err, link_up = 0, negotiate = 0; 2587 uint32_t speed = 0; 2588 uint32_t allowed_speeds = 0; 2589 int mask = 0; 2590 int status; 2591 uint16_t vf, idx; 2592 uint32_t *link_speeds; 2593 struct ixgbe_tm_conf *tm_conf = 2594 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2595 struct ixgbe_macsec_setting *macsec_setting = 2596 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 2597 2598 PMD_INIT_FUNC_TRACE(); 2599 2600 /* Stop the link setup handler before resetting the HW. 
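 * A previous start or link-status update may have deferred the link setup to
 * a helper thread; cancelling it here keeps that thread from racing with the
 * adapter stop/reset sequence below.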
*/ 2601 ixgbe_dev_cancel_link_thread(dev); 2602 2603 /* disable uio/vfio intr/eventfd mapping */ 2604 rte_intr_disable(intr_handle); 2605 2606 /* stop adapter */ 2607 hw->adapter_stopped = 0; 2608 ixgbe_stop_adapter(hw); 2609 2610 /* reinitialize adapter 2611 * this calls reset and start 2612 */ 2613 status = ixgbe_pf_reset_hw(hw); 2614 if (status != 0) 2615 return -1; 2616 hw->mac.ops.start_hw(hw); 2617 hw->mac.get_link_status = true; 2618 2619 /* configure PF module if SRIOV enabled */ 2620 ixgbe_pf_host_configure(dev); 2621 2622 ixgbe_dev_phy_intr_setup(dev); 2623 2624 /* check and configure queue intr-vector mapping */ 2625 if ((rte_intr_cap_multiple(intr_handle) || 2626 !RTE_ETH_DEV_SRIOV(dev).active) && 2627 dev->data->dev_conf.intr_conf.rxq != 0) { 2628 intr_vector = dev->data->nb_rx_queues; 2629 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) { 2630 PMD_INIT_LOG(ERR, "At most %d intr queues supported", 2631 IXGBE_MAX_INTR_QUEUE_NUM); 2632 return -ENOTSUP; 2633 } 2634 if (rte_intr_efd_enable(intr_handle, intr_vector)) 2635 return -1; 2636 } 2637 2638 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 2639 intr_handle->intr_vec = 2640 rte_zmalloc("intr_vec", 2641 dev->data->nb_rx_queues * sizeof(int), 0); 2642 if (intr_handle->intr_vec == NULL) { 2643 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 2644 " intr_vec", dev->data->nb_rx_queues); 2645 return -ENOMEM; 2646 } 2647 } 2648 2649 /* confiugre msix for sleep until rx interrupt */ 2650 ixgbe_configure_msix(dev); 2651 2652 /* initialize transmission unit */ 2653 ixgbe_dev_tx_init(dev); 2654 2655 /* This can fail when allocating mbufs for descriptor rings */ 2656 err = ixgbe_dev_rx_init(dev); 2657 if (err) { 2658 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 2659 goto error; 2660 } 2661 2662 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | 2663 ETH_VLAN_EXTEND_MASK; 2664 err = ixgbe_vlan_offload_config(dev, mask); 2665 if (err) { 2666 PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); 2667 goto error; 2668 } 2669 2670 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { 2671 /* Enable vlan filtering for VMDq */ 2672 ixgbe_vmdq_vlan_hw_filter_enable(dev); 2673 } 2674 2675 /* Configure DCB hw */ 2676 ixgbe_configure_dcb(dev); 2677 2678 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { 2679 err = ixgbe_fdir_configure(dev); 2680 if (err) 2681 goto error; 2682 } 2683 2684 /* Restore vf rate limit */ 2685 if (vfinfo != NULL) { 2686 for (vf = 0; vf < pci_dev->max_vfs; vf++) 2687 for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) 2688 if (vfinfo[vf].tx_rate[idx] != 0) 2689 ixgbe_set_vf_rate_limit( 2690 dev, vf, 2691 vfinfo[vf].tx_rate[idx], 2692 1 << idx); 2693 } 2694 2695 ixgbe_restore_statistics_mapping(dev); 2696 2697 err = ixgbe_flow_ctrl_enable(dev, hw); 2698 if (err < 0) { 2699 PMD_INIT_LOG(ERR, "enable flow ctrl err"); 2700 goto error; 2701 } 2702 2703 err = ixgbe_dev_rxtx_start(dev); 2704 if (err < 0) { 2705 PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); 2706 goto error; 2707 } 2708 2709 /* Skip link setup if loopback mode is enabled. 
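 * A non-zero lpbk_mode requests MAC-level loopback;
 * ixgbe_check_supported_loopback_mode() rejects modes the current MAC type
 * cannot provide, and with loopback active there is no external link to
 * bring up, so the link/speed setup below is skipped.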
*/ 2710 if (dev->data->dev_conf.lpbk_mode != 0) { 2711 err = ixgbe_check_supported_loopback_mode(dev); 2712 if (err < 0) { 2713 PMD_INIT_LOG(ERR, "Unsupported loopback mode"); 2714 goto error; 2715 } else { 2716 goto skip_link_setup; 2717 } 2718 } 2719 2720 if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { 2721 err = hw->mac.ops.setup_sfp(hw); 2722 if (err) 2723 goto error; 2724 } 2725 2726 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2727 /* Turn on the copper */ 2728 ixgbe_set_phy_power(hw, true); 2729 } else { 2730 /* Turn on the laser */ 2731 ixgbe_enable_tx_laser(hw); 2732 } 2733 2734 err = ixgbe_check_link(hw, &speed, &link_up, 0); 2735 if (err) 2736 goto error; 2737 dev->data->dev_link.link_status = link_up; 2738 2739 err = ixgbe_get_link_capabilities(hw, &speed, &negotiate); 2740 if (err) 2741 goto error; 2742 2743 switch (hw->mac.type) { 2744 case ixgbe_mac_X550: 2745 case ixgbe_mac_X550EM_x: 2746 case ixgbe_mac_X550EM_a: 2747 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | 2748 ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G | 2749 ETH_LINK_SPEED_10G; 2750 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 2751 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 2752 allowed_speeds = ETH_LINK_SPEED_10M | 2753 ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G; 2754 break; 2755 default: 2756 allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | 2757 ETH_LINK_SPEED_10G; 2758 } 2759 2760 link_speeds = &dev->data->dev_conf.link_speeds; 2761 2762 /* Ignore autoneg flag bit and check the validity of 2763 * link_speed 2764 */ 2765 if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) { 2766 PMD_INIT_LOG(ERR, "Invalid link setting"); 2767 goto error; 2768 } 2769 2770 speed = 0x0; 2771 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) { 2772 switch (hw->mac.type) { 2773 case ixgbe_mac_82598EB: 2774 speed = IXGBE_LINK_SPEED_82598_AUTONEG; 2775 break; 2776 case ixgbe_mac_82599EB: 2777 case ixgbe_mac_X540: 2778 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2779 break; 2780 case ixgbe_mac_X550: 2781 case ixgbe_mac_X550EM_x: 2782 case ixgbe_mac_X550EM_a: 2783 speed = IXGBE_LINK_SPEED_X550_AUTONEG; 2784 break; 2785 default: 2786 speed = IXGBE_LINK_SPEED_82599_AUTONEG; 2787 } 2788 } else { 2789 if (*link_speeds & ETH_LINK_SPEED_10G) 2790 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2791 if (*link_speeds & ETH_LINK_SPEED_5G) 2792 speed |= IXGBE_LINK_SPEED_5GB_FULL; 2793 if (*link_speeds & ETH_LINK_SPEED_2_5G) 2794 speed |= IXGBE_LINK_SPEED_2_5GB_FULL; 2795 if (*link_speeds & ETH_LINK_SPEED_1G) 2796 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2797 if (*link_speeds & ETH_LINK_SPEED_100M) 2798 speed |= IXGBE_LINK_SPEED_100_FULL; 2799 if (*link_speeds & ETH_LINK_SPEED_10M) 2800 speed |= IXGBE_LINK_SPEED_10_FULL; 2801 } 2802 2803 err = ixgbe_setup_link(hw, speed, link_up); 2804 if (err) 2805 goto error; 2806 2807 skip_link_setup: 2808 2809 if (rte_intr_allow_others(intr_handle)) { 2810 /* check if lsc interrupt is enabled */ 2811 if (dev->data->dev_conf.intr_conf.lsc != 0) 2812 ixgbe_dev_lsc_interrupt_setup(dev, TRUE); 2813 else 2814 ixgbe_dev_lsc_interrupt_setup(dev, FALSE); 2815 ixgbe_dev_macsec_interrupt_setup(dev); 2816 } else { 2817 rte_intr_callback_unregister(intr_handle, 2818 ixgbe_dev_interrupt_handler, dev); 2819 if (dev->data->dev_conf.intr_conf.lsc != 0) 2820 PMD_INIT_LOG(INFO, "lsc won't enable because of" 2821 " no intr multiplex"); 2822 } 2823 2824 /* check if rxq interrupt is enabled */ 2825 if (dev->data->dev_conf.intr_conf.rxq != 0 && 2826 rte_intr_dp_is_en(intr_handle)) 2827 
ixgbe_dev_rxq_interrupt_setup(dev); 2828 2829 /* enable uio/vfio intr/eventfd mapping */ 2830 rte_intr_enable(intr_handle); 2831 2832 /* resume enabled intr since hw reset */ 2833 ixgbe_enable_intr(dev); 2834 ixgbe_l2_tunnel_conf(dev); 2835 ixgbe_filter_restore(dev); 2836 2837 if (tm_conf->root && !tm_conf->committed) 2838 PMD_DRV_LOG(WARNING, 2839 "please call hierarchy_commit() " 2840 "before starting the port"); 2841 2842 /* wait for the controller to acquire link */ 2843 err = ixgbe_wait_for_link_up(hw); 2844 if (err) 2845 goto error; 2846 2847 /* 2848 * Update link status right before return, because it may 2849 * start link configuration process in a separate thread. 2850 */ 2851 ixgbe_dev_link_update(dev, 0); 2852 2853 /* setup the macsec setting register */ 2854 if (macsec_setting->offload_en) 2855 ixgbe_dev_macsec_register_enable(dev, macsec_setting); 2856 2857 return 0; 2858 2859 error: 2860 PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err); 2861 ixgbe_dev_clear_queues(dev); 2862 return -EIO; 2863 } 2864 2865 /* 2866 * Stop device: disable rx and tx functions to allow for reconfiguring. 2867 */ 2868 static void 2869 ixgbe_dev_stop(struct rte_eth_dev *dev) 2870 { 2871 struct rte_eth_link link; 2872 struct ixgbe_adapter *adapter = dev->data->dev_private; 2873 struct ixgbe_hw *hw = 2874 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2875 struct ixgbe_vf_info *vfinfo = 2876 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2877 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 2878 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 2879 int vf; 2880 struct ixgbe_tm_conf *tm_conf = 2881 IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); 2882 2883 if (hw->adapter_stopped) 2884 return; 2885 2886 PMD_INIT_FUNC_TRACE(); 2887 2888 ixgbe_dev_cancel_link_thread(dev); 2889 2890 /* disable interrupts */ 2891 ixgbe_disable_intr(hw); 2892 2893 /* reset the NIC */ 2894 ixgbe_pf_reset_hw(hw); 2895 hw->adapter_stopped = 0; 2896 2897 /* stop adapter */ 2898 ixgbe_stop_adapter(hw); 2899 2900 for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) 2901 vfinfo[vf].clear_to_send = false; 2902 2903 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2904 /* Turn off the copper */ 2905 ixgbe_set_phy_power(hw, false); 2906 } else { 2907 /* Turn off the laser */ 2908 ixgbe_disable_tx_laser(hw); 2909 } 2910 2911 ixgbe_dev_clear_queues(dev); 2912 2913 /* Clear stored conf */ 2914 dev->data->scattered_rx = 0; 2915 dev->data->lro = 0; 2916 2917 /* Clear recorded link status */ 2918 memset(&link, 0, sizeof(link)); 2919 rte_eth_linkstatus_set(dev, &link); 2920 2921 if (!rte_intr_allow_others(intr_handle)) 2922 /* resume to the default handler */ 2923 rte_intr_callback_register(intr_handle, 2924 ixgbe_dev_interrupt_handler, 2925 (void *)dev); 2926 2927 /* Clean datapath event and queue/vec mapping */ 2928 rte_intr_efd_disable(intr_handle); 2929 if (intr_handle->intr_vec != NULL) { 2930 rte_free(intr_handle->intr_vec); 2931 intr_handle->intr_vec = NULL; 2932 } 2933 2934 /* reset hierarchy commit */ 2935 tm_conf->committed = false; 2936 2937 adapter->rss_reta_updated = 0; 2938 2939 adapter->mac_ctrl_frame_fwd = 0; 2940 2941 hw->adapter_stopped = true; 2942 } 2943 2944 /* 2945 * Set device link up: enable tx. 
2946 */ 2947 static int 2948 ixgbe_dev_set_link_up(struct rte_eth_dev *dev) 2949 { 2950 struct ixgbe_hw *hw = 2951 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2952 if (hw->mac.type == ixgbe_mac_82599EB) { 2953 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2954 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2955 /* Not supported in bypass mode */ 2956 PMD_INIT_LOG(ERR, "Set link up is not supported " 2957 "by device id 0x%x", hw->device_id); 2958 return -ENOTSUP; 2959 } 2960 #endif 2961 } 2962 2963 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2964 /* Turn on the copper */ 2965 ixgbe_set_phy_power(hw, true); 2966 } else { 2967 /* Turn on the laser */ 2968 ixgbe_enable_tx_laser(hw); 2969 ixgbe_dev_link_update(dev, 0); 2970 } 2971 2972 return 0; 2973 } 2974 2975 /* 2976 * Set device link down: disable tx. 2977 */ 2978 static int 2979 ixgbe_dev_set_link_down(struct rte_eth_dev *dev) 2980 { 2981 struct ixgbe_hw *hw = 2982 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2983 if (hw->mac.type == ixgbe_mac_82599EB) { 2984 #ifdef RTE_LIBRTE_IXGBE_BYPASS 2985 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2986 /* Not supported in bypass mode */ 2987 PMD_INIT_LOG(ERR, "Set link down is not supported " 2988 "by device id 0x%x", hw->device_id); 2989 return -ENOTSUP; 2990 } 2991 #endif 2992 } 2993 2994 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2995 /* Turn off the copper */ 2996 ixgbe_set_phy_power(hw, false); 2997 } else { 2998 /* Turn off the laser */ 2999 ixgbe_disable_tx_laser(hw); 3000 ixgbe_dev_link_update(dev, 0); 3001 } 3002 3003 return 0; 3004 } 3005 3006 /* 3007 * Reset and stop device. 3008 */ 3009 static void 3010 ixgbe_dev_close(struct rte_eth_dev *dev) 3011 { 3012 struct ixgbe_hw *hw = 3013 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3014 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3015 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 3016 int retries = 0; 3017 int ret; 3018 3019 PMD_INIT_FUNC_TRACE(); 3020 3021 ixgbe_pf_reset_hw(hw); 3022 3023 ixgbe_dev_stop(dev); 3024 3025 ixgbe_dev_free_queues(dev); 3026 3027 ixgbe_disable_pcie_master(hw); 3028 3029 /* reprogram the RAR[0] in case user changed it.
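 * RAR 0 holds the default unicast filter entry; rewriting it from
 * hw->mac.addr on close leaves the port with a consistent MAC entry for
 * whatever driver binds the device next, even if the application replaced
 * the default address at runtime.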
*/ 3030 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 3031 3032 dev->dev_ops = NULL; 3033 dev->rx_pkt_burst = NULL; 3034 dev->tx_pkt_burst = NULL; 3035 3036 /* Unlock any pending hardware semaphore */ 3037 ixgbe_swfw_lock_reset(hw); 3038 3039 /* disable uio intr before callback unregister */ 3040 rte_intr_disable(intr_handle); 3041 3042 do { 3043 ret = rte_intr_callback_unregister(intr_handle, 3044 ixgbe_dev_interrupt_handler, dev); 3045 if (ret >= 0 || ret == -ENOENT) { 3046 break; 3047 } else if (ret != -EAGAIN) { 3048 PMD_INIT_LOG(ERR, 3049 "intr callback unregister failed: %d", 3050 ret); 3051 } 3052 rte_delay_ms(100); 3053 } while (retries++ < (10 + IXGBE_LINK_UP_TIME)); 3054 3055 /* cancel the delay handler before remove dev */ 3056 rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, dev); 3057 3058 /* uninitialize PF if max_vfs not zero */ 3059 ixgbe_pf_host_uninit(dev); 3060 3061 /* remove all the fdir filters & hash */ 3062 ixgbe_fdir_filter_uninit(dev); 3063 3064 /* remove all the L2 tunnel filters & hash */ 3065 ixgbe_l2_tn_filter_uninit(dev); 3066 3067 /* Remove all ntuple filters of the device */ 3068 ixgbe_ntuple_filter_uninit(dev); 3069 3070 /* clear all the filters list */ 3071 ixgbe_filterlist_flush(); 3072 3073 /* Remove all Traffic Manager configuration */ 3074 ixgbe_tm_conf_uninit(dev); 3075 3076 #ifdef RTE_LIBRTE_SECURITY 3077 rte_free(dev->security_ctx); 3078 #endif 3079 3080 } 3081 3082 /* 3083 * Reset PF device. 3084 */ 3085 static int 3086 ixgbe_dev_reset(struct rte_eth_dev *dev) 3087 { 3088 int ret; 3089 3090 /* When a DPDK PMD PF begin to reset PF port, it should notify all 3091 * its VF to make them align with it. The detailed notification 3092 * mechanism is PMD specific. As to ixgbe PF, it is rather complex. 3093 * To avoid unexpected behavior in VF, currently reset of PF with 3094 * SR-IOV activation is not supported. It might be supported later. 3095 */ 3096 if (dev->data->sriov.active) 3097 return -ENOTSUP; 3098 3099 ret = eth_ixgbe_dev_uninit(dev); 3100 if (ret) 3101 return ret; 3102 3103 ret = eth_ixgbe_dev_init(dev, NULL); 3104 3105 return ret; 3106 } 3107 3108 static void 3109 ixgbe_read_stats_registers(struct ixgbe_hw *hw, 3110 struct ixgbe_hw_stats *hw_stats, 3111 struct ixgbe_macsec_stats *macsec_stats, 3112 uint64_t *total_missed_rx, uint64_t *total_qbrc, 3113 uint64_t *total_qprc, uint64_t *total_qprdc) 3114 { 3115 uint32_t bprc, lxon, lxoff, total; 3116 uint32_t delta_gprc = 0; 3117 unsigned i; 3118 /* Workaround for RX byte count not including CRC bytes when CRC 3119 * strip is enabled. CRC bytes are removed from counters when crc_strip 3120 * is disabled. 
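 * Put differently: with CRC stripping disabled the hardware byte counters
 * still include the 4-byte FCS of every frame, so the code below subtracts
 * RTE_ETHER_CRC_LEN per received packet to keep the reported byte counts
 * consistent with the stripped case.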
3121 */ 3122 int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & 3123 IXGBE_HLREG0_RXCRCSTRP); 3124 3125 hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 3126 hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 3127 hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 3128 hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 3129 3130 for (i = 0; i < 8; i++) { 3131 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 3132 3133 /* global total per queue */ 3134 hw_stats->mpc[i] += mp; 3135 /* Running comprehensive total for stats display */ 3136 *total_missed_rx += hw_stats->mpc[i]; 3137 if (hw->mac.type == ixgbe_mac_82598EB) { 3138 hw_stats->rnbc[i] += 3139 IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 3140 hw_stats->pxonrxc[i] += 3141 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 3142 hw_stats->pxoffrxc[i] += 3143 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 3144 } else { 3145 hw_stats->pxonrxc[i] += 3146 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 3147 hw_stats->pxoffrxc[i] += 3148 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 3149 hw_stats->pxon2offc[i] += 3150 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 3151 } 3152 hw_stats->pxontxc[i] += 3153 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 3154 hw_stats->pxofftxc[i] += 3155 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 3156 } 3157 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3158 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 3159 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 3160 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 3161 3162 delta_gprc += delta_qprc; 3163 3164 hw_stats->qprc[i] += delta_qprc; 3165 hw_stats->qptc[i] += delta_qptc; 3166 3167 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 3168 hw_stats->qbrc[i] += 3169 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); 3170 if (crc_strip == 0) 3171 hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN; 3172 3173 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 3174 hw_stats->qbtc[i] += 3175 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32); 3176 3177 hw_stats->qprdc[i] += delta_qprdc; 3178 *total_qprdc += hw_stats->qprdc[i]; 3179 3180 *total_qprc += hw_stats->qprc[i]; 3181 *total_qbrc += hw_stats->qbrc[i]; 3182 } 3183 hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); 3184 hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); 3185 hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 3186 3187 /* 3188 * An errata states that gprc actually counts good + missed packets: 3189 * Workaround to set gprc to summated queue packet receives 3190 */ 3191 hw_stats->gprc = *total_qprc; 3192 3193 if (hw->mac.type != ixgbe_mac_82598EB) { 3194 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 3195 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 3196 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 3197 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); 3198 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 3199 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 3200 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 3201 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 3202 } else { 3203 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 3204 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 3205 /* 82598 only has a counter in the high register */ 3206 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 3207 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 3208 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 3209 } 3210 uint64_t old_tpr = hw_stats->tpr; 3211 3212 hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 
3213 hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 3214 3215 if (crc_strip == 0) 3216 hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN; 3217 3218 uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); 3219 hw_stats->gptc += delta_gptc; 3220 hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN; 3221 hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; 3222 3223 /* 3224 * Workaround: mprc hardware is incorrectly counting 3225 * broadcasts, so for now we subtract those. 3226 */ 3227 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 3228 hw_stats->bprc += bprc; 3229 hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 3230 if (hw->mac.type == ixgbe_mac_82598EB) 3231 hw_stats->mprc -= bprc; 3232 3233 hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 3234 hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 3235 hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 3236 hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 3237 hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 3238 hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 3239 3240 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 3241 hw_stats->lxontxc += lxon; 3242 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 3243 hw_stats->lxofftxc += lxoff; 3244 total = lxon + lxoff; 3245 3246 hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 3247 hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 3248 hw_stats->gptc -= total; 3249 hw_stats->mptc -= total; 3250 hw_stats->ptc64 -= total; 3251 hw_stats->gotc -= total * RTE_ETHER_MIN_LEN; 3252 3253 hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 3254 hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 3255 hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 3256 hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 3257 hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 3258 hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 3259 hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 3260 hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 3261 hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 3262 hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 3263 hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 3264 hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 3265 hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 3266 hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); 3267 hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 3268 hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 3269 /* Only read FCOE on 82599 */ 3270 if (hw->mac.type != ixgbe_mac_82598EB) { 3271 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 3272 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 3273 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 3274 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 3275 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 3276 } 3277 3278 /* Flow Director Stats registers */ 3279 if (hw->mac.type != ixgbe_mac_82598EB) { 3280 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 3281 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 3282 hw_stats->fdirustat_add += IXGBE_READ_REG(hw, 3283 IXGBE_FDIRUSTAT) & 0xFFFF; 3284 hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw, 3285 IXGBE_FDIRUSTAT) >> 16) & 0xFFFF; 3286 hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw, 3287 IXGBE_FDIRFSTAT) & 0xFFFF; 3288 hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw, 3289 IXGBE_FDIRFSTAT) >> 16) & 0xFFFF; 3290 } 3291 /* MACsec Stats registers */ 3292 macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT); 3293 macsec_stats->out_pkts_encrypted += 
3294 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE); 3295 macsec_stats->out_pkts_protected += 3296 IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP); 3297 macsec_stats->out_octets_encrypted += 3298 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE); 3299 macsec_stats->out_octets_protected += 3300 IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP); 3301 macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT); 3302 macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD); 3303 macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI); 3304 macsec_stats->in_pkts_unknownsci += 3305 IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI); 3306 macsec_stats->in_octets_decrypted += 3307 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD); 3308 macsec_stats->in_octets_validated += 3309 IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV); 3310 macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH); 3311 macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY); 3312 macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE); 3313 for (i = 0; i < 2; i++) { 3314 macsec_stats->in_pkts_ok += 3315 IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i)); 3316 macsec_stats->in_pkts_invalid += 3317 IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i)); 3318 macsec_stats->in_pkts_notvalid += 3319 IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i)); 3320 } 3321 macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA); 3322 macsec_stats->in_pkts_notusingsa += 3323 IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA); 3324 } 3325 3326 /* 3327 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c 3328 */ 3329 static int 3330 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3331 { 3332 struct ixgbe_hw *hw = 3333 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3334 struct ixgbe_hw_stats *hw_stats = 3335 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3336 struct ixgbe_macsec_stats *macsec_stats = 3337 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3338 dev->data->dev_private); 3339 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3340 unsigned i; 3341 3342 total_missed_rx = 0; 3343 total_qbrc = 0; 3344 total_qprc = 0; 3345 total_qprdc = 0; 3346 3347 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3348 &total_qbrc, &total_qprc, &total_qprdc); 3349 3350 if (stats == NULL) 3351 return -EINVAL; 3352 3353 /* Fill out the rte_eth_stats statistics structure */ 3354 stats->ipackets = total_qprc; 3355 stats->ibytes = total_qbrc; 3356 stats->opackets = hw_stats->gptc; 3357 stats->obytes = hw_stats->gotc; 3358 3359 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 3360 stats->q_ipackets[i] = hw_stats->qprc[i]; 3361 stats->q_opackets[i] = hw_stats->qptc[i]; 3362 stats->q_ibytes[i] = hw_stats->qbrc[i]; 3363 stats->q_obytes[i] = hw_stats->qbtc[i]; 3364 stats->q_errors[i] = hw_stats->qprdc[i]; 3365 } 3366 3367 /* Rx Errors */ 3368 stats->imissed = total_missed_rx; 3369 stats->ierrors = hw_stats->crcerrs + 3370 hw_stats->mspdc + 3371 hw_stats->rlec + 3372 hw_stats->ruc + 3373 hw_stats->roc + 3374 hw_stats->illerrc + 3375 hw_stats->errbc + 3376 hw_stats->rfc + 3377 hw_stats->fccrc + 3378 hw_stats->fclast; 3379 3380 /* Tx Errors */ 3381 stats->oerrors = 0; 3382 return 0; 3383 } 3384 3385 static int 3386 ixgbe_dev_stats_reset(struct rte_eth_dev *dev) 3387 { 3388 struct ixgbe_hw_stats *stats = 3389 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3390 3391 /* HW registers are cleared on read */ 3392 ixgbe_dev_stats_get(dev, NULL); 3393 3394 /* Reset software totals */ 3395 memset(stats, 0, sizeof(*stats)); 3396 3397 return 0; 
3398 } 3399 3400 /* This function calculates the number of xstats based on the current config */ 3401 static unsigned 3402 ixgbe_xstats_calc_num(void) { 3403 return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS + 3404 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + 3405 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); 3406 } 3407 3408 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3409 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size) 3410 { 3411 const unsigned cnt_stats = ixgbe_xstats_calc_num(); 3412 unsigned stat, i, count; 3413 3414 if (xstats_names != NULL) { 3415 count = 0; 3416 3417 /* Note: limit >= cnt_stats checked upstream 3418 * in rte_eth_xstats_names() 3419 */ 3420 3421 /* Extended stats from ixgbe_hw_stats */ 3422 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3423 strlcpy(xstats_names[count].name, 3424 rte_ixgbe_stats_strings[i].name, 3425 sizeof(xstats_names[count].name)); 3426 count++; 3427 } 3428 3429 /* MACsec Stats */ 3430 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3431 strlcpy(xstats_names[count].name, 3432 rte_ixgbe_macsec_strings[i].name, 3433 sizeof(xstats_names[count].name)); 3434 count++; 3435 } 3436 3437 /* RX Priority Stats */ 3438 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3439 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3440 snprintf(xstats_names[count].name, 3441 sizeof(xstats_names[count].name), 3442 "rx_priority%u_%s", i, 3443 rte_ixgbe_rxq_strings[stat].name); 3444 count++; 3445 } 3446 } 3447 3448 /* TX Priority Stats */ 3449 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3450 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3451 snprintf(xstats_names[count].name, 3452 sizeof(xstats_names[count].name), 3453 "tx_priority%u_%s", i, 3454 rte_ixgbe_txq_strings[stat].name); 3455 count++; 3456 } 3457 } 3458 } 3459 return cnt_stats; 3460 } 3461 3462 static int ixgbe_dev_xstats_get_names_by_id( 3463 struct rte_eth_dev *dev, 3464 struct rte_eth_xstat_name *xstats_names, 3465 const uint64_t *ids, 3466 unsigned int limit) 3467 { 3468 if (!ids) { 3469 const unsigned int cnt_stats = ixgbe_xstats_calc_num(); 3470 unsigned int stat, i, count; 3471 3472 if (xstats_names != NULL) { 3473 count = 0; 3474 3475 /* Note: limit >= cnt_stats checked upstream 3476 * in rte_eth_xstats_names() 3477 */ 3478 3479 /* Extended stats from ixgbe_hw_stats */ 3480 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3481 strlcpy(xstats_names[count].name, 3482 rte_ixgbe_stats_strings[i].name, 3483 sizeof(xstats_names[count].name)); 3484 count++; 3485 } 3486 3487 /* MACsec Stats */ 3488 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3489 strlcpy(xstats_names[count].name, 3490 rte_ixgbe_macsec_strings[i].name, 3491 sizeof(xstats_names[count].name)); 3492 count++; 3493 } 3494 3495 /* RX Priority Stats */ 3496 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3497 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3498 snprintf(xstats_names[count].name, 3499 sizeof(xstats_names[count].name), 3500 "rx_priority%u_%s", i, 3501 rte_ixgbe_rxq_strings[stat].name); 3502 count++; 3503 } 3504 } 3505 3506 /* TX Priority Stats */ 3507 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3508 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3509 snprintf(xstats_names[count].name, 3510 sizeof(xstats_names[count].name), 3511 "tx_priority%u_%s", i, 3512 rte_ixgbe_txq_strings[stat].name); 3513 count++; 3514 } 3515 } 3516 } 3517 return cnt_stats; 3518 } 3519 3520 uint16_t i; 3521 uint16_t size = ixgbe_xstats_calc_num(); 3522 
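	/*
	 * ids != NULL: build the full name table in a temporary stack copy
	 * below, then resolve every requested id against it.  Roughly, a
	 * caller would use the ethdev wrapper like this (illustrative
	 * sketch only, error handling omitted):
	 *
	 *	uint64_t id = 0;
	 *	struct rte_eth_xstat_name name;
	 *	rte_eth_xstats_get_names_by_id(port_id, &name, 1, &id);
	 */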
struct rte_eth_xstat_name xstats_names_copy[size]; 3523 3524 ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL, 3525 size); 3526 3527 for (i = 0; i < limit; i++) { 3528 if (ids[i] >= size) { 3529 PMD_INIT_LOG(ERR, "id value isn't valid"); 3530 return -1; 3531 } 3532 strcpy(xstats_names[i].name, 3533 xstats_names_copy[ids[i]].name); 3534 } 3535 return limit; 3536 } 3537 3538 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 3539 struct rte_eth_xstat_name *xstats_names, unsigned limit) 3540 { 3541 unsigned i; 3542 3543 if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) 3544 return -ENOMEM; 3545 3546 if (xstats_names != NULL) 3547 for (i = 0; i < IXGBEVF_NB_XSTATS; i++) 3548 strlcpy(xstats_names[i].name, 3549 rte_ixgbevf_stats_strings[i].name, 3550 sizeof(xstats_names[i].name)); 3551 return IXGBEVF_NB_XSTATS; 3552 } 3553 3554 static int 3555 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3556 unsigned n) 3557 { 3558 struct ixgbe_hw *hw = 3559 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3560 struct ixgbe_hw_stats *hw_stats = 3561 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3562 struct ixgbe_macsec_stats *macsec_stats = 3563 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3564 dev->data->dev_private); 3565 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3566 unsigned i, stat, count = 0; 3567 3568 count = ixgbe_xstats_calc_num(); 3569 3570 if (n < count) 3571 return count; 3572 3573 total_missed_rx = 0; 3574 total_qbrc = 0; 3575 total_qprc = 0; 3576 total_qprdc = 0; 3577 3578 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, 3579 &total_qbrc, &total_qprc, &total_qprdc); 3580 3581 /* If this is a reset xstats is NULL, and we have cleared the 3582 * registers by reading them. 
3583 */ 3584 if (!xstats) 3585 return 0; 3586 3587 /* Extended stats from ixgbe_hw_stats */ 3588 count = 0; 3589 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3590 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3591 rte_ixgbe_stats_strings[i].offset); 3592 xstats[count].id = count; 3593 count++; 3594 } 3595 3596 /* MACsec Stats */ 3597 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3598 xstats[count].value = *(uint64_t *)(((char *)macsec_stats) + 3599 rte_ixgbe_macsec_strings[i].offset); 3600 xstats[count].id = count; 3601 count++; 3602 } 3603 3604 /* RX Priority Stats */ 3605 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3606 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3607 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3608 rte_ixgbe_rxq_strings[stat].offset + 3609 (sizeof(uint64_t) * i)); 3610 xstats[count].id = count; 3611 count++; 3612 } 3613 } 3614 3615 /* TX Priority Stats */ 3616 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3617 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3618 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 3619 rte_ixgbe_txq_strings[stat].offset + 3620 (sizeof(uint64_t) * i)); 3621 xstats[count].id = count; 3622 count++; 3623 } 3624 } 3625 return count; 3626 } 3627 3628 static int 3629 ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, 3630 uint64_t *values, unsigned int n) 3631 { 3632 if (!ids) { 3633 struct ixgbe_hw *hw = 3634 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3635 struct ixgbe_hw_stats *hw_stats = 3636 IXGBE_DEV_PRIVATE_TO_STATS( 3637 dev->data->dev_private); 3638 struct ixgbe_macsec_stats *macsec_stats = 3639 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3640 dev->data->dev_private); 3641 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 3642 unsigned int i, stat, count = 0; 3643 3644 count = ixgbe_xstats_calc_num(); 3645 3646 if (!ids && n < count) 3647 return count; 3648 3649 total_missed_rx = 0; 3650 total_qbrc = 0; 3651 total_qprc = 0; 3652 total_qprdc = 0; 3653 3654 ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, 3655 &total_missed_rx, &total_qbrc, &total_qprc, 3656 &total_qprdc); 3657 3658 /* If this is a reset xstats is NULL, and we have cleared the 3659 * registers by reading them. 
3660 */ 3661 if (!ids && !values) 3662 return 0; 3663 3664 /* Extended stats from ixgbe_hw_stats */ 3665 count = 0; 3666 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 3667 values[count] = *(uint64_t *)(((char *)hw_stats) + 3668 rte_ixgbe_stats_strings[i].offset); 3669 count++; 3670 } 3671 3672 /* MACsec Stats */ 3673 for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { 3674 values[count] = *(uint64_t *)(((char *)macsec_stats) + 3675 rte_ixgbe_macsec_strings[i].offset); 3676 count++; 3677 } 3678 3679 /* RX Priority Stats */ 3680 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 3681 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 3682 values[count] = 3683 *(uint64_t *)(((char *)hw_stats) + 3684 rte_ixgbe_rxq_strings[stat].offset + 3685 (sizeof(uint64_t) * i)); 3686 count++; 3687 } 3688 } 3689 3690 /* TX Priority Stats */ 3691 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 3692 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 3693 values[count] = 3694 *(uint64_t *)(((char *)hw_stats) + 3695 rte_ixgbe_txq_strings[stat].offset + 3696 (sizeof(uint64_t) * i)); 3697 count++; 3698 } 3699 } 3700 return count; 3701 } 3702 3703 uint16_t i; 3704 uint16_t size = ixgbe_xstats_calc_num(); 3705 uint64_t values_copy[size]; 3706 3707 ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size); 3708 3709 for (i = 0; i < n; i++) { 3710 if (ids[i] >= size) { 3711 PMD_INIT_LOG(ERR, "id value isn't valid"); 3712 return -1; 3713 } 3714 values[i] = values_copy[ids[i]]; 3715 } 3716 return n; 3717 } 3718 3719 static int 3720 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) 3721 { 3722 struct ixgbe_hw_stats *stats = 3723 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3724 struct ixgbe_macsec_stats *macsec_stats = 3725 IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( 3726 dev->data->dev_private); 3727 3728 unsigned count = ixgbe_xstats_calc_num(); 3729 3730 /* HW registers are cleared on read */ 3731 ixgbe_dev_xstats_get(dev, NULL, count); 3732 3733 /* Reset software totals */ 3734 memset(stats, 0, sizeof(*stats)); 3735 memset(macsec_stats, 0, sizeof(*macsec_stats)); 3736 3737 return 0; 3738 } 3739 3740 static void 3741 ixgbevf_update_stats(struct rte_eth_dev *dev) 3742 { 3743 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3744 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3745 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3746 3747 /* Good Rx packet, include VF loopback */ 3748 UPDATE_VF_STAT(IXGBE_VFGPRC, 3749 hw_stats->last_vfgprc, hw_stats->vfgprc); 3750 3751 /* Good Rx octets, include VF loopback */ 3752 UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 3753 hw_stats->last_vfgorc, hw_stats->vfgorc); 3754 3755 /* Good Tx packet, include VF loopback */ 3756 UPDATE_VF_STAT(IXGBE_VFGPTC, 3757 hw_stats->last_vfgptc, hw_stats->vfgptc); 3758 3759 /* Good Tx octets, include VF loopback */ 3760 UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 3761 hw_stats->last_vfgotc, hw_stats->vfgotc); 3762 3763 /* Rx Multicst Packet */ 3764 UPDATE_VF_STAT(IXGBE_VFMPRC, 3765 hw_stats->last_vfmprc, hw_stats->vfmprc); 3766 } 3767 3768 static int 3769 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 3770 unsigned n) 3771 { 3772 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3773 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3774 unsigned i; 3775 3776 if (n < IXGBEVF_NB_XSTATS) 3777 return IXGBEVF_NB_XSTATS; 3778 3779 ixgbevf_update_stats(dev); 3780 3781 if (!xstats) 3782 return 0; 3783 3784 /* Extended stats */ 3785 for (i = 0; i < 
IXGBEVF_NB_XSTATS; i++) { 3786 xstats[i].id = i; 3787 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 3788 rte_ixgbevf_stats_strings[i].offset); 3789 } 3790 3791 return IXGBEVF_NB_XSTATS; 3792 } 3793 3794 static int 3795 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 3796 { 3797 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3798 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3799 3800 ixgbevf_update_stats(dev); 3801 3802 if (stats == NULL) 3803 return -EINVAL; 3804 3805 stats->ipackets = hw_stats->vfgprc; 3806 stats->ibytes = hw_stats->vfgorc; 3807 stats->opackets = hw_stats->vfgptc; 3808 stats->obytes = hw_stats->vfgotc; 3809 return 0; 3810 } 3811 3812 static int 3813 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) 3814 { 3815 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3816 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3817 3818 /* Sync HW register to the last stats */ 3819 ixgbevf_dev_stats_get(dev, NULL); 3820 3821 /* reset HW current stats*/ 3822 hw_stats->vfgprc = 0; 3823 hw_stats->vfgorc = 0; 3824 hw_stats->vfgptc = 0; 3825 hw_stats->vfgotc = 0; 3826 3827 return 0; 3828 } 3829 3830 static int 3831 ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) 3832 { 3833 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3834 u16 eeprom_verh, eeprom_verl; 3835 u32 etrack_id; 3836 int ret; 3837 3838 ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); 3839 ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); 3840 3841 etrack_id = (eeprom_verh << 16) | eeprom_verl; 3842 ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); 3843 3844 ret += 1; /* add the size of '\0' */ 3845 if (fw_size < (u32)ret) 3846 return ret; 3847 else 3848 return 0; 3849 } 3850 3851 static int 3852 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 3853 { 3854 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3855 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3856 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 3857 3858 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3859 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3860 if (RTE_ETH_DEV_SRIOV(dev).active == 0) { 3861 /* 3862 * When DCB/VT is off, maximum number of queues changes, 3863 * except for 82598EB, which remains constant. 
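		 * (with ETH_MQ_TX_NONE the reported max_tx_queues is capped to
		 * IXGBE_NONE_MODE_TX_NB_QUEUES in the check just below).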
3864 */ 3865 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && 3866 hw->mac.type != ixgbe_mac_82598EB) 3867 dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES; 3868 } 3869 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ 3870 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ 3871 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3872 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3873 dev_info->max_vfs = pci_dev->max_vfs; 3874 if (hw->mac.type == ixgbe_mac_82598EB) 3875 dev_info->max_vmdq_pools = ETH_16_POOLS; 3876 else 3877 dev_info->max_vmdq_pools = ETH_64_POOLS; 3878 dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; 3879 dev_info->min_mtu = RTE_ETHER_MIN_MTU; 3880 dev_info->vmdq_queue_num = dev_info->max_rx_queues; 3881 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 3882 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 3883 dev_info->rx_queue_offload_capa); 3884 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 3885 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 3886 3887 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3888 .rx_thresh = { 3889 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3890 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3891 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3892 }, 3893 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3894 .rx_drop_en = 0, 3895 .offloads = 0, 3896 }; 3897 3898 dev_info->default_txconf = (struct rte_eth_txconf) { 3899 .tx_thresh = { 3900 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 3901 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 3902 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 3903 }, 3904 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 3905 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 3906 .offloads = 0, 3907 }; 3908 3909 dev_info->rx_desc_lim = rx_desc_lim; 3910 dev_info->tx_desc_lim = tx_desc_lim; 3911 3912 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3913 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3914 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3915 3916 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; 3917 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 3918 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 3919 dev_info->speed_capa = ETH_LINK_SPEED_10M | 3920 ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G; 3921 3922 if (hw->mac.type == ixgbe_mac_X540 || 3923 hw->mac.type == ixgbe_mac_X540_vf || 3924 hw->mac.type == ixgbe_mac_X550 || 3925 hw->mac.type == ixgbe_mac_X550_vf) { 3926 dev_info->speed_capa |= ETH_LINK_SPEED_100M; 3927 } 3928 if (hw->mac.type == ixgbe_mac_X550) { 3929 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G; 3930 dev_info->speed_capa |= ETH_LINK_SPEED_5G; 3931 } 3932 3933 /* Driver-preferred Rx/Tx parameters */ 3934 dev_info->default_rxportconf.burst_size = 32; 3935 dev_info->default_txportconf.burst_size = 32; 3936 dev_info->default_rxportconf.nb_queues = 1; 3937 dev_info->default_txportconf.nb_queues = 1; 3938 dev_info->default_rxportconf.ring_size = 256; 3939 dev_info->default_txportconf.ring_size = 256; 3940 3941 return 0; 3942 } 3943 3944 static const uint32_t * 3945 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) 3946 { 3947 static const uint32_t ptypes[] = { 3948 /* For non-vec functions, 3949 * refers to ixgbe_rxd_pkt_info_to_pkt_type(); 3950 * for vec functions, 3951 * refers to _recv_raw_pkts_vec(). 
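	 * The table below is terminated by RTE_PTYPE_UNKNOWN, as the ethdev
	 * packet type query convention expects.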
3952 */ 3953 RTE_PTYPE_L2_ETHER, 3954 RTE_PTYPE_L3_IPV4, 3955 RTE_PTYPE_L3_IPV4_EXT, 3956 RTE_PTYPE_L3_IPV6, 3957 RTE_PTYPE_L3_IPV6_EXT, 3958 RTE_PTYPE_L4_SCTP, 3959 RTE_PTYPE_L4_TCP, 3960 RTE_PTYPE_L4_UDP, 3961 RTE_PTYPE_TUNNEL_IP, 3962 RTE_PTYPE_INNER_L3_IPV6, 3963 RTE_PTYPE_INNER_L3_IPV6_EXT, 3964 RTE_PTYPE_INNER_L4_TCP, 3965 RTE_PTYPE_INNER_L4_UDP, 3966 RTE_PTYPE_UNKNOWN 3967 }; 3968 3969 if (dev->rx_pkt_burst == ixgbe_recv_pkts || 3970 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || 3971 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || 3972 dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) 3973 return ptypes; 3974 3975 #if defined(RTE_ARCH_X86) || defined(RTE_MACHINE_CPUFLAG_NEON) 3976 if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec || 3977 dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec) 3978 return ptypes; 3979 #endif 3980 return NULL; 3981 } 3982 3983 static int 3984 ixgbevf_dev_info_get(struct rte_eth_dev *dev, 3985 struct rte_eth_dev_info *dev_info) 3986 { 3987 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 3988 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3989 3990 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3991 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3992 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ 3993 dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */ 3994 dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; 3995 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3996 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3997 dev_info->max_vfs = pci_dev->max_vfs; 3998 if (hw->mac.type == ixgbe_mac_82598EB) 3999 dev_info->max_vmdq_pools = ETH_16_POOLS; 4000 else 4001 dev_info->max_vmdq_pools = ETH_64_POOLS; 4002 dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); 4003 dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | 4004 dev_info->rx_queue_offload_capa); 4005 dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 4006 dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); 4007 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 4008 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 4009 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 4010 4011 dev_info->default_rxconf = (struct rte_eth_rxconf) { 4012 .rx_thresh = { 4013 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 4014 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 4015 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 4016 }, 4017 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 4018 .rx_drop_en = 0, 4019 .offloads = 0, 4020 }; 4021 4022 dev_info->default_txconf = (struct rte_eth_txconf) { 4023 .tx_thresh = { 4024 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 4025 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 4026 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 4027 }, 4028 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 4029 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 4030 .offloads = 0, 4031 }; 4032 4033 dev_info->rx_desc_lim = rx_desc_lim; 4034 dev_info->tx_desc_lim = tx_desc_lim; 4035 4036 return 0; 4037 } 4038 4039 static int 4040 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, 4041 int *link_up, int wait_to_complete) 4042 { 4043 struct ixgbe_adapter *adapter = container_of(hw, 4044 struct ixgbe_adapter, hw); 4045 struct ixgbe_mbx_info *mbx = &hw->mbx; 4046 struct ixgbe_mac_info *mac = &hw->mac; 4047 uint32_t links_reg, in_msg; 4048 int ret_val = 0; 4049 4050 /* If we were hit with a reset drop the link */ 4051 if (!mbx->ops.check_for_rst(hw, 0) 
|| !mbx->timeout) 4052 mac->get_link_status = true; 4053 4054 if (!mac->get_link_status) 4055 goto out; 4056 4057 /* if link status is down no point in checking to see if pf is up */ 4058 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 4059 if (!(links_reg & IXGBE_LINKS_UP)) 4060 goto out; 4061 4062 /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs 4063 * before the link status is correct 4064 */ 4065 if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) { 4066 int i; 4067 4068 for (i = 0; i < 5; i++) { 4069 rte_delay_us(100); 4070 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); 4071 4072 if (!(links_reg & IXGBE_LINKS_UP)) 4073 goto out; 4074 } 4075 } 4076 4077 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 4078 case IXGBE_LINKS_SPEED_10G_82599: 4079 *speed = IXGBE_LINK_SPEED_10GB_FULL; 4080 if (hw->mac.type >= ixgbe_mac_X550) { 4081 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4082 *speed = IXGBE_LINK_SPEED_2_5GB_FULL; 4083 } 4084 break; 4085 case IXGBE_LINKS_SPEED_1G_82599: 4086 *speed = IXGBE_LINK_SPEED_1GB_FULL; 4087 break; 4088 case IXGBE_LINKS_SPEED_100_82599: 4089 *speed = IXGBE_LINK_SPEED_100_FULL; 4090 if (hw->mac.type == ixgbe_mac_X550) { 4091 if (links_reg & IXGBE_LINKS_SPEED_NON_STD) 4092 *speed = IXGBE_LINK_SPEED_5GB_FULL; 4093 } 4094 break; 4095 case IXGBE_LINKS_SPEED_10_X550EM_A: 4096 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4097 /* Since Reserved in older MAC's */ 4098 if (hw->mac.type >= ixgbe_mac_X550) 4099 *speed = IXGBE_LINK_SPEED_10_FULL; 4100 break; 4101 default: 4102 *speed = IXGBE_LINK_SPEED_UNKNOWN; 4103 } 4104 4105 if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) { 4106 if (*speed == IXGBE_LINK_SPEED_UNKNOWN) 4107 mac->get_link_status = true; 4108 else 4109 mac->get_link_status = false; 4110 4111 goto out; 4112 } 4113 4114 /* if the read failed it could just be a mailbox collision, best wait 4115 * until we are called again and don't report an error 4116 */ 4117 if (mbx->ops.read(hw, &in_msg, 1, 0)) 4118 goto out; 4119 4120 if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { 4121 /* msg is not CTS and is NACK we must have lost CTS status */ 4122 if (in_msg & IXGBE_VT_MSGTYPE_NACK) 4123 mac->get_link_status = false; 4124 goto out; 4125 } 4126 4127 /* the pf is talking, if we timed out in the past we reinit */ 4128 if (!mbx->timeout) { 4129 ret_val = -1; 4130 goto out; 4131 } 4132 4133 /* if we passed all the tests above then the link is up and we no 4134 * longer need to check for link 4135 */ 4136 mac->get_link_status = false; 4137 4138 out: 4139 *link_up = !mac->get_link_status; 4140 return ret_val; 4141 } 4142 4143 static void 4144 ixgbe_dev_cancel_link_thread(struct rte_eth_dev *dev) 4145 { 4146 struct ixgbe_adapter *ad = dev->data->dev_private; 4147 void *retval; 4148 4149 if (rte_atomic32_read(&ad->link_thread_running)) { 4150 pthread_cancel(ad->link_thread_tid); 4151 pthread_join(ad->link_thread_tid, &retval); 4152 rte_atomic32_clear(&ad->link_thread_running); 4153 } 4154 } 4155 4156 static void * 4157 ixgbe_dev_setup_link_thread_handler(void *param) 4158 { 4159 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4160 struct ixgbe_adapter *ad = dev->data->dev_private; 4161 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4162 struct ixgbe_interrupt *intr = 4163 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4164 u32 speed; 4165 bool autoneg = false; 4166 4167 speed = hw->phy.autoneg_advertised; 4168 if (!speed) 4169 ixgbe_get_link_capabilities(hw, &speed, &autoneg); 4170 4171 ixgbe_setup_link(hw, speed, true); 4172 
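	/*
	 * Link setup has been requested above; clear the pending-config
	 * flag and mark this helper thread as finished so that subsequent
	 * link_update calls report the real link status.
	 */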
4173 intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; 4174 rte_atomic32_clear(&ad->link_thread_running); 4175 return NULL; 4176 } 4177 4178 /* 4179 * In freebsd environment, nic_uio drivers do not support interrupts, 4180 * rte_intr_callback_register() will fail to register interrupts. 4181 * We can not make link status to change from down to up by interrupt 4182 * callback. So we need to wait for the controller to acquire link 4183 * when ports start. 4184 * It returns 0 on link up. 4185 */ 4186 static int 4187 ixgbe_wait_for_link_up(struct ixgbe_hw *hw) 4188 { 4189 #ifdef RTE_EXEC_ENV_FREEBSD 4190 int err, i, link_up = 0; 4191 uint32_t speed = 0; 4192 const int nb_iter = 25; 4193 4194 for (i = 0; i < nb_iter; i++) { 4195 err = ixgbe_check_link(hw, &speed, &link_up, 0); 4196 if (err) 4197 return err; 4198 if (link_up) 4199 return 0; 4200 msec_delay(200); 4201 } 4202 4203 return 0; 4204 #else 4205 RTE_SET_USED(hw); 4206 return 0; 4207 #endif 4208 } 4209 4210 /* return 0 means link status changed, -1 means not changed */ 4211 int 4212 ixgbe_dev_link_update_share(struct rte_eth_dev *dev, 4213 int wait_to_complete, int vf) 4214 { 4215 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4216 struct ixgbe_adapter *ad = dev->data->dev_private; 4217 struct rte_eth_link link; 4218 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 4219 struct ixgbe_interrupt *intr = 4220 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4221 int link_up; 4222 int diag; 4223 int wait = 1; 4224 u32 esdp_reg; 4225 4226 memset(&link, 0, sizeof(link)); 4227 link.link_status = ETH_LINK_DOWN; 4228 link.link_speed = ETH_SPEED_NUM_NONE; 4229 link.link_duplex = ETH_LINK_HALF_DUPLEX; 4230 link.link_autoneg = !(dev->data->dev_conf.link_speeds & 4231 ETH_LINK_SPEED_FIXED); 4232 4233 hw->mac.get_link_status = true; 4234 4235 if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) 4236 return rte_eth_linkstatus_set(dev, &link); 4237 4238 /* check if it needs to wait to complete, if lsc interrupt is enabled */ 4239 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) 4240 wait = 0; 4241 4242 if (vf) 4243 diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); 4244 else 4245 diag = ixgbe_check_link(hw, &link_speed, &link_up, wait); 4246 4247 if (diag != 0) { 4248 link.link_speed = ETH_SPEED_NUM_100M; 4249 link.link_duplex = ETH_LINK_FULL_DUPLEX; 4250 return rte_eth_linkstatus_set(dev, &link); 4251 } 4252 4253 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { 4254 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 4255 if ((esdp_reg & IXGBE_ESDP_SDP3)) 4256 link_up = 0; 4257 } 4258 4259 if (link_up == 0) { 4260 if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { 4261 intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; 4262 if (rte_atomic32_test_and_set(&ad->link_thread_running)) { 4263 if (rte_ctrl_thread_create(&ad->link_thread_tid, 4264 "ixgbe-link-handler", 4265 NULL, 4266 ixgbe_dev_setup_link_thread_handler, 4267 dev) < 0) { 4268 PMD_DRV_LOG(ERR, 4269 "Create link thread failed!"); 4270 rte_atomic32_clear(&ad->link_thread_running); 4271 } 4272 } else { 4273 PMD_DRV_LOG(ERR, 4274 "Other link thread is running now!"); 4275 } 4276 } 4277 return rte_eth_linkstatus_set(dev, &link); 4278 } 4279 4280 link.link_status = ETH_LINK_UP; 4281 link.link_duplex = ETH_LINK_FULL_DUPLEX; 4282 4283 switch (link_speed) { 4284 default: 4285 case IXGBE_LINK_SPEED_UNKNOWN: 4286 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || 4287 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) 4288 link.link_speed = ETH_SPEED_NUM_10M; 
4289 else 4290 link.link_speed = ETH_SPEED_NUM_100M; 4291 break; 4292 4293 case IXGBE_LINK_SPEED_100_FULL: 4294 link.link_speed = ETH_SPEED_NUM_100M; 4295 break; 4296 4297 case IXGBE_LINK_SPEED_1GB_FULL: 4298 link.link_speed = ETH_SPEED_NUM_1G; 4299 break; 4300 4301 case IXGBE_LINK_SPEED_2_5GB_FULL: 4302 link.link_speed = ETH_SPEED_NUM_2_5G; 4303 break; 4304 4305 case IXGBE_LINK_SPEED_5GB_FULL: 4306 link.link_speed = ETH_SPEED_NUM_5G; 4307 break; 4308 4309 case IXGBE_LINK_SPEED_10GB_FULL: 4310 link.link_speed = ETH_SPEED_NUM_10G; 4311 break; 4312 } 4313 4314 return rte_eth_linkstatus_set(dev, &link); 4315 } 4316 4317 static int 4318 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4319 { 4320 return ixgbe_dev_link_update_share(dev, wait_to_complete, 0); 4321 } 4322 4323 static int 4324 ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 4325 { 4326 return ixgbe_dev_link_update_share(dev, wait_to_complete, 1); 4327 } 4328 4329 static int 4330 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 4331 { 4332 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4333 uint32_t fctrl; 4334 4335 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4336 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 4337 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4338 4339 return 0; 4340 } 4341 4342 static int 4343 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 4344 { 4345 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4346 uint32_t fctrl; 4347 4348 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4349 fctrl &= (~IXGBE_FCTRL_UPE); 4350 if (dev->data->all_multicast == 1) 4351 fctrl |= IXGBE_FCTRL_MPE; 4352 else 4353 fctrl &= (~IXGBE_FCTRL_MPE); 4354 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4355 4356 return 0; 4357 } 4358 4359 static int 4360 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) 4361 { 4362 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4363 uint32_t fctrl; 4364 4365 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4366 fctrl |= IXGBE_FCTRL_MPE; 4367 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4368 4369 return 0; 4370 } 4371 4372 static int 4373 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) 4374 { 4375 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4376 uint32_t fctrl; 4377 4378 if (dev->data->promiscuous == 1) 4379 return 0; /* must remain in all_multicast mode */ 4380 4381 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 4382 fctrl &= (~IXGBE_FCTRL_MPE); 4383 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 4384 4385 return 0; 4386 } 4387 4388 /** 4389 * It clears the interrupt causes and enables the interrupt. 4390 * It will be called once only during nic initialized. 4391 * 4392 * @param dev 4393 * Pointer to struct rte_eth_dev. 4394 * @param on 4395 * Enable or Disable. 4396 * 4397 * @return 4398 * - On success, zero. 4399 * - On failure, a negative value. 4400 */ 4401 static int 4402 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) 4403 { 4404 struct ixgbe_interrupt *intr = 4405 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4406 4407 ixgbe_dev_link_status_print(dev); 4408 if (on) 4409 intr->mask |= IXGBE_EICR_LSC; 4410 else 4411 intr->mask &= ~IXGBE_EICR_LSC; 4412 4413 return 0; 4414 } 4415 4416 /** 4417 * It clears the interrupt causes and enables the interrupt. 4418 * It will be called once only during nic initialized. 4419 * 4420 * @param dev 4421 * Pointer to struct rte_eth_dev. 4422 * 4423 * @return 4424 * - On success, zero. 4425 * - On failure, a negative value. 
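 * The bit set here is only recorded in intr->mask; it takes effect when
 * ixgbe_enable_intr() later writes that mask to the EIMS register.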
4426 */ 4427 static int 4428 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) 4429 { 4430 struct ixgbe_interrupt *intr = 4431 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4432 4433 intr->mask |= IXGBE_EICR_RTX_QUEUE; 4434 4435 return 0; 4436 } 4437 4438 /** 4439 * It clears the interrupt causes and enables the interrupt. 4440 * It will be called once only during nic initialized. 4441 * 4442 * @param dev 4443 * Pointer to struct rte_eth_dev. 4444 * 4445 * @return 4446 * - On success, zero. 4447 * - On failure, a negative value. 4448 */ 4449 static int 4450 ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev) 4451 { 4452 struct ixgbe_interrupt *intr = 4453 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4454 4455 intr->mask |= IXGBE_EICR_LINKSEC; 4456 4457 return 0; 4458 } 4459 4460 /* 4461 * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update. 4462 * 4463 * @param dev 4464 * Pointer to struct rte_eth_dev. 4465 * 4466 * @return 4467 * - On success, zero. 4468 * - On failure, a negative value. 4469 */ 4470 static int 4471 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) 4472 { 4473 uint32_t eicr; 4474 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4475 struct ixgbe_interrupt *intr = 4476 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4477 4478 /* clear all cause mask */ 4479 ixgbe_disable_intr(hw); 4480 4481 /* read-on-clear nic registers here */ 4482 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4483 PMD_DRV_LOG(DEBUG, "eicr %x", eicr); 4484 4485 intr->flags = 0; 4486 4487 /* set flag for async link update */ 4488 if (eicr & IXGBE_EICR_LSC) 4489 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 4490 4491 if (eicr & IXGBE_EICR_MAILBOX) 4492 intr->flags |= IXGBE_FLAG_MAILBOX; 4493 4494 if (eicr & IXGBE_EICR_LINKSEC) 4495 intr->flags |= IXGBE_FLAG_MACSEC; 4496 4497 if (hw->mac.type == ixgbe_mac_X550EM_x && 4498 hw->phy.type == ixgbe_phy_x550em_ext_t && 4499 (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x)) 4500 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT; 4501 4502 return 0; 4503 } 4504 4505 /** 4506 * It gets and then prints the link status. 4507 * 4508 * @param dev 4509 * Pointer to struct rte_eth_dev. 4510 * 4511 * @return 4512 * - On success, zero. 4513 * - On failure, a negative value. 4514 */ 4515 static void 4516 ixgbe_dev_link_status_print(struct rte_eth_dev *dev) 4517 { 4518 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4519 struct rte_eth_link link; 4520 4521 rte_eth_linkstatus_get(dev, &link); 4522 4523 if (link.link_status) { 4524 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", 4525 (int)(dev->data->port_id), 4526 (unsigned)link.link_speed, 4527 link.link_duplex == ETH_LINK_FULL_DUPLEX ? 4528 "full-duplex" : "half-duplex"); 4529 } else { 4530 PMD_INIT_LOG(INFO, " Port %d: Link Down", 4531 (int)(dev->data->port_id)); 4532 } 4533 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, 4534 pci_dev->addr.domain, 4535 pci_dev->addr.bus, 4536 pci_dev->addr.devid, 4537 pci_dev->addr.function); 4538 } 4539 4540 /* 4541 * It executes link_update after knowing an interrupt occurred. 4542 * 4543 * @param dev 4544 * Pointer to struct rte_eth_dev. 4545 * 4546 * @return 4547 * - On success, zero. 4548 * - On failure, a negative value. 
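 * If a link state change was flagged, the heavy-weight handling is
 * deferred to ixgbe_dev_interrupt_delayed_handler() through an EAL alarm.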
4549 */ 4550 static int 4551 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) 4552 { 4553 struct ixgbe_interrupt *intr = 4554 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4555 int64_t timeout; 4556 struct ixgbe_hw *hw = 4557 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4558 4559 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); 4560 4561 if (intr->flags & IXGBE_FLAG_MAILBOX) { 4562 ixgbe_pf_mbx_process(dev); 4563 intr->flags &= ~IXGBE_FLAG_MAILBOX; 4564 } 4565 4566 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 4567 ixgbe_handle_lasi(hw); 4568 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 4569 } 4570 4571 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 4572 struct rte_eth_link link; 4573 4574 /* get the link status before the update, to predict the transition */ 4575 rte_eth_linkstatus_get(dev, &link); 4576 4577 ixgbe_dev_link_update(dev, 0); 4578 4579 /* link was down, likely coming up */ 4580 if (!link.link_status) 4581 /* handle it 1 sec later, waiting for it to become stable */ 4582 timeout = IXGBE_LINK_UP_CHECK_TIMEOUT; 4583 /* link was up, likely going down */ 4584 else 4585 /* handle it 4 sec later, waiting for it to become stable */ 4586 timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; 4587 4588 ixgbe_dev_link_status_print(dev); 4589 if (rte_eal_alarm_set(timeout * 1000, 4590 ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) 4591 PMD_DRV_LOG(ERR, "Error setting alarm"); 4592 else { 4593 /* remember original mask */ 4594 intr->mask_original = intr->mask; 4595 /* only disable lsc interrupt */ 4596 intr->mask &= ~IXGBE_EIMS_LSC; 4597 } 4598 } 4599 4600 PMD_DRV_LOG(DEBUG, "enable intr immediately"); 4601 ixgbe_enable_intr(dev); 4602 4603 return 0; 4604 } 4605 4606 /** 4607 * Interrupt handler to be registered as the alarm callback for delayed 4608 * handling of a specific interrupt, waiting for the NIC state to become 4609 * stable. The ixgbe interrupt state is not stable right after the link 4610 * goes down, so the handler waits 4 seconds before reading the stable status. 4611 * 4612 * @param handle 4613 * Pointer to interrupt handle. 4614 * @param param 4615 * The address of parameter (struct rte_eth_dev *) registered before.
4616 * 4617 * @return 4618 * void 4619 */ 4620 static void 4621 ixgbe_dev_interrupt_delayed_handler(void *param) 4622 { 4623 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4624 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 4625 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 4626 struct ixgbe_interrupt *intr = 4627 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4628 struct ixgbe_hw *hw = 4629 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4630 uint32_t eicr; 4631 4632 ixgbe_disable_intr(hw); 4633 4634 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4635 if (eicr & IXGBE_EICR_MAILBOX) 4636 ixgbe_pf_mbx_process(dev); 4637 4638 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 4639 ixgbe_handle_lasi(hw); 4640 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 4641 } 4642 4643 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 4644 ixgbe_dev_link_update(dev, 0); 4645 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 4646 ixgbe_dev_link_status_print(dev); 4647 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, 4648 NULL); 4649 } 4650 4651 if (intr->flags & IXGBE_FLAG_MACSEC) { 4652 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, 4653 NULL); 4654 intr->flags &= ~IXGBE_FLAG_MACSEC; 4655 } 4656 4657 /* restore original mask */ 4658 intr->mask = intr->mask_original; 4659 intr->mask_original = 0; 4660 4661 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); 4662 ixgbe_enable_intr(dev); 4663 rte_intr_ack(intr_handle); 4664 } 4665 4666 /** 4667 * Interrupt handler triggered by NIC for handling 4668 * specific interrupt. 4669 * 4670 * @param handle 4671 * Pointer to interrupt handle. 4672 * @param param 4673 * The address of parameter (struct rte_eth_dev *) regsitered before. 4674 * 4675 * @return 4676 * void 4677 */ 4678 static void 4679 ixgbe_dev_interrupt_handler(void *param) 4680 { 4681 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 4682 4683 ixgbe_dev_interrupt_get_status(dev); 4684 ixgbe_dev_interrupt_action(dev); 4685 } 4686 4687 static int 4688 ixgbe_dev_led_on(struct rte_eth_dev *dev) 4689 { 4690 struct ixgbe_hw *hw; 4691 4692 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4693 return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 4694 } 4695 4696 static int 4697 ixgbe_dev_led_off(struct rte_eth_dev *dev) 4698 { 4699 struct ixgbe_hw *hw; 4700 4701 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4702 return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 4703 } 4704 4705 static int 4706 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 4707 { 4708 struct ixgbe_hw *hw; 4709 uint32_t mflcn_reg; 4710 uint32_t fccfg_reg; 4711 int rx_pause; 4712 int tx_pause; 4713 4714 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4715 4716 fc_conf->pause_time = hw->fc.pause_time; 4717 fc_conf->high_water = hw->fc.high_water[0]; 4718 fc_conf->low_water = hw->fc.low_water[0]; 4719 fc_conf->send_xon = hw->fc.send_xon; 4720 fc_conf->autoneg = !hw->fc.disable_fc_autoneg; 4721 4722 /* 4723 * Return rx_pause status according to actual setting of 4724 * MFLCN register. 4725 */ 4726 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4727 if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE)) 4728 rx_pause = 1; 4729 else 4730 rx_pause = 0; 4731 4732 /* 4733 * Return tx_pause status according to actual setting of 4734 * FCCFG register. 
4735 */ 4736 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4737 if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY)) 4738 tx_pause = 1; 4739 else 4740 tx_pause = 0; 4741 4742 if (rx_pause && tx_pause) 4743 fc_conf->mode = RTE_FC_FULL; 4744 else if (rx_pause) 4745 fc_conf->mode = RTE_FC_RX_PAUSE; 4746 else if (tx_pause) 4747 fc_conf->mode = RTE_FC_TX_PAUSE; 4748 else 4749 fc_conf->mode = RTE_FC_NONE; 4750 4751 return 0; 4752 } 4753 4754 static int 4755 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 4756 { 4757 struct ixgbe_hw *hw; 4758 struct ixgbe_adapter *adapter = dev->data->dev_private; 4759 int err; 4760 uint32_t rx_buf_size; 4761 uint32_t max_high_water; 4762 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4763 ixgbe_fc_none, 4764 ixgbe_fc_rx_pause, 4765 ixgbe_fc_tx_pause, 4766 ixgbe_fc_full 4767 }; 4768 4769 PMD_INIT_FUNC_TRACE(); 4770 4771 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4772 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); 4773 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4774 4775 /* 4776 * At least reserve one Ethernet frame for watermark 4777 * high_water/low_water in kilo bytes for ixgbe 4778 */ 4779 max_high_water = (rx_buf_size - 4780 RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 4781 if ((fc_conf->high_water > max_high_water) || 4782 (fc_conf->high_water < fc_conf->low_water)) { 4783 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 4784 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 4785 return -EINVAL; 4786 } 4787 4788 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode]; 4789 hw->fc.pause_time = fc_conf->pause_time; 4790 hw->fc.high_water[0] = fc_conf->high_water; 4791 hw->fc.low_water[0] = fc_conf->low_water; 4792 hw->fc.send_xon = fc_conf->send_xon; 4793 hw->fc.disable_fc_autoneg = !fc_conf->autoneg; 4794 adapter->mac_ctrl_frame_fwd = fc_conf->mac_ctrl_frame_fwd; 4795 4796 err = ixgbe_flow_ctrl_enable(dev, hw); 4797 if (err < 0) { 4798 PMD_INIT_LOG(ERR, "ixgbe_flow_ctrl_enable = 0x%x", err); 4799 return -EIO; 4800 } 4801 return err; 4802 } 4803 4804 /** 4805 * ixgbe_pfc_enable_generic - Enable flow control 4806 * @hw: pointer to hardware structure 4807 * @tc_num: traffic class number 4808 * Enable flow control according to the current settings. 
4809 */ 4810 static int 4811 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num) 4812 { 4813 int ret_val = 0; 4814 uint32_t mflcn_reg, fccfg_reg; 4815 uint32_t reg; 4816 uint32_t fcrtl, fcrth; 4817 uint8_t i; 4818 uint8_t nb_rx_en; 4819 4820 /* Validate the water mark configuration */ 4821 if (!hw->fc.pause_time) { 4822 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4823 goto out; 4824 } 4825 4826 /* Low water mark of zero causes XOFF floods */ 4827 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 4828 /* High/Low water can not be 0 */ 4829 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) { 4830 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 4831 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4832 goto out; 4833 } 4834 4835 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { 4836 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 4837 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 4838 goto out; 4839 } 4840 } 4841 /* Negotiate the fc mode to use */ 4842 ixgbe_fc_autoneg(hw); 4843 4844 /* Disable any previous flow control settings */ 4845 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 4846 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE); 4847 4848 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 4849 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 4850 4851 switch (hw->fc.current_mode) { 4852 case ixgbe_fc_none: 4853 /* 4854 * If the count of enabled RX Priority Flow control >1, 4855 * and the TX pause can not be disabled 4856 */ 4857 nb_rx_en = 0; 4858 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 4859 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 4860 if (reg & IXGBE_FCRTH_FCEN) 4861 nb_rx_en++; 4862 } 4863 if (nb_rx_en > 1) 4864 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4865 break; 4866 case ixgbe_fc_rx_pause: 4867 /* 4868 * Rx Flow control is enabled and Tx Flow control is 4869 * disabled by software override. Since there really 4870 * isn't a way to advertise that we are capable of RX 4871 * Pause ONLY, we will advertise that we support both 4872 * symmetric and asymmetric Rx PAUSE. Later, we will 4873 * disable the adapter's ability to send PAUSE frames. 4874 */ 4875 mflcn_reg |= IXGBE_MFLCN_RPFCE; 4876 /* 4877 * If the count of enabled RX Priority Flow control >1, 4878 * and the TX pause can not be disabled 4879 */ 4880 nb_rx_en = 0; 4881 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 4882 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 4883 if (reg & IXGBE_FCRTH_FCEN) 4884 nb_rx_en++; 4885 } 4886 if (nb_rx_en > 1) 4887 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4888 break; 4889 case ixgbe_fc_tx_pause: 4890 /* 4891 * Tx Flow control is enabled, and Rx Flow control is 4892 * disabled by software override. 4893 */ 4894 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4895 break; 4896 case ixgbe_fc_full: 4897 /* Flow control (both Rx and Tx) is enabled by SW override. */ 4898 mflcn_reg |= IXGBE_MFLCN_RPFCE; 4899 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 4900 break; 4901 default: 4902 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); 4903 ret_val = IXGBE_ERR_CONFIG; 4904 goto out; 4905 } 4906 4907 /* Set 802.3x based flow control settings. */ 4908 mflcn_reg |= IXGBE_MFLCN_DPF; 4909 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 4910 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 4911 4912 /* Set up and enable Rx high/low water mark thresholds, enable XON. 
*/ 4913 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 4914 hw->fc.high_water[tc_num]) { 4915 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE; 4916 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl); 4917 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN; 4918 } else { 4919 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0); 4920 /* 4921 * In order to prevent Tx hangs when the internal Tx 4922 * switch is enabled we must set the high water mark 4923 * to the maximum FCRTH value. This allows the Tx 4924 * switch to function even under heavy Rx workloads. 4925 */ 4926 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32; 4927 } 4928 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth); 4929 4930 /* Configure pause time (2 TCs per register) */ 4931 reg = hw->fc.pause_time * 0x00010001; 4932 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) 4933 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 4934 4935 /* Configure flow control refresh threshold value */ 4936 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 4937 4938 out: 4939 return ret_val; 4940 } 4941 4942 static int 4943 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) 4944 { 4945 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4946 int32_t ret_val = IXGBE_NOT_IMPLEMENTED; 4947 4948 if (hw->mac.type != ixgbe_mac_82598EB) { 4949 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); 4950 } 4951 return ret_val; 4952 } 4953 4954 static int 4955 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) 4956 { 4957 int err; 4958 uint32_t rx_buf_size; 4959 uint32_t max_high_water; 4960 uint8_t tc_num; 4961 uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; 4962 struct ixgbe_hw *hw = 4963 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4964 struct ixgbe_dcb_config *dcb_config = 4965 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 4966 4967 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 4968 ixgbe_fc_none, 4969 ixgbe_fc_rx_pause, 4970 ixgbe_fc_tx_pause, 4971 ixgbe_fc_full 4972 }; 4973 4974 PMD_INIT_FUNC_TRACE(); 4975 4976 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); 4977 tc_num = map[pfc_conf->priority]; 4978 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)); 4979 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 4980 /* 4981 * At least reserve one Ethernet frame for watermark 4982 * high_water/low_water in kilo bytes for ixgbe 4983 */ 4984 max_high_water = (rx_buf_size - 4985 RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 4986 if ((pfc_conf->fc.high_water > max_high_water) || 4987 (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { 4988 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 4989 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 4990 return -EINVAL; 4991 } 4992 4993 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode]; 4994 hw->fc.pause_time = pfc_conf->fc.pause_time; 4995 hw->fc.send_xon = pfc_conf->fc.send_xon; 4996 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; 4997 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; 4998 4999 err = ixgbe_dcb_pfc_enable(dev, tc_num); 5000 5001 /* Not negotiated is not an error case */ 5002 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) 5003 return 0; 5004 5005 PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err); 5006 return -EIO; 5007 } 5008 5009 static int 5010 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 5011 struct rte_eth_rss_reta_entry64 *reta_conf, 5012 uint16_t 
reta_size) 5013 { 5014 uint16_t i, sp_reta_size; 5015 uint8_t j, mask; 5016 uint32_t reta, r; 5017 uint16_t idx, shift; 5018 struct ixgbe_adapter *adapter = dev->data->dev_private; 5019 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5020 uint32_t reta_reg; 5021 5022 PMD_INIT_FUNC_TRACE(); 5023 5024 if (!ixgbe_rss_update_sp(hw->mac.type)) { 5025 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this " 5026 "NIC."); 5027 return -ENOTSUP; 5028 } 5029 5030 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 5031 if (reta_size != sp_reta_size) { 5032 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 5033 "(%d) doesn't match the number hardware can supported " 5034 "(%d)", reta_size, sp_reta_size); 5035 return -EINVAL; 5036 } 5037 5038 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 5039 idx = i / RTE_RETA_GROUP_SIZE; 5040 shift = i % RTE_RETA_GROUP_SIZE; 5041 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 5042 IXGBE_4_BIT_MASK); 5043 if (!mask) 5044 continue; 5045 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 5046 if (mask == IXGBE_4_BIT_MASK) 5047 r = 0; 5048 else 5049 r = IXGBE_READ_REG(hw, reta_reg); 5050 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) { 5051 if (mask & (0x1 << j)) 5052 reta |= reta_conf[idx].reta[shift + j] << 5053 (CHAR_BIT * j); 5054 else 5055 reta |= r & (IXGBE_8_BIT_MASK << 5056 (CHAR_BIT * j)); 5057 } 5058 IXGBE_WRITE_REG(hw, reta_reg, reta); 5059 } 5060 adapter->rss_reta_updated = 1; 5061 5062 return 0; 5063 } 5064 5065 static int 5066 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 5067 struct rte_eth_rss_reta_entry64 *reta_conf, 5068 uint16_t reta_size) 5069 { 5070 uint16_t i, sp_reta_size; 5071 uint8_t j, mask; 5072 uint32_t reta; 5073 uint16_t idx, shift; 5074 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5075 uint32_t reta_reg; 5076 5077 PMD_INIT_FUNC_TRACE(); 5078 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 5079 if (reta_size != sp_reta_size) { 5080 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 5081 "(%d) doesn't match the number hardware can supported " 5082 "(%d)", reta_size, sp_reta_size); 5083 return -EINVAL; 5084 } 5085 5086 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 5087 idx = i / RTE_RETA_GROUP_SIZE; 5088 shift = i % RTE_RETA_GROUP_SIZE; 5089 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 5090 IXGBE_4_BIT_MASK); 5091 if (!mask) 5092 continue; 5093 5094 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 5095 reta = IXGBE_READ_REG(hw, reta_reg); 5096 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { 5097 if (mask & (0x1 << j)) 5098 reta_conf[idx].reta[shift + j] = 5099 ((reta >> (CHAR_BIT * j)) & 5100 IXGBE_8_BIT_MASK); 5101 } 5102 } 5103 5104 return 0; 5105 } 5106 5107 static int 5108 ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 5109 uint32_t index, uint32_t pool) 5110 { 5111 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5112 uint32_t enable_addr = 1; 5113 5114 return ixgbe_set_rar(hw, index, mac_addr->addr_bytes, 5115 pool, enable_addr); 5116 } 5117 5118 static void 5119 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) 5120 { 5121 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5122 5123 ixgbe_clear_rar(hw, index); 5124 } 5125 5126 static int 5127 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr) 5128 { 5129 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5130 5131 ixgbe_remove_rar(dev, 0); 5132 ixgbe_add_rar(dev, addr, 0, 
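			/* pool index 'max_vfs' is the PF's own default pool
			 * when SR-IOV is active (VF pools use 0..max_vfs-1) */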
pci_dev->max_vfs); 5133 5134 return 0; 5135 } 5136 5137 static bool 5138 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) 5139 { 5140 if (strcmp(dev->device->driver->name, drv->driver.name)) 5141 return false; 5142 5143 return true; 5144 } 5145 5146 bool 5147 is_ixgbe_supported(struct rte_eth_dev *dev) 5148 { 5149 return is_device_supported(dev, &rte_ixgbe_pmd); 5150 } 5151 5152 static int 5153 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 5154 { 5155 uint32_t hlreg0; 5156 uint32_t maxfrs; 5157 struct ixgbe_hw *hw; 5158 struct rte_eth_dev_info dev_info; 5159 uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD; 5160 struct rte_eth_dev_data *dev_data = dev->data; 5161 int ret; 5162 5163 ret = ixgbe_dev_info_get(dev, &dev_info); 5164 if (ret != 0) 5165 return ret; 5166 5167 /* check that mtu is within the allowed range */ 5168 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) 5169 return -EINVAL; 5170 5171 /* If device is started, refuse mtu that requires the support of 5172 * scattered packets when this feature has not been enabled before. 5173 */ 5174 if (dev_data->dev_started && !dev_data->scattered_rx && 5175 (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > 5176 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 5177 PMD_INIT_LOG(ERR, "Stop port first."); 5178 return -EINVAL; 5179 } 5180 5181 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5182 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 5183 5184 /* switch to jumbo mode if needed */ 5185 if (frame_size > RTE_ETHER_MAX_LEN) { 5186 dev->data->dev_conf.rxmode.offloads |= 5187 DEV_RX_OFFLOAD_JUMBO_FRAME; 5188 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 5189 } else { 5190 dev->data->dev_conf.rxmode.offloads &= 5191 ~DEV_RX_OFFLOAD_JUMBO_FRAME; 5192 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; 5193 } 5194 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 5195 5196 /* update max frame size */ 5197 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 5198 5199 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); 5200 maxfrs &= 0x0000FFFF; 5201 maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16); 5202 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); 5203 5204 return 0; 5205 } 5206 5207 /* 5208 * Virtual Function operations 5209 */ 5210 static void 5211 ixgbevf_intr_disable(struct rte_eth_dev *dev) 5212 { 5213 struct ixgbe_interrupt *intr = 5214 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5215 struct ixgbe_hw *hw = 5216 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5217 5218 PMD_INIT_FUNC_TRACE(); 5219 5220 /* Clear interrupt mask to stop from interrupts being generated */ 5221 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); 5222 5223 IXGBE_WRITE_FLUSH(hw); 5224 5225 /* Clear mask value. */ 5226 intr->mask = 0; 5227 } 5228 5229 static void 5230 ixgbevf_intr_enable(struct rte_eth_dev *dev) 5231 { 5232 struct ixgbe_interrupt *intr = 5233 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5234 struct ixgbe_hw *hw = 5235 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5236 5237 PMD_INIT_FUNC_TRACE(); 5238 5239 /* VF enable interrupt autoclean */ 5240 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK); 5241 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK); 5242 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); 5243 5244 IXGBE_WRITE_FLUSH(hw); 5245 5246 /* Save IXGBE_VTEIMS value to mask. 
*/ 5247 intr->mask = IXGBE_VF_IRQ_ENABLE_MASK; 5248 } 5249 5250 static int 5251 ixgbevf_dev_configure(struct rte_eth_dev *dev) 5252 { 5253 struct rte_eth_conf *conf = &dev->data->dev_conf; 5254 struct ixgbe_adapter *adapter = dev->data->dev_private; 5255 5256 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 5257 dev->data->port_id); 5258 5259 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 5260 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH; 5261 5262 /* 5263 * VF has no ability to enable/disable HW CRC 5264 * Keep the persistent behavior the same as Host PF 5265 */ 5266 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC 5267 if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { 5268 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 5269 conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC; 5270 } 5271 #else 5272 if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) { 5273 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 5274 conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC; 5275 } 5276 #endif 5277 5278 /* 5279 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 5280 * allocation or vector Rx preconditions we will reset it. 5281 */ 5282 adapter->rx_bulk_alloc_allowed = true; 5283 adapter->rx_vec_allowed = true; 5284 5285 return 0; 5286 } 5287 5288 static int 5289 ixgbevf_dev_start(struct rte_eth_dev *dev) 5290 { 5291 struct ixgbe_hw *hw = 5292 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5293 uint32_t intr_vector = 0; 5294 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5295 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5296 5297 int err, mask = 0; 5298 5299 PMD_INIT_FUNC_TRACE(); 5300 5301 /* Stop the link setup handler before resetting the HW. */ 5302 ixgbe_dev_cancel_link_thread(dev); 5303 5304 err = hw->mac.ops.reset_hw(hw); 5305 if (err) { 5306 PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err); 5307 return err; 5308 } 5309 hw->mac.get_link_status = true; 5310 5311 /* negotiate mailbox API version to use with the PF. 
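 * The negotiation walks the supported mailbox API revisions from the
 * newest down to the legacy 1.0 version and settles on the first one the
 * PF acknowledges; which requests the VF may issue later (see e.g. the
 * comment in ixgbevf_dev_set_mtu()) depends on the agreed revision.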
*/ 5312 ixgbevf_negotiate_api(hw); 5313 5314 ixgbevf_dev_tx_init(dev); 5315 5316 /* This can fail when allocating mbufs for descriptor rings */ 5317 err = ixgbevf_dev_rx_init(dev); 5318 if (err) { 5319 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err); 5320 ixgbe_dev_clear_queues(dev); 5321 return err; 5322 } 5323 5324 /* Set vfta */ 5325 ixgbevf_set_vfta_all(dev, 1); 5326 5327 /* Set HW strip */ 5328 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | 5329 ETH_VLAN_EXTEND_MASK; 5330 err = ixgbevf_vlan_offload_config(dev, mask); 5331 if (err) { 5332 PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err); 5333 ixgbe_dev_clear_queues(dev); 5334 return err; 5335 } 5336 5337 ixgbevf_dev_rxtx_start(dev); 5338 5339 /* check and configure queue intr-vector mapping */ 5340 if (rte_intr_cap_multiple(intr_handle) && 5341 dev->data->dev_conf.intr_conf.rxq) { 5342 /* According to datasheet, only vector 0/1/2 can be used, 5343 * now only one vector is used for Rx queue 5344 */ 5345 intr_vector = 1; 5346 if (rte_intr_efd_enable(intr_handle, intr_vector)) 5347 return -1; 5348 } 5349 5350 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 5351 intr_handle->intr_vec = 5352 rte_zmalloc("intr_vec", 5353 dev->data->nb_rx_queues * sizeof(int), 0); 5354 if (intr_handle->intr_vec == NULL) { 5355 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 5356 " intr_vec", dev->data->nb_rx_queues); 5357 return -ENOMEM; 5358 } 5359 } 5360 ixgbevf_configure_msix(dev); 5361 5362 /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt 5363 * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ). 5364 * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( ) 5365 * is not cleared, it will fail when following rte_intr_enable( ) tries 5366 * to map Rx queue interrupt to other VFIO vectors. 5367 * So clear uio/vfio intr/evevnfd first to avoid failure. 5368 */ 5369 rte_intr_disable(intr_handle); 5370 5371 rte_intr_enable(intr_handle); 5372 5373 /* Re-enable interrupt for VF */ 5374 ixgbevf_intr_enable(dev); 5375 5376 /* 5377 * Update link status right before return, because it may 5378 * start link configuration process in a separate thread. 
5379 */ 5380 ixgbevf_dev_link_update(dev, 0); 5381 5382 hw->adapter_stopped = false; 5383 5384 return 0; 5385 } 5386 5387 static void 5388 ixgbevf_dev_stop(struct rte_eth_dev *dev) 5389 { 5390 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5391 struct ixgbe_adapter *adapter = dev->data->dev_private; 5392 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5393 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5394 5395 if (hw->adapter_stopped) 5396 return; 5397 5398 PMD_INIT_FUNC_TRACE(); 5399 5400 ixgbe_dev_cancel_link_thread(dev); 5401 5402 ixgbevf_intr_disable(dev); 5403 5404 hw->adapter_stopped = 1; 5405 ixgbe_stop_adapter(hw); 5406 5407 /* 5408 * Clear what we set, but we still keep shadow_vfta to 5409 * restore after device starts 5410 */ 5411 ixgbevf_set_vfta_all(dev, 0); 5412 5413 /* Clear stored conf */ 5414 dev->data->scattered_rx = 0; 5415 5416 ixgbe_dev_clear_queues(dev); 5417 5418 /* Clean datapath event and queue/vec mapping */ 5419 rte_intr_efd_disable(intr_handle); 5420 if (intr_handle->intr_vec != NULL) { 5421 rte_free(intr_handle->intr_vec); 5422 intr_handle->intr_vec = NULL; 5423 } 5424 5425 adapter->rss_reta_updated = 0; 5426 } 5427 5428 static void 5429 ixgbevf_dev_close(struct rte_eth_dev *dev) 5430 { 5431 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5432 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5433 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5434 5435 PMD_INIT_FUNC_TRACE(); 5436 5437 ixgbe_reset_hw(hw); 5438 5439 ixgbevf_dev_stop(dev); 5440 5441 ixgbe_dev_free_queues(dev); 5442 5443 /** 5444 * Remove the VF MAC address ro ensure 5445 * that the VF traffic goes to the PF 5446 * after stop, close and detach of the VF 5447 **/ 5448 ixgbevf_remove_mac_addr(dev, 0); 5449 5450 dev->dev_ops = NULL; 5451 dev->rx_pkt_burst = NULL; 5452 dev->tx_pkt_burst = NULL; 5453 5454 rte_intr_disable(intr_handle); 5455 rte_intr_callback_unregister(intr_handle, 5456 ixgbevf_dev_interrupt_handler, dev); 5457 } 5458 5459 /* 5460 * Reset VF device 5461 */ 5462 static int 5463 ixgbevf_dev_reset(struct rte_eth_dev *dev) 5464 { 5465 int ret; 5466 5467 ret = eth_ixgbevf_dev_uninit(dev); 5468 if (ret) 5469 return ret; 5470 5471 ret = eth_ixgbevf_dev_init(dev); 5472 5473 return ret; 5474 } 5475 5476 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) 5477 { 5478 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5479 struct ixgbe_vfta *shadow_vfta = 5480 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5481 int i = 0, j = 0, vfta = 0, mask = 1; 5482 5483 for (i = 0; i < IXGBE_VFTA_SIZE; i++) { 5484 vfta = shadow_vfta->vfta[i]; 5485 if (vfta) { 5486 mask = 1; 5487 for (j = 0; j < 32; j++) { 5488 if (vfta & mask) 5489 ixgbe_set_vfta(hw, (i<<5)+j, 0, 5490 on, false); 5491 mask <<= 1; 5492 } 5493 } 5494 } 5495 5496 } 5497 5498 static int 5499 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 5500 { 5501 struct ixgbe_hw *hw = 5502 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5503 struct ixgbe_vfta *shadow_vfta = 5504 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 5505 uint32_t vid_idx = 0; 5506 uint32_t vid_bit = 0; 5507 int ret = 0; 5508 5509 PMD_INIT_FUNC_TRACE(); 5510 5511 /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ 5512 ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); 5513 if (ret) { 5514 PMD_INIT_LOG(ERR, "Unable to set VF vlan"); 5515 return ret; 5516 } 5517 vid_idx = (uint32_t) ((vlan_id >> 5) & 
0x7F); 5518 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 5519 5520 /* Save what we set and retore it after device reset */ 5521 if (on) 5522 shadow_vfta->vfta[vid_idx] |= vid_bit; 5523 else 5524 shadow_vfta->vfta[vid_idx] &= ~vid_bit; 5525 5526 return 0; 5527 } 5528 5529 static void 5530 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 5531 { 5532 struct ixgbe_hw *hw = 5533 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5534 uint32_t ctrl; 5535 5536 PMD_INIT_FUNC_TRACE(); 5537 5538 if (queue >= hw->mac.max_rx_queues) 5539 return; 5540 5541 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 5542 if (on) 5543 ctrl |= IXGBE_RXDCTL_VME; 5544 else 5545 ctrl &= ~IXGBE_RXDCTL_VME; 5546 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 5547 5548 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); 5549 } 5550 5551 static int 5552 ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask) 5553 { 5554 struct ixgbe_rx_queue *rxq; 5555 uint16_t i; 5556 int on = 0; 5557 5558 /* VF function only support hw strip feature, others are not support */ 5559 if (mask & ETH_VLAN_STRIP_MASK) { 5560 for (i = 0; i < dev->data->nb_rx_queues; i++) { 5561 rxq = dev->data->rx_queues[i]; 5562 on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); 5563 ixgbevf_vlan_strip_queue_set(dev, i, on); 5564 } 5565 } 5566 5567 return 0; 5568 } 5569 5570 static int 5571 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) 5572 { 5573 ixgbe_config_vlan_strip_on_all_queues(dev, mask); 5574 5575 ixgbevf_vlan_offload_config(dev, mask); 5576 5577 return 0; 5578 } 5579 5580 int 5581 ixgbe_vt_check(struct ixgbe_hw *hw) 5582 { 5583 uint32_t reg_val; 5584 5585 /* if Virtualization Technology is enabled */ 5586 reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 5587 if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { 5588 PMD_INIT_LOG(ERR, "VT must be enabled for this setting"); 5589 return -1; 5590 } 5591 5592 return 0; 5593 } 5594 5595 static uint32_t 5596 ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr) 5597 { 5598 uint32_t vector = 0; 5599 5600 switch (hw->mac.mc_filter_type) { 5601 case 0: /* use bits [47:36] of the address */ 5602 vector = ((uc_addr->addr_bytes[4] >> 4) | 5603 (((uint16_t)uc_addr->addr_bytes[5]) << 4)); 5604 break; 5605 case 1: /* use bits [46:35] of the address */ 5606 vector = ((uc_addr->addr_bytes[4] >> 3) | 5607 (((uint16_t)uc_addr->addr_bytes[5]) << 5)); 5608 break; 5609 case 2: /* use bits [45:34] of the address */ 5610 vector = ((uc_addr->addr_bytes[4] >> 2) | 5611 (((uint16_t)uc_addr->addr_bytes[5]) << 6)); 5612 break; 5613 case 3: /* use bits [43:32] of the address */ 5614 vector = ((uc_addr->addr_bytes[4]) | 5615 (((uint16_t)uc_addr->addr_bytes[5]) << 8)); 5616 break; 5617 default: /* Invalid mc_filter_type */ 5618 break; 5619 } 5620 5621 /* vector can only be 12-bits or boundary will be exceeded */ 5622 vector &= 0xFFF; 5623 return vector; 5624 } 5625 5626 static int 5627 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, 5628 struct rte_ether_addr *mac_addr, uint8_t on) 5629 { 5630 uint32_t vector; 5631 uint32_t uta_idx; 5632 uint32_t reg_val; 5633 uint32_t uta_shift; 5634 uint32_t rc; 5635 const uint32_t ixgbe_uta_idx_mask = 0x7F; 5636 const uint32_t ixgbe_uta_bit_shift = 5; 5637 const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1; 5638 const uint32_t bit1 = 0x1; 5639 5640 struct ixgbe_hw *hw = 5641 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5642 struct ixgbe_uta_info *uta_info = 5643 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5644 5645 /* The 
UTA table only exists on 82599 hardware and newer */ 5646 if (hw->mac.type < ixgbe_mac_82599EB) 5647 return -ENOTSUP; 5648 5649 vector = ixgbe_uta_vector(hw, mac_addr); 5650 uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; 5651 uta_shift = vector & ixgbe_uta_bit_mask; 5652 5653 rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0); 5654 if (rc == on) 5655 return 0; 5656 5657 reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx)); 5658 if (on) { 5659 uta_info->uta_in_use++; 5660 reg_val |= (bit1 << uta_shift); 5661 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift); 5662 } else { 5663 uta_info->uta_in_use--; 5664 reg_val &= ~(bit1 << uta_shift); 5665 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift); 5666 } 5667 5668 IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val); 5669 5670 if (uta_info->uta_in_use > 0) 5671 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 5672 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 5673 else 5674 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 5675 5676 return 0; 5677 } 5678 5679 static int 5680 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) 5681 { 5682 int i; 5683 struct ixgbe_hw *hw = 5684 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5685 struct ixgbe_uta_info *uta_info = 5686 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 5687 5688 /* The UTA table only exists on 82599 hardware and newer */ 5689 if (hw->mac.type < ixgbe_mac_82599EB) 5690 return -ENOTSUP; 5691 5692 if (on) { 5693 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5694 uta_info->uta_shadow[i] = ~0; 5695 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); 5696 } 5697 } else { 5698 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 5699 uta_info->uta_shadow[i] = 0; 5700 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 5701 } 5702 } 5703 return 0; 5704 5705 } 5706 5707 uint32_t 5708 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) 5709 { 5710 uint32_t new_val = orig_val; 5711 5712 if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) 5713 new_val |= IXGBE_VMOLR_AUPE; 5714 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC) 5715 new_val |= IXGBE_VMOLR_ROMPE; 5716 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC) 5717 new_val |= IXGBE_VMOLR_ROPE; 5718 if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST) 5719 new_val |= IXGBE_VMOLR_BAM; 5720 if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST) 5721 new_val |= IXGBE_VMOLR_MPE; 5722 5723 return new_val; 5724 } 5725 5726 #define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */ 5727 #define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */ 5728 #define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */ 5729 #define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. 
*/ 5730 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \ 5731 ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \ 5732 ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN)) 5733 5734 static int 5735 ixgbe_mirror_rule_set(struct rte_eth_dev *dev, 5736 struct rte_eth_mirror_conf *mirror_conf, 5737 uint8_t rule_id, uint8_t on) 5738 { 5739 uint32_t mr_ctl, vlvf; 5740 uint32_t mp_lsb = 0; 5741 uint32_t mv_msb = 0; 5742 uint32_t mv_lsb = 0; 5743 uint32_t mp_msb = 0; 5744 uint8_t i = 0; 5745 int reg_index = 0; 5746 uint64_t vlan_mask = 0; 5747 5748 const uint8_t pool_mask_offset = 32; 5749 const uint8_t vlan_mask_offset = 32; 5750 const uint8_t dst_pool_offset = 8; 5751 const uint8_t rule_mr_offset = 4; 5752 const uint8_t mirror_rule_mask = 0x0F; 5753 5754 struct ixgbe_mirror_info *mr_info = 5755 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); 5756 struct ixgbe_hw *hw = 5757 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5758 uint8_t mirror_type = 0; 5759 5760 if (ixgbe_vt_check(hw) < 0) 5761 return -ENOTSUP; 5762 5763 if (rule_id >= IXGBE_MAX_MIRROR_RULES) 5764 return -EINVAL; 5765 5766 if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) { 5767 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.", 5768 mirror_conf->rule_type); 5769 return -EINVAL; 5770 } 5771 5772 if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { 5773 mirror_type |= IXGBE_MRCTL_VLME; 5774 /* Check if vlan id is valid and find conresponding VLAN ID 5775 * index in VLVF 5776 */ 5777 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) { 5778 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { 5779 /* search vlan id related pool vlan filter 5780 * index 5781 */ 5782 reg_index = ixgbe_find_vlvf_slot( 5783 hw, 5784 mirror_conf->vlan.vlan_id[i], 5785 false); 5786 if (reg_index < 0) 5787 return -EINVAL; 5788 vlvf = IXGBE_READ_REG(hw, 5789 IXGBE_VLVF(reg_index)); 5790 if ((vlvf & IXGBE_VLVF_VIEN) && 5791 ((vlvf & IXGBE_VLVF_VLANID_MASK) == 5792 mirror_conf->vlan.vlan_id[i])) 5793 vlan_mask |= (1ULL << reg_index); 5794 else 5795 return -EINVAL; 5796 } 5797 } 5798 5799 if (on) { 5800 mv_lsb = vlan_mask & 0xFFFFFFFF; 5801 mv_msb = vlan_mask >> vlan_mask_offset; 5802 5803 mr_info->mr_conf[rule_id].vlan.vlan_mask = 5804 mirror_conf->vlan.vlan_mask; 5805 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) { 5806 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) 5807 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 5808 mirror_conf->vlan.vlan_id[i]; 5809 } 5810 } else { 5811 mv_lsb = 0; 5812 mv_msb = 0; 5813 mr_info->mr_conf[rule_id].vlan.vlan_mask = 0; 5814 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) 5815 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0; 5816 } 5817 } 5818 5819 /** 5820 * if enable pool mirror, write related pool mask register,if disable 5821 * pool mirror, clear PFMRVM register 5822 */ 5823 if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { 5824 mirror_type |= IXGBE_MRCTL_VPME; 5825 if (on) { 5826 mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF; 5827 mp_msb = mirror_conf->pool_mask >> pool_mask_offset; 5828 mr_info->mr_conf[rule_id].pool_mask = 5829 mirror_conf->pool_mask; 5830 5831 } else { 5832 mp_lsb = 0; 5833 mp_msb = 0; 5834 mr_info->mr_conf[rule_id].pool_mask = 0; 5835 } 5836 } 5837 if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT) 5838 mirror_type |= IXGBE_MRCTL_UPME; 5839 if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT) 5840 mirror_type |= IXGBE_MRCTL_DPME; 5841 5842 /* read mirror control register and recalculate it */ 5843 mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id)); 5844 5845 if (on) { 5846 
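/* Illustrative note: per the constants above, the low nibble of MRCTL
 * (mirror_rule_mask) carries the rule-type enable bits
 * (VPME/UPME/DPME/VLME) and the destination pool sits at bit
 * dst_pool_offset (8).  For a previously unused rule mirroring pool
 * traffic to pool 3 this works out to
 * mr_ctl = IXGBE_MRCTL_VPME | (3 << 8) = 0x0301.
 */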
mr_ctl |= mirror_type; 5847 mr_ctl &= mirror_rule_mask; 5848 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset; 5849 } else { 5850 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask); 5851 } 5852 5853 mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type; 5854 mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool; 5855 5856 /* write mirrror control register */ 5857 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); 5858 5859 /* write pool mirrror control register */ 5860 if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { 5861 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); 5862 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), 5863 mp_msb); 5864 } 5865 /* write VLAN mirrror control register */ 5866 if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { 5867 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); 5868 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), 5869 mv_msb); 5870 } 5871 5872 return 0; 5873 } 5874 5875 static int 5876 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) 5877 { 5878 int mr_ctl = 0; 5879 uint32_t lsb_val = 0; 5880 uint32_t msb_val = 0; 5881 const uint8_t rule_mr_offset = 4; 5882 5883 struct ixgbe_hw *hw = 5884 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5885 struct ixgbe_mirror_info *mr_info = 5886 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); 5887 5888 if (ixgbe_vt_check(hw) < 0) 5889 return -ENOTSUP; 5890 5891 if (rule_id >= IXGBE_MAX_MIRROR_RULES) 5892 return -EINVAL; 5893 5894 memset(&mr_info->mr_conf[rule_id], 0, 5895 sizeof(struct rte_eth_mirror_conf)); 5896 5897 /* clear PFVMCTL register */ 5898 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); 5899 5900 /* clear pool mask register */ 5901 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val); 5902 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val); 5903 5904 /* clear vlan mask register */ 5905 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val); 5906 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val); 5907 5908 return 0; 5909 } 5910 5911 static int 5912 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5913 { 5914 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5915 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5916 struct ixgbe_interrupt *intr = 5917 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5918 struct ixgbe_hw *hw = 5919 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5920 uint32_t vec = IXGBE_MISC_VEC_ID; 5921 5922 if (rte_intr_allow_others(intr_handle)) 5923 vec = IXGBE_RX_VEC_START; 5924 intr->mask |= (1 << vec); 5925 RTE_SET_USED(queue_id); 5926 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5927 5928 rte_intr_ack(intr_handle); 5929 5930 return 0; 5931 } 5932 5933 static int 5934 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5935 { 5936 struct ixgbe_interrupt *intr = 5937 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5938 struct ixgbe_hw *hw = 5939 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5940 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5941 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5942 uint32_t vec = IXGBE_MISC_VEC_ID; 5943 5944 if (rte_intr_allow_others(intr_handle)) 5945 vec = IXGBE_RX_VEC_START; 5946 intr->mask &= ~(1 << vec); 5947 RTE_SET_USED(queue_id); 5948 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); 5949 5950 return 0; 5951 } 5952 5953 static int 5954 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 5955 { 5956 struct 
rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 5957 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 5958 uint32_t mask; 5959 struct ixgbe_hw *hw = 5960 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5961 struct ixgbe_interrupt *intr = 5962 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5963 5964 if (queue_id < 16) { 5965 ixgbe_disable_intr(hw); 5966 intr->mask |= (1 << queue_id); 5967 ixgbe_enable_intr(dev); 5968 } else if (queue_id < 32) { 5969 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 5970 mask &= (1 << queue_id); 5971 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 5972 } else if (queue_id < 64) { 5973 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 5974 mask &= (1 << (queue_id - 32)); 5975 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 5976 } 5977 rte_intr_ack(intr_handle); 5978 5979 return 0; 5980 } 5981 5982 static int 5983 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 5984 { 5985 uint32_t mask; 5986 struct ixgbe_hw *hw = 5987 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5988 struct ixgbe_interrupt *intr = 5989 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 5990 5991 if (queue_id < 16) { 5992 ixgbe_disable_intr(hw); 5993 intr->mask &= ~(1 << queue_id); 5994 ixgbe_enable_intr(dev); 5995 } else if (queue_id < 32) { 5996 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 5997 mask &= ~(1 << queue_id); 5998 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 5999 } else if (queue_id < 64) { 6000 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 6001 mask &= ~(1 << (queue_id - 32)); 6002 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 6003 } 6004 6005 return 0; 6006 } 6007 6008 static void 6009 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 6010 uint8_t queue, uint8_t msix_vector) 6011 { 6012 uint32_t tmp, idx; 6013 6014 if (direction == -1) { 6015 /* other causes */ 6016 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 6017 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 6018 tmp &= ~0xFF; 6019 tmp |= msix_vector; 6020 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); 6021 } else { 6022 /* rx or tx cause */ 6023 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 6024 idx = ((16 * (queue & 1)) + (8 * direction)); 6025 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 6026 tmp &= ~(0xFF << idx); 6027 tmp |= (msix_vector << idx); 6028 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); 6029 } 6030 } 6031 6032 /** 6033 * set the IVAR registers, mapping interrupt causes to vectors 6034 * @param hw 6035 * pointer to ixgbe_hw struct 6036 * @direction 6037 * 0 for Rx, 1 for Tx, -1 for other causes 6038 * @queue 6039 * queue to map the corresponding interrupt to 6040 * @msix_vector 6041 * the vector to map to the corresponding queue 6042 */ 6043 static void 6044 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 6045 uint8_t queue, uint8_t msix_vector) 6046 { 6047 uint32_t tmp, idx; 6048 6049 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 6050 if (hw->mac.type == ixgbe_mac_82598EB) { 6051 if (direction == -1) 6052 direction = 0; 6053 idx = (((direction * 64) + queue) >> 2) & 0x1F; 6054 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); 6055 tmp &= ~(0xFF << (8 * (queue & 0x3))); 6056 tmp |= (msix_vector << (8 * (queue & 0x3))); 6057 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); 6058 } else if ((hw->mac.type == ixgbe_mac_82599EB) || 6059 (hw->mac.type == ixgbe_mac_X540) || 6060 (hw->mac.type == ixgbe_mac_X550) || 6061 (hw->mac.type == ixgbe_mac_X550EM_x)) { 6062 if (direction == -1) { 6063 /* other causes */ 6064 idx = ((queue & 1) * 8); 6065 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 6066 tmp 
&= ~(0xFF << idx); 6067 tmp |= (msix_vector << idx); 6068 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); 6069 } else { 6070 /* rx or tx causes */ 6071 idx = ((16 * (queue & 1)) + (8 * direction)); 6072 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); 6073 tmp &= ~(0xFF << idx); 6074 tmp |= (msix_vector << idx); 6075 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); 6076 } 6077 } 6078 } 6079 6080 static void 6081 ixgbevf_configure_msix(struct rte_eth_dev *dev) 6082 { 6083 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 6084 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 6085 struct ixgbe_hw *hw = 6086 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6087 uint32_t q_idx; 6088 uint32_t vector_idx = IXGBE_MISC_VEC_ID; 6089 uint32_t base = IXGBE_MISC_VEC_ID; 6090 6091 /* Configure VF other cause ivar */ 6092 ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); 6093 6094 /* won't configure msix register if no mapping is done 6095 * between intr vector and event fd. 6096 */ 6097 if (!rte_intr_dp_is_en(intr_handle)) 6098 return; 6099 6100 if (rte_intr_allow_others(intr_handle)) { 6101 base = IXGBE_RX_VEC_START; 6102 vector_idx = IXGBE_RX_VEC_START; 6103 } 6104 6105 /* Configure all RX queues of VF */ 6106 for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { 6107 /* Force all queue use vector 0, 6108 * as IXGBE_VF_MAXMSIVECOTR = 1 6109 */ 6110 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); 6111 intr_handle->intr_vec[q_idx] = vector_idx; 6112 if (vector_idx < base + intr_handle->nb_efd - 1) 6113 vector_idx++; 6114 } 6115 6116 /* As RX queue setting above show, all queues use the vector 0. 6117 * Set only the ITR value of IXGBE_MISC_VEC_ID. 6118 */ 6119 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID), 6120 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 6121 | IXGBE_EITR_CNT_WDIS); 6122 } 6123 6124 /** 6125 * Sets up the hardware to properly generate MSI-X interrupts 6126 * @hw 6127 * board private structure 6128 */ 6129 static void 6130 ixgbe_configure_msix(struct rte_eth_dev *dev) 6131 { 6132 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 6133 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 6134 struct ixgbe_hw *hw = 6135 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6136 uint32_t queue_id, base = IXGBE_MISC_VEC_ID; 6137 uint32_t vec = IXGBE_MISC_VEC_ID; 6138 uint32_t mask; 6139 uint32_t gpie; 6140 6141 /* won't configure msix register if no mapping is done 6142 * between intr vector and event fd 6143 * but if misx has been enabled already, need to configure 6144 * auto clean, auto mask and throttling. 6145 */ 6146 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 6147 if (!rte_intr_dp_is_en(intr_handle) && 6148 !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT))) 6149 return; 6150 6151 if (rte_intr_allow_others(intr_handle)) 6152 vec = base = IXGBE_RX_VEC_START; 6153 6154 /* setup GPIE for MSI-x mode */ 6155 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 6156 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | 6157 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME; 6158 /* auto clearing and auto setting corresponding bits in EIMS 6159 * when MSI-X interrupt is triggered 6160 */ 6161 if (hw->mac.type == ixgbe_mac_82598EB) { 6162 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 6163 } else { 6164 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 6165 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 6166 } 6167 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 6168 6169 /* Populate the IVAR table and set the ITR values to the 6170 * corresponding register. 
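 * Layout recap (mirrors ixgbe_set_ivar_map() above): on 82599-class
 * MACs each 32-bit IVAR register holds four 8-bit entries, two queues
 * per register, with the Rx cause of queue q at byte (q & 1) * 2 and
 * the Tx cause at the following byte of IVAR(q >> 1); bit 7 of every
 * entry (IXGBE_IVAR_ALLOC_VAL) marks it valid.  E.g. Rx queue 5 lands
 * in bits 23:16 of IVAR(2).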
6171 */ 6172 if (rte_intr_dp_is_en(intr_handle)) { 6173 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; 6174 queue_id++) { 6175 /* by default, 1:1 mapping */ 6176 ixgbe_set_ivar_map(hw, 0, queue_id, vec); 6177 intr_handle->intr_vec[queue_id] = vec; 6178 if (vec < base + intr_handle->nb_efd - 1) 6179 vec++; 6180 } 6181 6182 switch (hw->mac.type) { 6183 case ixgbe_mac_82598EB: 6184 ixgbe_set_ivar_map(hw, -1, 6185 IXGBE_IVAR_OTHER_CAUSES_INDEX, 6186 IXGBE_MISC_VEC_ID); 6187 break; 6188 case ixgbe_mac_82599EB: 6189 case ixgbe_mac_X540: 6190 case ixgbe_mac_X550: 6191 case ixgbe_mac_X550EM_x: 6192 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); 6193 break; 6194 default: 6195 break; 6196 } 6197 } 6198 IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), 6199 IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) 6200 | IXGBE_EITR_CNT_WDIS); 6201 6202 /* set up to autoclear timer, and the vectors */ 6203 mask = IXGBE_EIMS_ENABLE_MASK; 6204 mask &= ~(IXGBE_EIMS_OTHER | 6205 IXGBE_EIMS_MAILBOX | 6206 IXGBE_EIMS_LSC); 6207 6208 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 6209 } 6210 6211 int 6212 ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, 6213 uint16_t queue_idx, uint16_t tx_rate) 6214 { 6215 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6216 struct rte_eth_rxmode *rxmode; 6217 uint32_t rf_dec, rf_int; 6218 uint32_t bcnrc_val; 6219 uint16_t link_speed = dev->data->dev_link.link_speed; 6220 6221 if (queue_idx >= hw->mac.max_tx_queues) 6222 return -EINVAL; 6223 6224 if (tx_rate != 0) { 6225 /* Calculate the rate factor values to set */ 6226 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; 6227 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; 6228 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; 6229 6230 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; 6231 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & 6232 IXGBE_RTTBCNRC_RF_INT_MASK_M); 6233 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); 6234 } else { 6235 bcnrc_val = 0; 6236 } 6237 6238 rxmode = &dev->data->dev_conf.rxmode; 6239 /* 6240 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM 6241 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise 6242 * set as 0x4. 6243 */ 6244 if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) && 6245 (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE)) 6246 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 6247 IXGBE_MMW_SIZE_JUMBO_FRAME); 6248 else 6249 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 6250 IXGBE_MMW_SIZE_DEFAULT); 6251 6252 /* Set RTTBCNRC of queue X */ 6253 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); 6254 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); 6255 IXGBE_WRITE_FLUSH(hw); 6256 6257 return 0; 6258 } 6259 6260 static int 6261 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, 6262 __attribute__((unused)) uint32_t index, 6263 __attribute__((unused)) uint32_t pool) 6264 { 6265 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6266 int diag; 6267 6268 /* 6269 * On a 82599 VF, adding again the same MAC addr is not an idempotent 6270 * operation. Trap this case to avoid exhausting the [very limited] 6271 * set of PF resources used to store VF MAC addresses. 
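 * Note that the check below only covers the permanent MAC address, which
 * the PF programmed at reset; if the PF runs out of RAR entries for any
 * other address, ixgbevf_set_uc_addr_vf() reports it through the
 * non-zero diag value logged further down.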
6272 */ 6273 if (memcmp(hw->mac.perm_addr, mac_addr, 6274 sizeof(struct rte_ether_addr)) == 0) 6275 return -1; 6276 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6277 if (diag != 0) 6278 PMD_DRV_LOG(ERR, "Unable to add MAC address " 6279 "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d", 6280 mac_addr->addr_bytes[0], 6281 mac_addr->addr_bytes[1], 6282 mac_addr->addr_bytes[2], 6283 mac_addr->addr_bytes[3], 6284 mac_addr->addr_bytes[4], 6285 mac_addr->addr_bytes[5], 6286 diag); 6287 return diag; 6288 } 6289 6290 static void 6291 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) 6292 { 6293 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6294 struct rte_ether_addr *perm_addr = 6295 (struct rte_ether_addr *)hw->mac.perm_addr; 6296 struct rte_ether_addr *mac_addr; 6297 uint32_t i; 6298 int diag; 6299 6300 /* 6301 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does 6302 * not support the deletion of a given MAC address. 6303 * Instead, it imposes to delete all MAC addresses, then to add again 6304 * all MAC addresses with the exception of the one to be deleted. 6305 */ 6306 (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); 6307 6308 /* 6309 * Add again all MAC addresses, with the exception of the deleted one 6310 * and of the permanent MAC address. 6311 */ 6312 for (i = 0, mac_addr = dev->data->mac_addrs; 6313 i < hw->mac.num_rar_entries; i++, mac_addr++) { 6314 /* Skip the deleted MAC address */ 6315 if (i == index) 6316 continue; 6317 /* Skip NULL MAC addresses */ 6318 if (rte_is_zero_ether_addr(mac_addr)) 6319 continue; 6320 /* Skip the permanent MAC address */ 6321 if (memcmp(perm_addr, mac_addr, 6322 sizeof(struct rte_ether_addr)) == 0) 6323 continue; 6324 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 6325 if (diag != 0) 6326 PMD_DRV_LOG(ERR, 6327 "Adding again MAC address " 6328 "%02x:%02x:%02x:%02x:%02x:%02x failed " 6329 "diag=%d", 6330 mac_addr->addr_bytes[0], 6331 mac_addr->addr_bytes[1], 6332 mac_addr->addr_bytes[2], 6333 mac_addr->addr_bytes[3], 6334 mac_addr->addr_bytes[4], 6335 mac_addr->addr_bytes[5], 6336 diag); 6337 } 6338 } 6339 6340 static int 6341 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, 6342 struct rte_ether_addr *addr) 6343 { 6344 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6345 6346 hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); 6347 6348 return 0; 6349 } 6350 6351 int 6352 ixgbe_syn_filter_set(struct rte_eth_dev *dev, 6353 struct rte_eth_syn_filter *filter, 6354 bool add) 6355 { 6356 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6357 struct ixgbe_filter_info *filter_info = 6358 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6359 uint32_t syn_info; 6360 uint32_t synqf; 6361 6362 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6363 return -EINVAL; 6364 6365 syn_info = filter_info->syn_info; 6366 6367 if (add) { 6368 if (syn_info & IXGBE_SYN_FILTER_ENABLE) 6369 return -EINVAL; 6370 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & 6371 IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); 6372 6373 if (filter->hig_pri) 6374 synqf |= IXGBE_SYN_FILTER_SYNQFP; 6375 else 6376 synqf &= ~IXGBE_SYN_FILTER_SYNQFP; 6377 } else { 6378 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); 6379 if (!(syn_info & IXGBE_SYN_FILTER_ENABLE)) 6380 return -ENOENT; 6381 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); 6382 } 6383 6384 filter_info->syn_info = synqf; 6385 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 6386 IXGBE_WRITE_FLUSH(hw); 6387 
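/* SYNQF is a single global register, so at most one SYN filter can be
 * active at a time; the syn_info bookkeeping above is what rejects a
 * second add with -EINVAL.
 */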
return 0; 6388 } 6389 6390 static int 6391 ixgbe_syn_filter_get(struct rte_eth_dev *dev, 6392 struct rte_eth_syn_filter *filter) 6393 { 6394 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6395 uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); 6396 6397 if (synqf & IXGBE_SYN_FILTER_ENABLE) { 6398 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0; 6399 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1); 6400 return 0; 6401 } 6402 return -ENOENT; 6403 } 6404 6405 static int 6406 ixgbe_syn_filter_handle(struct rte_eth_dev *dev, 6407 enum rte_filter_op filter_op, 6408 void *arg) 6409 { 6410 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6411 int ret; 6412 6413 MAC_TYPE_FILTER_SUP(hw->mac.type); 6414 6415 if (filter_op == RTE_ETH_FILTER_NOP) 6416 return 0; 6417 6418 if (arg == NULL) { 6419 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", 6420 filter_op); 6421 return -EINVAL; 6422 } 6423 6424 switch (filter_op) { 6425 case RTE_ETH_FILTER_ADD: 6426 ret = ixgbe_syn_filter_set(dev, 6427 (struct rte_eth_syn_filter *)arg, 6428 TRUE); 6429 break; 6430 case RTE_ETH_FILTER_DELETE: 6431 ret = ixgbe_syn_filter_set(dev, 6432 (struct rte_eth_syn_filter *)arg, 6433 FALSE); 6434 break; 6435 case RTE_ETH_FILTER_GET: 6436 ret = ixgbe_syn_filter_get(dev, 6437 (struct rte_eth_syn_filter *)arg); 6438 break; 6439 default: 6440 PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); 6441 ret = -EINVAL; 6442 break; 6443 } 6444 6445 return ret; 6446 } 6447 6448 6449 static inline enum ixgbe_5tuple_protocol 6450 convert_protocol_type(uint8_t protocol_value) 6451 { 6452 if (protocol_value == IPPROTO_TCP) 6453 return IXGBE_FILTER_PROTOCOL_TCP; 6454 else if (protocol_value == IPPROTO_UDP) 6455 return IXGBE_FILTER_PROTOCOL_UDP; 6456 else if (protocol_value == IPPROTO_SCTP) 6457 return IXGBE_FILTER_PROTOCOL_SCTP; 6458 else 6459 return IXGBE_FILTER_PROTOCOL_NONE; 6460 } 6461 6462 /* inject a 5-tuple filter to HW */ 6463 static inline void 6464 ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, 6465 struct ixgbe_5tuple_filter *filter) 6466 { 6467 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6468 int i; 6469 uint32_t ftqf, sdpqf; 6470 uint32_t l34timir = 0; 6471 uint8_t mask = 0xff; 6472 6473 i = filter->index; 6474 6475 sdpqf = (uint32_t)(filter->filter_info.dst_port << 6476 IXGBE_SDPQF_DSTPORT_SHIFT); 6477 sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); 6478 6479 ftqf = (uint32_t)(filter->filter_info.proto & 6480 IXGBE_FTQF_PROTOCOL_MASK); 6481 ftqf |= (uint32_t)((filter->filter_info.priority & 6482 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); 6483 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. 
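 * Note the inverted convention used by ixgbe_5tuple_filter_info: a mask
 * of 0 here means "compare this field" while 1 means "ignore it" (see
 * ntuple_filter_to_5tuple() below), which is the opposite of the
 * all-ones-selects semantics of rte_eth_ntuple_filter.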
*/ 6484 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; 6485 if (filter->filter_info.dst_ip_mask == 0) 6486 mask &= IXGBE_FTQF_DEST_ADDR_MASK; 6487 if (filter->filter_info.src_port_mask == 0) 6488 mask &= IXGBE_FTQF_SOURCE_PORT_MASK; 6489 if (filter->filter_info.dst_port_mask == 0) 6490 mask &= IXGBE_FTQF_DEST_PORT_MASK; 6491 if (filter->filter_info.proto_mask == 0) 6492 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; 6493 ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; 6494 ftqf |= IXGBE_FTQF_POOL_MASK_EN; 6495 ftqf |= IXGBE_FTQF_QUEUE_ENABLE; 6496 6497 IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); 6498 IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); 6499 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); 6500 IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); 6501 6502 l34timir |= IXGBE_L34T_IMIR_RESERVE; 6503 l34timir |= (uint32_t)(filter->queue << 6504 IXGBE_L34T_IMIR_QUEUE_SHIFT); 6505 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); 6506 } 6507 6508 /* 6509 * add a 5tuple filter 6510 * 6511 * @param 6512 * dev: Pointer to struct rte_eth_dev. 6513 * index: the index the filter allocates. 6514 * filter: ponter to the filter that will be added. 6515 * rx_queue: the queue id the filter assigned to. 6516 * 6517 * @return 6518 * - On success, zero. 6519 * - On failure, a negative value. 6520 */ 6521 static int 6522 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 6523 struct ixgbe_5tuple_filter *filter) 6524 { 6525 struct ixgbe_filter_info *filter_info = 6526 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6527 int i, idx, shift; 6528 6529 /* 6530 * look for an unused 5tuple filter index, 6531 * and insert the filter to list. 6532 */ 6533 for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { 6534 idx = i / (sizeof(uint32_t) * NBBY); 6535 shift = i % (sizeof(uint32_t) * NBBY); 6536 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { 6537 filter_info->fivetuple_mask[idx] |= 1 << shift; 6538 filter->index = i; 6539 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 6540 filter, 6541 entries); 6542 break; 6543 } 6544 } 6545 if (i >= IXGBE_MAX_FTQF_FILTERS) { 6546 PMD_DRV_LOG(ERR, "5tuple filters are full."); 6547 return -ENOSYS; 6548 } 6549 6550 ixgbe_inject_5tuple_filter(dev, filter); 6551 6552 return 0; 6553 } 6554 6555 /* 6556 * remove a 5tuple filter 6557 * 6558 * @param 6559 * dev: Pointer to struct rte_eth_dev. 6560 * filter: the pointer of the filter will be removed. 
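 * Note: the filter is unlinked from fivetuple_list and released with
 * rte_free(), so the caller must not touch the pointer afterwards; the
 * matching DAQF/SAQF/SDPQF/FTQF/L34T_IMIR registers are cleared as well.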
6561 */ 6562 static void 6563 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 6564 struct ixgbe_5tuple_filter *filter) 6565 { 6566 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6567 struct ixgbe_filter_info *filter_info = 6568 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6569 uint16_t index = filter->index; 6570 6571 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= 6572 ~(1 << (index % (sizeof(uint32_t) * NBBY))); 6573 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); 6574 rte_free(filter); 6575 6576 IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); 6577 IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); 6578 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); 6579 IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); 6580 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); 6581 } 6582 6583 static int 6584 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 6585 { 6586 struct ixgbe_hw *hw; 6587 uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD; 6588 struct rte_eth_dev_data *dev_data = dev->data; 6589 6590 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6591 6592 if (mtu < RTE_ETHER_MIN_MTU || 6593 max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) 6594 return -EINVAL; 6595 6596 /* If device is started, refuse mtu that requires the support of 6597 * scattered packets when this feature has not been enabled before. 6598 */ 6599 if (dev_data->dev_started && !dev_data->scattered_rx && 6600 (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > 6601 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { 6602 PMD_INIT_LOG(ERR, "Stop port first."); 6603 return -EINVAL; 6604 } 6605 6606 /* 6607 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU 6608 * request of the version 2.0 of the mailbox API. 6609 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 6610 * of the mailbox API. 
6611 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers 6612 * prior to 3.11.33 which contains the following change: 6613 * "ixgbe: Enable jumbo frames support w/ SR-IOV" 6614 */ 6615 ixgbevf_rlpml_set_vf(hw, max_frame); 6616 6617 /* update max frame size */ 6618 dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame; 6619 return 0; 6620 } 6621 6622 static inline struct ixgbe_5tuple_filter * 6623 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, 6624 struct ixgbe_5tuple_filter_info *key) 6625 { 6626 struct ixgbe_5tuple_filter *it; 6627 6628 TAILQ_FOREACH(it, filter_list, entries) { 6629 if (memcmp(key, &it->filter_info, 6630 sizeof(struct ixgbe_5tuple_filter_info)) == 0) { 6631 return it; 6632 } 6633 } 6634 return NULL; 6635 } 6636 6637 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/ 6638 static inline int 6639 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, 6640 struct ixgbe_5tuple_filter_info *filter_info) 6641 { 6642 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || 6643 filter->priority > IXGBE_5TUPLE_MAX_PRI || 6644 filter->priority < IXGBE_5TUPLE_MIN_PRI) 6645 return -EINVAL; 6646 6647 switch (filter->dst_ip_mask) { 6648 case UINT32_MAX: 6649 filter_info->dst_ip_mask = 0; 6650 filter_info->dst_ip = filter->dst_ip; 6651 break; 6652 case 0: 6653 filter_info->dst_ip_mask = 1; 6654 break; 6655 default: 6656 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 6657 return -EINVAL; 6658 } 6659 6660 switch (filter->src_ip_mask) { 6661 case UINT32_MAX: 6662 filter_info->src_ip_mask = 0; 6663 filter_info->src_ip = filter->src_ip; 6664 break; 6665 case 0: 6666 filter_info->src_ip_mask = 1; 6667 break; 6668 default: 6669 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 6670 return -EINVAL; 6671 } 6672 6673 switch (filter->dst_port_mask) { 6674 case UINT16_MAX: 6675 filter_info->dst_port_mask = 0; 6676 filter_info->dst_port = filter->dst_port; 6677 break; 6678 case 0: 6679 filter_info->dst_port_mask = 1; 6680 break; 6681 default: 6682 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 6683 return -EINVAL; 6684 } 6685 6686 switch (filter->src_port_mask) { 6687 case UINT16_MAX: 6688 filter_info->src_port_mask = 0; 6689 filter_info->src_port = filter->src_port; 6690 break; 6691 case 0: 6692 filter_info->src_port_mask = 1; 6693 break; 6694 default: 6695 PMD_DRV_LOG(ERR, "invalid src_port mask."); 6696 return -EINVAL; 6697 } 6698 6699 switch (filter->proto_mask) { 6700 case UINT8_MAX: 6701 filter_info->proto_mask = 0; 6702 filter_info->proto = 6703 convert_protocol_type(filter->proto); 6704 break; 6705 case 0: 6706 filter_info->proto_mask = 1; 6707 break; 6708 default: 6709 PMD_DRV_LOG(ERR, "invalid protocol mask."); 6710 return -EINVAL; 6711 } 6712 6713 filter_info->priority = (uint8_t)filter->priority; 6714 return 0; 6715 } 6716 6717 /* 6718 * add or delete a ntuple filter 6719 * 6720 * @param 6721 * dev: Pointer to struct rte_eth_dev. 6722 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 6723 * add: if true, add filter, if false, remove filter 6724 * 6725 * @return 6726 * - On success, zero. 6727 * - On failure, a negative value. 
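 *
 * Illustrative sketch only (not part of the driver): steering all UDP
 * traffic to Rx queue 2 could look roughly like the following, assuming
 * the standard rte_eth_ntuple_filter layout; fields whose mask is left
 * at zero are treated as wildcards by ntuple_filter_to_5tuple().
 *
 *     struct rte_eth_ntuple_filter f = {
 *             .flags = RTE_5TUPLE_FLAGS,
 *             .proto = IPPROTO_UDP,
 *             .proto_mask = UINT8_MAX,
 *             .priority = 1,
 *             .queue = 2,
 *     };
 *     ixgbe_add_del_ntuple_filter(dev, &f, true);
 *
 * The priority must stay within IXGBE_5TUPLE_MIN_PRI and
 * IXGBE_5TUPLE_MAX_PRI and the queue below IXGBE_MAX_RX_QUEUE_NUM, or
 * the conversion helper rejects the filter with -EINVAL.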
6728 */ 6729 int 6730 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, 6731 struct rte_eth_ntuple_filter *ntuple_filter, 6732 bool add) 6733 { 6734 struct ixgbe_filter_info *filter_info = 6735 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6736 struct ixgbe_5tuple_filter_info filter_5tuple; 6737 struct ixgbe_5tuple_filter *filter; 6738 int ret; 6739 6740 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { 6741 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 6742 return -EINVAL; 6743 } 6744 6745 memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); 6746 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); 6747 if (ret < 0) 6748 return ret; 6749 6750 filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, 6751 &filter_5tuple); 6752 if (filter != NULL && add) { 6753 PMD_DRV_LOG(ERR, "filter exists."); 6754 return -EEXIST; 6755 } 6756 if (filter == NULL && !add) { 6757 PMD_DRV_LOG(ERR, "filter doesn't exist."); 6758 return -ENOENT; 6759 } 6760 6761 if (add) { 6762 filter = rte_zmalloc("ixgbe_5tuple_filter", 6763 sizeof(struct ixgbe_5tuple_filter), 0); 6764 if (filter == NULL) 6765 return -ENOMEM; 6766 rte_memcpy(&filter->filter_info, 6767 &filter_5tuple, 6768 sizeof(struct ixgbe_5tuple_filter_info)); 6769 filter->queue = ntuple_filter->queue; 6770 ret = ixgbe_add_5tuple_filter(dev, filter); 6771 if (ret < 0) { 6772 rte_free(filter); 6773 return ret; 6774 } 6775 } else 6776 ixgbe_remove_5tuple_filter(dev, filter); 6777 6778 return 0; 6779 } 6780 6781 /* 6782 * get a ntuple filter 6783 * 6784 * @param 6785 * dev: Pointer to struct rte_eth_dev. 6786 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 6787 * 6788 * @return 6789 * - On success, zero. 6790 * - On failure, a negative value. 6791 */ 6792 static int 6793 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, 6794 struct rte_eth_ntuple_filter *ntuple_filter) 6795 { 6796 struct ixgbe_filter_info *filter_info = 6797 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6798 struct ixgbe_5tuple_filter_info filter_5tuple; 6799 struct ixgbe_5tuple_filter *filter; 6800 int ret; 6801 6802 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { 6803 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 6804 return -EINVAL; 6805 } 6806 6807 memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); 6808 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); 6809 if (ret < 0) 6810 return ret; 6811 6812 filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, 6813 &filter_5tuple); 6814 if (filter == NULL) { 6815 PMD_DRV_LOG(ERR, "filter doesn't exist."); 6816 return -ENOENT; 6817 } 6818 ntuple_filter->queue = filter->queue; 6819 return 0; 6820 } 6821 6822 /* 6823 * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter. 6824 * @dev: pointer to rte_eth_dev structure 6825 * @filter_op:operation will be taken. 6826 * @arg: a pointer to specific structure corresponding to the filter_op 6827 * 6828 * @return 6829 * - On success, zero. 6830 * - On failure, a negative value. 
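 * This handler is reached through ixgbe_dev_filter_ctrl() for the
 * RTE_ETH_FILTER_NTUPLE type; besides NOP it only implements the
 * ADD/DELETE/GET operations, and MAC_TYPE_FILTER_SUP_EXT() weeds out
 * MAC types without 5-tuple filter support.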
6831 */ 6832 static int 6833 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, 6834 enum rte_filter_op filter_op, 6835 void *arg) 6836 { 6837 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6838 int ret; 6839 6840 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); 6841 6842 if (filter_op == RTE_ETH_FILTER_NOP) 6843 return 0; 6844 6845 if (arg == NULL) { 6846 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 6847 filter_op); 6848 return -EINVAL; 6849 } 6850 6851 switch (filter_op) { 6852 case RTE_ETH_FILTER_ADD: 6853 ret = ixgbe_add_del_ntuple_filter(dev, 6854 (struct rte_eth_ntuple_filter *)arg, 6855 TRUE); 6856 break; 6857 case RTE_ETH_FILTER_DELETE: 6858 ret = ixgbe_add_del_ntuple_filter(dev, 6859 (struct rte_eth_ntuple_filter *)arg, 6860 FALSE); 6861 break; 6862 case RTE_ETH_FILTER_GET: 6863 ret = ixgbe_get_ntuple_filter(dev, 6864 (struct rte_eth_ntuple_filter *)arg); 6865 break; 6866 default: 6867 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 6868 ret = -EINVAL; 6869 break; 6870 } 6871 return ret; 6872 } 6873 6874 int 6875 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, 6876 struct rte_eth_ethertype_filter *filter, 6877 bool add) 6878 { 6879 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6880 struct ixgbe_filter_info *filter_info = 6881 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6882 uint32_t etqf = 0; 6883 uint32_t etqs = 0; 6884 int ret; 6885 struct ixgbe_ethertype_filter ethertype_filter; 6886 6887 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 6888 return -EINVAL; 6889 6890 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || 6891 filter->ether_type == RTE_ETHER_TYPE_IPV6) { 6892 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 6893 " ethertype filter.", filter->ether_type); 6894 return -EINVAL; 6895 } 6896 6897 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 6898 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 6899 return -EINVAL; 6900 } 6901 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 6902 PMD_DRV_LOG(ERR, "drop option is unsupported."); 6903 return -EINVAL; 6904 } 6905 6906 ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); 6907 if (ret >= 0 && add) { 6908 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 6909 filter->ether_type); 6910 return -EEXIST; 6911 } 6912 if (ret < 0 && !add) { 6913 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 6914 filter->ether_type); 6915 return -ENOENT; 6916 } 6917 6918 if (add) { 6919 etqf = IXGBE_ETQF_FILTER_EN; 6920 etqf |= (uint32_t)filter->ether_type; 6921 etqs |= (uint32_t)((filter->queue << 6922 IXGBE_ETQS_RX_QUEUE_SHIFT) & 6923 IXGBE_ETQS_RX_QUEUE); 6924 etqs |= IXGBE_ETQS_QUEUE_EN; 6925 6926 ethertype_filter.ethertype = filter->ether_type; 6927 ethertype_filter.etqf = etqf; 6928 ethertype_filter.etqs = etqs; 6929 ethertype_filter.conf = FALSE; 6930 ret = ixgbe_ethertype_filter_insert(filter_info, 6931 ðertype_filter); 6932 if (ret < 0) { 6933 PMD_DRV_LOG(ERR, "ethertype filters are full."); 6934 return -ENOSPC; 6935 } 6936 } else { 6937 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret); 6938 if (ret < 0) 6939 return -ENOSYS; 6940 } 6941 IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); 6942 IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); 6943 IXGBE_WRITE_FLUSH(hw); 6944 6945 return 0; 6946 } 6947 6948 static int 6949 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, 6950 struct rte_eth_ethertype_filter *filter) 6951 { 6952 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6953 struct 
ixgbe_filter_info *filter_info = 6954 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 6955 uint32_t etqf, etqs; 6956 int ret; 6957 6958 ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); 6959 if (ret < 0) { 6960 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 6961 filter->ether_type); 6962 return -ENOENT; 6963 } 6964 6965 etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret)); 6966 if (etqf & IXGBE_ETQF_FILTER_EN) { 6967 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret)); 6968 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE; 6969 filter->flags = 0; 6970 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> 6971 IXGBE_ETQS_RX_QUEUE_SHIFT; 6972 return 0; 6973 } 6974 return -ENOENT; 6975 } 6976 6977 /* 6978 * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter. 6979 * @dev: pointer to rte_eth_dev structure 6980 * @filter_op:operation will be taken. 6981 * @arg: a pointer to specific structure corresponding to the filter_op 6982 */ 6983 static int 6984 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, 6985 enum rte_filter_op filter_op, 6986 void *arg) 6987 { 6988 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6989 int ret; 6990 6991 MAC_TYPE_FILTER_SUP(hw->mac.type); 6992 6993 if (filter_op == RTE_ETH_FILTER_NOP) 6994 return 0; 6995 6996 if (arg == NULL) { 6997 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 6998 filter_op); 6999 return -EINVAL; 7000 } 7001 7002 switch (filter_op) { 7003 case RTE_ETH_FILTER_ADD: 7004 ret = ixgbe_add_del_ethertype_filter(dev, 7005 (struct rte_eth_ethertype_filter *)arg, 7006 TRUE); 7007 break; 7008 case RTE_ETH_FILTER_DELETE: 7009 ret = ixgbe_add_del_ethertype_filter(dev, 7010 (struct rte_eth_ethertype_filter *)arg, 7011 FALSE); 7012 break; 7013 case RTE_ETH_FILTER_GET: 7014 ret = ixgbe_get_ethertype_filter(dev, 7015 (struct rte_eth_ethertype_filter *)arg); 7016 break; 7017 default: 7018 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 7019 ret = -EINVAL; 7020 break; 7021 } 7022 return ret; 7023 } 7024 7025 static int 7026 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, 7027 enum rte_filter_type filter_type, 7028 enum rte_filter_op filter_op, 7029 void *arg) 7030 { 7031 int ret = 0; 7032 7033 switch (filter_type) { 7034 case RTE_ETH_FILTER_NTUPLE: 7035 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg); 7036 break; 7037 case RTE_ETH_FILTER_ETHERTYPE: 7038 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg); 7039 break; 7040 case RTE_ETH_FILTER_SYN: 7041 ret = ixgbe_syn_filter_handle(dev, filter_op, arg); 7042 break; 7043 case RTE_ETH_FILTER_FDIR: 7044 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg); 7045 break; 7046 case RTE_ETH_FILTER_L2_TUNNEL: 7047 ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg); 7048 break; 7049 case RTE_ETH_FILTER_GENERIC: 7050 if (filter_op != RTE_ETH_FILTER_GET) 7051 return -EINVAL; 7052 *(const void **)arg = &ixgbe_flow_ops; 7053 break; 7054 default: 7055 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", 7056 filter_type); 7057 ret = -EINVAL; 7058 break; 7059 } 7060 7061 return ret; 7062 } 7063 7064 static u8 * 7065 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw, 7066 u8 **mc_addr_ptr, u32 *vmdq) 7067 { 7068 u8 *mc_addr; 7069 7070 *vmdq = 0; 7071 mc_addr = *mc_addr_ptr; 7072 *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); 7073 return mc_addr; 7074 } 7075 7076 static int 7077 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 7078 struct rte_ether_addr *mc_addr_set, 7079 uint32_t nb_mc_addr) 7080 
{ 7081 struct ixgbe_hw *hw; 7082 u8 *mc_addr_list; 7083 7084 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7085 mc_addr_list = (u8 *)mc_addr_set; 7086 return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, 7087 ixgbe_dev_addr_list_itr, TRUE); 7088 } 7089 7090 static uint64_t 7091 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) 7092 { 7093 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7094 uint64_t systime_cycles; 7095 7096 switch (hw->mac.type) { 7097 case ixgbe_mac_X550: 7098 case ixgbe_mac_X550EM_x: 7099 case ixgbe_mac_X550EM_a: 7100 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ 7101 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 7102 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 7103 * NSEC_PER_SEC; 7104 break; 7105 default: 7106 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 7107 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 7108 << 32; 7109 } 7110 7111 return systime_cycles; 7112 } 7113 7114 static uint64_t 7115 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 7116 { 7117 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7118 uint64_t rx_tstamp_cycles; 7119 7120 switch (hw->mac.type) { 7121 case ixgbe_mac_X550: 7122 case ixgbe_mac_X550EM_x: 7123 case ixgbe_mac_X550EM_a: 7124 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 7125 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 7126 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 7127 * NSEC_PER_SEC; 7128 break; 7129 default: 7130 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 7131 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 7132 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 7133 << 32; 7134 } 7135 7136 return rx_tstamp_cycles; 7137 } 7138 7139 static uint64_t 7140 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 7141 { 7142 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7143 uint64_t tx_tstamp_cycles; 7144 7145 switch (hw->mac.type) { 7146 case ixgbe_mac_X550: 7147 case ixgbe_mac_X550EM_x: 7148 case ixgbe_mac_X550EM_a: 7149 /* TXSTMPL stores ns and TXSTMPH stores seconds. */ 7150 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 7151 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 7152 * NSEC_PER_SEC; 7153 break; 7154 default: 7155 /* TXSTMPL stores ns and TXSTMPH stores seconds. */ 7156 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 7157 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 7158 << 32; 7159 } 7160 7161 return tx_tstamp_cycles; 7162 } 7163 7164 static void 7165 ixgbe_start_timecounters(struct rte_eth_dev *dev) 7166 { 7167 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7168 struct ixgbe_adapter *adapter = dev->data->dev_private; 7169 struct rte_eth_link link; 7170 uint32_t incval = 0; 7171 uint32_t shift = 0; 7172 7173 /* Get current link speed. 
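 * The speed matters because on 82599/X540 the SYSTIM counter is clocked
 * from the link, so a different TIMINCA increment/shift pair is needed
 * per speed for the cycle-to-nanosecond conversion done later by
 * rte_timecounter_update(); the X550 family keeps SYSTIM directly in
 * nanoseconds, hence the incval = 1 / shift = 0 override below.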
*/ 7174 ixgbe_dev_link_update(dev, 1); 7175 rte_eth_linkstatus_get(dev, &link); 7176 7177 switch (link.link_speed) { 7178 case ETH_SPEED_NUM_100M: 7179 incval = IXGBE_INCVAL_100; 7180 shift = IXGBE_INCVAL_SHIFT_100; 7181 break; 7182 case ETH_SPEED_NUM_1G: 7183 incval = IXGBE_INCVAL_1GB; 7184 shift = IXGBE_INCVAL_SHIFT_1GB; 7185 break; 7186 case ETH_SPEED_NUM_10G: 7187 default: 7188 incval = IXGBE_INCVAL_10GB; 7189 shift = IXGBE_INCVAL_SHIFT_10GB; 7190 break; 7191 } 7192 7193 switch (hw->mac.type) { 7194 case ixgbe_mac_X550: 7195 case ixgbe_mac_X550EM_x: 7196 case ixgbe_mac_X550EM_a: 7197 /* Independent of link speed. */ 7198 incval = 1; 7199 /* Cycles read will be interpreted as ns. */ 7200 shift = 0; 7201 /* Fall-through */ 7202 case ixgbe_mac_X540: 7203 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); 7204 break; 7205 case ixgbe_mac_82599EB: 7206 incval >>= IXGBE_INCVAL_SHIFT_82599; 7207 shift -= IXGBE_INCVAL_SHIFT_82599; 7208 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 7209 (1 << IXGBE_INCPER_SHIFT_82599) | incval); 7210 break; 7211 default: 7212 /* Not supported. */ 7213 return; 7214 } 7215 7216 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 7217 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 7218 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 7219 7220 adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 7221 adapter->systime_tc.cc_shift = shift; 7222 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 7223 7224 adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 7225 adapter->rx_tstamp_tc.cc_shift = shift; 7226 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 7227 7228 adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 7229 adapter->tx_tstamp_tc.cc_shift = shift; 7230 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 7231 } 7232 7233 static int 7234 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 7235 { 7236 struct ixgbe_adapter *adapter = dev->data->dev_private; 7237 7238 adapter->systime_tc.nsec += delta; 7239 adapter->rx_tstamp_tc.nsec += delta; 7240 adapter->tx_tstamp_tc.nsec += delta; 7241 7242 return 0; 7243 } 7244 7245 static int 7246 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 7247 { 7248 uint64_t ns; 7249 struct ixgbe_adapter *adapter = dev->data->dev_private; 7250 7251 ns = rte_timespec_to_ns(ts); 7252 /* Set the timecounters to a new value. */ 7253 adapter->systime_tc.nsec = ns; 7254 adapter->rx_tstamp_tc.nsec = ns; 7255 adapter->tx_tstamp_tc.nsec = ns; 7256 7257 return 0; 7258 } 7259 7260 static int 7261 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 7262 { 7263 uint64_t ns, systime_cycles; 7264 struct ixgbe_adapter *adapter = dev->data->dev_private; 7265 7266 systime_cycles = ixgbe_read_systime_cyclecounter(dev); 7267 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 7268 *ts = rte_ns_to_timespec(ns); 7269 7270 return 0; 7271 } 7272 7273 static int 7274 ixgbe_timesync_enable(struct rte_eth_dev *dev) 7275 { 7276 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7277 uint32_t tsync_ctl; 7278 uint32_t tsauxc; 7279 7280 /* Stop the timesync system time. */ 7281 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0); 7282 /* Reset the timesync system time value. */ 7283 IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0); 7284 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0); 7285 7286 /* Enable system time for platforms where it isn't on by default. 
 */
	tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
	tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
	IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);

	ixgbe_start_timecounters(dev);

	/* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
			(RTE_ETHER_TYPE_1588 |
			 IXGBE_ETQF_FILTER_EN |
			 IXGBE_ETQF_1588));

	/* Enable timestamping of received PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);

	/* Enable timestamping of transmitted PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);

	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

static int
ixgbe_timesync_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t tsync_ctl;

	/* Disable timestamping of transmitted PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);

	/* Disable timestamping of received PTP packets. */
	tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);

	/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);

	/* Stop incrementing the System Time registers. */
	IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);

	return 0;
}

/* An illustrative usage sketch of the public rte_eth_timesync_* API that
 * drives these callbacks is appended at the end of this file.
 */
static int
ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
				 struct timespec *timestamp,
				 uint32_t flags __rte_unused)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_adapter *adapter = dev->data->dev_private;
	uint32_t tsync_rxctl;
	uint64_t rx_tstamp_cycles;
	uint64_t ns;

	tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
		return -EINVAL;

	rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}

static int
ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
				 struct timespec *timestamp)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_adapter *adapter = dev->data->dev_private;
	uint32_t tsync_txctl;
	uint64_t tx_tstamp_cycles;
	uint64_t ns;

	tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
		return -EINVAL;

	tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
	ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
	*timestamp = rte_ns_to_timespec(ns);

	return 0;
}

static int
ixgbe_get_reg_length(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int count = 0;
	int g_ind = 0;
	const struct reg_info *reg_group;
	const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
7390 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 7391 7392 while ((reg_group = reg_set[g_ind++])) 7393 count += ixgbe_regs_group_count(reg_group); 7394 7395 return count; 7396 } 7397 7398 static int 7399 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) 7400 { 7401 int count = 0; 7402 int g_ind = 0; 7403 const struct reg_info *reg_group; 7404 7405 while ((reg_group = ixgbevf_regs[g_ind++])) 7406 count += ixgbe_regs_group_count(reg_group); 7407 7408 return count; 7409 } 7410 7411 static int 7412 ixgbe_get_regs(struct rte_eth_dev *dev, 7413 struct rte_dev_reg_info *regs) 7414 { 7415 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7416 uint32_t *data = regs->data; 7417 int g_ind = 0; 7418 int count = 0; 7419 const struct reg_info *reg_group; 7420 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 7421 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 7422 7423 if (data == NULL) { 7424 regs->length = ixgbe_get_reg_length(dev); 7425 regs->width = sizeof(uint32_t); 7426 return 0; 7427 } 7428 7429 /* Support only full register dump */ 7430 if ((regs->length == 0) || 7431 (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { 7432 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 7433 hw->device_id; 7434 while ((reg_group = reg_set[g_ind++])) 7435 count += ixgbe_read_regs_group(dev, &data[count], 7436 reg_group); 7437 return 0; 7438 } 7439 7440 return -ENOTSUP; 7441 } 7442 7443 static int 7444 ixgbevf_get_regs(struct rte_eth_dev *dev, 7445 struct rte_dev_reg_info *regs) 7446 { 7447 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7448 uint32_t *data = regs->data; 7449 int g_ind = 0; 7450 int count = 0; 7451 const struct reg_info *reg_group; 7452 7453 if (data == NULL) { 7454 regs->length = ixgbevf_get_reg_length(dev); 7455 regs->width = sizeof(uint32_t); 7456 return 0; 7457 } 7458 7459 /* Support only full register dump */ 7460 if ((regs->length == 0) || 7461 (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { 7462 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 7463 hw->device_id; 7464 while ((reg_group = ixgbevf_regs[g_ind++])) 7465 count += ixgbe_read_regs_group(dev, &data[count], 7466 reg_group); 7467 return 0; 7468 } 7469 7470 return -ENOTSUP; 7471 } 7472 7473 static int 7474 ixgbe_get_eeprom_length(struct rte_eth_dev *dev) 7475 { 7476 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7477 7478 /* Return unit is byte count */ 7479 return hw->eeprom.word_size * 2; 7480 } 7481 7482 static int 7483 ixgbe_get_eeprom(struct rte_eth_dev *dev, 7484 struct rte_dev_eeprom_info *in_eeprom) 7485 { 7486 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7487 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7488 uint16_t *data = in_eeprom->data; 7489 int first, length; 7490 7491 first = in_eeprom->offset >> 1; 7492 length = in_eeprom->length >> 1; 7493 if ((first > hw->eeprom.word_size) || 7494 ((first + length) > hw->eeprom.word_size)) 7495 return -EINVAL; 7496 7497 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7498 7499 return eeprom->ops.read_buffer(hw, first, length, data); 7500 } 7501 7502 static int 7503 ixgbe_set_eeprom(struct rte_eth_dev *dev, 7504 struct rte_dev_eeprom_info *in_eeprom) 7505 { 7506 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7507 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 7508 uint16_t *data = in_eeprom->data; 7509 int first, length; 7510 7511 first = in_eeprom->offset >> 1; 7512 length = 
in_eeprom->length >> 1; 7513 if ((first > hw->eeprom.word_size) || 7514 ((first + length) > hw->eeprom.word_size)) 7515 return -EINVAL; 7516 7517 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 7518 7519 return eeprom->ops.write_buffer(hw, first, length, data); 7520 } 7521 7522 static int 7523 ixgbe_get_module_info(struct rte_eth_dev *dev, 7524 struct rte_eth_dev_module_info *modinfo) 7525 { 7526 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7527 uint32_t status; 7528 uint8_t sff8472_rev, addr_mode; 7529 bool page_swap = false; 7530 7531 /* Check whether we support SFF-8472 or not */ 7532 status = hw->phy.ops.read_i2c_eeprom(hw, 7533 IXGBE_SFF_SFF_8472_COMP, 7534 &sff8472_rev); 7535 if (status != 0) 7536 return -EIO; 7537 7538 /* addressing mode is not supported */ 7539 status = hw->phy.ops.read_i2c_eeprom(hw, 7540 IXGBE_SFF_SFF_8472_SWAP, 7541 &addr_mode); 7542 if (status != 0) 7543 return -EIO; 7544 7545 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { 7546 PMD_DRV_LOG(ERR, 7547 "Address change required to access page 0xA2, " 7548 "but not supported. Please report the module " 7549 "type to the driver maintainers."); 7550 page_swap = true; 7551 } 7552 7553 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { 7554 /* We have a SFP, but it does not support SFF-8472 */ 7555 modinfo->type = RTE_ETH_MODULE_SFF_8079; 7556 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; 7557 } else { 7558 /* We have a SFP which supports a revision of SFF-8472. */ 7559 modinfo->type = RTE_ETH_MODULE_SFF_8472; 7560 modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; 7561 } 7562 7563 return 0; 7564 } 7565 7566 static int 7567 ixgbe_get_module_eeprom(struct rte_eth_dev *dev, 7568 struct rte_dev_eeprom_info *info) 7569 { 7570 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7571 uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID; 7572 uint8_t databyte = 0xFF; 7573 uint8_t *data = info->data; 7574 uint32_t i = 0; 7575 7576 if (info->length == 0) 7577 return -EINVAL; 7578 7579 for (i = info->offset; i < info->offset + info->length; i++) { 7580 if (i < RTE_ETH_MODULE_SFF_8079_LEN) 7581 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); 7582 else 7583 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); 7584 7585 if (status != 0) 7586 return -EIO; 7587 7588 data[i - info->offset] = databyte; 7589 } 7590 7591 return 0; 7592 } 7593 7594 uint16_t 7595 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { 7596 switch (mac_type) { 7597 case ixgbe_mac_X550: 7598 case ixgbe_mac_X550EM_x: 7599 case ixgbe_mac_X550EM_a: 7600 return ETH_RSS_RETA_SIZE_512; 7601 case ixgbe_mac_X550_vf: 7602 case ixgbe_mac_X550EM_x_vf: 7603 case ixgbe_mac_X550EM_a_vf: 7604 return ETH_RSS_RETA_SIZE_64; 7605 case ixgbe_mac_X540_vf: 7606 case ixgbe_mac_82599_vf: 7607 return 0; 7608 default: 7609 return ETH_RSS_RETA_SIZE_128; 7610 } 7611 } 7612 7613 uint32_t 7614 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { 7615 switch (mac_type) { 7616 case ixgbe_mac_X550: 7617 case ixgbe_mac_X550EM_x: 7618 case ixgbe_mac_X550EM_a: 7619 if (reta_idx < ETH_RSS_RETA_SIZE_128) 7620 return IXGBE_RETA(reta_idx >> 2); 7621 else 7622 return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2); 7623 case ixgbe_mac_X550_vf: 7624 case ixgbe_mac_X550EM_x_vf: 7625 case ixgbe_mac_X550EM_a_vf: 7626 return IXGBE_VFRETA(reta_idx >> 2); 7627 default: 7628 return IXGBE_RETA(reta_idx >> 2); 7629 } 7630 } 7631 7632 uint32_t 7633 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { 7634 switch (mac_type) { 
7635 case ixgbe_mac_X550_vf: 7636 case ixgbe_mac_X550EM_x_vf: 7637 case ixgbe_mac_X550EM_a_vf: 7638 return IXGBE_VFMRQC; 7639 default: 7640 return IXGBE_MRQC; 7641 } 7642 } 7643 7644 uint32_t 7645 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { 7646 switch (mac_type) { 7647 case ixgbe_mac_X550_vf: 7648 case ixgbe_mac_X550EM_x_vf: 7649 case ixgbe_mac_X550EM_a_vf: 7650 return IXGBE_VFRSSRK(i); 7651 default: 7652 return IXGBE_RSSRK(i); 7653 } 7654 } 7655 7656 bool 7657 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { 7658 switch (mac_type) { 7659 case ixgbe_mac_82599_vf: 7660 case ixgbe_mac_X540_vf: 7661 return 0; 7662 default: 7663 return 1; 7664 } 7665 } 7666 7667 static int 7668 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 7669 struct rte_eth_dcb_info *dcb_info) 7670 { 7671 struct ixgbe_dcb_config *dcb_config = 7672 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 7673 struct ixgbe_dcb_tc_config *tc; 7674 struct rte_eth_dcb_tc_queue_mapping *tc_queue; 7675 uint8_t nb_tcs; 7676 uint8_t i, j; 7677 7678 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) 7679 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; 7680 else 7681 dcb_info->nb_tcs = 1; 7682 7683 tc_queue = &dcb_info->tc_queue; 7684 nb_tcs = dcb_info->nb_tcs; 7685 7686 if (dcb_config->vt_mode) { /* vt is enabled*/ 7687 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 7688 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; 7689 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) 7690 dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; 7691 if (RTE_ETH_DEV_SRIOV(dev).active > 0) { 7692 for (j = 0; j < nb_tcs; j++) { 7693 tc_queue->tc_rxq[0][j].base = j; 7694 tc_queue->tc_rxq[0][j].nb_queue = 1; 7695 tc_queue->tc_txq[0][j].base = j; 7696 tc_queue->tc_txq[0][j].nb_queue = 1; 7697 } 7698 } else { 7699 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { 7700 for (j = 0; j < nb_tcs; j++) { 7701 tc_queue->tc_rxq[i][j].base = 7702 i * nb_tcs + j; 7703 tc_queue->tc_rxq[i][j].nb_queue = 1; 7704 tc_queue->tc_txq[i][j].base = 7705 i * nb_tcs + j; 7706 tc_queue->tc_txq[i][j].nb_queue = 1; 7707 } 7708 } 7709 } 7710 } else { /* vt is disabled*/ 7711 struct rte_eth_dcb_rx_conf *rx_conf = 7712 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 7713 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) 7714 dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; 7715 if (dcb_info->nb_tcs == ETH_4_TCS) { 7716 for (i = 0; i < dcb_info->nb_tcs; i++) { 7717 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; 7718 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 7719 } 7720 dcb_info->tc_queue.tc_txq[0][0].base = 0; 7721 dcb_info->tc_queue.tc_txq[0][1].base = 64; 7722 dcb_info->tc_queue.tc_txq[0][2].base = 96; 7723 dcb_info->tc_queue.tc_txq[0][3].base = 112; 7724 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; 7725 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 7726 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 7727 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 7728 } else if (dcb_info->nb_tcs == ETH_8_TCS) { 7729 for (i = 0; i < dcb_info->nb_tcs; i++) { 7730 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; 7731 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 7732 } 7733 dcb_info->tc_queue.tc_txq[0][0].base = 0; 7734 dcb_info->tc_queue.tc_txq[0][1].base = 32; 7735 dcb_info->tc_queue.tc_txq[0][2].base = 64; 7736 dcb_info->tc_queue.tc_txq[0][3].base = 80; 7737 dcb_info->tc_queue.tc_txq[0][4].base = 96; 7738 dcb_info->tc_queue.tc_txq[0][5].base = 104; 7739 dcb_info->tc_queue.tc_txq[0][6].base = 112; 7740 dcb_info->tc_queue.tc_txq[0][7].base = 120; 7741 
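			/*
			 * Per-TC Tx queue layout for the 8-TC case, as encoded
			 * by the base/nb_queue assignments above and below:
			 *   TC0: queues   0-31    TC1: queues  32-63
			 *   TC2: queues  64-79    TC3: queues  80-95
			 *   TC4: queues  96-103   TC5: queues 104-111
			 *   TC6: queues 112-119   TC7: queues 120-127
			 */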
dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; 7742 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 7743 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 7744 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 7745 dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; 7746 dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; 7747 dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; 7748 dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; 7749 } 7750 } 7751 for (i = 0; i < dcb_info->nb_tcs; i++) { 7752 tc = &dcb_config->tc_config[i]; 7753 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent; 7754 } 7755 return 0; 7756 } 7757 7758 /* Update e-tag ether type */ 7759 static int 7760 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, 7761 uint16_t ether_type) 7762 { 7763 uint32_t etag_etype; 7764 7765 if (hw->mac.type != ixgbe_mac_X550 && 7766 hw->mac.type != ixgbe_mac_X550EM_x && 7767 hw->mac.type != ixgbe_mac_X550EM_a) { 7768 return -ENOTSUP; 7769 } 7770 7771 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7772 etag_etype &= ~IXGBE_ETAG_ETYPE_MASK; 7773 etag_etype |= ether_type; 7774 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7775 IXGBE_WRITE_FLUSH(hw); 7776 7777 return 0; 7778 } 7779 7780 /* Config l2 tunnel ether type */ 7781 static int 7782 ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev, 7783 struct rte_eth_l2_tunnel_conf *l2_tunnel) 7784 { 7785 int ret = 0; 7786 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7787 struct ixgbe_l2_tn_info *l2_tn_info = 7788 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7789 7790 if (l2_tunnel == NULL) 7791 return -EINVAL; 7792 7793 switch (l2_tunnel->l2_tunnel_type) { 7794 case RTE_L2_TUNNEL_TYPE_E_TAG: 7795 l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type; 7796 ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type); 7797 break; 7798 default: 7799 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7800 ret = -EINVAL; 7801 break; 7802 } 7803 7804 return ret; 7805 } 7806 7807 /* Enable e-tag tunnel */ 7808 static int 7809 ixgbe_e_tag_enable(struct ixgbe_hw *hw) 7810 { 7811 uint32_t etag_etype; 7812 7813 if (hw->mac.type != ixgbe_mac_X550 && 7814 hw->mac.type != ixgbe_mac_X550EM_x && 7815 hw->mac.type != ixgbe_mac_X550EM_a) { 7816 return -ENOTSUP; 7817 } 7818 7819 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7820 etag_etype |= IXGBE_ETAG_ETYPE_VALID; 7821 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 7822 IXGBE_WRITE_FLUSH(hw); 7823 7824 return 0; 7825 } 7826 7827 /* Enable l2 tunnel */ 7828 static int 7829 ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev, 7830 enum rte_eth_tunnel_type l2_tunnel_type) 7831 { 7832 int ret = 0; 7833 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7834 struct ixgbe_l2_tn_info *l2_tn_info = 7835 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7836 7837 switch (l2_tunnel_type) { 7838 case RTE_L2_TUNNEL_TYPE_E_TAG: 7839 l2_tn_info->e_tag_en = TRUE; 7840 ret = ixgbe_e_tag_enable(hw); 7841 break; 7842 default: 7843 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7844 ret = -EINVAL; 7845 break; 7846 } 7847 7848 return ret; 7849 } 7850 7851 /* Disable e-tag tunnel */ 7852 static int 7853 ixgbe_e_tag_disable(struct ixgbe_hw *hw) 7854 { 7855 uint32_t etag_etype; 7856 7857 if (hw->mac.type != ixgbe_mac_X550 && 7858 hw->mac.type != ixgbe_mac_X550EM_x && 7859 hw->mac.type != ixgbe_mac_X550EM_a) { 7860 return -ENOTSUP; 7861 } 7862 7863 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 7864 etag_etype &= ~IXGBE_ETAG_ETYPE_VALID; 7865 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, 
etag_etype); 7866 IXGBE_WRITE_FLUSH(hw); 7867 7868 return 0; 7869 } 7870 7871 /* Disable l2 tunnel */ 7872 static int 7873 ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev, 7874 enum rte_eth_tunnel_type l2_tunnel_type) 7875 { 7876 int ret = 0; 7877 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7878 struct ixgbe_l2_tn_info *l2_tn_info = 7879 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 7880 7881 switch (l2_tunnel_type) { 7882 case RTE_L2_TUNNEL_TYPE_E_TAG: 7883 l2_tn_info->e_tag_en = FALSE; 7884 ret = ixgbe_e_tag_disable(hw); 7885 break; 7886 default: 7887 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7888 ret = -EINVAL; 7889 break; 7890 } 7891 7892 return ret; 7893 } 7894 7895 static int 7896 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, 7897 struct rte_eth_l2_tunnel_conf *l2_tunnel) 7898 { 7899 int ret = 0; 7900 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7901 uint32_t i, rar_entries; 7902 uint32_t rar_low, rar_high; 7903 7904 if (hw->mac.type != ixgbe_mac_X550 && 7905 hw->mac.type != ixgbe_mac_X550EM_x && 7906 hw->mac.type != ixgbe_mac_X550EM_a) { 7907 return -ENOTSUP; 7908 } 7909 7910 rar_entries = ixgbe_get_num_rx_addrs(hw); 7911 7912 for (i = 1; i < rar_entries; i++) { 7913 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 7914 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 7915 if ((rar_high & IXGBE_RAH_AV) && 7916 (rar_high & IXGBE_RAH_ADTYPE) && 7917 ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) == 7918 l2_tunnel->tunnel_id)) { 7919 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 7920 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 7921 7922 ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL); 7923 7924 return ret; 7925 } 7926 } 7927 7928 return ret; 7929 } 7930 7931 static int 7932 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, 7933 struct rte_eth_l2_tunnel_conf *l2_tunnel) 7934 { 7935 int ret = 0; 7936 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7937 uint32_t i, rar_entries; 7938 uint32_t rar_low, rar_high; 7939 7940 if (hw->mac.type != ixgbe_mac_X550 && 7941 hw->mac.type != ixgbe_mac_X550EM_x && 7942 hw->mac.type != ixgbe_mac_X550EM_a) { 7943 return -ENOTSUP; 7944 } 7945 7946 /* One entry for one tunnel. Try to remove potential existing entry. */ 7947 ixgbe_e_tag_filter_del(dev, l2_tunnel); 7948 7949 rar_entries = ixgbe_get_num_rx_addrs(hw); 7950 7951 for (i = 1; i < rar_entries; i++) { 7952 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 7953 if (rar_high & IXGBE_RAH_AV) { 7954 continue; 7955 } else { 7956 ixgbe_set_vmdq(hw, i, l2_tunnel->pool); 7957 rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; 7958 rar_low = l2_tunnel->tunnel_id; 7959 7960 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); 7961 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); 7962 7963 return ret; 7964 } 7965 } 7966 7967 PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." 
7968 " Please remove a rule before adding a new one."); 7969 return -EINVAL; 7970 } 7971 7972 static inline struct ixgbe_l2_tn_filter * 7973 ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info, 7974 struct ixgbe_l2_tn_key *key) 7975 { 7976 int ret; 7977 7978 ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); 7979 if (ret < 0) 7980 return NULL; 7981 7982 return l2_tn_info->hash_map[ret]; 7983 } 7984 7985 static inline int 7986 ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 7987 struct ixgbe_l2_tn_filter *l2_tn_filter) 7988 { 7989 int ret; 7990 7991 ret = rte_hash_add_key(l2_tn_info->hash_handle, 7992 &l2_tn_filter->key); 7993 7994 if (ret < 0) { 7995 PMD_DRV_LOG(ERR, 7996 "Failed to insert L2 tunnel filter" 7997 " to hash table %d!", 7998 ret); 7999 return ret; 8000 } 8001 8002 l2_tn_info->hash_map[ret] = l2_tn_filter; 8003 8004 TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 8005 8006 return 0; 8007 } 8008 8009 static inline int 8010 ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, 8011 struct ixgbe_l2_tn_key *key) 8012 { 8013 int ret; 8014 struct ixgbe_l2_tn_filter *l2_tn_filter; 8015 8016 ret = rte_hash_del_key(l2_tn_info->hash_handle, key); 8017 8018 if (ret < 0) { 8019 PMD_DRV_LOG(ERR, 8020 "No such L2 tunnel filter to delete %d!", 8021 ret); 8022 return ret; 8023 } 8024 8025 l2_tn_filter = l2_tn_info->hash_map[ret]; 8026 l2_tn_info->hash_map[ret] = NULL; 8027 8028 TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); 8029 rte_free(l2_tn_filter); 8030 8031 return 0; 8032 } 8033 8034 /* Add l2 tunnel filter */ 8035 int 8036 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, 8037 struct rte_eth_l2_tunnel_conf *l2_tunnel, 8038 bool restore) 8039 { 8040 int ret; 8041 struct ixgbe_l2_tn_info *l2_tn_info = 8042 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8043 struct ixgbe_l2_tn_key key; 8044 struct ixgbe_l2_tn_filter *node; 8045 8046 if (!restore) { 8047 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 8048 key.tn_id = l2_tunnel->tunnel_id; 8049 8050 node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key); 8051 8052 if (node) { 8053 PMD_DRV_LOG(ERR, 8054 "The L2 tunnel filter already exists!"); 8055 return -EINVAL; 8056 } 8057 8058 node = rte_zmalloc("ixgbe_l2_tn", 8059 sizeof(struct ixgbe_l2_tn_filter), 8060 0); 8061 if (!node) 8062 return -ENOMEM; 8063 8064 rte_memcpy(&node->key, 8065 &key, 8066 sizeof(struct ixgbe_l2_tn_key)); 8067 node->pool = l2_tunnel->pool; 8068 ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node); 8069 if (ret < 0) { 8070 rte_free(node); 8071 return ret; 8072 } 8073 } 8074 8075 switch (l2_tunnel->l2_tunnel_type) { 8076 case RTE_L2_TUNNEL_TYPE_E_TAG: 8077 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); 8078 break; 8079 default: 8080 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 8081 ret = -EINVAL; 8082 break; 8083 } 8084 8085 if ((!restore) && (ret < 0)) 8086 (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 8087 8088 return ret; 8089 } 8090 8091 /* Delete l2 tunnel filter */ 8092 int 8093 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, 8094 struct rte_eth_l2_tunnel_conf *l2_tunnel) 8095 { 8096 int ret; 8097 struct ixgbe_l2_tn_info *l2_tn_info = 8098 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8099 struct ixgbe_l2_tn_key key; 8100 8101 key.l2_tn_type = l2_tunnel->l2_tunnel_type; 8102 key.tn_id = l2_tunnel->tunnel_id; 8103 ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key); 8104 if (ret < 0) 8105 return ret; 8106 8107 switch (l2_tunnel->l2_tunnel_type) { 8108 case 
RTE_L2_TUNNEL_TYPE_E_TAG: 8109 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel); 8110 break; 8111 default: 8112 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 8113 ret = -EINVAL; 8114 break; 8115 } 8116 8117 return ret; 8118 } 8119 8120 /** 8121 * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter. 8122 * @dev: pointer to rte_eth_dev structure 8123 * @filter_op:operation will be taken. 8124 * @arg: a pointer to specific structure corresponding to the filter_op 8125 */ 8126 static int 8127 ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, 8128 enum rte_filter_op filter_op, 8129 void *arg) 8130 { 8131 int ret; 8132 8133 if (filter_op == RTE_ETH_FILTER_NOP) 8134 return 0; 8135 8136 if (arg == NULL) { 8137 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 8138 filter_op); 8139 return -EINVAL; 8140 } 8141 8142 switch (filter_op) { 8143 case RTE_ETH_FILTER_ADD: 8144 ret = ixgbe_dev_l2_tunnel_filter_add 8145 (dev, 8146 (struct rte_eth_l2_tunnel_conf *)arg, 8147 FALSE); 8148 break; 8149 case RTE_ETH_FILTER_DELETE: 8150 ret = ixgbe_dev_l2_tunnel_filter_del 8151 (dev, 8152 (struct rte_eth_l2_tunnel_conf *)arg); 8153 break; 8154 default: 8155 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 8156 ret = -EINVAL; 8157 break; 8158 } 8159 return ret; 8160 } 8161 8162 static int 8163 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) 8164 { 8165 int ret = 0; 8166 uint32_t ctrl; 8167 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8168 8169 if (hw->mac.type != ixgbe_mac_X550 && 8170 hw->mac.type != ixgbe_mac_X550EM_x && 8171 hw->mac.type != ixgbe_mac_X550EM_a) { 8172 return -ENOTSUP; 8173 } 8174 8175 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 8176 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 8177 if (en) 8178 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG; 8179 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 8180 8181 return ret; 8182 } 8183 8184 /* Enable l2 tunnel forwarding */ 8185 static int 8186 ixgbe_dev_l2_tunnel_forwarding_enable 8187 (struct rte_eth_dev *dev, 8188 enum rte_eth_tunnel_type l2_tunnel_type) 8189 { 8190 struct ixgbe_l2_tn_info *l2_tn_info = 8191 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8192 int ret = 0; 8193 8194 switch (l2_tunnel_type) { 8195 case RTE_L2_TUNNEL_TYPE_E_TAG: 8196 l2_tn_info->e_tag_fwd_en = TRUE; 8197 ret = ixgbe_e_tag_forwarding_en_dis(dev, 1); 8198 break; 8199 default: 8200 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 8201 ret = -EINVAL; 8202 break; 8203 } 8204 8205 return ret; 8206 } 8207 8208 /* Disable l2 tunnel forwarding */ 8209 static int 8210 ixgbe_dev_l2_tunnel_forwarding_disable 8211 (struct rte_eth_dev *dev, 8212 enum rte_eth_tunnel_type l2_tunnel_type) 8213 { 8214 struct ixgbe_l2_tn_info *l2_tn_info = 8215 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8216 int ret = 0; 8217 8218 switch (l2_tunnel_type) { 8219 case RTE_L2_TUNNEL_TYPE_E_TAG: 8220 l2_tn_info->e_tag_fwd_en = FALSE; 8221 ret = ixgbe_e_tag_forwarding_en_dis(dev, 0); 8222 break; 8223 default: 8224 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 8225 ret = -EINVAL; 8226 break; 8227 } 8228 8229 return ret; 8230 } 8231 8232 static int 8233 ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev, 8234 struct rte_eth_l2_tunnel_conf *l2_tunnel, 8235 bool en) 8236 { 8237 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 8238 int ret = 0; 8239 uint32_t vmtir, vmvir; 8240 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8241 8242 if (l2_tunnel->vf_id >= pci_dev->max_vfs) { 8243 PMD_DRV_LOG(ERR, 8244 "VF id %u 
should be less than %u", 8245 l2_tunnel->vf_id, 8246 pci_dev->max_vfs); 8247 return -EINVAL; 8248 } 8249 8250 if (hw->mac.type != ixgbe_mac_X550 && 8251 hw->mac.type != ixgbe_mac_X550EM_x && 8252 hw->mac.type != ixgbe_mac_X550EM_a) { 8253 return -ENOTSUP; 8254 } 8255 8256 if (en) 8257 vmtir = l2_tunnel->tunnel_id; 8258 else 8259 vmtir = 0; 8260 8261 IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir); 8262 8263 vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id)); 8264 vmvir &= ~IXGBE_VMVIR_TAGA_MASK; 8265 if (en) 8266 vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT; 8267 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir); 8268 8269 return ret; 8270 } 8271 8272 /* Enable l2 tunnel tag insertion */ 8273 static int 8274 ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev, 8275 struct rte_eth_l2_tunnel_conf *l2_tunnel) 8276 { 8277 int ret = 0; 8278 8279 switch (l2_tunnel->l2_tunnel_type) { 8280 case RTE_L2_TUNNEL_TYPE_E_TAG: 8281 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1); 8282 break; 8283 default: 8284 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 8285 ret = -EINVAL; 8286 break; 8287 } 8288 8289 return ret; 8290 } 8291 8292 /* Disable l2 tunnel tag insertion */ 8293 static int 8294 ixgbe_dev_l2_tunnel_insertion_disable 8295 (struct rte_eth_dev *dev, 8296 struct rte_eth_l2_tunnel_conf *l2_tunnel) 8297 { 8298 int ret = 0; 8299 8300 switch (l2_tunnel->l2_tunnel_type) { 8301 case RTE_L2_TUNNEL_TYPE_E_TAG: 8302 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0); 8303 break; 8304 default: 8305 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 8306 ret = -EINVAL; 8307 break; 8308 } 8309 8310 return ret; 8311 } 8312 8313 static int 8314 ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev, 8315 bool en) 8316 { 8317 int ret = 0; 8318 uint32_t qde; 8319 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8320 8321 if (hw->mac.type != ixgbe_mac_X550 && 8322 hw->mac.type != ixgbe_mac_X550EM_x && 8323 hw->mac.type != ixgbe_mac_X550EM_a) { 8324 return -ENOTSUP; 8325 } 8326 8327 qde = IXGBE_READ_REG(hw, IXGBE_QDE); 8328 if (en) 8329 qde |= IXGBE_QDE_STRIP_TAG; 8330 else 8331 qde &= ~IXGBE_QDE_STRIP_TAG; 8332 qde &= ~IXGBE_QDE_READ; 8333 qde |= IXGBE_QDE_WRITE; 8334 IXGBE_WRITE_REG(hw, IXGBE_QDE, qde); 8335 8336 return ret; 8337 } 8338 8339 /* Enable l2 tunnel tag stripping */ 8340 static int 8341 ixgbe_dev_l2_tunnel_stripping_enable 8342 (struct rte_eth_dev *dev, 8343 enum rte_eth_tunnel_type l2_tunnel_type) 8344 { 8345 int ret = 0; 8346 8347 switch (l2_tunnel_type) { 8348 case RTE_L2_TUNNEL_TYPE_E_TAG: 8349 ret = ixgbe_e_tag_stripping_en_dis(dev, 1); 8350 break; 8351 default: 8352 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 8353 ret = -EINVAL; 8354 break; 8355 } 8356 8357 return ret; 8358 } 8359 8360 /* Disable l2 tunnel tag stripping */ 8361 static int 8362 ixgbe_dev_l2_tunnel_stripping_disable 8363 (struct rte_eth_dev *dev, 8364 enum rte_eth_tunnel_type l2_tunnel_type) 8365 { 8366 int ret = 0; 8367 8368 switch (l2_tunnel_type) { 8369 case RTE_L2_TUNNEL_TYPE_E_TAG: 8370 ret = ixgbe_e_tag_stripping_en_dis(dev, 0); 8371 break; 8372 default: 8373 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 8374 ret = -EINVAL; 8375 break; 8376 } 8377 8378 return ret; 8379 } 8380 8381 /* Enable/disable l2 tunnel offload functions */ 8382 static int 8383 ixgbe_dev_l2_tunnel_offload_set 8384 (struct rte_eth_dev *dev, 8385 struct rte_eth_l2_tunnel_conf *l2_tunnel, 8386 uint32_t mask, 8387 uint8_t en) 8388 { 8389 int ret = 0; 8390 8391 if (l2_tunnel == NULL) 8392 return -EINVAL; 8393 8394 ret = 
-EINVAL;
	if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
		if (en)
			ret = ixgbe_dev_l2_tunnel_enable(
				dev,
				l2_tunnel->l2_tunnel_type);
		else
			ret = ixgbe_dev_l2_tunnel_disable(
				dev,
				l2_tunnel->l2_tunnel_type);
	}

	if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
		if (en)
			ret = ixgbe_dev_l2_tunnel_insertion_enable(
				dev,
				l2_tunnel);
		else
			ret = ixgbe_dev_l2_tunnel_insertion_disable(
				dev,
				l2_tunnel);
	}

	if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
		if (en)
			ret = ixgbe_dev_l2_tunnel_stripping_enable(
				dev,
				l2_tunnel->l2_tunnel_type);
		else
			ret = ixgbe_dev_l2_tunnel_stripping_disable(
				dev,
				l2_tunnel->l2_tunnel_type);
	}

	if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
		if (en)
			ret = ixgbe_dev_l2_tunnel_forwarding_enable(
				dev,
				l2_tunnel->l2_tunnel_type);
		else
			ret = ixgbe_dev_l2_tunnel_forwarding_disable(
				dev,
				l2_tunnel->l2_tunnel_type);
	}

	return ret;
}

static int
ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
			uint16_t port)
{
	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

/* There is only one register for the VxLAN UDP port, so several ports cannot
 * be added; the register is simply overwritten with the new value.
 * (An illustrative usage sketch of the public UDP tunnel API is appended at
 * the end of this file.)
 */
static int
ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
		     uint16_t port)
{
	if (port == 0) {
		PMD_DRV_LOG(ERR, "Adding VxLAN port 0 is not allowed.");
		return -EINVAL;
	}

	return ixgbe_update_vxlan_port(hw, port);
}

/* The VxLAN port cannot truly be deleted: the single VxLAN UDP port register
 * must always hold a value, so it is reset to its original value, 0.
8470 */ 8471 static int 8472 ixgbe_del_vxlan_port(struct ixgbe_hw *hw, 8473 uint16_t port) 8474 { 8475 uint16_t cur_port; 8476 8477 cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL); 8478 8479 if (cur_port != port) { 8480 PMD_DRV_LOG(ERR, "Port %u does not exist.", port); 8481 return -EINVAL; 8482 } 8483 8484 return ixgbe_update_vxlan_port(hw, 0); 8485 } 8486 8487 /* Add UDP tunneling port */ 8488 static int 8489 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 8490 struct rte_eth_udp_tunnel *udp_tunnel) 8491 { 8492 int ret = 0; 8493 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8494 8495 if (hw->mac.type != ixgbe_mac_X550 && 8496 hw->mac.type != ixgbe_mac_X550EM_x && 8497 hw->mac.type != ixgbe_mac_X550EM_a) { 8498 return -ENOTSUP; 8499 } 8500 8501 if (udp_tunnel == NULL) 8502 return -EINVAL; 8503 8504 switch (udp_tunnel->prot_type) { 8505 case RTE_TUNNEL_TYPE_VXLAN: 8506 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port); 8507 break; 8508 8509 case RTE_TUNNEL_TYPE_GENEVE: 8510 case RTE_TUNNEL_TYPE_TEREDO: 8511 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 8512 ret = -EINVAL; 8513 break; 8514 8515 default: 8516 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 8517 ret = -EINVAL; 8518 break; 8519 } 8520 8521 return ret; 8522 } 8523 8524 /* Remove UDP tunneling port */ 8525 static int 8526 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 8527 struct rte_eth_udp_tunnel *udp_tunnel) 8528 { 8529 int ret = 0; 8530 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8531 8532 if (hw->mac.type != ixgbe_mac_X550 && 8533 hw->mac.type != ixgbe_mac_X550EM_x && 8534 hw->mac.type != ixgbe_mac_X550EM_a) { 8535 return -ENOTSUP; 8536 } 8537 8538 if (udp_tunnel == NULL) 8539 return -EINVAL; 8540 8541 switch (udp_tunnel->prot_type) { 8542 case RTE_TUNNEL_TYPE_VXLAN: 8543 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port); 8544 break; 8545 case RTE_TUNNEL_TYPE_GENEVE: 8546 case RTE_TUNNEL_TYPE_TEREDO: 8547 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 8548 ret = -EINVAL; 8549 break; 8550 default: 8551 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 8552 ret = -EINVAL; 8553 break; 8554 } 8555 8556 return ret; 8557 } 8558 8559 static int 8560 ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev) 8561 { 8562 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8563 int ret; 8564 8565 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC)) { 8566 case IXGBE_SUCCESS: 8567 ret = 0; 8568 break; 8569 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 8570 ret = -ENOTSUP; 8571 break; 8572 default: 8573 ret = -EAGAIN; 8574 break; 8575 } 8576 8577 return ret; 8578 } 8579 8580 static int 8581 ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev) 8582 { 8583 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8584 int ret; 8585 8586 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE)) { 8587 case IXGBE_SUCCESS: 8588 ret = 0; 8589 break; 8590 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 8591 ret = -ENOTSUP; 8592 break; 8593 default: 8594 ret = -EAGAIN; 8595 break; 8596 } 8597 8598 return ret; 8599 } 8600 8601 static int 8602 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) 8603 { 8604 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8605 int ret; 8606 int mode = IXGBEVF_XCAST_MODE_ALLMULTI; 8607 8608 switch (hw->mac.ops.update_xcast_mode(hw, mode)) { 8609 case IXGBE_SUCCESS: 8610 ret = 0; 8611 break; 8612 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 8613 ret = -ENOTSUP; 8614 
break; 8615 default: 8616 ret = -EAGAIN; 8617 break; 8618 } 8619 8620 return ret; 8621 } 8622 8623 static int 8624 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) 8625 { 8626 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8627 int ret; 8628 8629 switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) { 8630 case IXGBE_SUCCESS: 8631 ret = 0; 8632 break; 8633 case IXGBE_ERR_FEATURE_NOT_SUPPORTED: 8634 ret = -ENOTSUP; 8635 break; 8636 default: 8637 ret = -EAGAIN; 8638 break; 8639 } 8640 8641 return ret; 8642 } 8643 8644 static void ixgbevf_mbx_process(struct rte_eth_dev *dev) 8645 { 8646 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8647 u32 in_msg = 0; 8648 8649 /* peek the message first */ 8650 in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM); 8651 8652 /* PF reset VF event */ 8653 if (in_msg == IXGBE_PF_CONTROL_MSG) { 8654 /* dummy mbx read to ack pf */ 8655 if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) 8656 return; 8657 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 8658 NULL); 8659 } 8660 } 8661 8662 static int 8663 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) 8664 { 8665 uint32_t eicr; 8666 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8667 struct ixgbe_interrupt *intr = 8668 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 8669 ixgbevf_intr_disable(dev); 8670 8671 /* read-on-clear nic registers here */ 8672 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); 8673 intr->flags = 0; 8674 8675 /* only one misc vector supported - mailbox */ 8676 eicr &= IXGBE_VTEICR_MASK; 8677 if (eicr == IXGBE_MISC_VEC_ID) 8678 intr->flags |= IXGBE_FLAG_MAILBOX; 8679 8680 return 0; 8681 } 8682 8683 static int 8684 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) 8685 { 8686 struct ixgbe_interrupt *intr = 8687 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 8688 8689 if (intr->flags & IXGBE_FLAG_MAILBOX) { 8690 ixgbevf_mbx_process(dev); 8691 intr->flags &= ~IXGBE_FLAG_MAILBOX; 8692 } 8693 8694 ixgbevf_intr_enable(dev); 8695 8696 return 0; 8697 } 8698 8699 static void 8700 ixgbevf_dev_interrupt_handler(void *param) 8701 { 8702 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 8703 8704 ixgbevf_dev_interrupt_get_status(dev); 8705 ixgbevf_dev_interrupt_action(dev); 8706 } 8707 8708 /** 8709 * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path 8710 * @hw: pointer to hardware structure 8711 * 8712 * Stops the transmit data path and waits for the HW to internally empty 8713 * the Tx security block 8714 **/ 8715 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw) 8716 { 8717 #define IXGBE_MAX_SECTX_POLL 40 8718 8719 int i; 8720 int sectxreg; 8721 8722 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8723 sectxreg |= IXGBE_SECTXCTRL_TX_DIS; 8724 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); 8725 for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) { 8726 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); 8727 if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY) 8728 break; 8729 /* Use interrupt-safe sleep just in case */ 8730 usec_delay(1000); 8731 } 8732 8733 /* For informational purposes only */ 8734 if (i >= IXGBE_MAX_SECTX_POLL) 8735 PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security " 8736 "path fully disabled. Continuing with init."); 8737 8738 return IXGBE_SUCCESS; 8739 } 8740 8741 /** 8742 * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path 8743 * @hw: pointer to hardware structure 8744 * 8745 * Enables the transmit data path. 
8746 **/ 8747 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw) 8748 { 8749 uint32_t sectxreg; 8750 8751 sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8752 sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS; 8753 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); 8754 IXGBE_WRITE_FLUSH(hw); 8755 8756 return IXGBE_SUCCESS; 8757 } 8758 8759 /* restore n-tuple filter */ 8760 static inline void 8761 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev) 8762 { 8763 struct ixgbe_filter_info *filter_info = 8764 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8765 struct ixgbe_5tuple_filter *node; 8766 8767 TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) { 8768 ixgbe_inject_5tuple_filter(dev, node); 8769 } 8770 } 8771 8772 /* restore ethernet type filter */ 8773 static inline void 8774 ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev) 8775 { 8776 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8777 struct ixgbe_filter_info *filter_info = 8778 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8779 int i; 8780 8781 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 8782 if (filter_info->ethertype_mask & (1 << i)) { 8783 IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 8784 filter_info->ethertype_filters[i].etqf); 8785 IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 8786 filter_info->ethertype_filters[i].etqs); 8787 IXGBE_WRITE_FLUSH(hw); 8788 } 8789 } 8790 } 8791 8792 /* restore SYN filter */ 8793 static inline void 8794 ixgbe_syn_filter_restore(struct rte_eth_dev *dev) 8795 { 8796 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8797 struct ixgbe_filter_info *filter_info = 8798 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8799 uint32_t synqf; 8800 8801 synqf = filter_info->syn_info; 8802 8803 if (synqf & IXGBE_SYN_FILTER_ENABLE) { 8804 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 8805 IXGBE_WRITE_FLUSH(hw); 8806 } 8807 } 8808 8809 /* restore L2 tunnel filter */ 8810 static inline void 8811 ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev) 8812 { 8813 struct ixgbe_l2_tn_info *l2_tn_info = 8814 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8815 struct ixgbe_l2_tn_filter *node; 8816 struct rte_eth_l2_tunnel_conf l2_tn_conf; 8817 8818 TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) { 8819 l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type; 8820 l2_tn_conf.tunnel_id = node->key.tn_id; 8821 l2_tn_conf.pool = node->pool; 8822 (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE); 8823 } 8824 } 8825 8826 /* restore rss filter */ 8827 static inline void 8828 ixgbe_rss_filter_restore(struct rte_eth_dev *dev) 8829 { 8830 struct ixgbe_filter_info *filter_info = 8831 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8832 8833 if (filter_info->rss_info.conf.queue_num) 8834 ixgbe_config_rss_filter(dev, 8835 &filter_info->rss_info, TRUE); 8836 } 8837 8838 static int 8839 ixgbe_filter_restore(struct rte_eth_dev *dev) 8840 { 8841 ixgbe_ntuple_filter_restore(dev); 8842 ixgbe_ethertype_filter_restore(dev); 8843 ixgbe_syn_filter_restore(dev); 8844 ixgbe_fdir_filter_restore(dev); 8845 ixgbe_l2_tn_filter_restore(dev); 8846 ixgbe_rss_filter_restore(dev); 8847 8848 return 0; 8849 } 8850 8851 static void 8852 ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev) 8853 { 8854 struct ixgbe_l2_tn_info *l2_tn_info = 8855 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8856 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8857 8858 if (l2_tn_info->e_tag_en) 8859 (void)ixgbe_e_tag_enable(hw); 8860 8861 if 
(l2_tn_info->e_tag_fwd_en) 8862 (void)ixgbe_e_tag_forwarding_en_dis(dev, 1); 8863 8864 (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type); 8865 } 8866 8867 /* remove all the n-tuple filters */ 8868 void 8869 ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev) 8870 { 8871 struct ixgbe_filter_info *filter_info = 8872 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8873 struct ixgbe_5tuple_filter *p_5tuple; 8874 8875 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) 8876 ixgbe_remove_5tuple_filter(dev, p_5tuple); 8877 } 8878 8879 /* remove all the ether type filters */ 8880 void 8881 ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev) 8882 { 8883 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8884 struct ixgbe_filter_info *filter_info = 8885 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8886 int i; 8887 8888 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 8889 if (filter_info->ethertype_mask & (1 << i) && 8890 !filter_info->ethertype_filters[i].conf) { 8891 (void)ixgbe_ethertype_filter_remove(filter_info, 8892 (uint8_t)i); 8893 IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0); 8894 IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0); 8895 IXGBE_WRITE_FLUSH(hw); 8896 } 8897 } 8898 } 8899 8900 /* remove the SYN filter */ 8901 void 8902 ixgbe_clear_syn_filter(struct rte_eth_dev *dev) 8903 { 8904 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8905 struct ixgbe_filter_info *filter_info = 8906 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 8907 8908 if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) { 8909 filter_info->syn_info = 0; 8910 8911 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0); 8912 IXGBE_WRITE_FLUSH(hw); 8913 } 8914 } 8915 8916 /* remove all the L2 tunnel filters */ 8917 int 8918 ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev) 8919 { 8920 struct ixgbe_l2_tn_info *l2_tn_info = 8921 IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); 8922 struct ixgbe_l2_tn_filter *l2_tn_filter; 8923 struct rte_eth_l2_tunnel_conf l2_tn_conf; 8924 int ret = 0; 8925 8926 while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { 8927 l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type; 8928 l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id; 8929 l2_tn_conf.pool = l2_tn_filter->pool; 8930 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf); 8931 if (ret < 0) 8932 return ret; 8933 } 8934 8935 return 0; 8936 } 8937 8938 void 8939 ixgbe_dev_macsec_setting_save(struct rte_eth_dev *dev, 8940 struct ixgbe_macsec_setting *macsec_setting) 8941 { 8942 struct ixgbe_macsec_setting *macsec = 8943 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 8944 8945 macsec->offload_en = macsec_setting->offload_en; 8946 macsec->encrypt_en = macsec_setting->encrypt_en; 8947 macsec->replayprotect_en = macsec_setting->replayprotect_en; 8948 } 8949 8950 void 8951 ixgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev) 8952 { 8953 struct ixgbe_macsec_setting *macsec = 8954 IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(dev->data->dev_private); 8955 8956 macsec->offload_en = 0; 8957 macsec->encrypt_en = 0; 8958 macsec->replayprotect_en = 0; 8959 } 8960 8961 void 8962 ixgbe_dev_macsec_register_enable(struct rte_eth_dev *dev, 8963 struct ixgbe_macsec_setting *macsec_setting) 8964 { 8965 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 8966 uint32_t ctrl; 8967 uint8_t en = macsec_setting->encrypt_en; 8968 uint8_t rp = macsec_setting->replayprotect_en; 8969 8970 /** 8971 * Workaround: 8972 * As no 
ixgbe_disable_sec_rx_path equivalent is 8973 * implemented for tx in the base code, and we are 8974 * not allowed to modify the base code in DPDK, so 8975 * just call the hand-written one directly for now. 8976 * The hardware support has been checked by 8977 * ixgbe_disable_sec_rx_path(). 8978 */ 8979 ixgbe_disable_sec_tx_path_generic(hw); 8980 8981 /* Enable Ethernet CRC (required by MACsec offload) */ 8982 ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0); 8983 ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP; 8984 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl); 8985 8986 /* Enable the TX and RX crypto engines */ 8987 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); 8988 ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS; 8989 IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl); 8990 8991 ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 8992 ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS; 8993 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl); 8994 8995 ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); 8996 ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK; 8997 ctrl |= 0x3; 8998 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl); 8999 9000 /* Enable SA lookup */ 9001 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL); 9002 ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK; 9003 ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT : 9004 IXGBE_LSECTXCTRL_AUTH; 9005 ctrl |= IXGBE_LSECTXCTRL_AISCI; 9006 ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK; 9007 ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK; 9008 IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl); 9009 9010 ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL); 9011 ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK; 9012 ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT; 9013 ctrl &= ~IXGBE_LSECRXCTRL_PLSH; 9014 if (rp) 9015 ctrl |= IXGBE_LSECRXCTRL_RP; 9016 else 9017 ctrl &= ~IXGBE_LSECRXCTRL_RP; 9018 IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl); 9019 9020 /* Start the data paths */ 9021 ixgbe_enable_sec_rx_path(hw); 9022 /** 9023 * Workaround: 9024 * As no ixgbe_enable_sec_rx_path equivalent is 9025 * implemented for tx in the base code, and we are 9026 * not allowed to modify the base code in DPDK, so 9027 * just call the hand-written one directly for now. 9028 */ 9029 ixgbe_enable_sec_tx_path_generic(hw); 9030 } 9031 9032 void 9033 ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev) 9034 { 9035 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 9036 uint32_t ctrl; 9037 9038 /** 9039 * Workaround: 9040 * As no ixgbe_disable_sec_rx_path equivalent is 9041 * implemented for tx in the base code, and we are 9042 * not allowed to modify the base code in DPDK, so 9043 * just call the hand-written one directly for now. 9044 * The hardware support has been checked by 9045 * ixgbe_disable_sec_rx_path(). 
 */
	ixgbe_disable_sec_tx_path_generic(hw);

	/* Disable the TX and RX crypto engines */
	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);

	/* Disable SA lookup */
	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);

	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);

	/* Start the data paths */
	ixgbe_enable_sec_rx_path(hw);
	/**
	 * Workaround:
	 * The base code implements no Tx equivalent of
	 * ixgbe_enable_sec_rx_path(), and the base code must not be
	 * modified in DPDK, so call the hand-written one directly for now.
	 */
	ixgbe_enable_sec_tx_path_generic(hw);
}

RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf,
			      IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>");

RTE_INIT(ixgbe_init_log)
{
	ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
	if (ixgbe_logtype_init >= 0)
		rte_log_set_level(ixgbe_logtype_init, RTE_LOG_NOTICE);
	ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver");
	if (ixgbe_logtype_driver >= 0)
		rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE);
#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
	ixgbe_logtype_rx = rte_log_register("pmd.net.ixgbe.rx");
	if (ixgbe_logtype_rx >= 0)
		rte_log_set_level(ixgbe_logtype_rx, RTE_LOG_DEBUG);
#endif

#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
	ixgbe_logtype_tx = rte_log_register("pmd.net.ixgbe.tx");
	if (ixgbe_logtype_tx >= 0)
		rte_log_set_level(ixgbe_logtype_tx, RTE_LOG_DEBUG);
#endif

#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
	ixgbe_logtype_tx_free = rte_log_register("pmd.net.ixgbe.tx_free");
	if (ixgbe_logtype_tx_free >= 0)
		rte_log_set_level(ixgbe_logtype_tx_free, RTE_LOG_DEBUG);
#endif
}
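
/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * adding an ethertype filter through the legacy filter-control API that
 * ixgbe_dev_filter_ctrl() dispatches above.  It assumes the legacy
 * rte_eth_dev_filter_ctrl()/rte_eth_dev_filter_supported() calls still
 * available in this DPDK release; newer applications would normally use
 * rte_flow (the RTE_ETH_FILTER_GENERIC branch returning ixgbe_flow_ops).
 * The function name, ether type and queue are example values only.
 */
static int
ixgbe_example_ethertype_filter(uint16_t port_id)
{
	struct rte_eth_ethertype_filter filter;
	int ret;

	ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_ETHERTYPE);
	if (ret != 0)
		return ret;

	memset(&filter, 0, sizeof(filter));
	filter.ether_type = 0x88F7;	/* example: PTP over Ethernet */
	filter.flags = 0;		/* match on ether type only */
	filter.queue = 1;		/* steer matches to Rx queue 1 */

	/* Ends up in ixgbe_ethertype_filter_handle() / RTE_ETH_FILTER_ADD. */
	ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
				      RTE_ETH_FILTER_ADD, &filter);
	if (ret != 0)
		return ret;

	/* ...and can later be removed with RTE_ETH_FILTER_DELETE. */
	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
				       RTE_ETH_FILTER_DELETE, &filter);
}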
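
/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * installing a small multicast allow-list, which is serviced by
 * ixgbe_dev_set_mc_addr_list() above, and falling back to all-multicast
 * mode, which on a VF goes through the PF mailbox xcast-mode request.
 * The function name and the example addresses are placeholders.
 */
static int
ixgbe_example_multicast_setup(uint16_t port_id)
{
	struct rte_ether_addr mc_list[2] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
	};
	int ret;

	ret = rte_eth_dev_set_mc_addr_list(port_id, mc_list, 2);
	if (ret != 0)
		return ret;

	/* Or simply accept all multicast traffic. */
	return rte_eth_allmulticast_enable(port_id);
}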
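
/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * how an application exercises the IEEE 1588/802.1AS timesync callbacks
 * implemented above through the public rte_eth_timesync_* API.  The
 * function name and the 1 us adjustment are placeholders for this example.
 */
static int
ixgbe_example_ptp_usage(uint16_t port_id)
{
	struct timespec ts;
	int ret;

	/* Programs TIMINCA/TSYNC*CTL and arms the ETQF 1588 filter. */
	ret = rte_eth_timesync_enable(port_id);
	if (ret != 0)
		return ret;

	/* Read the free-running device clock (SYSTIML/SYSTIMH). */
	ret = rte_eth_timesync_read_time(port_id, &ts);
	if (ret != 0)
		return ret;

	/* Nudge the software timecounters by 1 us, as a PTP servo would. */
	ret = rte_eth_timesync_adjust_time(port_id, 1000);
	if (ret != 0)
		return ret;

	/*
	 * After a timestamped PTP frame has been received or sent, fetch the
	 * latched Rx/Tx timestamps; -EINVAL means no timestamp is pending.
	 */
	(void)rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
	(void)rte_eth_timesync_read_tx_timestamp(port_id, &ts);

	return rte_eth_timesync_disable(port_id);
}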
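
/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * dumping the register file and the SFP module EEPROM through the public
 * ethdev API, following the "query the length first, then fetch" pattern
 * used by ixgbe_get_regs() and ixgbe_get_module_eeprom() above.  The
 * function name and allocation tags are placeholders.
 */
static int
ixgbe_example_dump_regs_and_module(uint16_t port_id)
{
	struct rte_dev_reg_info reg_info = { .data = NULL };
	struct rte_eth_dev_module_info modinfo;
	struct rte_dev_eeprom_info eeprom_info;
	uint8_t *buf;
	int ret;

	/* First call with data == NULL only reports length and width. */
	ret = rte_eth_dev_get_reg_info(port_id, &reg_info);
	if (ret != 0)
		return ret;

	reg_info.data = rte_zmalloc("example_regs",
				    reg_info.length * reg_info.width, 0);
	if (reg_info.data == NULL)
		return -ENOMEM;

	/* Second call performs the full register dump. */
	ret = rte_eth_dev_get_reg_info(port_id, &reg_info);
	rte_free(reg_info.data);
	if (ret != 0)
		return ret;

	/* SFF-8079/8472 module EEPROM: ask for the layout, then read it. */
	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;

	buf = rte_zmalloc("example_module_eeprom", modinfo.eeprom_len, 0);
	if (buf == NULL)
		return -ENOMEM;

	memset(&eeprom_info, 0, sizeof(eeprom_info));
	eeprom_info.offset = 0;
	eeprom_info.length = modinfo.eeprom_len;
	eeprom_info.data = buf;
	ret = rte_eth_dev_get_module_eeprom(port_id, &eeprom_info);
	rte_free(buf);

	return ret;
}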
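
/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * querying the DCB TC-to-queue mapping that ixgbe_dev_get_dcb_info() fills
 * in above.  Only pool 0 is walked here; the function name and the printf
 * output format are example choices.
 */
static void
ixgbe_example_dump_dcb_info(uint16_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint8_t i;

	if (rte_eth_dev_get_dcb_info(port_id, &dcb_info) != 0)
		return;

	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("TC%u: rxq base %u (%u queues), txq base %u (%u queues), bw %u%%\n",
		       i,
		       dcb_info.tc_queue.tc_rxq[0][i].base,
		       dcb_info.tc_queue.tc_rxq[0][i].nb_queue,
		       dcb_info.tc_queue.tc_txq[0][i].base,
		       dcb_info.tc_queue.tc_txq[0][i].nb_queue,
		       dcb_info.tc_bws[i]);
}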
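
/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * programming the single VxLAN UDP port register (VXLANCTRL) on X550-class
 * devices through the public ethdev API.  The example port number 4789 is
 * the IANA default for VxLAN, not something this driver mandates.
 */
static int
ixgbe_example_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel_udp = {
		.udp_port = 4789,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};
	int ret;

	/* Lands in ixgbe_dev_udp_tunnel_port_add() -> ixgbe_add_vxlan_port(). */
	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);
	if (ret != 0)
		return ret;

	/* Deleting resets VXLANCTRL back to 0 (see ixgbe_del_vxlan_port()). */
	return rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel_udp);
}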