1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <sys/queue.h> 35 #include <stdio.h> 36 #include <errno.h> 37 #include <stdint.h> 38 #include <string.h> 39 #include <unistd.h> 40 #include <stdarg.h> 41 #include <inttypes.h> 42 #include <netinet/in.h> 43 #include <rte_byteorder.h> 44 #include <rte_common.h> 45 #include <rte_cycles.h> 46 47 #include <rte_interrupts.h> 48 #include <rte_log.h> 49 #include <rte_debug.h> 50 #include <rte_pci.h> 51 #include <rte_atomic.h> 52 #include <rte_branch_prediction.h> 53 #include <rte_memory.h> 54 #include <rte_memzone.h> 55 #include <rte_eal.h> 56 #include <rte_alarm.h> 57 #include <rte_ether.h> 58 #include <rte_ethdev.h> 59 #include <rte_atomic.h> 60 #include <rte_malloc.h> 61 #include <rte_random.h> 62 #include <rte_dev.h> 63 64 #include "ixgbe_logs.h" 65 #include "base/ixgbe_api.h" 66 #include "base/ixgbe_vf.h" 67 #include "base/ixgbe_common.h" 68 #include "ixgbe_ethdev.h" 69 #include "ixgbe_bypass.h" 70 #include "ixgbe_rxtx.h" 71 #include "base/ixgbe_type.h" 72 #include "base/ixgbe_phy.h" 73 #include "ixgbe_regs.h" 74 75 /* 76 * High threshold controlling when to start sending XOFF frames. Must be at 77 * least 8 bytes less than receive packet buffer size. This value is in units 78 * of 1024 bytes. 79 */ 80 #define IXGBE_FC_HI 0x80 81 82 /* 83 * Low threshold controlling when to start sending XON frames. This value is 84 * in units of 1024 bytes. 85 */ 86 #define IXGBE_FC_LO 0x40 87 88 /* Default minimum inter-interrupt interval for EITR configuration */ 89 #define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT 0x79E 90 91 /* Timer value included in XOFF frames. */ 92 #define IXGBE_FC_PAUSE 0x680 93 94 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */ 95 #define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */ 96 #define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */ 97 98 #define IXGBE_MMW_SIZE_DEFAULT 0x4 99 #define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14 100 #define IXGBE_MAX_RING_DESC 4096 /* replicate define from rxtx */ 101 102 /* 103 * Default values for RX/TX configuration 104 */ 105 #define IXGBE_DEFAULT_RX_FREE_THRESH 32 106 #define IXGBE_DEFAULT_RX_PTHRESH 8 107 #define IXGBE_DEFAULT_RX_HTHRESH 8 108 #define IXGBE_DEFAULT_RX_WTHRESH 0 109 110 #define IXGBE_DEFAULT_TX_FREE_THRESH 32 111 #define IXGBE_DEFAULT_TX_PTHRESH 32 112 #define IXGBE_DEFAULT_TX_HTHRESH 0 113 #define IXGBE_DEFAULT_TX_WTHRESH 0 114 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32 115 116 /* Bit shift and mask */ 117 #define IXGBE_4_BIT_WIDTH (CHAR_BIT / 2) 118 #define IXGBE_4_BIT_MASK RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t) 119 #define IXGBE_8_BIT_WIDTH CHAR_BIT 120 #define IXGBE_8_BIT_MASK UINT8_MAX 121 122 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */ 123 124 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0])) 125 126 #define IXGBE_HKEY_MAX_INDEX 10 127 128 /* Additional timesync values. */ 129 #define NSEC_PER_SEC 1000000000L 130 #define IXGBE_INCVAL_10GB 0x66666666 131 #define IXGBE_INCVAL_1GB 0x40000000 132 #define IXGBE_INCVAL_100 0x50000000 133 #define IXGBE_INCVAL_SHIFT_10GB 28 134 #define IXGBE_INCVAL_SHIFT_1GB 24 135 #define IXGBE_INCVAL_SHIFT_100 21 136 #define IXGBE_INCVAL_SHIFT_82599 7 137 #define IXGBE_INCPER_SHIFT_82599 24 138 139 #define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL 140 141 #define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000 142 #define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000 143 #define DEFAULT_ETAG_ETYPE 0x893f 144 #define IXGBE_ETAG_ETYPE 0x00005084 145 #define IXGBE_ETAG_ETYPE_MASK 0x0000ffff 146 #define IXGBE_ETAG_ETYPE_VALID 0x80000000 147 #define IXGBE_RAH_ADTYPE 0x40000000 148 #define IXGBE_RAL_ETAG_FILTER_MASK 0x00003fff 149 #define IXGBE_VMVIR_TAGA_MASK 0x18000000 150 #define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000 151 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */ 152 #define IXGBE_QDE_STRIP_TAG 0x00000004 153 #define IXGBE_VTEICR_MASK 0x07 154 155 enum ixgbevf_xcast_modes { 156 IXGBEVF_XCAST_MODE_NONE = 0, 157 IXGBEVF_XCAST_MODE_MULTI, 158 IXGBEVF_XCAST_MODE_ALLMULTI, 159 }; 160 161 #define IXGBE_EXVET_VET_EXT_SHIFT 16 162 #define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000 163 164 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev); 165 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); 166 static int ixgbe_dev_configure(struct rte_eth_dev *dev); 167 static int ixgbe_dev_start(struct rte_eth_dev *dev); 168 static void ixgbe_dev_stop(struct rte_eth_dev *dev); 169 static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev); 170 static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev); 171 static void ixgbe_dev_close(struct rte_eth_dev *dev); 172 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); 173 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); 174 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); 175 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); 176 static int ixgbe_dev_link_update(struct rte_eth_dev *dev, 177 int wait_to_complete); 178 static void ixgbe_dev_stats_get(struct rte_eth_dev *dev, 179 struct rte_eth_stats *stats); 180 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev, 181 struct rte_eth_xstat *xstats, unsigned n); 182 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, 183 struct rte_eth_xstat *xstats, unsigned n); 184 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev); 185 static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev); 186 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 187 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit); 188 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 189 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit); 190 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, 191 uint16_t queue_id, 192 uint8_t stat_idx, 193 uint8_t is_rx); 194 static void ixgbe_dev_info_get(struct rte_eth_dev *dev, 195 struct rte_eth_dev_info *dev_info); 196 static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev); 197 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev, 198 struct rte_eth_dev_info *dev_info); 199 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 200 201 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev, 202 uint16_t vlan_id, int on); 203 static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 204 enum rte_vlan_type vlan_type, 205 uint16_t tpid_id); 206 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, 207 uint16_t queue, bool on); 208 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, 209 int on); 210 static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); 211 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue); 212 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue); 213 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev); 214 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev); 215 216 static int ixgbe_dev_led_on(struct rte_eth_dev *dev); 217 static int ixgbe_dev_led_off(struct rte_eth_dev *dev); 218 static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, 219 struct rte_eth_fc_conf *fc_conf); 220 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, 221 struct rte_eth_fc_conf *fc_conf); 222 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, 223 struct rte_eth_pfc_conf *pfc_conf); 224 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 225 struct rte_eth_rss_reta_entry64 *reta_conf, 226 uint16_t reta_size); 227 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 228 struct rte_eth_rss_reta_entry64 *reta_conf, 229 uint16_t reta_size); 230 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev); 231 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev); 232 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); 233 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); 234 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev); 235 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, 236 void *param); 237 static void ixgbe_dev_interrupt_delayed_handler(void *param); 238 static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 239 uint32_t index, uint32_t pool); 240 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); 241 static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, 242 struct ether_addr *mac_addr); 243 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config); 244 245 /* For Virtual Function support */ 246 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev); 247 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev); 248 static int ixgbevf_dev_configure(struct rte_eth_dev *dev); 249 static int ixgbevf_dev_start(struct rte_eth_dev *dev); 250 static void ixgbevf_dev_stop(struct rte_eth_dev *dev); 251 static void ixgbevf_dev_close(struct rte_eth_dev *dev); 252 static void ixgbevf_intr_disable(struct ixgbe_hw *hw); 253 static void ixgbevf_intr_enable(struct ixgbe_hw *hw); 254 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev, 255 struct rte_eth_stats *stats); 256 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); 257 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, 258 uint16_t vlan_id, int on); 259 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, 260 uint16_t queue, int on); 261 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask); 262 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on); 263 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, 264 uint16_t queue_id); 265 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, 266 uint16_t queue_id); 267 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 268 uint8_t queue, uint8_t msix_vector); 269 static void ixgbevf_configure_msix(struct rte_eth_dev *dev); 270 static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev); 271 static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev); 272 273 /* For Eth VMDQ APIs support */ 274 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct 275 ether_addr * mac_addr, uint8_t on); 276 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on); 277 static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, 278 uint16_t rx_mask, uint8_t on); 279 static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on); 280 static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on); 281 static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, 282 uint64_t pool_mask, uint8_t vlan_on); 283 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev, 284 struct rte_eth_mirror_conf *mirror_conf, 285 uint8_t rule_id, uint8_t on); 286 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, 287 uint8_t rule_id); 288 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, 289 uint16_t queue_id); 290 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, 291 uint16_t queue_id); 292 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 293 uint8_t queue, uint8_t msix_vector); 294 static void ixgbe_configure_msix(struct rte_eth_dev *dev); 295 296 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, 297 uint16_t queue_idx, uint16_t tx_rate); 298 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, 299 uint16_t tx_rate, uint64_t q_msk); 300 301 static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev, 302 struct ether_addr *mac_addr, 303 uint32_t index, uint32_t pool); 304 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); 305 static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, 306 struct ether_addr *mac_addr); 307 static int ixgbe_syn_filter_set(struct rte_eth_dev *dev, 308 struct rte_eth_syn_filter *filter, 309 bool add); 310 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev, 311 struct rte_eth_syn_filter *filter); 312 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev, 313 enum rte_filter_op filter_op, 314 void *arg); 315 static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 316 struct ixgbe_5tuple_filter *filter); 317 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 318 struct ixgbe_5tuple_filter *filter); 319 static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, 320 struct rte_eth_ntuple_filter *filter, 321 bool add); 322 static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, 323 enum rte_filter_op filter_op, 324 void *arg); 325 static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, 326 struct rte_eth_ntuple_filter *filter); 327 static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, 328 struct rte_eth_ethertype_filter *filter, 329 bool add); 330 static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, 331 enum rte_filter_op filter_op, 332 void *arg); 333 static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, 334 struct rte_eth_ethertype_filter *filter); 335 static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, 336 enum rte_filter_type filter_type, 337 enum rte_filter_op filter_op, 338 void *arg); 339 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); 340 341 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 342 struct ether_addr *mc_addr_set, 343 uint32_t nb_mc_addr); 344 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 345 struct rte_eth_dcb_info *dcb_info); 346 347 static int ixgbe_get_reg_length(struct rte_eth_dev *dev); 348 static int ixgbe_get_regs(struct rte_eth_dev *dev, 349 struct rte_dev_reg_info *regs); 350 static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev); 351 static int ixgbe_get_eeprom(struct rte_eth_dev *dev, 352 struct rte_dev_eeprom_info *eeprom); 353 static int ixgbe_set_eeprom(struct rte_eth_dev *dev, 354 struct rte_dev_eeprom_info *eeprom); 355 356 static int ixgbevf_get_reg_length(struct rte_eth_dev *dev); 357 static int ixgbevf_get_regs(struct rte_eth_dev *dev, 358 struct rte_dev_reg_info *regs); 359 360 static int ixgbe_timesync_enable(struct rte_eth_dev *dev); 361 static int ixgbe_timesync_disable(struct rte_eth_dev *dev); 362 static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 363 struct timespec *timestamp, 364 uint32_t flags); 365 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 366 struct timespec *timestamp); 367 static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); 368 static int ixgbe_timesync_read_time(struct rte_eth_dev *dev, 369 struct timespec *timestamp); 370 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev, 371 const struct timespec *timestamp); 372 static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle, 373 void *param); 374 375 static int ixgbe_dev_l2_tunnel_eth_type_conf 376 (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel); 377 static int ixgbe_dev_l2_tunnel_offload_set 378 (struct rte_eth_dev *dev, 379 struct rte_eth_l2_tunnel_conf *l2_tunnel, 380 uint32_t mask, 381 uint8_t en); 382 static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, 383 enum rte_filter_op filter_op, 384 void *arg); 385 386 static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 387 struct rte_eth_udp_tunnel *udp_tunnel); 388 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 389 struct rte_eth_udp_tunnel *udp_tunnel); 390 391 /* 392 * Define VF Stats MACRO for Non "cleared on read" register 393 */ 394 #define UPDATE_VF_STAT(reg, last, cur) \ 395 { \ 396 uint32_t latest = IXGBE_READ_REG(hw, reg); \ 397 cur += (latest - last) & UINT_MAX; \ 398 last = latest; \ 399 } 400 401 #define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur) \ 402 { \ 403 u64 new_lsb = IXGBE_READ_REG(hw, lsb); \ 404 u64 new_msb = IXGBE_READ_REG(hw, msb); \ 405 u64 latest = ((new_msb << 32) | new_lsb); \ 406 cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \ 407 last = latest; \ 408 } 409 410 #define IXGBE_SET_HWSTRIP(h, q) do {\ 411 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 412 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 413 (h)->bitmap[idx] |= 1 << bit;\ 414 } while (0) 415 416 #define IXGBE_CLEAR_HWSTRIP(h, q) do {\ 417 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 418 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 419 (h)->bitmap[idx] &= ~(1 << bit);\ 420 } while (0) 421 422 #define IXGBE_GET_HWSTRIP(h, q, r) do {\ 423 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 424 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 425 (r) = (h)->bitmap[idx] >> bit & 1;\ 426 } while (0) 427 428 /* 429 * The set of PCI devices this driver supports 430 */ 431 static const struct rte_pci_id pci_id_ixgbe_map[] = { 432 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) }, 433 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) }, 434 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) }, 435 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) }, 436 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) }, 437 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) }, 438 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) }, 439 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) }, 440 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) }, 441 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) }, 442 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) }, 443 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) }, 444 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) }, 445 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) }, 446 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) }, 447 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) }, 448 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) }, 449 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) }, 450 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) }, 451 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) }, 452 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) }, 453 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) }, 454 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) }, 455 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) }, 456 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) }, 457 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) }, 458 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) }, 459 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) }, 460 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) }, 461 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) }, 462 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) }, 463 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) }, 464 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) }, 465 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) }, 466 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) }, 467 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) }, 468 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) }, 469 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) }, 470 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) }, 471 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) }, 472 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) }, 473 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) }, 474 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) }, 475 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) }, 476 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) }, 477 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) }, 478 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) }, 479 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) }, 480 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) }, 481 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) }, 482 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) }, 483 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) }, 484 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) }, 485 #ifdef RTE_NIC_BYPASS 486 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) }, 487 #endif 488 { .vendor_id = 0, /* sentinel */ }, 489 }; 490 491 /* 492 * The set of PCI devices this driver supports (for 82599 VF) 493 */ 494 static const struct rte_pci_id pci_id_ixgbevf_map[] = { 495 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) }, 496 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) }, 497 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) }, 498 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) }, 499 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) }, 500 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) }, 501 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) }, 502 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) }, 503 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) }, 504 { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) }, 505 { .vendor_id = 0, /* sentinel */ }, 506 }; 507 508 static const struct rte_eth_desc_lim rx_desc_lim = { 509 .nb_max = IXGBE_MAX_RING_DESC, 510 .nb_min = IXGBE_MIN_RING_DESC, 511 .nb_align = IXGBE_RXD_ALIGN, 512 }; 513 514 static const struct rte_eth_desc_lim tx_desc_lim = { 515 .nb_max = IXGBE_MAX_RING_DESC, 516 .nb_min = IXGBE_MIN_RING_DESC, 517 .nb_align = IXGBE_TXD_ALIGN, 518 }; 519 520 static const struct eth_dev_ops ixgbe_eth_dev_ops = { 521 .dev_configure = ixgbe_dev_configure, 522 .dev_start = ixgbe_dev_start, 523 .dev_stop = ixgbe_dev_stop, 524 .dev_set_link_up = ixgbe_dev_set_link_up, 525 .dev_set_link_down = ixgbe_dev_set_link_down, 526 .dev_close = ixgbe_dev_close, 527 .promiscuous_enable = ixgbe_dev_promiscuous_enable, 528 .promiscuous_disable = ixgbe_dev_promiscuous_disable, 529 .allmulticast_enable = ixgbe_dev_allmulticast_enable, 530 .allmulticast_disable = ixgbe_dev_allmulticast_disable, 531 .link_update = ixgbe_dev_link_update, 532 .stats_get = ixgbe_dev_stats_get, 533 .xstats_get = ixgbe_dev_xstats_get, 534 .stats_reset = ixgbe_dev_stats_reset, 535 .xstats_reset = ixgbe_dev_xstats_reset, 536 .xstats_get_names = ixgbe_dev_xstats_get_names, 537 .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set, 538 .dev_infos_get = ixgbe_dev_info_get, 539 .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, 540 .mtu_set = ixgbe_dev_mtu_set, 541 .vlan_filter_set = ixgbe_vlan_filter_set, 542 .vlan_tpid_set = ixgbe_vlan_tpid_set, 543 .vlan_offload_set = ixgbe_vlan_offload_set, 544 .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set, 545 .rx_queue_start = ixgbe_dev_rx_queue_start, 546 .rx_queue_stop = ixgbe_dev_rx_queue_stop, 547 .tx_queue_start = ixgbe_dev_tx_queue_start, 548 .tx_queue_stop = ixgbe_dev_tx_queue_stop, 549 .rx_queue_setup = ixgbe_dev_rx_queue_setup, 550 .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable, 551 .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable, 552 .rx_queue_release = ixgbe_dev_rx_queue_release, 553 .rx_queue_count = ixgbe_dev_rx_queue_count, 554 .rx_descriptor_done = ixgbe_dev_rx_descriptor_done, 555 .tx_queue_setup = ixgbe_dev_tx_queue_setup, 556 .tx_queue_release = ixgbe_dev_tx_queue_release, 557 .dev_led_on = ixgbe_dev_led_on, 558 .dev_led_off = ixgbe_dev_led_off, 559 .flow_ctrl_get = ixgbe_flow_ctrl_get, 560 .flow_ctrl_set = ixgbe_flow_ctrl_set, 561 .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set, 562 .mac_addr_add = ixgbe_add_rar, 563 .mac_addr_remove = ixgbe_remove_rar, 564 .mac_addr_set = ixgbe_set_default_mac_addr, 565 .uc_hash_table_set = ixgbe_uc_hash_table_set, 566 .uc_all_hash_table_set = ixgbe_uc_all_hash_table_set, 567 .mirror_rule_set = ixgbe_mirror_rule_set, 568 .mirror_rule_reset = ixgbe_mirror_rule_reset, 569 .set_vf_rx_mode = ixgbe_set_pool_rx_mode, 570 .set_vf_rx = ixgbe_set_pool_rx, 571 .set_vf_tx = ixgbe_set_pool_tx, 572 .set_vf_vlan_filter = ixgbe_set_pool_vlan_filter, 573 .set_queue_rate_limit = ixgbe_set_queue_rate_limit, 574 .set_vf_rate_limit = ixgbe_set_vf_rate_limit, 575 .reta_update = ixgbe_dev_rss_reta_update, 576 .reta_query = ixgbe_dev_rss_reta_query, 577 #ifdef RTE_NIC_BYPASS 578 .bypass_init = ixgbe_bypass_init, 579 .bypass_state_set = ixgbe_bypass_state_store, 580 .bypass_state_show = ixgbe_bypass_state_show, 581 .bypass_event_set = ixgbe_bypass_event_store, 582 .bypass_event_show = ixgbe_bypass_event_show, 583 .bypass_wd_timeout_set = ixgbe_bypass_wd_timeout_store, 584 .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show, 585 .bypass_ver_show = ixgbe_bypass_ver_show, 586 .bypass_wd_reset = ixgbe_bypass_wd_reset, 587 #endif /* RTE_NIC_BYPASS */ 588 .rss_hash_update = ixgbe_dev_rss_hash_update, 589 .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, 590 .filter_ctrl = ixgbe_dev_filter_ctrl, 591 .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, 592 .rxq_info_get = ixgbe_rxq_info_get, 593 .txq_info_get = ixgbe_txq_info_get, 594 .timesync_enable = ixgbe_timesync_enable, 595 .timesync_disable = ixgbe_timesync_disable, 596 .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp, 597 .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp, 598 .get_reg = ixgbe_get_regs, 599 .get_eeprom_length = ixgbe_get_eeprom_length, 600 .get_eeprom = ixgbe_get_eeprom, 601 .set_eeprom = ixgbe_set_eeprom, 602 .get_dcb_info = ixgbe_dev_get_dcb_info, 603 .timesync_adjust_time = ixgbe_timesync_adjust_time, 604 .timesync_read_time = ixgbe_timesync_read_time, 605 .timesync_write_time = ixgbe_timesync_write_time, 606 .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf, 607 .l2_tunnel_offload_set = ixgbe_dev_l2_tunnel_offload_set, 608 .udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add, 609 .udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del, 610 }; 611 612 /* 613 * dev_ops for virtual function, bare necessities for basic vf 614 * operation have been implemented 615 */ 616 static const struct eth_dev_ops ixgbevf_eth_dev_ops = { 617 .dev_configure = ixgbevf_dev_configure, 618 .dev_start = ixgbevf_dev_start, 619 .dev_stop = ixgbevf_dev_stop, 620 .link_update = ixgbe_dev_link_update, 621 .stats_get = ixgbevf_dev_stats_get, 622 .xstats_get = ixgbevf_dev_xstats_get, 623 .stats_reset = ixgbevf_dev_stats_reset, 624 .xstats_reset = ixgbevf_dev_stats_reset, 625 .xstats_get_names = ixgbevf_dev_xstats_get_names, 626 .dev_close = ixgbevf_dev_close, 627 .allmulticast_enable = ixgbevf_dev_allmulticast_enable, 628 .allmulticast_disable = ixgbevf_dev_allmulticast_disable, 629 .dev_infos_get = ixgbevf_dev_info_get, 630 .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, 631 .mtu_set = ixgbevf_dev_set_mtu, 632 .vlan_filter_set = ixgbevf_vlan_filter_set, 633 .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set, 634 .vlan_offload_set = ixgbevf_vlan_offload_set, 635 .rx_queue_setup = ixgbe_dev_rx_queue_setup, 636 .rx_queue_release = ixgbe_dev_rx_queue_release, 637 .rx_descriptor_done = ixgbe_dev_rx_descriptor_done, 638 .tx_queue_setup = ixgbe_dev_tx_queue_setup, 639 .tx_queue_release = ixgbe_dev_tx_queue_release, 640 .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable, 641 .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable, 642 .mac_addr_add = ixgbevf_add_mac_addr, 643 .mac_addr_remove = ixgbevf_remove_mac_addr, 644 .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, 645 .rxq_info_get = ixgbe_rxq_info_get, 646 .txq_info_get = ixgbe_txq_info_get, 647 .mac_addr_set = ixgbevf_set_default_mac_addr, 648 .get_reg = ixgbevf_get_regs, 649 .reta_update = ixgbe_dev_rss_reta_update, 650 .reta_query = ixgbe_dev_rss_reta_query, 651 .rss_hash_update = ixgbe_dev_rss_hash_update, 652 .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, 653 }; 654 655 /* store statistics names and its offset in stats structure */ 656 struct rte_ixgbe_xstats_name_off { 657 char name[RTE_ETH_XSTATS_NAME_SIZE]; 658 unsigned offset; 659 }; 660 661 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = { 662 {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)}, 663 {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)}, 664 {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)}, 665 {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)}, 666 {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)}, 667 {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)}, 668 {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)}, 669 {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)}, 670 {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)}, 671 {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)}, 672 {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)}, 673 {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)}, 674 {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)}, 675 {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)}, 676 {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, 677 prc1023)}, 678 {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, 679 prc1522)}, 680 {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)}, 681 {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)}, 682 {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)}, 683 {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)}, 684 {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)}, 685 {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)}, 686 {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)}, 687 {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)}, 688 {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)}, 689 {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)}, 690 {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)}, 691 {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)}, 692 {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)}, 693 {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)}, 694 {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)}, 695 {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)}, 696 {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, 697 ptc1023)}, 698 {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, 699 ptc1522)}, 700 {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)}, 701 {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)}, 702 {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)}, 703 {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)}, 704 705 {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats, 706 fdirustat_add)}, 707 {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats, 708 fdirustat_remove)}, 709 {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats, 710 fdirfstat_fadd)}, 711 {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats, 712 fdirfstat_fremove)}, 713 {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats, 714 fdirmatch)}, 715 {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats, 716 fdirmiss)}, 717 718 {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)}, 719 {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)}, 720 {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, 721 fclast)}, 722 {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)}, 723 {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)}, 724 {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)}, 725 {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)}, 726 {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats, 727 fcoe_noddp)}, 728 {"rx_fcoe_no_direct_data_placement_ext_buff", 729 offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)}, 730 731 {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, 732 lxontxc)}, 733 {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, 734 lxonrxc)}, 735 {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, 736 lxofftxc)}, 737 {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, 738 lxoffrxc)}, 739 {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)}, 740 }; 741 742 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \ 743 sizeof(rte_ixgbe_stats_strings[0])) 744 745 /* Per-queue statistics */ 746 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = { 747 {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)}, 748 {"dropped", offsetof(struct ixgbe_hw_stats, mpc)}, 749 {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)}, 750 {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)}, 751 }; 752 753 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \ 754 sizeof(rte_ixgbe_rxq_strings[0])) 755 #define IXGBE_NB_RXQ_PRIO_VALUES 8 756 757 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = { 758 {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)}, 759 {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)}, 760 {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats, 761 pxon2offc)}, 762 }; 763 764 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \ 765 sizeof(rte_ixgbe_txq_strings[0])) 766 #define IXGBE_NB_TXQ_PRIO_VALUES 8 767 768 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = { 769 {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)}, 770 }; 771 772 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \ 773 sizeof(rte_ixgbevf_stats_strings[0])) 774 775 /** 776 * Atomically reads the link status information from global 777 * structure rte_eth_dev. 778 * 779 * @param dev 780 * - Pointer to the structure rte_eth_dev to read from. 781 * - Pointer to the buffer to be saved with the link status. 782 * 783 * @return 784 * - On success, zero. 785 * - On failure, negative value. 786 */ 787 static inline int 788 rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev, 789 struct rte_eth_link *link) 790 { 791 struct rte_eth_link *dst = link; 792 struct rte_eth_link *src = &(dev->data->dev_link); 793 794 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 795 *(uint64_t *)src) == 0) 796 return -1; 797 798 return 0; 799 } 800 801 /** 802 * Atomically writes the link status information into global 803 * structure rte_eth_dev. 804 * 805 * @param dev 806 * - Pointer to the structure rte_eth_dev to read from. 807 * - Pointer to the buffer to be saved with the link status. 808 * 809 * @return 810 * - On success, zero. 811 * - On failure, negative value. 812 */ 813 static inline int 814 rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev, 815 struct rte_eth_link *link) 816 { 817 struct rte_eth_link *dst = &(dev->data->dev_link); 818 struct rte_eth_link *src = link; 819 820 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 821 *(uint64_t *)src) == 0) 822 return -1; 823 824 return 0; 825 } 826 827 /* 828 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h. 829 */ 830 static inline int 831 ixgbe_is_sfp(struct ixgbe_hw *hw) 832 { 833 switch (hw->phy.type) { 834 case ixgbe_phy_sfp_avago: 835 case ixgbe_phy_sfp_ftl: 836 case ixgbe_phy_sfp_intel: 837 case ixgbe_phy_sfp_unknown: 838 case ixgbe_phy_sfp_passive_tyco: 839 case ixgbe_phy_sfp_passive_unknown: 840 return 1; 841 default: 842 return 0; 843 } 844 } 845 846 static inline int32_t 847 ixgbe_pf_reset_hw(struct ixgbe_hw *hw) 848 { 849 uint32_t ctrl_ext; 850 int32_t status; 851 852 status = ixgbe_reset_hw(hw); 853 854 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 855 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 856 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 857 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 858 IXGBE_WRITE_FLUSH(hw); 859 860 return status; 861 } 862 863 static inline void 864 ixgbe_enable_intr(struct rte_eth_dev *dev) 865 { 866 struct ixgbe_interrupt *intr = 867 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 868 struct ixgbe_hw *hw = 869 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 870 871 IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask); 872 IXGBE_WRITE_FLUSH(hw); 873 } 874 875 /* 876 * This function is based on ixgbe_disable_intr() in base/ixgbe.h. 877 */ 878 static void 879 ixgbe_disable_intr(struct ixgbe_hw *hw) 880 { 881 PMD_INIT_FUNC_TRACE(); 882 883 if (hw->mac.type == ixgbe_mac_82598EB) { 884 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0); 885 } else { 886 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000); 887 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0); 888 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0); 889 } 890 IXGBE_WRITE_FLUSH(hw); 891 } 892 893 /* 894 * This function resets queue statistics mapping registers. 895 * From Niantic datasheet, Initialization of Statistics section: 896 * "...if software requires the queue counters, the RQSMR and TQSM registers 897 * must be re-programmed following a device reset. 898 */ 899 static void 900 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw) 901 { 902 uint32_t i; 903 904 for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) { 905 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0); 906 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0); 907 } 908 } 909 910 911 static int 912 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, 913 uint16_t queue_id, 914 uint8_t stat_idx, 915 uint8_t is_rx) 916 { 917 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8 918 #define NB_QMAP_FIELDS_PER_QSM_REG 4 919 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f 920 921 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 922 struct ixgbe_stat_mapping_registers *stat_mappings = 923 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private); 924 uint32_t qsmr_mask = 0; 925 uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK; 926 uint32_t q_map; 927 uint8_t n, offset; 928 929 if ((hw->mac.type != ixgbe_mac_82599EB) && 930 (hw->mac.type != ixgbe_mac_X540) && 931 (hw->mac.type != ixgbe_mac_X550) && 932 (hw->mac.type != ixgbe_mac_X550EM_x) && 933 (hw->mac.type != ixgbe_mac_X550EM_a)) 934 return -ENOSYS; 935 936 PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d", 937 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", 938 queue_id, stat_idx); 939 940 n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG); 941 if (n >= IXGBE_NB_STAT_MAPPING_REGS) { 942 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded"); 943 return -EIO; 944 } 945 offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG); 946 947 /* Now clear any previous stat_idx set */ 948 clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset); 949 if (!is_rx) 950 stat_mappings->tqsm[n] &= ~clearing_mask; 951 else 952 stat_mappings->rqsmr[n] &= ~clearing_mask; 953 954 q_map = (uint32_t)stat_idx; 955 q_map &= QMAP_FIELD_RESERVED_BITS_MASK; 956 qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset); 957 if (!is_rx) 958 stat_mappings->tqsm[n] |= qsmr_mask; 959 else 960 stat_mappings->rqsmr[n] |= qsmr_mask; 961 962 PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d", 963 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", 964 queue_id, stat_idx); 965 PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n, 966 is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]); 967 968 /* Now write the mapping in the appropriate register */ 969 if (is_rx) { 970 PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d", 971 stat_mappings->rqsmr[n], n); 972 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]); 973 } else { 974 PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d", 975 stat_mappings->tqsm[n], n); 976 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]); 977 } 978 return 0; 979 } 980 981 static void 982 ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev) 983 { 984 struct ixgbe_stat_mapping_registers *stat_mappings = 985 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private); 986 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 987 int i; 988 989 /* write whatever was in stat mapping table to the NIC */ 990 for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) { 991 /* rx */ 992 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]); 993 994 /* tx */ 995 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]); 996 } 997 } 998 999 static void 1000 ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) 1001 { 1002 uint8_t i; 1003 struct ixgbe_dcb_tc_config *tc; 1004 uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS; 1005 1006 dcb_config->num_tcs.pg_tcs = dcb_max_tc; 1007 dcb_config->num_tcs.pfc_tcs = dcb_max_tc; 1008 for (i = 0; i < dcb_max_tc; i++) { 1009 tc = &dcb_config->tc_config[i]; 1010 tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i; 1011 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 1012 (uint8_t)(100/dcb_max_tc + (i & 1)); 1013 tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i; 1014 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 1015 (uint8_t)(100/dcb_max_tc + (i & 1)); 1016 tc->pfc = ixgbe_dcb_pfc_disabled; 1017 } 1018 1019 /* Initialize default user to priority mapping, UPx->TC0 */ 1020 tc = &dcb_config->tc_config[0]; 1021 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; 1022 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; 1023 for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) { 1024 dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100; 1025 dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100; 1026 } 1027 dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal; 1028 dcb_config->pfc_mode_enable = false; 1029 dcb_config->vt_mode = true; 1030 dcb_config->round_robin_enable = false; 1031 /* support all DCB capabilities in 82599 */ 1032 dcb_config->support.capabilities = 0xFF; 1033 1034 /*we only support 4 Tcs for X540, X550 */ 1035 if (hw->mac.type == ixgbe_mac_X540 || 1036 hw->mac.type == ixgbe_mac_X550 || 1037 hw->mac.type == ixgbe_mac_X550EM_x || 1038 hw->mac.type == ixgbe_mac_X550EM_a) { 1039 dcb_config->num_tcs.pg_tcs = 4; 1040 dcb_config->num_tcs.pfc_tcs = 4; 1041 } 1042 } 1043 1044 /* 1045 * Ensure that all locks are released before first NVM or PHY access 1046 */ 1047 static void 1048 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) 1049 { 1050 uint16_t mask; 1051 1052 /* 1053 * Phy lock should not fail in this early stage. If this is the case, 1054 * it is due to an improper exit of the application. 1055 * So force the release of the faulty lock. Release of common lock 1056 * is done automatically by swfw_sync function. 1057 */ 1058 mask = IXGBE_GSSR_PHY0_SM << hw->bus.func; 1059 if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { 1060 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func); 1061 } 1062 ixgbe_release_swfw_semaphore(hw, mask); 1063 1064 /* 1065 * These ones are more tricky since they are common to all ports; but 1066 * swfw_sync retries last long enough (1s) to be almost sure that if 1067 * lock can not be taken it is due to an improper lock of the 1068 * semaphore. 1069 */ 1070 mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM; 1071 if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { 1072 PMD_DRV_LOG(DEBUG, "SWFW common locks released"); 1073 } 1074 ixgbe_release_swfw_semaphore(hw, mask); 1075 } 1076 1077 /* 1078 * This function is based on code in ixgbe_attach() in base/ixgbe.c. 1079 * It returns 0 on success. 1080 */ 1081 static int 1082 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) 1083 { 1084 struct rte_pci_device *pci_dev; 1085 struct ixgbe_hw *hw = 1086 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1087 struct ixgbe_vfta *shadow_vfta = 1088 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1089 struct ixgbe_hwstrip *hwstrip = 1090 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1091 struct ixgbe_dcb_config *dcb_config = 1092 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private); 1093 struct ixgbe_filter_info *filter_info = 1094 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); 1095 uint32_t ctrl_ext; 1096 uint16_t csum; 1097 int diag, i; 1098 1099 PMD_INIT_FUNC_TRACE(); 1100 1101 eth_dev->dev_ops = &ixgbe_eth_dev_ops; 1102 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1103 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1104 1105 /* 1106 * For secondary processes, we don't initialise any further as primary 1107 * has already done this work. Only check we don't need a different 1108 * RX and TX function. 1109 */ 1110 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1111 struct ixgbe_tx_queue *txq; 1112 /* TX queue function in primary, set by last queue initialized 1113 * Tx queue may not initialized by primary process 1114 */ 1115 if (eth_dev->data->tx_queues) { 1116 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1]; 1117 ixgbe_set_tx_function(eth_dev, txq); 1118 } else { 1119 /* Use default TX function if we get here */ 1120 PMD_INIT_LOG(NOTICE, "No TX queues configured yet. " 1121 "Using default TX function."); 1122 } 1123 1124 ixgbe_set_rx_function(eth_dev); 1125 1126 return 0; 1127 } 1128 pci_dev = eth_dev->pci_dev; 1129 1130 rte_eth_copy_pci_info(eth_dev, pci_dev); 1131 1132 /* Vendor and Device ID need to be set before init of shared code */ 1133 hw->device_id = pci_dev->id.device_id; 1134 hw->vendor_id = pci_dev->id.vendor_id; 1135 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1136 hw->allow_unsupported_sfp = 1; 1137 1138 /* Initialize the shared code (base driver) */ 1139 #ifdef RTE_NIC_BYPASS 1140 diag = ixgbe_bypass_init_shared_code(hw); 1141 #else 1142 diag = ixgbe_init_shared_code(hw); 1143 #endif /* RTE_NIC_BYPASS */ 1144 1145 if (diag != IXGBE_SUCCESS) { 1146 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag); 1147 return -EIO; 1148 } 1149 1150 /* pick up the PCI bus settings for reporting later */ 1151 ixgbe_get_bus_info(hw); 1152 1153 /* Unlock any pending hardware semaphore */ 1154 ixgbe_swfw_lock_reset(hw); 1155 1156 /* Initialize DCB configuration*/ 1157 memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config)); 1158 ixgbe_dcb_init(hw, dcb_config); 1159 /* Get Hardware Flow Control setting */ 1160 hw->fc.requested_mode = ixgbe_fc_full; 1161 hw->fc.current_mode = ixgbe_fc_full; 1162 hw->fc.pause_time = IXGBE_FC_PAUSE; 1163 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 1164 hw->fc.low_water[i] = IXGBE_FC_LO; 1165 hw->fc.high_water[i] = IXGBE_FC_HI; 1166 } 1167 hw->fc.send_xon = 1; 1168 1169 /* Make sure we have a good EEPROM before we read from it */ 1170 diag = ixgbe_validate_eeprom_checksum(hw, &csum); 1171 if (diag != IXGBE_SUCCESS) { 1172 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag); 1173 return -EIO; 1174 } 1175 1176 #ifdef RTE_NIC_BYPASS 1177 diag = ixgbe_bypass_init_hw(hw); 1178 #else 1179 diag = ixgbe_init_hw(hw); 1180 #endif /* RTE_NIC_BYPASS */ 1181 1182 /* 1183 * Devices with copper phys will fail to initialise if ixgbe_init_hw() 1184 * is called too soon after the kernel driver unbinding/binding occurs. 1185 * The failure occurs in ixgbe_identify_phy_generic() for all devices, 1186 * but for non-copper devies, ixgbe_identify_sfp_module_generic() is 1187 * also called. See ixgbe_identify_phy_82599(). The reason for the 1188 * failure is not known, and only occuts when virtualisation features 1189 * are disabled in the bios. A delay of 100ms was found to be enough by 1190 * trial-and-error, and is doubled to be safe. 1191 */ 1192 if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { 1193 rte_delay_ms(200); 1194 diag = ixgbe_init_hw(hw); 1195 } 1196 1197 if (diag == IXGBE_ERR_EEPROM_VERSION) { 1198 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/" 1199 "LOM. Please be aware there may be issues associated " 1200 "with your hardware."); 1201 PMD_INIT_LOG(ERR, "If you are experiencing problems " 1202 "please contact your Intel or hardware representative " 1203 "who provided you with this hardware."); 1204 } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED) 1205 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); 1206 if (diag) { 1207 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag); 1208 return -EIO; 1209 } 1210 1211 /* Reset the hw statistics */ 1212 ixgbe_dev_stats_reset(eth_dev); 1213 1214 /* disable interrupt */ 1215 ixgbe_disable_intr(hw); 1216 1217 /* reset mappings for queue statistics hw counters*/ 1218 ixgbe_reset_qstat_mappings(hw); 1219 1220 /* Allocate memory for storing MAC addresses */ 1221 eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * 1222 hw->mac.num_rar_entries, 0); 1223 if (eth_dev->data->mac_addrs == NULL) { 1224 PMD_INIT_LOG(ERR, 1225 "Failed to allocate %u bytes needed to store " 1226 "MAC addresses", 1227 ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1228 return -ENOMEM; 1229 } 1230 /* Copy the permanent MAC address */ 1231 ether_addr_copy((struct ether_addr *) hw->mac.perm_addr, 1232 ð_dev->data->mac_addrs[0]); 1233 1234 /* Allocate memory for storing hash filter MAC addresses */ 1235 eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * 1236 IXGBE_VMDQ_NUM_UC_MAC, 0); 1237 if (eth_dev->data->hash_mac_addrs == NULL) { 1238 PMD_INIT_LOG(ERR, 1239 "Failed to allocate %d bytes needed to store MAC addresses", 1240 ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); 1241 return -ENOMEM; 1242 } 1243 1244 /* initialize the vfta */ 1245 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1246 1247 /* initialize the hw strip bitmap*/ 1248 memset(hwstrip, 0, sizeof(*hwstrip)); 1249 1250 /* initialize PF if max_vfs not zero */ 1251 ixgbe_pf_host_init(eth_dev); 1252 1253 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 1254 /* let hardware know driver is loaded */ 1255 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 1256 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 1257 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 1258 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 1259 IXGBE_WRITE_FLUSH(hw); 1260 1261 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 1262 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d", 1263 (int) hw->mac.type, (int) hw->phy.type, 1264 (int) hw->phy.sfp_type); 1265 else 1266 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d", 1267 (int) hw->mac.type, (int) hw->phy.type); 1268 1269 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", 1270 eth_dev->data->port_id, pci_dev->id.vendor_id, 1271 pci_dev->id.device_id); 1272 1273 rte_intr_callback_register(&pci_dev->intr_handle, 1274 ixgbe_dev_interrupt_handler, 1275 (void *)eth_dev); 1276 1277 /* enable uio/vfio intr/eventfd mapping */ 1278 rte_intr_enable(&pci_dev->intr_handle); 1279 1280 /* enable support intr */ 1281 ixgbe_enable_intr(eth_dev); 1282 1283 /* initialize 5tuple filter list */ 1284 TAILQ_INIT(&filter_info->fivetuple_list); 1285 memset(filter_info->fivetuple_mask, 0, 1286 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); 1287 1288 return 0; 1289 } 1290 1291 static int 1292 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) 1293 { 1294 struct rte_pci_device *pci_dev; 1295 struct ixgbe_hw *hw; 1296 1297 PMD_INIT_FUNC_TRACE(); 1298 1299 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1300 return -EPERM; 1301 1302 hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1303 pci_dev = eth_dev->pci_dev; 1304 1305 if (hw->adapter_stopped == 0) 1306 ixgbe_dev_close(eth_dev); 1307 1308 eth_dev->dev_ops = NULL; 1309 eth_dev->rx_pkt_burst = NULL; 1310 eth_dev->tx_pkt_burst = NULL; 1311 1312 /* Unlock any pending hardware semaphore */ 1313 ixgbe_swfw_lock_reset(hw); 1314 1315 /* disable uio intr before callback unregister */ 1316 rte_intr_disable(&(pci_dev->intr_handle)); 1317 rte_intr_callback_unregister(&(pci_dev->intr_handle), 1318 ixgbe_dev_interrupt_handler, (void *)eth_dev); 1319 1320 /* uninitialize PF if max_vfs not zero */ 1321 ixgbe_pf_host_uninit(eth_dev); 1322 1323 rte_free(eth_dev->data->mac_addrs); 1324 eth_dev->data->mac_addrs = NULL; 1325 1326 rte_free(eth_dev->data->hash_mac_addrs); 1327 eth_dev->data->hash_mac_addrs = NULL; 1328 1329 return 0; 1330 } 1331 1332 /* 1333 * Negotiate mailbox API version with the PF. 1334 * After reset API version is always set to the basic one (ixgbe_mbox_api_10). 1335 * Then we try to negotiate starting with the most recent one. 1336 * If all negotiation attempts fail, then we will proceed with 1337 * the default one (ixgbe_mbox_api_10). 1338 */ 1339 static void 1340 ixgbevf_negotiate_api(struct ixgbe_hw *hw) 1341 { 1342 int32_t i; 1343 1344 /* start with highest supported, proceed down */ 1345 static const enum ixgbe_pfvf_api_rev sup_ver[] = { 1346 ixgbe_mbox_api_12, 1347 ixgbe_mbox_api_11, 1348 ixgbe_mbox_api_10, 1349 }; 1350 1351 for (i = 0; 1352 i != RTE_DIM(sup_ver) && 1353 ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0; 1354 i++) 1355 ; 1356 } 1357 1358 static void 1359 generate_random_mac_addr(struct ether_addr *mac_addr) 1360 { 1361 uint64_t random; 1362 1363 /* Set Organizationally Unique Identifier (OUI) prefix. */ 1364 mac_addr->addr_bytes[0] = 0x00; 1365 mac_addr->addr_bytes[1] = 0x09; 1366 mac_addr->addr_bytes[2] = 0xC0; 1367 /* Force indication of locally assigned MAC address. */ 1368 mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR; 1369 /* Generate the last 3 bytes of the MAC address with a random number. */ 1370 random = rte_rand(); 1371 memcpy(&mac_addr->addr_bytes[3], &random, 3); 1372 } 1373 1374 /* 1375 * Virtual Function device init 1376 */ 1377 static int 1378 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) 1379 { 1380 int diag; 1381 uint32_t tc, tcs; 1382 struct rte_pci_device *pci_dev; 1383 struct ixgbe_hw *hw = 1384 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1385 struct ixgbe_vfta *shadow_vfta = 1386 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1387 struct ixgbe_hwstrip *hwstrip = 1388 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1389 struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; 1390 1391 PMD_INIT_FUNC_TRACE(); 1392 1393 eth_dev->dev_ops = &ixgbevf_eth_dev_ops; 1394 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1395 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1396 1397 /* for secondary processes, we don't initialise any further as primary 1398 * has already done this work. Only check we don't need a different 1399 * RX function 1400 */ 1401 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1402 struct ixgbe_tx_queue *txq; 1403 /* TX queue function in primary, set by last queue initialized 1404 * Tx queue may not initialized by primary process 1405 */ 1406 if (eth_dev->data->tx_queues) { 1407 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1]; 1408 ixgbe_set_tx_function(eth_dev, txq); 1409 } else { 1410 /* Use default TX function if we get here */ 1411 PMD_INIT_LOG(NOTICE, 1412 "No TX queues configured yet. Using default TX function."); 1413 } 1414 1415 ixgbe_set_rx_function(eth_dev); 1416 1417 return 0; 1418 } 1419 1420 pci_dev = eth_dev->pci_dev; 1421 1422 rte_eth_copy_pci_info(eth_dev, pci_dev); 1423 1424 hw->device_id = pci_dev->id.device_id; 1425 hw->vendor_id = pci_dev->id.vendor_id; 1426 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1427 1428 /* initialize the vfta */ 1429 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1430 1431 /* initialize the hw strip bitmap*/ 1432 memset(hwstrip, 0, sizeof(*hwstrip)); 1433 1434 /* Initialize the shared code (base driver) */ 1435 diag = ixgbe_init_shared_code(hw); 1436 if (diag != IXGBE_SUCCESS) { 1437 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag); 1438 return -EIO; 1439 } 1440 1441 /* init_mailbox_params */ 1442 hw->mbx.ops.init_params(hw); 1443 1444 /* Reset the hw statistics */ 1445 ixgbevf_dev_stats_reset(eth_dev); 1446 1447 /* Disable the interrupts for VF */ 1448 ixgbevf_intr_disable(hw); 1449 1450 hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ 1451 diag = hw->mac.ops.reset_hw(hw); 1452 1453 /* 1454 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when 1455 * the underlying PF driver has not assigned a MAC address to the VF. 1456 * In this case, assign a random MAC address. 1457 */ 1458 if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) { 1459 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1460 return diag; 1461 } 1462 1463 /* negotiate mailbox API version to use with the PF. */ 1464 ixgbevf_negotiate_api(hw); 1465 1466 /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */ 1467 ixgbevf_get_queues(hw, &tcs, &tc); 1468 1469 /* Allocate memory for storing MAC addresses */ 1470 eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN * 1471 hw->mac.num_rar_entries, 0); 1472 if (eth_dev->data->mac_addrs == NULL) { 1473 PMD_INIT_LOG(ERR, 1474 "Failed to allocate %u bytes needed to store " 1475 "MAC addresses", 1476 ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1477 return -ENOMEM; 1478 } 1479 1480 /* Generate a random MAC address, if none was assigned by PF. */ 1481 if (is_zero_ether_addr(perm_addr)) { 1482 generate_random_mac_addr(perm_addr); 1483 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1); 1484 if (diag) { 1485 rte_free(eth_dev->data->mac_addrs); 1486 eth_dev->data->mac_addrs = NULL; 1487 return diag; 1488 } 1489 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF"); 1490 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address " 1491 "%02x:%02x:%02x:%02x:%02x:%02x", 1492 perm_addr->addr_bytes[0], 1493 perm_addr->addr_bytes[1], 1494 perm_addr->addr_bytes[2], 1495 perm_addr->addr_bytes[3], 1496 perm_addr->addr_bytes[4], 1497 perm_addr->addr_bytes[5]); 1498 } 1499 1500 /* Copy the permanent MAC address */ 1501 ether_addr_copy(perm_addr, ð_dev->data->mac_addrs[0]); 1502 1503 /* reset the hardware with the new settings */ 1504 diag = hw->mac.ops.start_hw(hw); 1505 switch (diag) { 1506 case 0: 1507 break; 1508 1509 default: 1510 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1511 return -EIO; 1512 } 1513 1514 rte_intr_callback_register(&pci_dev->intr_handle, 1515 ixgbevf_dev_interrupt_handler, 1516 (void *)eth_dev); 1517 rte_intr_enable(&pci_dev->intr_handle); 1518 ixgbevf_intr_enable(hw); 1519 1520 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", 1521 eth_dev->data->port_id, pci_dev->id.vendor_id, 1522 pci_dev->id.device_id, "ixgbe_mac_82599_vf"); 1523 1524 return 0; 1525 } 1526 1527 /* Virtual Function device uninit */ 1528 1529 static int 1530 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) 1531 { 1532 struct ixgbe_hw *hw; 1533 struct rte_pci_device *pci_dev = eth_dev->pci_dev; 1534 1535 PMD_INIT_FUNC_TRACE(); 1536 1537 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1538 return -EPERM; 1539 1540 hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1541 1542 if (hw->adapter_stopped == 0) 1543 ixgbevf_dev_close(eth_dev); 1544 1545 eth_dev->dev_ops = NULL; 1546 eth_dev->rx_pkt_burst = NULL; 1547 eth_dev->tx_pkt_burst = NULL; 1548 1549 /* Disable the interrupts for VF */ 1550 ixgbevf_intr_disable(hw); 1551 1552 rte_free(eth_dev->data->mac_addrs); 1553 eth_dev->data->mac_addrs = NULL; 1554 1555 rte_intr_disable(&pci_dev->intr_handle); 1556 rte_intr_callback_unregister(&pci_dev->intr_handle, 1557 ixgbevf_dev_interrupt_handler, 1558 (void *)eth_dev); 1559 1560 return 0; 1561 } 1562 1563 static struct eth_driver rte_ixgbe_pmd = { 1564 .pci_drv = { 1565 .id_table = pci_id_ixgbe_map, 1566 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 1567 RTE_PCI_DRV_DETACHABLE, 1568 .probe = rte_eth_dev_pci_probe, 1569 .remove = rte_eth_dev_pci_remove, 1570 }, 1571 .eth_dev_init = eth_ixgbe_dev_init, 1572 .eth_dev_uninit = eth_ixgbe_dev_uninit, 1573 .dev_private_size = sizeof(struct ixgbe_adapter), 1574 }; 1575 1576 /* 1577 * virtual function driver struct 1578 */ 1579 static struct eth_driver rte_ixgbevf_pmd = { 1580 .pci_drv = { 1581 .id_table = pci_id_ixgbevf_map, 1582 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE, 1583 .probe = rte_eth_dev_pci_probe, 1584 .remove = rte_eth_dev_pci_remove, 1585 }, 1586 .eth_dev_init = eth_ixgbevf_dev_init, 1587 .eth_dev_uninit = eth_ixgbevf_dev_uninit, 1588 .dev_private_size = sizeof(struct ixgbe_adapter), 1589 }; 1590 1591 static int 1592 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1593 { 1594 struct ixgbe_hw *hw = 1595 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1596 struct ixgbe_vfta *shadow_vfta = 1597 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1598 uint32_t vfta; 1599 uint32_t vid_idx; 1600 uint32_t vid_bit; 1601 1602 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 1603 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 1604 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx)); 1605 if (on) 1606 vfta |= vid_bit; 1607 else 1608 vfta &= ~vid_bit; 1609 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta); 1610 1611 /* update local VFTA copy */ 1612 shadow_vfta->vfta[vid_idx] = vfta; 1613 1614 return 0; 1615 } 1616 1617 static void 1618 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 1619 { 1620 if (on) 1621 ixgbe_vlan_hw_strip_enable(dev, queue); 1622 else 1623 ixgbe_vlan_hw_strip_disable(dev, queue); 1624 } 1625 1626 static int 1627 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 1628 enum rte_vlan_type vlan_type, 1629 uint16_t tpid) 1630 { 1631 struct ixgbe_hw *hw = 1632 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1633 int ret = 0; 1634 uint32_t reg; 1635 uint32_t qinq; 1636 1637 qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1638 qinq &= IXGBE_DMATXCTL_GDV; 1639 1640 switch (vlan_type) { 1641 case ETH_VLAN_TYPE_INNER: 1642 if (qinq) { 1643 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1644 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1645 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1646 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1647 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1648 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1649 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1650 } else { 1651 ret = -ENOTSUP; 1652 PMD_DRV_LOG(ERR, "Inner type is not supported" 1653 " by single VLAN"); 1654 } 1655 break; 1656 case ETH_VLAN_TYPE_OUTER: 1657 if (qinq) { 1658 /* Only the high 16-bits is valid */ 1659 IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid << 1660 IXGBE_EXVET_VET_EXT_SHIFT); 1661 } else { 1662 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1663 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1664 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1665 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1666 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1667 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1668 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1669 } 1670 1671 break; 1672 default: 1673 ret = -EINVAL; 1674 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type); 1675 break; 1676 } 1677 1678 return ret; 1679 } 1680 1681 void 1682 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev) 1683 { 1684 struct ixgbe_hw *hw = 1685 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1686 uint32_t vlnctrl; 1687 1688 PMD_INIT_FUNC_TRACE(); 1689 1690 /* Filter Table Disable */ 1691 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1692 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 1693 1694 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1695 } 1696 1697 void 1698 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) 1699 { 1700 struct ixgbe_hw *hw = 1701 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1702 struct ixgbe_vfta *shadow_vfta = 1703 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1704 uint32_t vlnctrl; 1705 uint16_t i; 1706 1707 PMD_INIT_FUNC_TRACE(); 1708 1709 /* Filter Table Enable */ 1710 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1711 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 1712 vlnctrl |= IXGBE_VLNCTRL_VFE; 1713 1714 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1715 1716 /* write whatever is in local vfta copy */ 1717 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1718 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]); 1719 } 1720 1721 static void 1722 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) 1723 { 1724 struct ixgbe_hwstrip *hwstrip = 1725 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); 1726 struct ixgbe_rx_queue *rxq; 1727 1728 if (queue >= IXGBE_MAX_RX_QUEUE_NUM) 1729 return; 1730 1731 if (on) 1732 IXGBE_SET_HWSTRIP(hwstrip, queue); 1733 else 1734 IXGBE_CLEAR_HWSTRIP(hwstrip, queue); 1735 1736 if (queue >= dev->data->nb_rx_queues) 1737 return; 1738 1739 rxq = dev->data->rx_queues[queue]; 1740 1741 if (on) 1742 rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; 1743 else 1744 rxq->vlan_flags = PKT_RX_VLAN_PKT; 1745 } 1746 1747 static void 1748 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) 1749 { 1750 struct ixgbe_hw *hw = 1751 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1752 uint32_t ctrl; 1753 1754 PMD_INIT_FUNC_TRACE(); 1755 1756 if (hw->mac.type == ixgbe_mac_82598EB) { 1757 /* No queue level support */ 1758 PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); 1759 return; 1760 } 1761 1762 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 1763 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 1764 ctrl &= ~IXGBE_RXDCTL_VME; 1765 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 1766 1767 /* record those setting for HW strip per queue */ 1768 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); 1769 } 1770 1771 static void 1772 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) 1773 { 1774 struct ixgbe_hw *hw = 1775 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1776 uint32_t ctrl; 1777 1778 PMD_INIT_FUNC_TRACE(); 1779 1780 if (hw->mac.type == ixgbe_mac_82598EB) { 1781 /* No queue level supported */ 1782 PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); 1783 return; 1784 } 1785 1786 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 1787 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 1788 ctrl |= IXGBE_RXDCTL_VME; 1789 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 1790 1791 /* record those setting for HW strip per queue */ 1792 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); 1793 } 1794 1795 void 1796 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev) 1797 { 1798 struct ixgbe_hw *hw = 1799 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1800 uint32_t ctrl; 1801 uint16_t i; 1802 struct ixgbe_rx_queue *rxq; 1803 1804 PMD_INIT_FUNC_TRACE(); 1805 1806 if (hw->mac.type == ixgbe_mac_82598EB) { 1807 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1808 ctrl &= ~IXGBE_VLNCTRL_VME; 1809 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 1810 } else { 1811 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 1812 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1813 rxq = dev->data->rx_queues[i]; 1814 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); 1815 ctrl &= ~IXGBE_RXDCTL_VME; 1816 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); 1817 1818 /* record those setting for HW strip per queue */ 1819 ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0); 1820 } 1821 } 1822 } 1823 1824 void 1825 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev) 1826 { 1827 struct ixgbe_hw *hw = 1828 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1829 uint32_t ctrl; 1830 uint16_t i; 1831 struct ixgbe_rx_queue *rxq; 1832 1833 PMD_INIT_FUNC_TRACE(); 1834 1835 if (hw->mac.type == ixgbe_mac_82598EB) { 1836 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1837 ctrl |= IXGBE_VLNCTRL_VME; 1838 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 1839 } else { 1840 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 1841 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1842 rxq = dev->data->rx_queues[i]; 1843 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); 1844 ctrl |= IXGBE_RXDCTL_VME; 1845 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); 1846 1847 /* record those setting for HW strip per queue */ 1848 ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1); 1849 } 1850 } 1851 } 1852 1853 static void 1854 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev) 1855 { 1856 struct ixgbe_hw *hw = 1857 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1858 uint32_t ctrl; 1859 1860 PMD_INIT_FUNC_TRACE(); 1861 1862 /* DMATXCTRL: Geric Double VLAN Disable */ 1863 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1864 ctrl &= ~IXGBE_DMATXCTL_GDV; 1865 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); 1866 1867 /* CTRL_EXT: Global Double VLAN Disable */ 1868 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 1869 ctrl &= ~IXGBE_EXTENDED_VLAN; 1870 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); 1871 1872 } 1873 1874 static void 1875 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) 1876 { 1877 struct ixgbe_hw *hw = 1878 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1879 uint32_t ctrl; 1880 1881 PMD_INIT_FUNC_TRACE(); 1882 1883 /* DMATXCTRL: Geric Double VLAN Enable */ 1884 ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1885 ctrl |= IXGBE_DMATXCTL_GDV; 1886 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl); 1887 1888 /* CTRL_EXT: Global Double VLAN Enable */ 1889 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 1890 ctrl |= IXGBE_EXTENDED_VLAN; 1891 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); 1892 1893 /* Clear pooling mode of PFVTCTL. It's required by X550. */ 1894 if (hw->mac.type == ixgbe_mac_X550 || 1895 hw->mac.type == ixgbe_mac_X550EM_x || 1896 hw->mac.type == ixgbe_mac_X550EM_a) { 1897 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 1898 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 1899 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 1900 } 1901 1902 /* 1903 * VET EXT field in the EXVET register = 0x8100 by default 1904 * So no need to change. Same to VT field of DMATXCTL register 1905 */ 1906 } 1907 1908 static void 1909 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) 1910 { 1911 if (mask & ETH_VLAN_STRIP_MASK) { 1912 if (dev->data->dev_conf.rxmode.hw_vlan_strip) 1913 ixgbe_vlan_hw_strip_enable_all(dev); 1914 else 1915 ixgbe_vlan_hw_strip_disable_all(dev); 1916 } 1917 1918 if (mask & ETH_VLAN_FILTER_MASK) { 1919 if (dev->data->dev_conf.rxmode.hw_vlan_filter) 1920 ixgbe_vlan_hw_filter_enable(dev); 1921 else 1922 ixgbe_vlan_hw_filter_disable(dev); 1923 } 1924 1925 if (mask & ETH_VLAN_EXTEND_MASK) { 1926 if (dev->data->dev_conf.rxmode.hw_vlan_extend) 1927 ixgbe_vlan_hw_extend_enable(dev); 1928 else 1929 ixgbe_vlan_hw_extend_disable(dev); 1930 } 1931 } 1932 1933 static void 1934 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) 1935 { 1936 struct ixgbe_hw *hw = 1937 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1938 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ 1939 uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1940 1941 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ 1942 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); 1943 } 1944 1945 static int 1946 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) 1947 { 1948 switch (nb_rx_q) { 1949 case 1: 1950 case 2: 1951 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS; 1952 break; 1953 case 4: 1954 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS; 1955 break; 1956 default: 1957 return -EINVAL; 1958 } 1959 1960 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q; 1961 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q; 1962 1963 return 0; 1964 } 1965 1966 static int 1967 ixgbe_check_mq_mode(struct rte_eth_dev *dev) 1968 { 1969 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 1970 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1971 uint16_t nb_rx_q = dev->data->nb_rx_queues; 1972 uint16_t nb_tx_q = dev->data->nb_tx_queues; 1973 1974 if (RTE_ETH_DEV_SRIOV(dev).active != 0) { 1975 /* check multi-queue mode */ 1976 switch (dev_conf->rxmode.mq_mode) { 1977 case ETH_MQ_RX_VMDQ_DCB: 1978 case ETH_MQ_RX_VMDQ_DCB_RSS: 1979 /* DCB/RSS VMDQ in SRIOV mode, not implement yet */ 1980 PMD_INIT_LOG(ERR, "SRIOV active," 1981 " unsupported mq_mode rx %d.", 1982 dev_conf->rxmode.mq_mode); 1983 return -EINVAL; 1984 case ETH_MQ_RX_RSS: 1985 case ETH_MQ_RX_VMDQ_RSS: 1986 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS; 1987 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) 1988 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) { 1989 PMD_INIT_LOG(ERR, "SRIOV is active," 1990 " invalid queue number" 1991 " for VMDQ RSS, allowed" 1992 " value are 1, 2 or 4."); 1993 return -EINVAL; 1994 } 1995 break; 1996 case ETH_MQ_RX_VMDQ_ONLY: 1997 case ETH_MQ_RX_NONE: 1998 /* if nothing mq mode configure, use default scheme */ 1999 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; 2000 if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1) 2001 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1; 2002 break; 2003 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/ 2004 /* SRIOV only works in VMDq enable mode */ 2005 PMD_INIT_LOG(ERR, "SRIOV is active," 2006 " wrong mq_mode rx %d.", 2007 dev_conf->rxmode.mq_mode); 2008 return -EINVAL; 2009 } 2010 2011 switch (dev_conf->txmode.mq_mode) { 2012 case ETH_MQ_TX_VMDQ_DCB: 2013 /* DCB VMDQ in SRIOV mode, not implement yet */ 2014 PMD_INIT_LOG(ERR, "SRIOV is active," 2015 " unsupported VMDQ mq_mode tx %d.", 2016 dev_conf->txmode.mq_mode); 2017 return -EINVAL; 2018 default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */ 2019 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY; 2020 break; 2021 } 2022 2023 /* check valid queue number */ 2024 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || 2025 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { 2026 PMD_INIT_LOG(ERR, "SRIOV is active," 2027 " nb_rx_q=%d nb_tx_q=%d queue number" 2028 " must be less than or equal to %d.", 2029 nb_rx_q, nb_tx_q, 2030 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); 2031 return -EINVAL; 2032 } 2033 } else { 2034 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { 2035 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" 2036 " not supported."); 2037 return -EINVAL; 2038 } 2039 /* check configuration for vmdb+dcb mode */ 2040 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) { 2041 const struct rte_eth_vmdq_dcb_conf *conf; 2042 2043 if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2044 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.", 2045 IXGBE_VMDQ_DCB_NB_QUEUES); 2046 return -EINVAL; 2047 } 2048 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf; 2049 if (!(conf->nb_queue_pools == ETH_16_POOLS || 2050 conf->nb_queue_pools == ETH_32_POOLS)) { 2051 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2052 " nb_queue_pools must be %d or %d.", 2053 ETH_16_POOLS, ETH_32_POOLS); 2054 return -EINVAL; 2055 } 2056 } 2057 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { 2058 const struct rte_eth_vmdq_dcb_tx_conf *conf; 2059 2060 if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2061 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d", 2062 IXGBE_VMDQ_DCB_NB_QUEUES); 2063 return -EINVAL; 2064 } 2065 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2066 if (!(conf->nb_queue_pools == ETH_16_POOLS || 2067 conf->nb_queue_pools == ETH_32_POOLS)) { 2068 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2069 " nb_queue_pools != %d and" 2070 " nb_queue_pools != %d.", 2071 ETH_16_POOLS, ETH_32_POOLS); 2072 return -EINVAL; 2073 } 2074 } 2075 2076 /* For DCB mode check our configuration before we go further */ 2077 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { 2078 const struct rte_eth_dcb_rx_conf *conf; 2079 2080 if (nb_rx_q != IXGBE_DCB_NB_QUEUES) { 2081 PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.", 2082 IXGBE_DCB_NB_QUEUES); 2083 return -EINVAL; 2084 } 2085 conf = &dev_conf->rx_adv_conf.dcb_rx_conf; 2086 if (!(conf->nb_tcs == ETH_4_TCS || 2087 conf->nb_tcs == ETH_8_TCS)) { 2088 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2089 " and nb_tcs != %d.", 2090 ETH_4_TCS, ETH_8_TCS); 2091 return -EINVAL; 2092 } 2093 } 2094 2095 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { 2096 const struct rte_eth_dcb_tx_conf *conf; 2097 2098 if (nb_tx_q != IXGBE_DCB_NB_QUEUES) { 2099 PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.", 2100 IXGBE_DCB_NB_QUEUES); 2101 return -EINVAL; 2102 } 2103 conf = &dev_conf->tx_adv_conf.dcb_tx_conf; 2104 if (!(conf->nb_tcs == ETH_4_TCS || 2105 conf->nb_tcs == ETH_8_TCS)) { 2106 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2107 " and nb_tcs != %d.", 2108 ETH_4_TCS, ETH_8_TCS); 2109 return -EINVAL; 2110 } 2111 } 2112 2113 /* 2114 * When DCB/VT is off, maximum number of queues changes, 2115 * except for 82598EB, which remains constant. 2116 */ 2117 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && 2118 hw->mac.type != ixgbe_mac_82598EB) { 2119 if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) { 2120 PMD_INIT_LOG(ERR, 2121 "Neither VT nor DCB are enabled, " 2122 "nb_tx_q > %d.", 2123 IXGBE_NONE_MODE_TX_NB_QUEUES); 2124 return -EINVAL; 2125 } 2126 } 2127 } 2128 return 0; 2129 } 2130 2131 static int 2132 ixgbe_dev_configure(struct rte_eth_dev *dev) 2133 { 2134 struct ixgbe_interrupt *intr = 2135 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2136 struct ixgbe_adapter *adapter = 2137 (struct ixgbe_adapter *)dev->data->dev_private; 2138 int ret; 2139 2140 PMD_INIT_FUNC_TRACE(); 2141 /* multipe queue mode checking */ 2142 ret = ixgbe_check_mq_mode(dev); 2143 if (ret != 0) { 2144 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.", 2145 ret); 2146 return ret; 2147 } 2148 2149 /* set flag to update link status after init */ 2150 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2151 2152 /* 2153 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 2154 * allocation or vector Rx preconditions we will reset it. 2155 */ 2156 adapter->rx_bulk_alloc_allowed = true; 2157 adapter->rx_vec_allowed = true; 2158 2159 return 0; 2160 } 2161 2162 static void 2163 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) 2164 { 2165 struct ixgbe_hw *hw = 2166 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2167 struct ixgbe_interrupt *intr = 2168 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2169 uint32_t gpie; 2170 2171 /* only set up it on X550EM_X */ 2172 if (hw->mac.type == ixgbe_mac_X550EM_x) { 2173 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 2174 gpie |= IXGBE_SDP0_GPIEN_X550EM_x; 2175 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2176 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 2177 intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x; 2178 } 2179 } 2180 2181 /* 2182 * Configure device link speed and setup link. 2183 * It returns 0 on success. 2184 */ 2185 static int 2186 ixgbe_dev_start(struct rte_eth_dev *dev) 2187 { 2188 struct ixgbe_hw *hw = 2189 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2190 struct ixgbe_vf_info *vfinfo = 2191 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2192 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; 2193 uint32_t intr_vector = 0; 2194 int err, link_up = 0, negotiate = 0; 2195 uint32_t speed = 0; 2196 int mask = 0; 2197 int status; 2198 uint16_t vf, idx; 2199 uint32_t *link_speeds; 2200 2201 PMD_INIT_FUNC_TRACE(); 2202 2203 /* IXGBE devices don't support: 2204 * - half duplex (checked afterwards for valid speeds) 2205 * - fixed speed: TODO implement 2206 */ 2207 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { 2208 PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fix speed not supported", 2209 dev->data->port_id); 2210 return -EINVAL; 2211 } 2212 2213 /* disable uio/vfio intr/eventfd mapping */ 2214 rte_intr_disable(intr_handle); 2215 2216 /* stop adapter */ 2217 hw->adapter_stopped = 0; 2218 ixgbe_stop_adapter(hw); 2219 2220 /* reinitialize adapter 2221 * this calls reset and start 2222 */ 2223 status = ixgbe_pf_reset_hw(hw); 2224 if (status != 0) 2225 return -1; 2226 hw->mac.ops.start_hw(hw); 2227 hw->mac.get_link_status = true; 2228 2229 /* configure PF module if SRIOV enabled */ 2230 ixgbe_pf_host_configure(dev); 2231 2232 ixgbe_dev_phy_intr_setup(dev); 2233 2234 /* check and configure queue intr-vector mapping */ 2235 if ((rte_intr_cap_multiple(intr_handle) || 2236 !RTE_ETH_DEV_SRIOV(dev).active) && 2237 dev->data->dev_conf.intr_conf.rxq != 0) { 2238 intr_vector = dev->data->nb_rx_queues; 2239 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) { 2240 PMD_INIT_LOG(ERR, "At most %d intr queues supported", 2241 IXGBE_MAX_INTR_QUEUE_NUM); 2242 return -ENOTSUP; 2243 } 2244 if (rte_intr_efd_enable(intr_handle, intr_vector)) 2245 return -1; 2246 } 2247 2248 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 2249 intr_handle->intr_vec = 2250 rte_zmalloc("intr_vec", 2251 dev->data->nb_rx_queues * sizeof(int), 0); 2252 if (intr_handle->intr_vec == NULL) { 2253 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 2254 " intr_vec\n", dev->data->nb_rx_queues); 2255 return -ENOMEM; 2256 } 2257 } 2258 2259 /* confiugre msix for sleep until rx interrupt */ 2260 ixgbe_configure_msix(dev); 2261 2262 /* initialize transmission unit */ 2263 ixgbe_dev_tx_init(dev); 2264 2265 /* This can fail when allocating mbufs for descriptor rings */ 2266 err = ixgbe_dev_rx_init(dev); 2267 if (err) { 2268 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 2269 goto error; 2270 } 2271 2272 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | 2273 ETH_VLAN_EXTEND_MASK; 2274 ixgbe_vlan_offload_set(dev, mask); 2275 2276 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { 2277 /* Enable vlan filtering for VMDq */ 2278 ixgbe_vmdq_vlan_hw_filter_enable(dev); 2279 } 2280 2281 /* Configure DCB hw */ 2282 ixgbe_configure_dcb(dev); 2283 2284 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { 2285 err = ixgbe_fdir_configure(dev); 2286 if (err) 2287 goto error; 2288 } 2289 2290 /* Restore vf rate limit */ 2291 if (vfinfo != NULL) { 2292 for (vf = 0; vf < dev->pci_dev->max_vfs; vf++) 2293 for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) 2294 if (vfinfo[vf].tx_rate[idx] != 0) 2295 ixgbe_set_vf_rate_limit(dev, vf, 2296 vfinfo[vf].tx_rate[idx], 2297 1 << idx); 2298 } 2299 2300 ixgbe_restore_statistics_mapping(dev); 2301 2302 err = ixgbe_dev_rxtx_start(dev); 2303 if (err < 0) { 2304 PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); 2305 goto error; 2306 } 2307 2308 /* Skip link setup if loopback mode is enabled for 82599. */ 2309 if (hw->mac.type == ixgbe_mac_82599EB && 2310 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX) 2311 goto skip_link_setup; 2312 2313 if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { 2314 err = hw->mac.ops.setup_sfp(hw); 2315 if (err) 2316 goto error; 2317 } 2318 2319 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2320 /* Turn on the copper */ 2321 ixgbe_set_phy_power(hw, true); 2322 } else { 2323 /* Turn on the laser */ 2324 ixgbe_enable_tx_laser(hw); 2325 } 2326 2327 err = ixgbe_check_link(hw, &speed, &link_up, 0); 2328 if (err) 2329 goto error; 2330 dev->data->dev_link.link_status = link_up; 2331 2332 err = ixgbe_get_link_capabilities(hw, &speed, &negotiate); 2333 if (err) 2334 goto error; 2335 2336 link_speeds = &dev->data->dev_conf.link_speeds; 2337 if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | 2338 ETH_LINK_SPEED_10G)) { 2339 PMD_INIT_LOG(ERR, "Invalid link setting"); 2340 goto error; 2341 } 2342 2343 speed = 0x0; 2344 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) { 2345 speed = (hw->mac.type != ixgbe_mac_82598EB) ? 2346 IXGBE_LINK_SPEED_82599_AUTONEG : 2347 IXGBE_LINK_SPEED_82598_AUTONEG; 2348 } else { 2349 if (*link_speeds & ETH_LINK_SPEED_10G) 2350 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2351 if (*link_speeds & ETH_LINK_SPEED_1G) 2352 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2353 if (*link_speeds & ETH_LINK_SPEED_100M) 2354 speed |= IXGBE_LINK_SPEED_100_FULL; 2355 } 2356 2357 err = ixgbe_setup_link(hw, speed, link_up); 2358 if (err) 2359 goto error; 2360 2361 skip_link_setup: 2362 2363 if (rte_intr_allow_others(intr_handle)) { 2364 /* check if lsc interrupt is enabled */ 2365 if (dev->data->dev_conf.intr_conf.lsc != 0) 2366 ixgbe_dev_lsc_interrupt_setup(dev); 2367 } else { 2368 rte_intr_callback_unregister(intr_handle, 2369 ixgbe_dev_interrupt_handler, 2370 (void *)dev); 2371 if (dev->data->dev_conf.intr_conf.lsc != 0) 2372 PMD_INIT_LOG(INFO, "lsc won't enable because of" 2373 " no intr multiplex\n"); 2374 } 2375 2376 /* check if rxq interrupt is enabled */ 2377 if (dev->data->dev_conf.intr_conf.rxq != 0 && 2378 rte_intr_dp_is_en(intr_handle)) 2379 ixgbe_dev_rxq_interrupt_setup(dev); 2380 2381 /* enable uio/vfio intr/eventfd mapping */ 2382 rte_intr_enable(intr_handle); 2383 2384 /* resume enabled intr since hw reset */ 2385 ixgbe_enable_intr(dev); 2386 2387 return 0; 2388 2389 error: 2390 PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err); 2391 ixgbe_dev_clear_queues(dev); 2392 return -EIO; 2393 } 2394 2395 /* 2396 * Stop device: disable rx and tx functions to allow for reconfiguring. 2397 */ 2398 static void 2399 ixgbe_dev_stop(struct rte_eth_dev *dev) 2400 { 2401 struct rte_eth_link link; 2402 struct ixgbe_hw *hw = 2403 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2404 struct ixgbe_vf_info *vfinfo = 2405 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2406 struct ixgbe_filter_info *filter_info = 2407 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 2408 struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next; 2409 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; 2410 int vf; 2411 2412 PMD_INIT_FUNC_TRACE(); 2413 2414 /* disable interrupts */ 2415 ixgbe_disable_intr(hw); 2416 2417 /* reset the NIC */ 2418 ixgbe_pf_reset_hw(hw); 2419 hw->adapter_stopped = 0; 2420 2421 /* stop adapter */ 2422 ixgbe_stop_adapter(hw); 2423 2424 for (vf = 0; vfinfo != NULL && 2425 vf < dev->pci_dev->max_vfs; vf++) 2426 vfinfo[vf].clear_to_send = false; 2427 2428 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2429 /* Turn off the copper */ 2430 ixgbe_set_phy_power(hw, false); 2431 } else { 2432 /* Turn off the laser */ 2433 ixgbe_disable_tx_laser(hw); 2434 } 2435 2436 ixgbe_dev_clear_queues(dev); 2437 2438 /* Clear stored conf */ 2439 dev->data->scattered_rx = 0; 2440 dev->data->lro = 0; 2441 2442 /* Clear recorded link status */ 2443 memset(&link, 0, sizeof(link)); 2444 rte_ixgbe_dev_atomic_write_link_status(dev, &link); 2445 2446 /* Remove all ntuple filters of the device */ 2447 for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list); 2448 p_5tuple != NULL; p_5tuple = p_5tuple_next) { 2449 p_5tuple_next = TAILQ_NEXT(p_5tuple, entries); 2450 TAILQ_REMOVE(&filter_info->fivetuple_list, 2451 p_5tuple, entries); 2452 rte_free(p_5tuple); 2453 } 2454 memset(filter_info->fivetuple_mask, 0, 2455 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); 2456 2457 if (!rte_intr_allow_others(intr_handle)) 2458 /* resume to the default handler */ 2459 rte_intr_callback_register(intr_handle, 2460 ixgbe_dev_interrupt_handler, 2461 (void *)dev); 2462 2463 /* Clean datapath event and queue/vec mapping */ 2464 rte_intr_efd_disable(intr_handle); 2465 if (intr_handle->intr_vec != NULL) { 2466 rte_free(intr_handle->intr_vec); 2467 intr_handle->intr_vec = NULL; 2468 } 2469 } 2470 2471 /* 2472 * Set device link up: enable tx. 2473 */ 2474 static int 2475 ixgbe_dev_set_link_up(struct rte_eth_dev *dev) 2476 { 2477 struct ixgbe_hw *hw = 2478 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2479 if (hw->mac.type == ixgbe_mac_82599EB) { 2480 #ifdef RTE_NIC_BYPASS 2481 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2482 /* Not suported in bypass mode */ 2483 PMD_INIT_LOG(ERR, "Set link up is not supported " 2484 "by device id 0x%x", hw->device_id); 2485 return -ENOTSUP; 2486 } 2487 #endif 2488 } 2489 2490 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2491 /* Turn on the copper */ 2492 ixgbe_set_phy_power(hw, true); 2493 } else { 2494 /* Turn on the laser */ 2495 ixgbe_enable_tx_laser(hw); 2496 } 2497 2498 return 0; 2499 } 2500 2501 /* 2502 * Set device link down: disable tx. 2503 */ 2504 static int 2505 ixgbe_dev_set_link_down(struct rte_eth_dev *dev) 2506 { 2507 struct ixgbe_hw *hw = 2508 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2509 if (hw->mac.type == ixgbe_mac_82599EB) { 2510 #ifdef RTE_NIC_BYPASS 2511 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2512 /* Not suported in bypass mode */ 2513 PMD_INIT_LOG(ERR, "Set link down is not supported " 2514 "by device id 0x%x", hw->device_id); 2515 return -ENOTSUP; 2516 } 2517 #endif 2518 } 2519 2520 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2521 /* Turn off the copper */ 2522 ixgbe_set_phy_power(hw, false); 2523 } else { 2524 /* Turn off the laser */ 2525 ixgbe_disable_tx_laser(hw); 2526 } 2527 2528 return 0; 2529 } 2530 2531 /* 2532 * Reest and stop device. 2533 */ 2534 static void 2535 ixgbe_dev_close(struct rte_eth_dev *dev) 2536 { 2537 struct ixgbe_hw *hw = 2538 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2539 2540 PMD_INIT_FUNC_TRACE(); 2541 2542 ixgbe_pf_reset_hw(hw); 2543 2544 ixgbe_dev_stop(dev); 2545 hw->adapter_stopped = 1; 2546 2547 ixgbe_dev_free_queues(dev); 2548 2549 ixgbe_disable_pcie_master(hw); 2550 2551 /* reprogram the RAR[0] in case user changed it. */ 2552 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 2553 } 2554 2555 static void 2556 ixgbe_read_stats_registers(struct ixgbe_hw *hw, 2557 struct ixgbe_hw_stats *hw_stats, 2558 uint64_t *total_missed_rx, uint64_t *total_qbrc, 2559 uint64_t *total_qprc, uint64_t *total_qprdc) 2560 { 2561 uint32_t bprc, lxon, lxoff, total; 2562 uint32_t delta_gprc = 0; 2563 unsigned i; 2564 /* Workaround for RX byte count not including CRC bytes when CRC 2565 + * strip is enabled. CRC bytes are removed from counters when crc_strip 2566 * is disabled. 2567 + */ 2568 int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & 2569 IXGBE_HLREG0_RXCRCSTRP); 2570 2571 hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 2572 hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 2573 hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 2574 hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 2575 2576 for (i = 0; i < 8; i++) { 2577 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 2578 2579 /* global total per queue */ 2580 hw_stats->mpc[i] += mp; 2581 /* Running comprehensive total for stats display */ 2582 *total_missed_rx += hw_stats->mpc[i]; 2583 if (hw->mac.type == ixgbe_mac_82598EB) { 2584 hw_stats->rnbc[i] += 2585 IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 2586 hw_stats->pxonrxc[i] += 2587 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 2588 hw_stats->pxoffrxc[i] += 2589 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 2590 } else { 2591 hw_stats->pxonrxc[i] += 2592 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 2593 hw_stats->pxoffrxc[i] += 2594 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 2595 hw_stats->pxon2offc[i] += 2596 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 2597 } 2598 hw_stats->pxontxc[i] += 2599 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 2600 hw_stats->pxofftxc[i] += 2601 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 2602 } 2603 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 2604 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 2605 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 2606 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 2607 2608 delta_gprc += delta_qprc; 2609 2610 hw_stats->qprc[i] += delta_qprc; 2611 hw_stats->qptc[i] += delta_qptc; 2612 2613 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 2614 hw_stats->qbrc[i] += 2615 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); 2616 if (crc_strip == 0) 2617 hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN; 2618 2619 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 2620 hw_stats->qbtc[i] += 2621 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32); 2622 2623 hw_stats->qprdc[i] += delta_qprdc; 2624 *total_qprdc += hw_stats->qprdc[i]; 2625 2626 *total_qprc += hw_stats->qprc[i]; 2627 *total_qbrc += hw_stats->qbrc[i]; 2628 } 2629 hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); 2630 hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); 2631 hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 2632 2633 /* 2634 * An errata states that gprc actually counts good + missed packets: 2635 * Workaround to set gprc to summated queue packet receives 2636 */ 2637 hw_stats->gprc = *total_qprc; 2638 2639 if (hw->mac.type != ixgbe_mac_82598EB) { 2640 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 2641 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 2642 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 2643 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); 2644 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 2645 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 2646 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 2647 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 2648 } else { 2649 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 2650 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 2651 /* 82598 only has a counter in the high register */ 2652 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 2653 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 2654 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 2655 } 2656 uint64_t old_tpr = hw_stats->tpr; 2657 2658 hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 2659 hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 2660 2661 if (crc_strip == 0) 2662 hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN; 2663 2664 uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); 2665 hw_stats->gptc += delta_gptc; 2666 hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN; 2667 hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN; 2668 2669 /* 2670 * Workaround: mprc hardware is incorrectly counting 2671 * broadcasts, so for now we subtract those. 2672 */ 2673 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 2674 hw_stats->bprc += bprc; 2675 hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 2676 if (hw->mac.type == ixgbe_mac_82598EB) 2677 hw_stats->mprc -= bprc; 2678 2679 hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 2680 hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 2681 hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 2682 hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 2683 hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 2684 hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 2685 2686 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 2687 hw_stats->lxontxc += lxon; 2688 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 2689 hw_stats->lxofftxc += lxoff; 2690 total = lxon + lxoff; 2691 2692 hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 2693 hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 2694 hw_stats->gptc -= total; 2695 hw_stats->mptc -= total; 2696 hw_stats->ptc64 -= total; 2697 hw_stats->gotc -= total * ETHER_MIN_LEN; 2698 2699 hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 2700 hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 2701 hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 2702 hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 2703 hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 2704 hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 2705 hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 2706 hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 2707 hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 2708 hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 2709 hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 2710 hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 2711 hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 2712 hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); 2713 hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 2714 hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 2715 /* Only read FCOE on 82599 */ 2716 if (hw->mac.type != ixgbe_mac_82598EB) { 2717 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 2718 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 2719 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 2720 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 2721 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 2722 } 2723 2724 /* Flow Director Stats registers */ 2725 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 2726 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 2727 } 2728 2729 /* 2730 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c 2731 */ 2732 static void 2733 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 2734 { 2735 struct ixgbe_hw *hw = 2736 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2737 struct ixgbe_hw_stats *hw_stats = 2738 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2739 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 2740 unsigned i; 2741 2742 total_missed_rx = 0; 2743 total_qbrc = 0; 2744 total_qprc = 0; 2745 total_qprdc = 0; 2746 2747 ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc, 2748 &total_qprc, &total_qprdc); 2749 2750 if (stats == NULL) 2751 return; 2752 2753 /* Fill out the rte_eth_stats statistics structure */ 2754 stats->ipackets = total_qprc; 2755 stats->ibytes = total_qbrc; 2756 stats->opackets = hw_stats->gptc; 2757 stats->obytes = hw_stats->gotc; 2758 2759 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 2760 stats->q_ipackets[i] = hw_stats->qprc[i]; 2761 stats->q_opackets[i] = hw_stats->qptc[i]; 2762 stats->q_ibytes[i] = hw_stats->qbrc[i]; 2763 stats->q_obytes[i] = hw_stats->qbtc[i]; 2764 stats->q_errors[i] = hw_stats->qprdc[i]; 2765 } 2766 2767 /* Rx Errors */ 2768 stats->imissed = total_missed_rx; 2769 stats->ierrors = hw_stats->crcerrs + 2770 hw_stats->mspdc + 2771 hw_stats->rlec + 2772 hw_stats->ruc + 2773 hw_stats->roc + 2774 hw_stats->illerrc + 2775 hw_stats->errbc + 2776 hw_stats->rfc + 2777 hw_stats->fccrc + 2778 hw_stats->fclast; 2779 2780 /* Tx Errors */ 2781 stats->oerrors = 0; 2782 } 2783 2784 static void 2785 ixgbe_dev_stats_reset(struct rte_eth_dev *dev) 2786 { 2787 struct ixgbe_hw_stats *stats = 2788 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2789 2790 /* HW registers are cleared on read */ 2791 ixgbe_dev_stats_get(dev, NULL); 2792 2793 /* Reset software totals */ 2794 memset(stats, 0, sizeof(*stats)); 2795 } 2796 2797 /* This function calculates the number of xstats based on the current config */ 2798 static unsigned 2799 ixgbe_xstats_calc_num(void) { 2800 return IXGBE_NB_HW_STATS + 2801 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + 2802 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); 2803 } 2804 2805 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 2806 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit) 2807 { 2808 const unsigned cnt_stats = ixgbe_xstats_calc_num(); 2809 unsigned stat, i, count; 2810 2811 if (xstats_names != NULL) { 2812 count = 0; 2813 2814 /* Note: limit >= cnt_stats checked upstream 2815 * in rte_eth_xstats_names() 2816 */ 2817 2818 /* Extended stats from ixgbe_hw_stats */ 2819 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 2820 snprintf(xstats_names[count].name, 2821 sizeof(xstats_names[count].name), 2822 "%s", 2823 rte_ixgbe_stats_strings[i].name); 2824 count++; 2825 } 2826 2827 /* RX Priority Stats */ 2828 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 2829 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 2830 snprintf(xstats_names[count].name, 2831 sizeof(xstats_names[count].name), 2832 "rx_priority%u_%s", i, 2833 rte_ixgbe_rxq_strings[stat].name); 2834 count++; 2835 } 2836 } 2837 2838 /* TX Priority Stats */ 2839 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 2840 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 2841 snprintf(xstats_names[count].name, 2842 sizeof(xstats_names[count].name), 2843 "tx_priority%u_%s", i, 2844 rte_ixgbe_txq_strings[stat].name); 2845 count++; 2846 } 2847 } 2848 } 2849 return cnt_stats; 2850 } 2851 2852 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 2853 struct rte_eth_xstat_name *xstats_names, unsigned limit) 2854 { 2855 unsigned i; 2856 2857 if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) 2858 return -ENOMEM; 2859 2860 if (xstats_names != NULL) 2861 for (i = 0; i < IXGBEVF_NB_XSTATS; i++) 2862 snprintf(xstats_names[i].name, 2863 sizeof(xstats_names[i].name), 2864 "%s", rte_ixgbevf_stats_strings[i].name); 2865 return IXGBEVF_NB_XSTATS; 2866 } 2867 2868 static int 2869 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 2870 unsigned n) 2871 { 2872 struct ixgbe_hw *hw = 2873 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2874 struct ixgbe_hw_stats *hw_stats = 2875 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2876 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 2877 unsigned i, stat, count = 0; 2878 2879 count = ixgbe_xstats_calc_num(); 2880 2881 if (n < count) 2882 return count; 2883 2884 total_missed_rx = 0; 2885 total_qbrc = 0; 2886 total_qprc = 0; 2887 total_qprdc = 0; 2888 2889 ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc, 2890 &total_qprc, &total_qprdc); 2891 2892 /* If this is a reset xstats is NULL, and we have cleared the 2893 * registers by reading them. 2894 */ 2895 if (!xstats) 2896 return 0; 2897 2898 /* Extended stats from ixgbe_hw_stats */ 2899 count = 0; 2900 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 2901 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 2902 rte_ixgbe_stats_strings[i].offset); 2903 count++; 2904 } 2905 2906 /* RX Priority Stats */ 2907 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 2908 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 2909 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 2910 rte_ixgbe_rxq_strings[stat].offset + 2911 (sizeof(uint64_t) * i)); 2912 count++; 2913 } 2914 } 2915 2916 /* TX Priority Stats */ 2917 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 2918 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 2919 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 2920 rte_ixgbe_txq_strings[stat].offset + 2921 (sizeof(uint64_t) * i)); 2922 count++; 2923 } 2924 } 2925 return count; 2926 } 2927 2928 static void 2929 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) 2930 { 2931 struct ixgbe_hw_stats *stats = 2932 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2933 2934 unsigned count = ixgbe_xstats_calc_num(); 2935 2936 /* HW registers are cleared on read */ 2937 ixgbe_dev_xstats_get(dev, NULL, count); 2938 2939 /* Reset software totals */ 2940 memset(stats, 0, sizeof(*stats)); 2941 } 2942 2943 static void 2944 ixgbevf_update_stats(struct rte_eth_dev *dev) 2945 { 2946 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2947 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 2948 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2949 2950 /* Good Rx packet, include VF loopback */ 2951 UPDATE_VF_STAT(IXGBE_VFGPRC, 2952 hw_stats->last_vfgprc, hw_stats->vfgprc); 2953 2954 /* Good Rx octets, include VF loopback */ 2955 UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 2956 hw_stats->last_vfgorc, hw_stats->vfgorc); 2957 2958 /* Good Tx packet, include VF loopback */ 2959 UPDATE_VF_STAT(IXGBE_VFGPTC, 2960 hw_stats->last_vfgptc, hw_stats->vfgptc); 2961 2962 /* Good Tx octets, include VF loopback */ 2963 UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 2964 hw_stats->last_vfgotc, hw_stats->vfgotc); 2965 2966 /* Rx Multicst Packet */ 2967 UPDATE_VF_STAT(IXGBE_VFMPRC, 2968 hw_stats->last_vfmprc, hw_stats->vfmprc); 2969 } 2970 2971 static int 2972 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 2973 unsigned n) 2974 { 2975 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 2976 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2977 unsigned i; 2978 2979 if (n < IXGBEVF_NB_XSTATS) 2980 return IXGBEVF_NB_XSTATS; 2981 2982 ixgbevf_update_stats(dev); 2983 2984 if (!xstats) 2985 return 0; 2986 2987 /* Extended stats */ 2988 for (i = 0; i < IXGBEVF_NB_XSTATS; i++) { 2989 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 2990 rte_ixgbevf_stats_strings[i].offset); 2991 } 2992 2993 return IXGBEVF_NB_XSTATS; 2994 } 2995 2996 static void 2997 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 2998 { 2999 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3000 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3001 3002 ixgbevf_update_stats(dev); 3003 3004 if (stats == NULL) 3005 return; 3006 3007 stats->ipackets = hw_stats->vfgprc; 3008 stats->ibytes = hw_stats->vfgorc; 3009 stats->opackets = hw_stats->vfgptc; 3010 stats->obytes = hw_stats->vfgotc; 3011 } 3012 3013 static void 3014 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) 3015 { 3016 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 3017 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 3018 3019 /* Sync HW register to the last stats */ 3020 ixgbevf_dev_stats_get(dev, NULL); 3021 3022 /* reset HW current stats*/ 3023 hw_stats->vfgprc = 0; 3024 hw_stats->vfgorc = 0; 3025 hw_stats->vfgptc = 0; 3026 hw_stats->vfgotc = 0; 3027 } 3028 3029 static void 3030 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 3031 { 3032 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3033 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 3034 3035 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3036 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3037 if (RTE_ETH_DEV_SRIOV(dev).active == 0) { 3038 /* 3039 * When DCB/VT is off, maximum number of queues changes, 3040 * except for 82598EB, which remains constant. 3041 */ 3042 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && 3043 hw->mac.type != ixgbe_mac_82598EB) 3044 dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES; 3045 } 3046 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ 3047 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ 3048 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3049 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3050 dev_info->max_vfs = dev->pci_dev->max_vfs; 3051 if (hw->mac.type == ixgbe_mac_82598EB) 3052 dev_info->max_vmdq_pools = ETH_16_POOLS; 3053 else 3054 dev_info->max_vmdq_pools = ETH_64_POOLS; 3055 dev_info->vmdq_queue_num = dev_info->max_rx_queues; 3056 dev_info->rx_offload_capa = 3057 DEV_RX_OFFLOAD_VLAN_STRIP | 3058 DEV_RX_OFFLOAD_IPV4_CKSUM | 3059 DEV_RX_OFFLOAD_UDP_CKSUM | 3060 DEV_RX_OFFLOAD_TCP_CKSUM; 3061 3062 /* 3063 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV 3064 * mode. 3065 */ 3066 if ((hw->mac.type == ixgbe_mac_82599EB || 3067 hw->mac.type == ixgbe_mac_X540) && 3068 !RTE_ETH_DEV_SRIOV(dev).active) 3069 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO; 3070 3071 if (hw->mac.type == ixgbe_mac_X550 || 3072 hw->mac.type == ixgbe_mac_X550EM_x || 3073 hw->mac.type == ixgbe_mac_X550EM_a) 3074 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; 3075 3076 dev_info->tx_offload_capa = 3077 DEV_TX_OFFLOAD_VLAN_INSERT | 3078 DEV_TX_OFFLOAD_IPV4_CKSUM | 3079 DEV_TX_OFFLOAD_UDP_CKSUM | 3080 DEV_TX_OFFLOAD_TCP_CKSUM | 3081 DEV_TX_OFFLOAD_SCTP_CKSUM | 3082 DEV_TX_OFFLOAD_TCP_TSO; 3083 3084 if (hw->mac.type == ixgbe_mac_X550 || 3085 hw->mac.type == ixgbe_mac_X550EM_x || 3086 hw->mac.type == ixgbe_mac_X550EM_a) 3087 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; 3088 3089 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3090 .rx_thresh = { 3091 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3092 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3093 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3094 }, 3095 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3096 .rx_drop_en = 0, 3097 }; 3098 3099 dev_info->default_txconf = (struct rte_eth_txconf) { 3100 .tx_thresh = { 3101 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 3102 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 3103 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 3104 }, 3105 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 3106 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 3107 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | 3108 ETH_TXQ_FLAGS_NOOFFLOADS, 3109 }; 3110 3111 dev_info->rx_desc_lim = rx_desc_lim; 3112 dev_info->tx_desc_lim = tx_desc_lim; 3113 3114 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3115 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3116 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3117 3118 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; 3119 if (hw->mac.type == ixgbe_mac_X540 || 3120 hw->mac.type == ixgbe_mac_X540_vf || 3121 hw->mac.type == ixgbe_mac_X550 || 3122 hw->mac.type == ixgbe_mac_X550_vf) { 3123 dev_info->speed_capa |= ETH_LINK_SPEED_100M; 3124 } 3125 } 3126 3127 static const uint32_t * 3128 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) 3129 { 3130 static const uint32_t ptypes[] = { 3131 /* For non-vec functions, 3132 * refers to ixgbe_rxd_pkt_info_to_pkt_type(); 3133 * for vec functions, 3134 * refers to _recv_raw_pkts_vec(). 3135 */ 3136 RTE_PTYPE_L2_ETHER, 3137 RTE_PTYPE_L3_IPV4, 3138 RTE_PTYPE_L3_IPV4_EXT, 3139 RTE_PTYPE_L3_IPV6, 3140 RTE_PTYPE_L3_IPV6_EXT, 3141 RTE_PTYPE_L4_SCTP, 3142 RTE_PTYPE_L4_TCP, 3143 RTE_PTYPE_L4_UDP, 3144 RTE_PTYPE_TUNNEL_IP, 3145 RTE_PTYPE_INNER_L3_IPV6, 3146 RTE_PTYPE_INNER_L3_IPV6_EXT, 3147 RTE_PTYPE_INNER_L4_TCP, 3148 RTE_PTYPE_INNER_L4_UDP, 3149 RTE_PTYPE_UNKNOWN 3150 }; 3151 3152 if (dev->rx_pkt_burst == ixgbe_recv_pkts || 3153 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || 3154 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || 3155 dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) 3156 return ptypes; 3157 return NULL; 3158 } 3159 3160 static void 3161 ixgbevf_dev_info_get(struct rte_eth_dev *dev, 3162 struct rte_eth_dev_info *dev_info) 3163 { 3164 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3165 3166 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3167 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3168 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ 3169 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */ 3170 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3171 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3172 dev_info->max_vfs = dev->pci_dev->max_vfs; 3173 if (hw->mac.type == ixgbe_mac_82598EB) 3174 dev_info->max_vmdq_pools = ETH_16_POOLS; 3175 else 3176 dev_info->max_vmdq_pools = ETH_64_POOLS; 3177 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | 3178 DEV_RX_OFFLOAD_IPV4_CKSUM | 3179 DEV_RX_OFFLOAD_UDP_CKSUM | 3180 DEV_RX_OFFLOAD_TCP_CKSUM; 3181 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | 3182 DEV_TX_OFFLOAD_IPV4_CKSUM | 3183 DEV_TX_OFFLOAD_UDP_CKSUM | 3184 DEV_TX_OFFLOAD_TCP_CKSUM | 3185 DEV_TX_OFFLOAD_SCTP_CKSUM | 3186 DEV_TX_OFFLOAD_TCP_TSO; 3187 3188 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3189 .rx_thresh = { 3190 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3191 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3192 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3193 }, 3194 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3195 .rx_drop_en = 0, 3196 }; 3197 3198 dev_info->default_txconf = (struct rte_eth_txconf) { 3199 .tx_thresh = { 3200 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 3201 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 3202 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 3203 }, 3204 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 3205 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 3206 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | 3207 ETH_TXQ_FLAGS_NOOFFLOADS, 3208 }; 3209 3210 dev_info->rx_desc_lim = rx_desc_lim; 3211 dev_info->tx_desc_lim = tx_desc_lim; 3212 } 3213 3214 /* return 0 means link status changed, -1 means not changed */ 3215 static int 3216 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 3217 { 3218 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3219 struct rte_eth_link link, old; 3220 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 3221 int link_up; 3222 int diag; 3223 3224 link.link_status = ETH_LINK_DOWN; 3225 link.link_speed = 0; 3226 link.link_duplex = ETH_LINK_HALF_DUPLEX; 3227 memset(&old, 0, sizeof(old)); 3228 rte_ixgbe_dev_atomic_read_link_status(dev, &old); 3229 3230 hw->mac.get_link_status = true; 3231 3232 /* check if it needs to wait to complete, if lsc interrupt is enabled */ 3233 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) 3234 diag = ixgbe_check_link(hw, &link_speed, &link_up, 0); 3235 else 3236 diag = ixgbe_check_link(hw, &link_speed, &link_up, 1); 3237 3238 if (diag != 0) { 3239 link.link_speed = ETH_SPEED_NUM_100M; 3240 link.link_duplex = ETH_LINK_FULL_DUPLEX; 3241 rte_ixgbe_dev_atomic_write_link_status(dev, &link); 3242 if (link.link_status == old.link_status) 3243 return -1; 3244 return 0; 3245 } 3246 3247 if (link_up == 0) { 3248 rte_ixgbe_dev_atomic_write_link_status(dev, &link); 3249 if (link.link_status == old.link_status) 3250 return -1; 3251 return 0; 3252 } 3253 link.link_status = ETH_LINK_UP; 3254 link.link_duplex = ETH_LINK_FULL_DUPLEX; 3255 3256 switch (link_speed) { 3257 default: 3258 case IXGBE_LINK_SPEED_UNKNOWN: 3259 link.link_duplex = ETH_LINK_FULL_DUPLEX; 3260 link.link_speed = ETH_SPEED_NUM_100M; 3261 break; 3262 3263 case IXGBE_LINK_SPEED_100_FULL: 3264 link.link_speed = ETH_SPEED_NUM_100M; 3265 break; 3266 3267 case IXGBE_LINK_SPEED_1GB_FULL: 3268 link.link_speed = ETH_SPEED_NUM_1G; 3269 break; 3270 3271 case IXGBE_LINK_SPEED_10GB_FULL: 3272 link.link_speed = ETH_SPEED_NUM_10G; 3273 break; 3274 } 3275 rte_ixgbe_dev_atomic_write_link_status(dev, &link); 3276 3277 if (link.link_status == old.link_status) 3278 return -1; 3279 3280 return 0; 3281 } 3282 3283 static void 3284 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 3285 { 3286 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3287 uint32_t fctrl; 3288 3289 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3290 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3291 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3292 } 3293 3294 static void 3295 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 3296 { 3297 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3298 uint32_t fctrl; 3299 3300 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3301 fctrl &= (~IXGBE_FCTRL_UPE); 3302 if (dev->data->all_multicast == 1) 3303 fctrl |= IXGBE_FCTRL_MPE; 3304 else 3305 fctrl &= (~IXGBE_FCTRL_MPE); 3306 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3307 } 3308 3309 static void 3310 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) 3311 { 3312 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3313 uint32_t fctrl; 3314 3315 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3316 fctrl |= IXGBE_FCTRL_MPE; 3317 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3318 } 3319 3320 static void 3321 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) 3322 { 3323 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3324 uint32_t fctrl; 3325 3326 if (dev->data->promiscuous == 1) 3327 return; /* must remain in all_multicast mode */ 3328 3329 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3330 fctrl &= (~IXGBE_FCTRL_MPE); 3331 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3332 } 3333 3334 /** 3335 * It clears the interrupt causes and enables the interrupt. 3336 * It will be called once only during nic initialized. 3337 * 3338 * @param dev 3339 * Pointer to struct rte_eth_dev. 3340 * 3341 * @return 3342 * - On success, zero. 3343 * - On failure, a negative value. 3344 */ 3345 static int 3346 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev) 3347 { 3348 struct ixgbe_interrupt *intr = 3349 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 3350 3351 ixgbe_dev_link_status_print(dev); 3352 intr->mask |= IXGBE_EICR_LSC; 3353 3354 return 0; 3355 } 3356 3357 /** 3358 * It clears the interrupt causes and enables the interrupt. 3359 * It will be called once only during nic initialized. 3360 * 3361 * @param dev 3362 * Pointer to struct rte_eth_dev. 3363 * 3364 * @return 3365 * - On success, zero. 3366 * - On failure, a negative value. 3367 */ 3368 static int 3369 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) 3370 { 3371 struct ixgbe_interrupt *intr = 3372 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 3373 3374 intr->mask |= IXGBE_EICR_RTX_QUEUE; 3375 3376 return 0; 3377 } 3378 3379 /* 3380 * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update. 3381 * 3382 * @param dev 3383 * Pointer to struct rte_eth_dev. 3384 * 3385 * @return 3386 * - On success, zero. 3387 * - On failure, a negative value. 3388 */ 3389 static int 3390 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) 3391 { 3392 uint32_t eicr; 3393 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3394 struct ixgbe_interrupt *intr = 3395 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 3396 3397 /* clear all cause mask */ 3398 ixgbe_disable_intr(hw); 3399 3400 /* read-on-clear nic registers here */ 3401 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 3402 PMD_DRV_LOG(DEBUG, "eicr %x", eicr); 3403 3404 intr->flags = 0; 3405 3406 /* set flag for async link update */ 3407 if (eicr & IXGBE_EICR_LSC) 3408 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 3409 3410 if (eicr & IXGBE_EICR_MAILBOX) 3411 intr->flags |= IXGBE_FLAG_MAILBOX; 3412 3413 if (hw->mac.type == ixgbe_mac_X550EM_x && 3414 hw->phy.type == ixgbe_phy_x550em_ext_t && 3415 (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x)) 3416 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT; 3417 3418 return 0; 3419 } 3420 3421 /** 3422 * It gets and then prints the link status. 3423 * 3424 * @param dev 3425 * Pointer to struct rte_eth_dev. 3426 * 3427 * @return 3428 * - On success, zero. 3429 * - On failure, a negative value. 3430 */ 3431 static void 3432 ixgbe_dev_link_status_print(struct rte_eth_dev *dev) 3433 { 3434 struct rte_eth_link link; 3435 3436 memset(&link, 0, sizeof(link)); 3437 rte_ixgbe_dev_atomic_read_link_status(dev, &link); 3438 if (link.link_status) { 3439 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", 3440 (int)(dev->data->port_id), 3441 (unsigned)link.link_speed, 3442 link.link_duplex == ETH_LINK_FULL_DUPLEX ? 3443 "full-duplex" : "half-duplex"); 3444 } else { 3445 PMD_INIT_LOG(INFO, " Port %d: Link Down", 3446 (int)(dev->data->port_id)); 3447 } 3448 PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, 3449 dev->pci_dev->addr.domain, 3450 dev->pci_dev->addr.bus, 3451 dev->pci_dev->addr.devid, 3452 dev->pci_dev->addr.function); 3453 } 3454 3455 /* 3456 * It executes link_update after knowing an interrupt occurred. 3457 * 3458 * @param dev 3459 * Pointer to struct rte_eth_dev. 3460 * 3461 * @return 3462 * - On success, zero. 3463 * - On failure, a negative value. 3464 */ 3465 static int 3466 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) 3467 { 3468 struct ixgbe_interrupt *intr = 3469 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 3470 int64_t timeout; 3471 struct rte_eth_link link; 3472 int intr_enable_delay = false; 3473 struct ixgbe_hw *hw = 3474 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3475 3476 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); 3477 3478 if (intr->flags & IXGBE_FLAG_MAILBOX) { 3479 ixgbe_pf_mbx_process(dev); 3480 intr->flags &= ~IXGBE_FLAG_MAILBOX; 3481 } 3482 3483 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 3484 ixgbe_handle_lasi(hw); 3485 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 3486 } 3487 3488 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 3489 /* get the link status before link update, for predicting later */ 3490 memset(&link, 0, sizeof(link)); 3491 rte_ixgbe_dev_atomic_read_link_status(dev, &link); 3492 3493 ixgbe_dev_link_update(dev, 0); 3494 3495 /* likely to up */ 3496 if (!link.link_status) 3497 /* handle it 1 sec later, wait it being stable */ 3498 timeout = IXGBE_LINK_UP_CHECK_TIMEOUT; 3499 /* likely to down */ 3500 else 3501 /* handle it 4 sec later, wait it being stable */ 3502 timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; 3503 3504 ixgbe_dev_link_status_print(dev); 3505 3506 intr_enable_delay = true; 3507 } 3508 3509 if (intr_enable_delay) { 3510 if (rte_eal_alarm_set(timeout * 1000, 3511 ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) 3512 PMD_DRV_LOG(ERR, "Error setting alarm"); 3513 } else { 3514 PMD_DRV_LOG(DEBUG, "enable intr immediately"); 3515 ixgbe_enable_intr(dev); 3516 rte_intr_enable(&(dev->pci_dev->intr_handle)); 3517 } 3518 3519 3520 return 0; 3521 } 3522 3523 /** 3524 * Interrupt handler which shall be registered for alarm callback for delayed 3525 * handling specific interrupt to wait for the stable nic state. As the 3526 * NIC interrupt state is not stable for ixgbe after link is just down, 3527 * it needs to wait 4 seconds to get the stable status. 3528 * 3529 * @param handle 3530 * Pointer to interrupt handle. 3531 * @param param 3532 * The address of parameter (struct rte_eth_dev *) regsitered before. 3533 * 3534 * @return 3535 * void 3536 */ 3537 static void 3538 ixgbe_dev_interrupt_delayed_handler(void *param) 3539 { 3540 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 3541 struct ixgbe_interrupt *intr = 3542 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 3543 struct ixgbe_hw *hw = 3544 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3545 uint32_t eicr; 3546 3547 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 3548 if (eicr & IXGBE_EICR_MAILBOX) 3549 ixgbe_pf_mbx_process(dev); 3550 3551 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 3552 ixgbe_handle_lasi(hw); 3553 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 3554 } 3555 3556 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 3557 ixgbe_dev_link_update(dev, 0); 3558 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 3559 ixgbe_dev_link_status_print(dev); 3560 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); 3561 } 3562 3563 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); 3564 ixgbe_enable_intr(dev); 3565 rte_intr_enable(&(dev->pci_dev->intr_handle)); 3566 } 3567 3568 /** 3569 * Interrupt handler triggered by NIC for handling 3570 * specific interrupt. 3571 * 3572 * @param handle 3573 * Pointer to interrupt handle. 3574 * @param param 3575 * The address of parameter (struct rte_eth_dev *) regsitered before. 3576 * 3577 * @return 3578 * void 3579 */ 3580 static void 3581 ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, 3582 void *param) 3583 { 3584 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 3585 3586 ixgbe_dev_interrupt_get_status(dev); 3587 ixgbe_dev_interrupt_action(dev); 3588 } 3589 3590 static int 3591 ixgbe_dev_led_on(struct rte_eth_dev *dev) 3592 { 3593 struct ixgbe_hw *hw; 3594 3595 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3596 return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 3597 } 3598 3599 static int 3600 ixgbe_dev_led_off(struct rte_eth_dev *dev) 3601 { 3602 struct ixgbe_hw *hw; 3603 3604 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3605 return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 3606 } 3607 3608 static int 3609 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 3610 { 3611 struct ixgbe_hw *hw; 3612 uint32_t mflcn_reg; 3613 uint32_t fccfg_reg; 3614 int rx_pause; 3615 int tx_pause; 3616 3617 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3618 3619 fc_conf->pause_time = hw->fc.pause_time; 3620 fc_conf->high_water = hw->fc.high_water[0]; 3621 fc_conf->low_water = hw->fc.low_water[0]; 3622 fc_conf->send_xon = hw->fc.send_xon; 3623 fc_conf->autoneg = !hw->fc.disable_fc_autoneg; 3624 3625 /* 3626 * Return rx_pause status according to actual setting of 3627 * MFLCN register. 3628 */ 3629 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 3630 if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE)) 3631 rx_pause = 1; 3632 else 3633 rx_pause = 0; 3634 3635 /* 3636 * Return tx_pause status according to actual setting of 3637 * FCCFG register. 3638 */ 3639 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 3640 if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY)) 3641 tx_pause = 1; 3642 else 3643 tx_pause = 0; 3644 3645 if (rx_pause && tx_pause) 3646 fc_conf->mode = RTE_FC_FULL; 3647 else if (rx_pause) 3648 fc_conf->mode = RTE_FC_RX_PAUSE; 3649 else if (tx_pause) 3650 fc_conf->mode = RTE_FC_TX_PAUSE; 3651 else 3652 fc_conf->mode = RTE_FC_NONE; 3653 3654 return 0; 3655 } 3656 3657 static int 3658 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 3659 { 3660 struct ixgbe_hw *hw; 3661 int err; 3662 uint32_t rx_buf_size; 3663 uint32_t max_high_water; 3664 uint32_t mflcn; 3665 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 3666 ixgbe_fc_none, 3667 ixgbe_fc_rx_pause, 3668 ixgbe_fc_tx_pause, 3669 ixgbe_fc_full 3670 }; 3671 3672 PMD_INIT_FUNC_TRACE(); 3673 3674 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3675 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); 3676 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 3677 3678 /* 3679 * At least reserve one Ethernet frame for watermark 3680 * high_water/low_water in kilo bytes for ixgbe 3681 */ 3682 max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 3683 if ((fc_conf->high_water > max_high_water) || 3684 (fc_conf->high_water < fc_conf->low_water)) { 3685 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 3686 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 3687 return -EINVAL; 3688 } 3689 3690 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode]; 3691 hw->fc.pause_time = fc_conf->pause_time; 3692 hw->fc.high_water[0] = fc_conf->high_water; 3693 hw->fc.low_water[0] = fc_conf->low_water; 3694 hw->fc.send_xon = fc_conf->send_xon; 3695 hw->fc.disable_fc_autoneg = !fc_conf->autoneg; 3696 3697 err = ixgbe_fc_enable(hw); 3698 3699 /* Not negotiated is not an error case */ 3700 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) { 3701 3702 /* check if we want to forward MAC frames - driver doesn't have native 3703 * capability to do that, so we'll write the registers ourselves */ 3704 3705 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 3706 3707 /* set or clear MFLCN.PMCF bit depending on configuration */ 3708 if (fc_conf->mac_ctrl_frame_fwd != 0) 3709 mflcn |= IXGBE_MFLCN_PMCF; 3710 else 3711 mflcn &= ~IXGBE_MFLCN_PMCF; 3712 3713 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn); 3714 IXGBE_WRITE_FLUSH(hw); 3715 3716 return 0; 3717 } 3718 3719 PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err); 3720 return -EIO; 3721 } 3722 3723 /** 3724 * ixgbe_pfc_enable_generic - Enable flow control 3725 * @hw: pointer to hardware structure 3726 * @tc_num: traffic class number 3727 * Enable flow control according to the current settings. 3728 */ 3729 static int 3730 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num) 3731 { 3732 int ret_val = 0; 3733 uint32_t mflcn_reg, fccfg_reg; 3734 uint32_t reg; 3735 uint32_t fcrtl, fcrth; 3736 uint8_t i; 3737 uint8_t nb_rx_en; 3738 3739 /* Validate the water mark configuration */ 3740 if (!hw->fc.pause_time) { 3741 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 3742 goto out; 3743 } 3744 3745 /* Low water mark of zero causes XOFF floods */ 3746 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 3747 /* High/Low water can not be 0 */ 3748 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) { 3749 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 3750 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 3751 goto out; 3752 } 3753 3754 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { 3755 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 3756 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 3757 goto out; 3758 } 3759 } 3760 /* Negotiate the fc mode to use */ 3761 ixgbe_fc_autoneg(hw); 3762 3763 /* Disable any previous flow control settings */ 3764 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 3765 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE); 3766 3767 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 3768 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 3769 3770 switch (hw->fc.current_mode) { 3771 case ixgbe_fc_none: 3772 /* 3773 * If the count of enabled RX Priority Flow control >1, 3774 * and the TX pause can not be disabled 3775 */ 3776 nb_rx_en = 0; 3777 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 3778 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 3779 if (reg & IXGBE_FCRTH_FCEN) 3780 nb_rx_en++; 3781 } 3782 if (nb_rx_en > 1) 3783 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 3784 break; 3785 case ixgbe_fc_rx_pause: 3786 /* 3787 * Rx Flow control is enabled and Tx Flow control is 3788 * disabled by software override. Since there really 3789 * isn't a way to advertise that we are capable of RX 3790 * Pause ONLY, we will advertise that we support both 3791 * symmetric and asymmetric Rx PAUSE. Later, we will 3792 * disable the adapter's ability to send PAUSE frames. 3793 */ 3794 mflcn_reg |= IXGBE_MFLCN_RPFCE; 3795 /* 3796 * If the count of enabled RX Priority Flow control >1, 3797 * and the TX pause can not be disabled 3798 */ 3799 nb_rx_en = 0; 3800 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 3801 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 3802 if (reg & IXGBE_FCRTH_FCEN) 3803 nb_rx_en++; 3804 } 3805 if (nb_rx_en > 1) 3806 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 3807 break; 3808 case ixgbe_fc_tx_pause: 3809 /* 3810 * Tx Flow control is enabled, and Rx Flow control is 3811 * disabled by software override. 3812 */ 3813 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 3814 break; 3815 case ixgbe_fc_full: 3816 /* Flow control (both Rx and Tx) is enabled by SW override. */ 3817 mflcn_reg |= IXGBE_MFLCN_RPFCE; 3818 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 3819 break; 3820 default: 3821 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); 3822 ret_val = IXGBE_ERR_CONFIG; 3823 goto out; 3824 } 3825 3826 /* Set 802.3x based flow control settings. */ 3827 mflcn_reg |= IXGBE_MFLCN_DPF; 3828 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 3829 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 3830 3831 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 3832 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 3833 hw->fc.high_water[tc_num]) { 3834 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE; 3835 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl); 3836 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN; 3837 } else { 3838 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0); 3839 /* 3840 * In order to prevent Tx hangs when the internal Tx 3841 * switch is enabled we must set the high water mark 3842 * to the maximum FCRTH value. This allows the Tx 3843 * switch to function even under heavy Rx workloads. 3844 */ 3845 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32; 3846 } 3847 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth); 3848 3849 /* Configure pause time (2 TCs per register) */ 3850 reg = hw->fc.pause_time * 0x00010001; 3851 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) 3852 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 3853 3854 /* Configure flow control refresh threshold value */ 3855 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 3856 3857 out: 3858 return ret_val; 3859 } 3860 3861 static int 3862 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) 3863 { 3864 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3865 int32_t ret_val = IXGBE_NOT_IMPLEMENTED; 3866 3867 if (hw->mac.type != ixgbe_mac_82598EB) { 3868 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); 3869 } 3870 return ret_val; 3871 } 3872 3873 static int 3874 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) 3875 { 3876 int err; 3877 uint32_t rx_buf_size; 3878 uint32_t max_high_water; 3879 uint8_t tc_num; 3880 uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; 3881 struct ixgbe_hw *hw = 3882 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3883 struct ixgbe_dcb_config *dcb_config = 3884 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 3885 3886 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 3887 ixgbe_fc_none, 3888 ixgbe_fc_rx_pause, 3889 ixgbe_fc_tx_pause, 3890 ixgbe_fc_full 3891 }; 3892 3893 PMD_INIT_FUNC_TRACE(); 3894 3895 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); 3896 tc_num = map[pfc_conf->priority]; 3897 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)); 3898 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 3899 /* 3900 * At least reserve one Ethernet frame for watermark 3901 * high_water/low_water in kilo bytes for ixgbe 3902 */ 3903 max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 3904 if ((pfc_conf->fc.high_water > max_high_water) || 3905 (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { 3906 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 3907 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 3908 return -EINVAL; 3909 } 3910 3911 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode]; 3912 hw->fc.pause_time = pfc_conf->fc.pause_time; 3913 hw->fc.send_xon = pfc_conf->fc.send_xon; 3914 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; 3915 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; 3916 3917 err = ixgbe_dcb_pfc_enable(dev, tc_num); 3918 3919 /* Not negotiated is not an error case */ 3920 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) 3921 return 0; 3922 3923 PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err); 3924 return -EIO; 3925 } 3926 3927 static int 3928 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 3929 struct rte_eth_rss_reta_entry64 *reta_conf, 3930 uint16_t reta_size) 3931 { 3932 uint16_t i, sp_reta_size; 3933 uint8_t j, mask; 3934 uint32_t reta, r; 3935 uint16_t idx, shift; 3936 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3937 uint32_t reta_reg; 3938 3939 PMD_INIT_FUNC_TRACE(); 3940 3941 if (!ixgbe_rss_update_sp(hw->mac.type)) { 3942 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this " 3943 "NIC."); 3944 return -ENOTSUP; 3945 } 3946 3947 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 3948 if (reta_size != sp_reta_size) { 3949 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 3950 "(%d) doesn't match the number hardware can supported " 3951 "(%d)\n", reta_size, sp_reta_size); 3952 return -EINVAL; 3953 } 3954 3955 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 3956 idx = i / RTE_RETA_GROUP_SIZE; 3957 shift = i % RTE_RETA_GROUP_SIZE; 3958 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 3959 IXGBE_4_BIT_MASK); 3960 if (!mask) 3961 continue; 3962 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 3963 if (mask == IXGBE_4_BIT_MASK) 3964 r = 0; 3965 else 3966 r = IXGBE_READ_REG(hw, reta_reg); 3967 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) { 3968 if (mask & (0x1 << j)) 3969 reta |= reta_conf[idx].reta[shift + j] << 3970 (CHAR_BIT * j); 3971 else 3972 reta |= r & (IXGBE_8_BIT_MASK << 3973 (CHAR_BIT * j)); 3974 } 3975 IXGBE_WRITE_REG(hw, reta_reg, reta); 3976 } 3977 3978 return 0; 3979 } 3980 3981 static int 3982 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 3983 struct rte_eth_rss_reta_entry64 *reta_conf, 3984 uint16_t reta_size) 3985 { 3986 uint16_t i, sp_reta_size; 3987 uint8_t j, mask; 3988 uint32_t reta; 3989 uint16_t idx, shift; 3990 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3991 uint32_t reta_reg; 3992 3993 PMD_INIT_FUNC_TRACE(); 3994 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 3995 if (reta_size != sp_reta_size) { 3996 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 3997 "(%d) doesn't match the number hardware can supported " 3998 "(%d)\n", reta_size, sp_reta_size); 3999 return -EINVAL; 4000 } 4001 4002 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 4003 idx = i / RTE_RETA_GROUP_SIZE; 4004 shift = i % RTE_RETA_GROUP_SIZE; 4005 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 4006 IXGBE_4_BIT_MASK); 4007 if (!mask) 4008 continue; 4009 4010 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 4011 reta = IXGBE_READ_REG(hw, reta_reg); 4012 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { 4013 if (mask & (0x1 << j)) 4014 reta_conf[idx].reta[shift + j] = 4015 ((reta >> (CHAR_BIT * j)) & 4016 IXGBE_8_BIT_MASK); 4017 } 4018 } 4019 4020 return 0; 4021 } 4022 4023 static void 4024 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 4025 uint32_t index, uint32_t pool) 4026 { 4027 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4028 uint32_t enable_addr = 1; 4029 4030 ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr); 4031 } 4032 4033 static void 4034 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) 4035 { 4036 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4037 4038 ixgbe_clear_rar(hw, index); 4039 } 4040 4041 static void 4042 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) 4043 { 4044 ixgbe_remove_rar(dev, 0); 4045 4046 ixgbe_add_rar(dev, addr, 0, 0); 4047 } 4048 4049 static int 4050 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 4051 { 4052 uint32_t hlreg0; 4053 uint32_t maxfrs; 4054 struct ixgbe_hw *hw; 4055 struct rte_eth_dev_info dev_info; 4056 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 4057 4058 ixgbe_dev_info_get(dev, &dev_info); 4059 4060 /* check that mtu is within the allowed range */ 4061 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) 4062 return -EINVAL; 4063 4064 /* refuse mtu that requires the support of scattered packets when this 4065 * feature has not been enabled before. 4066 */ 4067 if (!dev->data->scattered_rx && 4068 (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > 4069 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) 4070 return -EINVAL; 4071 4072 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4073 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 4074 4075 /* switch to jumbo mode if needed */ 4076 if (frame_size > ETHER_MAX_LEN) { 4077 dev->data->dev_conf.rxmode.jumbo_frame = 1; 4078 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 4079 } else { 4080 dev->data->dev_conf.rxmode.jumbo_frame = 0; 4081 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; 4082 } 4083 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 4084 4085 /* update max frame size */ 4086 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 4087 4088 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); 4089 maxfrs &= 0x0000FFFF; 4090 maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16); 4091 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); 4092 4093 return 0; 4094 } 4095 4096 /* 4097 * Virtual Function operations 4098 */ 4099 static void 4100 ixgbevf_intr_disable(struct ixgbe_hw *hw) 4101 { 4102 PMD_INIT_FUNC_TRACE(); 4103 4104 /* Clear interrupt mask to stop from interrupts being generated */ 4105 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); 4106 4107 IXGBE_WRITE_FLUSH(hw); 4108 } 4109 4110 static void 4111 ixgbevf_intr_enable(struct ixgbe_hw *hw) 4112 { 4113 PMD_INIT_FUNC_TRACE(); 4114 4115 /* VF enable interrupt autoclean */ 4116 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK); 4117 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK); 4118 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); 4119 4120 IXGBE_WRITE_FLUSH(hw); 4121 } 4122 4123 static int 4124 ixgbevf_dev_configure(struct rte_eth_dev *dev) 4125 { 4126 struct rte_eth_conf *conf = &dev->data->dev_conf; 4127 struct ixgbe_adapter *adapter = 4128 (struct ixgbe_adapter *)dev->data->dev_private; 4129 4130 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 4131 dev->data->port_id); 4132 4133 /* 4134 * VF has no ability to enable/disable HW CRC 4135 * Keep the persistent behavior the same as Host PF 4136 */ 4137 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC 4138 if (!conf->rxmode.hw_strip_crc) { 4139 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 4140 conf->rxmode.hw_strip_crc = 1; 4141 } 4142 #else 4143 if (conf->rxmode.hw_strip_crc) { 4144 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 4145 conf->rxmode.hw_strip_crc = 0; 4146 } 4147 #endif 4148 4149 /* 4150 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 4151 * allocation or vector Rx preconditions we will reset it. 4152 */ 4153 adapter->rx_bulk_alloc_allowed = true; 4154 adapter->rx_vec_allowed = true; 4155 4156 return 0; 4157 } 4158 4159 static int 4160 ixgbevf_dev_start(struct rte_eth_dev *dev) 4161 { 4162 struct ixgbe_hw *hw = 4163 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4164 uint32_t intr_vector = 0; 4165 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; 4166 4167 int err, mask = 0; 4168 4169 PMD_INIT_FUNC_TRACE(); 4170 4171 hw->mac.ops.reset_hw(hw); 4172 hw->mac.get_link_status = true; 4173 4174 /* negotiate mailbox API version to use with the PF. */ 4175 ixgbevf_negotiate_api(hw); 4176 4177 ixgbevf_dev_tx_init(dev); 4178 4179 /* This can fail when allocating mbufs for descriptor rings */ 4180 err = ixgbevf_dev_rx_init(dev); 4181 if (err) { 4182 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err); 4183 ixgbe_dev_clear_queues(dev); 4184 return err; 4185 } 4186 4187 /* Set vfta */ 4188 ixgbevf_set_vfta_all(dev, 1); 4189 4190 /* Set HW strip */ 4191 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | 4192 ETH_VLAN_EXTEND_MASK; 4193 ixgbevf_vlan_offload_set(dev, mask); 4194 4195 ixgbevf_dev_rxtx_start(dev); 4196 4197 /* check and configure queue intr-vector mapping */ 4198 if (dev->data->dev_conf.intr_conf.rxq != 0) { 4199 intr_vector = dev->data->nb_rx_queues; 4200 if (rte_intr_efd_enable(intr_handle, intr_vector)) 4201 return -1; 4202 } 4203 4204 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 4205 intr_handle->intr_vec = 4206 rte_zmalloc("intr_vec", 4207 dev->data->nb_rx_queues * sizeof(int), 0); 4208 if (intr_handle->intr_vec == NULL) { 4209 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 4210 " intr_vec\n", dev->data->nb_rx_queues); 4211 return -ENOMEM; 4212 } 4213 } 4214 ixgbevf_configure_msix(dev); 4215 4216 rte_intr_enable(intr_handle); 4217 4218 /* Re-enable interrupt for VF */ 4219 ixgbevf_intr_enable(hw); 4220 4221 return 0; 4222 } 4223 4224 static void 4225 ixgbevf_dev_stop(struct rte_eth_dev *dev) 4226 { 4227 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4228 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; 4229 4230 PMD_INIT_FUNC_TRACE(); 4231 4232 ixgbevf_intr_disable(hw); 4233 4234 hw->adapter_stopped = 1; 4235 ixgbe_stop_adapter(hw); 4236 4237 /* 4238 * Clear what we set, but we still keep shadow_vfta to 4239 * restore after device starts 4240 */ 4241 ixgbevf_set_vfta_all(dev, 0); 4242 4243 /* Clear stored conf */ 4244 dev->data->scattered_rx = 0; 4245 4246 ixgbe_dev_clear_queues(dev); 4247 4248 /* Clean datapath event and queue/vec mapping */ 4249 rte_intr_efd_disable(intr_handle); 4250 if (intr_handle->intr_vec != NULL) { 4251 rte_free(intr_handle->intr_vec); 4252 intr_handle->intr_vec = NULL; 4253 } 4254 } 4255 4256 static void 4257 ixgbevf_dev_close(struct rte_eth_dev *dev) 4258 { 4259 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4260 4261 PMD_INIT_FUNC_TRACE(); 4262 4263 ixgbe_reset_hw(hw); 4264 4265 ixgbevf_dev_stop(dev); 4266 4267 ixgbe_dev_free_queues(dev); 4268 4269 /** 4270 * Remove the VF MAC address ro ensure 4271 * that the VF traffic goes to the PF 4272 * after stop, close and detach of the VF 4273 **/ 4274 ixgbevf_remove_mac_addr(dev, 0); 4275 } 4276 4277 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) 4278 { 4279 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4280 struct ixgbe_vfta *shadow_vfta = 4281 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 4282 int i = 0, j = 0, vfta = 0, mask = 1; 4283 4284 for (i = 0; i < IXGBE_VFTA_SIZE; i++) { 4285 vfta = shadow_vfta->vfta[i]; 4286 if (vfta) { 4287 mask = 1; 4288 for (j = 0; j < 32; j++) { 4289 if (vfta & mask) 4290 ixgbe_set_vfta(hw, (i<<5)+j, 0, 4291 on, false); 4292 mask <<= 1; 4293 } 4294 } 4295 } 4296 4297 } 4298 4299 static int 4300 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 4301 { 4302 struct ixgbe_hw *hw = 4303 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4304 struct ixgbe_vfta *shadow_vfta = 4305 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 4306 uint32_t vid_idx = 0; 4307 uint32_t vid_bit = 0; 4308 int ret = 0; 4309 4310 PMD_INIT_FUNC_TRACE(); 4311 4312 /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ 4313 ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); 4314 if (ret) { 4315 PMD_INIT_LOG(ERR, "Unable to set VF vlan"); 4316 return ret; 4317 } 4318 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 4319 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 4320 4321 /* Save what we set and retore it after device reset */ 4322 if (on) 4323 shadow_vfta->vfta[vid_idx] |= vid_bit; 4324 else 4325 shadow_vfta->vfta[vid_idx] &= ~vid_bit; 4326 4327 return 0; 4328 } 4329 4330 static void 4331 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 4332 { 4333 struct ixgbe_hw *hw = 4334 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4335 uint32_t ctrl; 4336 4337 PMD_INIT_FUNC_TRACE(); 4338 4339 if (queue >= hw->mac.max_rx_queues) 4340 return; 4341 4342 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 4343 if (on) 4344 ctrl |= IXGBE_RXDCTL_VME; 4345 else 4346 ctrl &= ~IXGBE_RXDCTL_VME; 4347 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 4348 4349 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); 4350 } 4351 4352 static void 4353 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) 4354 { 4355 struct ixgbe_hw *hw = 4356 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4357 uint16_t i; 4358 int on = 0; 4359 4360 /* VF function only support hw strip feature, others are not support */ 4361 if (mask & ETH_VLAN_STRIP_MASK) { 4362 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip); 4363 4364 for (i = 0; i < hw->mac.max_rx_queues; i++) 4365 ixgbevf_vlan_strip_queue_set(dev, i, on); 4366 } 4367 } 4368 4369 static int 4370 ixgbe_vmdq_mode_check(struct ixgbe_hw *hw) 4371 { 4372 uint32_t reg_val; 4373 4374 /* we only need to do this if VMDq is enabled */ 4375 reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 4376 if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { 4377 PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting"); 4378 return -1; 4379 } 4380 4381 return 0; 4382 } 4383 4384 static uint32_t 4385 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr) 4386 { 4387 uint32_t vector = 0; 4388 4389 switch (hw->mac.mc_filter_type) { 4390 case 0: /* use bits [47:36] of the address */ 4391 vector = ((uc_addr->addr_bytes[4] >> 4) | 4392 (((uint16_t)uc_addr->addr_bytes[5]) << 4)); 4393 break; 4394 case 1: /* use bits [46:35] of the address */ 4395 vector = ((uc_addr->addr_bytes[4] >> 3) | 4396 (((uint16_t)uc_addr->addr_bytes[5]) << 5)); 4397 break; 4398 case 2: /* use bits [45:34] of the address */ 4399 vector = ((uc_addr->addr_bytes[4] >> 2) | 4400 (((uint16_t)uc_addr->addr_bytes[5]) << 6)); 4401 break; 4402 case 3: /* use bits [43:32] of the address */ 4403 vector = ((uc_addr->addr_bytes[4]) | 4404 (((uint16_t)uc_addr->addr_bytes[5]) << 8)); 4405 break; 4406 default: /* Invalid mc_filter_type */ 4407 break; 4408 } 4409 4410 /* vector can only be 12-bits or boundary will be exceeded */ 4411 vector &= 0xFFF; 4412 return vector; 4413 } 4414 4415 static int 4416 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 4417 uint8_t on) 4418 { 4419 uint32_t vector; 4420 uint32_t uta_idx; 4421 uint32_t reg_val; 4422 uint32_t uta_shift; 4423 uint32_t rc; 4424 const uint32_t ixgbe_uta_idx_mask = 0x7F; 4425 const uint32_t ixgbe_uta_bit_shift = 5; 4426 const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1; 4427 const uint32_t bit1 = 0x1; 4428 4429 struct ixgbe_hw *hw = 4430 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4431 struct ixgbe_uta_info *uta_info = 4432 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 4433 4434 /* The UTA table only exists on 82599 hardware and newer */ 4435 if (hw->mac.type < ixgbe_mac_82599EB) 4436 return -ENOTSUP; 4437 4438 vector = ixgbe_uta_vector(hw, mac_addr); 4439 uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; 4440 uta_shift = vector & ixgbe_uta_bit_mask; 4441 4442 rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0); 4443 if (rc == on) 4444 return 0; 4445 4446 reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx)); 4447 if (on) { 4448 uta_info->uta_in_use++; 4449 reg_val |= (bit1 << uta_shift); 4450 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift); 4451 } else { 4452 uta_info->uta_in_use--; 4453 reg_val &= ~(bit1 << uta_shift); 4454 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift); 4455 } 4456 4457 IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val); 4458 4459 if (uta_info->uta_in_use > 0) 4460 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 4461 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 4462 else 4463 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 4464 4465 return 0; 4466 } 4467 4468 static int 4469 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) 4470 { 4471 int i; 4472 struct ixgbe_hw *hw = 4473 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4474 struct ixgbe_uta_info *uta_info = 4475 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 4476 4477 /* The UTA table only exists on 82599 hardware and newer */ 4478 if (hw->mac.type < ixgbe_mac_82599EB) 4479 return -ENOTSUP; 4480 4481 if (on) { 4482 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 4483 uta_info->uta_shadow[i] = ~0; 4484 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); 4485 } 4486 } else { 4487 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 4488 uta_info->uta_shadow[i] = 0; 4489 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 4490 } 4491 } 4492 return 0; 4493 4494 } 4495 4496 uint32_t 4497 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) 4498 { 4499 uint32_t new_val = orig_val; 4500 4501 if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) 4502 new_val |= IXGBE_VMOLR_AUPE; 4503 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC) 4504 new_val |= IXGBE_VMOLR_ROMPE; 4505 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC) 4506 new_val |= IXGBE_VMOLR_ROPE; 4507 if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST) 4508 new_val |= IXGBE_VMOLR_BAM; 4509 if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST) 4510 new_val |= IXGBE_VMOLR_MPE; 4511 4512 return new_val; 4513 } 4514 4515 static int 4516 ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, 4517 uint16_t rx_mask, uint8_t on) 4518 { 4519 int val = 0; 4520 4521 struct ixgbe_hw *hw = 4522 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4523 uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); 4524 4525 if (hw->mac.type == ixgbe_mac_82598EB) { 4526 PMD_INIT_LOG(ERR, "setting VF receive mode set should be done" 4527 " on 82599 hardware and newer"); 4528 return -ENOTSUP; 4529 } 4530 if (ixgbe_vmdq_mode_check(hw) < 0) 4531 return -ENOTSUP; 4532 4533 val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val); 4534 4535 if (on) 4536 vmolr |= val; 4537 else 4538 vmolr &= ~val; 4539 4540 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); 4541 4542 return 0; 4543 } 4544 4545 static int 4546 ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) 4547 { 4548 uint32_t reg, addr; 4549 uint32_t val; 4550 const uint8_t bit1 = 0x1; 4551 4552 struct ixgbe_hw *hw = 4553 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4554 4555 if (ixgbe_vmdq_mode_check(hw) < 0) 4556 return -ENOTSUP; 4557 4558 if (pool >= ETH_64_POOLS) 4559 return -EINVAL; 4560 4561 /* for pool >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */ 4562 if (pool >= 32) { 4563 addr = IXGBE_VFRE(1); 4564 val = bit1 << (pool - 32); 4565 } else { 4566 addr = IXGBE_VFRE(0); 4567 val = bit1 << pool; 4568 } 4569 4570 reg = IXGBE_READ_REG(hw, addr); 4571 4572 if (on) 4573 reg |= val; 4574 else 4575 reg &= ~val; 4576 4577 IXGBE_WRITE_REG(hw, addr, reg); 4578 4579 return 0; 4580 } 4581 4582 static int 4583 ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) 4584 { 4585 uint32_t reg, addr; 4586 uint32_t val; 4587 const uint8_t bit1 = 0x1; 4588 4589 struct ixgbe_hw *hw = 4590 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4591 4592 if (ixgbe_vmdq_mode_check(hw) < 0) 4593 return -ENOTSUP; 4594 4595 if (pool >= ETH_64_POOLS) 4596 return -EINVAL; 4597 4598 /* for pool >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */ 4599 if (pool >= 32) { 4600 addr = IXGBE_VFTE(1); 4601 val = bit1 << (pool - 32); 4602 } else { 4603 addr = IXGBE_VFTE(0); 4604 val = bit1 << pool; 4605 } 4606 4607 reg = IXGBE_READ_REG(hw, addr); 4608 4609 if (on) 4610 reg |= val; 4611 else 4612 reg &= ~val; 4613 4614 IXGBE_WRITE_REG(hw, addr, reg); 4615 4616 return 0; 4617 } 4618 4619 static int 4620 ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, 4621 uint64_t pool_mask, uint8_t vlan_on) 4622 { 4623 int ret = 0; 4624 uint16_t pool_idx; 4625 struct ixgbe_hw *hw = 4626 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4627 4628 if (ixgbe_vmdq_mode_check(hw) < 0) 4629 return -ENOTSUP; 4630 for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) { 4631 if (pool_mask & ((uint64_t)(1ULL << pool_idx))) { 4632 ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, 4633 vlan_on, false); 4634 if (ret < 0) 4635 return ret; 4636 } 4637 } 4638 4639 return ret; 4640 } 4641 4642 #define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */ 4643 #define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */ 4644 #define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */ 4645 #define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. */ 4646 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \ 4647 ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \ 4648 ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN)) 4649 4650 static int 4651 ixgbe_mirror_rule_set(struct rte_eth_dev *dev, 4652 struct rte_eth_mirror_conf *mirror_conf, 4653 uint8_t rule_id, uint8_t on) 4654 { 4655 uint32_t mr_ctl, vlvf; 4656 uint32_t mp_lsb = 0; 4657 uint32_t mv_msb = 0; 4658 uint32_t mv_lsb = 0; 4659 uint32_t mp_msb = 0; 4660 uint8_t i = 0; 4661 int reg_index = 0; 4662 uint64_t vlan_mask = 0; 4663 4664 const uint8_t pool_mask_offset = 32; 4665 const uint8_t vlan_mask_offset = 32; 4666 const uint8_t dst_pool_offset = 8; 4667 const uint8_t rule_mr_offset = 4; 4668 const uint8_t mirror_rule_mask = 0x0F; 4669 4670 struct ixgbe_mirror_info *mr_info = 4671 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); 4672 struct ixgbe_hw *hw = 4673 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4674 uint8_t mirror_type = 0; 4675 4676 if (ixgbe_vmdq_mode_check(hw) < 0) 4677 return -ENOTSUP; 4678 4679 if (rule_id >= IXGBE_MAX_MIRROR_RULES) 4680 return -EINVAL; 4681 4682 if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) { 4683 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.", 4684 mirror_conf->rule_type); 4685 return -EINVAL; 4686 } 4687 4688 if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { 4689 mirror_type |= IXGBE_MRCTL_VLME; 4690 /* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */ 4691 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) { 4692 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { 4693 /* search vlan id related pool vlan filter index */ 4694 reg_index = ixgbe_find_vlvf_slot(hw, 4695 mirror_conf->vlan.vlan_id[i], 4696 false); 4697 if (reg_index < 0) 4698 return -EINVAL; 4699 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index)); 4700 if ((vlvf & IXGBE_VLVF_VIEN) && 4701 ((vlvf & IXGBE_VLVF_VLANID_MASK) == 4702 mirror_conf->vlan.vlan_id[i])) 4703 vlan_mask |= (1ULL << reg_index); 4704 else 4705 return -EINVAL; 4706 } 4707 } 4708 4709 if (on) { 4710 mv_lsb = vlan_mask & 0xFFFFFFFF; 4711 mv_msb = vlan_mask >> vlan_mask_offset; 4712 4713 mr_info->mr_conf[rule_id].vlan.vlan_mask = 4714 mirror_conf->vlan.vlan_mask; 4715 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) { 4716 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) 4717 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 4718 mirror_conf->vlan.vlan_id[i]; 4719 } 4720 } else { 4721 mv_lsb = 0; 4722 mv_msb = 0; 4723 mr_info->mr_conf[rule_id].vlan.vlan_mask = 0; 4724 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) 4725 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0; 4726 } 4727 } 4728 4729 /* 4730 * if enable pool mirror, write related pool mask register,if disable 4731 * pool mirror, clear PFMRVM register 4732 */ 4733 if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { 4734 mirror_type |= IXGBE_MRCTL_VPME; 4735 if (on) { 4736 mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF; 4737 mp_msb = mirror_conf->pool_mask >> pool_mask_offset; 4738 mr_info->mr_conf[rule_id].pool_mask = 4739 mirror_conf->pool_mask; 4740 4741 } else { 4742 mp_lsb = 0; 4743 mp_msb = 0; 4744 mr_info->mr_conf[rule_id].pool_mask = 0; 4745 } 4746 } 4747 if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT) 4748 mirror_type |= IXGBE_MRCTL_UPME; 4749 if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT) 4750 mirror_type |= IXGBE_MRCTL_DPME; 4751 4752 /* read mirror control register and recalculate it */ 4753 mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id)); 4754 4755 if (on) { 4756 mr_ctl |= mirror_type; 4757 mr_ctl &= mirror_rule_mask; 4758 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset; 4759 } else 4760 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask); 4761 4762 mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type; 4763 mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool; 4764 4765 /* write mirrror control register */ 4766 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); 4767 4768 /* write pool mirrror control register */ 4769 if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) { 4770 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); 4771 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), 4772 mp_msb); 4773 } 4774 /* write VLAN mirrror control register */ 4775 if (mirror_conf->rule_type == ETH_MIRROR_VLAN) { 4776 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); 4777 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), 4778 mv_msb); 4779 } 4780 4781 return 0; 4782 } 4783 4784 static int 4785 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) 4786 { 4787 int mr_ctl = 0; 4788 uint32_t lsb_val = 0; 4789 uint32_t msb_val = 0; 4790 const uint8_t rule_mr_offset = 4; 4791 4792 struct ixgbe_hw *hw = 4793 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4794 struct ixgbe_mirror_info *mr_info = 4795 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); 4796 4797 if (ixgbe_vmdq_mode_check(hw) < 0) 4798 return -ENOTSUP; 4799 4800 memset(&mr_info->mr_conf[rule_id], 0, 4801 sizeof(struct rte_eth_mirror_conf)); 4802 4803 /* clear PFVMCTL register */ 4804 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); 4805 4806 /* clear pool mask register */ 4807 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val); 4808 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val); 4809 4810 /* clear vlan mask register */ 4811 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val); 4812 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val); 4813 4814 return 0; 4815 } 4816 4817 static int 4818 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 4819 { 4820 uint32_t mask; 4821 struct ixgbe_hw *hw = 4822 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4823 4824 mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); 4825 mask |= (1 << IXGBE_MISC_VEC_ID); 4826 RTE_SET_USED(queue_id); 4827 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); 4828 4829 rte_intr_enable(&dev->pci_dev->intr_handle); 4830 4831 return 0; 4832 } 4833 4834 static int 4835 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 4836 { 4837 uint32_t mask; 4838 struct ixgbe_hw *hw = 4839 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4840 4841 mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); 4842 mask &= ~(1 << IXGBE_MISC_VEC_ID); 4843 RTE_SET_USED(queue_id); 4844 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); 4845 4846 return 0; 4847 } 4848 4849 static int 4850 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 4851 { 4852 uint32_t mask; 4853 struct ixgbe_hw *hw = 4854 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4855 struct ixgbe_interrupt *intr = 4856 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4857 4858 if (queue_id < 16) { 4859 ixgbe_disable_intr(hw); 4860 intr->mask |= (1 << queue_id); 4861 ixgbe_enable_intr(dev); 4862 } else if (queue_id < 32) { 4863 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 4864 mask &= (1 << queue_id); 4865 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 4866 } else if (queue_id < 64) { 4867 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 4868 mask &= (1 << (queue_id - 32)); 4869 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 4870 } 4871 rte_intr_enable(&dev->pci_dev->intr_handle); 4872 4873 return 0; 4874 } 4875 4876 static int 4877 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 4878 { 4879 uint32_t mask; 4880 struct ixgbe_hw *hw = 4881 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4882 struct ixgbe_interrupt *intr = 4883 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4884 4885 if (queue_id < 16) { 4886 ixgbe_disable_intr(hw); 4887 intr->mask &= ~(1 << queue_id); 4888 ixgbe_enable_intr(dev); 4889 } else if (queue_id < 32) { 4890 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 4891 mask &= ~(1 << queue_id); 4892 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 4893 } else if (queue_id < 64) { 4894 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 4895 mask &= ~(1 << (queue_id - 32)); 4896 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 4897 } 4898 4899 return 0; 4900 } 4901 4902 static void 4903 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 4904 uint8_t queue, uint8_t msix_vector) 4905 { 4906 uint32_t tmp, idx; 4907 4908 if (direction == -1) { 4909 /* other causes */ 4910 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 4911 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 4912 tmp &= ~0xFF; 4913 tmp |= msix_vector; 4914 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); 4915 } else { 4916 /* rx or tx cause */ 4917 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 4918 idx = ((16 * (queue & 1)) + (8 * direction)); 4919 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 4920 tmp &= ~(0xFF << idx); 4921 tmp |= (msix_vector << idx); 4922 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); 4923 } 4924 } 4925 4926 /** 4927 * set the IVAR registers, mapping interrupt causes to vectors 4928 * @param hw 4929 * pointer to ixgbe_hw struct 4930 * @direction 4931 * 0 for Rx, 1 for Tx, -1 for other causes 4932 * @queue 4933 * queue to map the corresponding interrupt to 4934 * @msix_vector 4935 * the vector to map to the corresponding queue 4936 */ 4937 static void 4938 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 4939 uint8_t queue, uint8_t msix_vector) 4940 { 4941 uint32_t tmp, idx; 4942 4943 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 4944 if (hw->mac.type == ixgbe_mac_82598EB) { 4945 if (direction == -1) 4946 direction = 0; 4947 idx = (((direction * 64) + queue) >> 2) & 0x1F; 4948 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); 4949 tmp &= ~(0xFF << (8 * (queue & 0x3))); 4950 tmp |= (msix_vector << (8 * (queue & 0x3))); 4951 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); 4952 } else if ((hw->mac.type == ixgbe_mac_82599EB) || 4953 (hw->mac.type == ixgbe_mac_X540)) { 4954 if (direction == -1) { 4955 /* other causes */ 4956 idx = ((queue & 1) * 8); 4957 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 4958 tmp &= ~(0xFF << idx); 4959 tmp |= (msix_vector << idx); 4960 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); 4961 } else { 4962 /* rx or tx causes */ 4963 idx = ((16 * (queue & 1)) + (8 * direction)); 4964 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); 4965 tmp &= ~(0xFF << idx); 4966 tmp |= (msix_vector << idx); 4967 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); 4968 } 4969 } 4970 } 4971 4972 static void 4973 ixgbevf_configure_msix(struct rte_eth_dev *dev) 4974 { 4975 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; 4976 struct ixgbe_hw *hw = 4977 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4978 uint32_t q_idx; 4979 uint32_t vector_idx = IXGBE_MISC_VEC_ID; 4980 4981 /* Configure VF other cause ivar */ 4982 ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); 4983 4984 /* won't configure msix register if no mapping is done 4985 * between intr vector and event fd. 4986 */ 4987 if (!rte_intr_dp_is_en(intr_handle)) 4988 return; 4989 4990 /* Configure all RX queues of VF */ 4991 for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { 4992 /* Force all queue use vector 0, 4993 * as IXGBE_VF_MAXMSIVECOTR = 1 4994 */ 4995 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); 4996 intr_handle->intr_vec[q_idx] = vector_idx; 4997 } 4998 } 4999 5000 /** 5001 * Sets up the hardware to properly generate MSI-X interrupts 5002 * @hw 5003 * board private structure 5004 */ 5005 static void 5006 ixgbe_configure_msix(struct rte_eth_dev *dev) 5007 { 5008 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; 5009 struct ixgbe_hw *hw = 5010 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5011 uint32_t queue_id, base = IXGBE_MISC_VEC_ID; 5012 uint32_t vec = IXGBE_MISC_VEC_ID; 5013 uint32_t mask; 5014 uint32_t gpie; 5015 5016 /* won't configure msix register if no mapping is done 5017 * between intr vector and event fd 5018 */ 5019 if (!rte_intr_dp_is_en(intr_handle)) 5020 return; 5021 5022 if (rte_intr_allow_others(intr_handle)) 5023 vec = base = IXGBE_RX_VEC_START; 5024 5025 /* setup GPIE for MSI-x mode */ 5026 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 5027 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | 5028 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME; 5029 /* auto clearing and auto setting corresponding bits in EIMS 5030 * when MSI-X interrupt is triggered 5031 */ 5032 if (hw->mac.type == ixgbe_mac_82598EB) { 5033 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 5034 } else { 5035 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 5036 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 5037 } 5038 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 5039 5040 /* Populate the IVAR table and set the ITR values to the 5041 * corresponding register. 5042 */ 5043 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; 5044 queue_id++) { 5045 /* by default, 1:1 mapping */ 5046 ixgbe_set_ivar_map(hw, 0, queue_id, vec); 5047 intr_handle->intr_vec[queue_id] = vec; 5048 if (vec < base + intr_handle->nb_efd - 1) 5049 vec++; 5050 } 5051 5052 switch (hw->mac.type) { 5053 case ixgbe_mac_82598EB: 5054 ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, 5055 IXGBE_MISC_VEC_ID); 5056 break; 5057 case ixgbe_mac_82599EB: 5058 case ixgbe_mac_X540: 5059 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); 5060 break; 5061 default: 5062 break; 5063 } 5064 IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), 5065 IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF); 5066 5067 /* set up to autoclear timer, and the vectors */ 5068 mask = IXGBE_EIMS_ENABLE_MASK; 5069 mask &= ~(IXGBE_EIMS_OTHER | 5070 IXGBE_EIMS_MAILBOX | 5071 IXGBE_EIMS_LSC); 5072 5073 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 5074 } 5075 5076 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, 5077 uint16_t queue_idx, uint16_t tx_rate) 5078 { 5079 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5080 uint32_t rf_dec, rf_int; 5081 uint32_t bcnrc_val; 5082 uint16_t link_speed = dev->data->dev_link.link_speed; 5083 5084 if (queue_idx >= hw->mac.max_tx_queues) 5085 return -EINVAL; 5086 5087 if (tx_rate != 0) { 5088 /* Calculate the rate factor values to set */ 5089 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; 5090 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; 5091 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; 5092 5093 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; 5094 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & 5095 IXGBE_RTTBCNRC_RF_INT_MASK_M); 5096 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); 5097 } else { 5098 bcnrc_val = 0; 5099 } 5100 5101 /* 5102 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM 5103 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise 5104 * set as 0x4. 5105 */ 5106 if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) && 5107 (dev->data->dev_conf.rxmode.max_rx_pkt_len >= 5108 IXGBE_MAX_JUMBO_FRAME_SIZE)) 5109 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 5110 IXGBE_MMW_SIZE_JUMBO_FRAME); 5111 else 5112 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 5113 IXGBE_MMW_SIZE_DEFAULT); 5114 5115 /* Set RTTBCNRC of queue X */ 5116 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); 5117 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); 5118 IXGBE_WRITE_FLUSH(hw); 5119 5120 return 0; 5121 } 5122 5123 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, 5124 uint16_t tx_rate, uint64_t q_msk) 5125 { 5126 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5127 struct ixgbe_vf_info *vfinfo = 5128 *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); 5129 uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 5130 uint32_t queue_stride = 5131 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 5132 uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx; 5133 uint32_t queue_end = queue_idx + nb_q_per_pool - 1; 5134 uint16_t total_rate = 0; 5135 5136 if (queue_end >= hw->mac.max_tx_queues) 5137 return -EINVAL; 5138 5139 if (vfinfo != NULL) { 5140 for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) { 5141 if (vf_idx == vf) 5142 continue; 5143 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); 5144 idx++) 5145 total_rate += vfinfo[vf_idx].tx_rate[idx]; 5146 } 5147 } else 5148 return -EINVAL; 5149 5150 /* Store tx_rate for this vf. */ 5151 for (idx = 0; idx < nb_q_per_pool; idx++) { 5152 if (((uint64_t)0x1 << idx) & q_msk) { 5153 if (vfinfo[vf].tx_rate[idx] != tx_rate) 5154 vfinfo[vf].tx_rate[idx] = tx_rate; 5155 total_rate += tx_rate; 5156 } 5157 } 5158 5159 if (total_rate > dev->data->dev_link.link_speed) { 5160 /* 5161 * Reset stored TX rate of the VF if it causes exceed 5162 * link speed. 5163 */ 5164 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); 5165 return -EINVAL; 5166 } 5167 5168 /* Set RTTBCNRC of each queue/pool for vf X */ 5169 for (; queue_idx <= queue_end; queue_idx++) { 5170 if (0x1 & q_msk) 5171 ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); 5172 q_msk = q_msk >> 1; 5173 } 5174 5175 return 0; 5176 } 5177 5178 static void 5179 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 5180 __attribute__((unused)) uint32_t index, 5181 __attribute__((unused)) uint32_t pool) 5182 { 5183 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5184 int diag; 5185 5186 /* 5187 * On a 82599 VF, adding again the same MAC addr is not an idempotent 5188 * operation. Trap this case to avoid exhausting the [very limited] 5189 * set of PF resources used to store VF MAC addresses. 5190 */ 5191 if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) 5192 return; 5193 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 5194 if (diag == 0) 5195 return; 5196 PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag); 5197 } 5198 5199 static void 5200 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) 5201 { 5202 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5203 struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; 5204 struct ether_addr *mac_addr; 5205 uint32_t i; 5206 int diag; 5207 5208 /* 5209 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does 5210 * not support the deletion of a given MAC address. 5211 * Instead, it imposes to delete all MAC addresses, then to add again 5212 * all MAC addresses with the exception of the one to be deleted. 5213 */ 5214 (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); 5215 5216 /* 5217 * Add again all MAC addresses, with the exception of the deleted one 5218 * and of the permanent MAC address. 5219 */ 5220 for (i = 0, mac_addr = dev->data->mac_addrs; 5221 i < hw->mac.num_rar_entries; i++, mac_addr++) { 5222 /* Skip the deleted MAC address */ 5223 if (i == index) 5224 continue; 5225 /* Skip NULL MAC addresses */ 5226 if (is_zero_ether_addr(mac_addr)) 5227 continue; 5228 /* Skip the permanent MAC address */ 5229 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) 5230 continue; 5231 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 5232 if (diag != 0) 5233 PMD_DRV_LOG(ERR, 5234 "Adding again MAC address " 5235 "%02x:%02x:%02x:%02x:%02x:%02x failed " 5236 "diag=%d", 5237 mac_addr->addr_bytes[0], 5238 mac_addr->addr_bytes[1], 5239 mac_addr->addr_bytes[2], 5240 mac_addr->addr_bytes[3], 5241 mac_addr->addr_bytes[4], 5242 mac_addr->addr_bytes[5], 5243 diag); 5244 } 5245 } 5246 5247 static void 5248 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) 5249 { 5250 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5251 5252 hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); 5253 } 5254 5255 #define MAC_TYPE_FILTER_SUP(type) do {\ 5256 if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\ 5257 (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\ 5258 (type) != ixgbe_mac_X550EM_a)\ 5259 return -ENOTSUP;\ 5260 } while (0) 5261 5262 static int 5263 ixgbe_syn_filter_set(struct rte_eth_dev *dev, 5264 struct rte_eth_syn_filter *filter, 5265 bool add) 5266 { 5267 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5268 uint32_t synqf; 5269 5270 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 5271 return -EINVAL; 5272 5273 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); 5274 5275 if (add) { 5276 if (synqf & IXGBE_SYN_FILTER_ENABLE) 5277 return -EINVAL; 5278 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & 5279 IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); 5280 5281 if (filter->hig_pri) 5282 synqf |= IXGBE_SYN_FILTER_SYNQFP; 5283 else 5284 synqf &= ~IXGBE_SYN_FILTER_SYNQFP; 5285 } else { 5286 if (!(synqf & IXGBE_SYN_FILTER_ENABLE)) 5287 return -ENOENT; 5288 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); 5289 } 5290 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 5291 IXGBE_WRITE_FLUSH(hw); 5292 return 0; 5293 } 5294 5295 static int 5296 ixgbe_syn_filter_get(struct rte_eth_dev *dev, 5297 struct rte_eth_syn_filter *filter) 5298 { 5299 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5300 uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); 5301 5302 if (synqf & IXGBE_SYN_FILTER_ENABLE) { 5303 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0; 5304 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1); 5305 return 0; 5306 } 5307 return -ENOENT; 5308 } 5309 5310 static int 5311 ixgbe_syn_filter_handle(struct rte_eth_dev *dev, 5312 enum rte_filter_op filter_op, 5313 void *arg) 5314 { 5315 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5316 int ret; 5317 5318 MAC_TYPE_FILTER_SUP(hw->mac.type); 5319 5320 if (filter_op == RTE_ETH_FILTER_NOP) 5321 return 0; 5322 5323 if (arg == NULL) { 5324 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", 5325 filter_op); 5326 return -EINVAL; 5327 } 5328 5329 switch (filter_op) { 5330 case RTE_ETH_FILTER_ADD: 5331 ret = ixgbe_syn_filter_set(dev, 5332 (struct rte_eth_syn_filter *)arg, 5333 TRUE); 5334 break; 5335 case RTE_ETH_FILTER_DELETE: 5336 ret = ixgbe_syn_filter_set(dev, 5337 (struct rte_eth_syn_filter *)arg, 5338 FALSE); 5339 break; 5340 case RTE_ETH_FILTER_GET: 5341 ret = ixgbe_syn_filter_get(dev, 5342 (struct rte_eth_syn_filter *)arg); 5343 break; 5344 default: 5345 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op); 5346 ret = -EINVAL; 5347 break; 5348 } 5349 5350 return ret; 5351 } 5352 5353 5354 static inline enum ixgbe_5tuple_protocol 5355 convert_protocol_type(uint8_t protocol_value) 5356 { 5357 if (protocol_value == IPPROTO_TCP) 5358 return IXGBE_FILTER_PROTOCOL_TCP; 5359 else if (protocol_value == IPPROTO_UDP) 5360 return IXGBE_FILTER_PROTOCOL_UDP; 5361 else if (protocol_value == IPPROTO_SCTP) 5362 return IXGBE_FILTER_PROTOCOL_SCTP; 5363 else 5364 return IXGBE_FILTER_PROTOCOL_NONE; 5365 } 5366 5367 /* 5368 * add a 5tuple filter 5369 * 5370 * @param 5371 * dev: Pointer to struct rte_eth_dev. 5372 * index: the index the filter allocates. 5373 * filter: ponter to the filter that will be added. 5374 * rx_queue: the queue id the filter assigned to. 5375 * 5376 * @return 5377 * - On success, zero. 5378 * - On failure, a negative value. 5379 */ 5380 static int 5381 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 5382 struct ixgbe_5tuple_filter *filter) 5383 { 5384 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5385 struct ixgbe_filter_info *filter_info = 5386 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5387 int i, idx, shift; 5388 uint32_t ftqf, sdpqf; 5389 uint32_t l34timir = 0; 5390 uint8_t mask = 0xff; 5391 5392 /* 5393 * look for an unused 5tuple filter index, 5394 * and insert the filter to list. 5395 */ 5396 for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { 5397 idx = i / (sizeof(uint32_t) * NBBY); 5398 shift = i % (sizeof(uint32_t) * NBBY); 5399 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { 5400 filter_info->fivetuple_mask[idx] |= 1 << shift; 5401 filter->index = i; 5402 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 5403 filter, 5404 entries); 5405 break; 5406 } 5407 } 5408 if (i >= IXGBE_MAX_FTQF_FILTERS) { 5409 PMD_DRV_LOG(ERR, "5tuple filters are full."); 5410 return -ENOSYS; 5411 } 5412 5413 sdpqf = (uint32_t)(filter->filter_info.dst_port << 5414 IXGBE_SDPQF_DSTPORT_SHIFT); 5415 sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); 5416 5417 ftqf = (uint32_t)(filter->filter_info.proto & 5418 IXGBE_FTQF_PROTOCOL_MASK); 5419 ftqf |= (uint32_t)((filter->filter_info.priority & 5420 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); 5421 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */ 5422 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; 5423 if (filter->filter_info.dst_ip_mask == 0) 5424 mask &= IXGBE_FTQF_DEST_ADDR_MASK; 5425 if (filter->filter_info.src_port_mask == 0) 5426 mask &= IXGBE_FTQF_SOURCE_PORT_MASK; 5427 if (filter->filter_info.dst_port_mask == 0) 5428 mask &= IXGBE_FTQF_DEST_PORT_MASK; 5429 if (filter->filter_info.proto_mask == 0) 5430 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; 5431 ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; 5432 ftqf |= IXGBE_FTQF_POOL_MASK_EN; 5433 ftqf |= IXGBE_FTQF_QUEUE_ENABLE; 5434 5435 IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); 5436 IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); 5437 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); 5438 IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); 5439 5440 l34timir |= IXGBE_L34T_IMIR_RESERVE; 5441 l34timir |= (uint32_t)(filter->queue << 5442 IXGBE_L34T_IMIR_QUEUE_SHIFT); 5443 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); 5444 return 0; 5445 } 5446 5447 /* 5448 * remove a 5tuple filter 5449 * 5450 * @param 5451 * dev: Pointer to struct rte_eth_dev. 5452 * filter: the pointer of the filter will be removed. 5453 */ 5454 static void 5455 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 5456 struct ixgbe_5tuple_filter *filter) 5457 { 5458 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5459 struct ixgbe_filter_info *filter_info = 5460 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5461 uint16_t index = filter->index; 5462 5463 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= 5464 ~(1 << (index % (sizeof(uint32_t) * NBBY))); 5465 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); 5466 rte_free(filter); 5467 5468 IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); 5469 IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); 5470 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); 5471 IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); 5472 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); 5473 } 5474 5475 static int 5476 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 5477 { 5478 struct ixgbe_hw *hw; 5479 uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 5480 5481 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5482 5483 if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN)) 5484 return -EINVAL; 5485 5486 /* refuse mtu that requires the support of scattered packets when this 5487 * feature has not been enabled before. 5488 */ 5489 if (!dev->data->scattered_rx && 5490 (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > 5491 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) 5492 return -EINVAL; 5493 5494 /* 5495 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU 5496 * request of the version 2.0 of the mailbox API. 5497 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 5498 * of the mailbox API. 5499 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers 5500 * prior to 3.11.33 which contains the following change: 5501 * "ixgbe: Enable jumbo frames support w/ SR-IOV" 5502 */ 5503 ixgbevf_rlpml_set_vf(hw, max_frame); 5504 5505 /* update max frame size */ 5506 dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame; 5507 return 0; 5508 } 5509 5510 #define MAC_TYPE_FILTER_SUP_EXT(type) do {\ 5511 if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\ 5512 return -ENOTSUP;\ 5513 } while (0) 5514 5515 static inline struct ixgbe_5tuple_filter * 5516 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, 5517 struct ixgbe_5tuple_filter_info *key) 5518 { 5519 struct ixgbe_5tuple_filter *it; 5520 5521 TAILQ_FOREACH(it, filter_list, entries) { 5522 if (memcmp(key, &it->filter_info, 5523 sizeof(struct ixgbe_5tuple_filter_info)) == 0) { 5524 return it; 5525 } 5526 } 5527 return NULL; 5528 } 5529 5530 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/ 5531 static inline int 5532 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, 5533 struct ixgbe_5tuple_filter_info *filter_info) 5534 { 5535 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || 5536 filter->priority > IXGBE_5TUPLE_MAX_PRI || 5537 filter->priority < IXGBE_5TUPLE_MIN_PRI) 5538 return -EINVAL; 5539 5540 switch (filter->dst_ip_mask) { 5541 case UINT32_MAX: 5542 filter_info->dst_ip_mask = 0; 5543 filter_info->dst_ip = filter->dst_ip; 5544 break; 5545 case 0: 5546 filter_info->dst_ip_mask = 1; 5547 break; 5548 default: 5549 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 5550 return -EINVAL; 5551 } 5552 5553 switch (filter->src_ip_mask) { 5554 case UINT32_MAX: 5555 filter_info->src_ip_mask = 0; 5556 filter_info->src_ip = filter->src_ip; 5557 break; 5558 case 0: 5559 filter_info->src_ip_mask = 1; 5560 break; 5561 default: 5562 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 5563 return -EINVAL; 5564 } 5565 5566 switch (filter->dst_port_mask) { 5567 case UINT16_MAX: 5568 filter_info->dst_port_mask = 0; 5569 filter_info->dst_port = filter->dst_port; 5570 break; 5571 case 0: 5572 filter_info->dst_port_mask = 1; 5573 break; 5574 default: 5575 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 5576 return -EINVAL; 5577 } 5578 5579 switch (filter->src_port_mask) { 5580 case UINT16_MAX: 5581 filter_info->src_port_mask = 0; 5582 filter_info->src_port = filter->src_port; 5583 break; 5584 case 0: 5585 filter_info->src_port_mask = 1; 5586 break; 5587 default: 5588 PMD_DRV_LOG(ERR, "invalid src_port mask."); 5589 return -EINVAL; 5590 } 5591 5592 switch (filter->proto_mask) { 5593 case UINT8_MAX: 5594 filter_info->proto_mask = 0; 5595 filter_info->proto = 5596 convert_protocol_type(filter->proto); 5597 break; 5598 case 0: 5599 filter_info->proto_mask = 1; 5600 break; 5601 default: 5602 PMD_DRV_LOG(ERR, "invalid protocol mask."); 5603 return -EINVAL; 5604 } 5605 5606 filter_info->priority = (uint8_t)filter->priority; 5607 return 0; 5608 } 5609 5610 /* 5611 * add or delete a ntuple filter 5612 * 5613 * @param 5614 * dev: Pointer to struct rte_eth_dev. 5615 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 5616 * add: if true, add filter, if false, remove filter 5617 * 5618 * @return 5619 * - On success, zero. 5620 * - On failure, a negative value. 5621 */ 5622 static int 5623 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, 5624 struct rte_eth_ntuple_filter *ntuple_filter, 5625 bool add) 5626 { 5627 struct ixgbe_filter_info *filter_info = 5628 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5629 struct ixgbe_5tuple_filter_info filter_5tuple; 5630 struct ixgbe_5tuple_filter *filter; 5631 int ret; 5632 5633 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { 5634 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 5635 return -EINVAL; 5636 } 5637 5638 memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); 5639 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); 5640 if (ret < 0) 5641 return ret; 5642 5643 filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, 5644 &filter_5tuple); 5645 if (filter != NULL && add) { 5646 PMD_DRV_LOG(ERR, "filter exists."); 5647 return -EEXIST; 5648 } 5649 if (filter == NULL && !add) { 5650 PMD_DRV_LOG(ERR, "filter doesn't exist."); 5651 return -ENOENT; 5652 } 5653 5654 if (add) { 5655 filter = rte_zmalloc("ixgbe_5tuple_filter", 5656 sizeof(struct ixgbe_5tuple_filter), 0); 5657 if (filter == NULL) 5658 return -ENOMEM; 5659 (void)rte_memcpy(&filter->filter_info, 5660 &filter_5tuple, 5661 sizeof(struct ixgbe_5tuple_filter_info)); 5662 filter->queue = ntuple_filter->queue; 5663 ret = ixgbe_add_5tuple_filter(dev, filter); 5664 if (ret < 0) { 5665 rte_free(filter); 5666 return ret; 5667 } 5668 } else 5669 ixgbe_remove_5tuple_filter(dev, filter); 5670 5671 return 0; 5672 } 5673 5674 /* 5675 * get a ntuple filter 5676 * 5677 * @param 5678 * dev: Pointer to struct rte_eth_dev. 5679 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 5680 * 5681 * @return 5682 * - On success, zero. 5683 * - On failure, a negative value. 5684 */ 5685 static int 5686 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, 5687 struct rte_eth_ntuple_filter *ntuple_filter) 5688 { 5689 struct ixgbe_filter_info *filter_info = 5690 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5691 struct ixgbe_5tuple_filter_info filter_5tuple; 5692 struct ixgbe_5tuple_filter *filter; 5693 int ret; 5694 5695 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { 5696 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 5697 return -EINVAL; 5698 } 5699 5700 memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); 5701 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); 5702 if (ret < 0) 5703 return ret; 5704 5705 filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, 5706 &filter_5tuple); 5707 if (filter == NULL) { 5708 PMD_DRV_LOG(ERR, "filter doesn't exist."); 5709 return -ENOENT; 5710 } 5711 ntuple_filter->queue = filter->queue; 5712 return 0; 5713 } 5714 5715 /* 5716 * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter. 5717 * @dev: pointer to rte_eth_dev structure 5718 * @filter_op:operation will be taken. 5719 * @arg: a pointer to specific structure corresponding to the filter_op 5720 * 5721 * @return 5722 * - On success, zero. 5723 * - On failure, a negative value. 5724 */ 5725 static int 5726 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, 5727 enum rte_filter_op filter_op, 5728 void *arg) 5729 { 5730 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5731 int ret; 5732 5733 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); 5734 5735 if (filter_op == RTE_ETH_FILTER_NOP) 5736 return 0; 5737 5738 if (arg == NULL) { 5739 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 5740 filter_op); 5741 return -EINVAL; 5742 } 5743 5744 switch (filter_op) { 5745 case RTE_ETH_FILTER_ADD: 5746 ret = ixgbe_add_del_ntuple_filter(dev, 5747 (struct rte_eth_ntuple_filter *)arg, 5748 TRUE); 5749 break; 5750 case RTE_ETH_FILTER_DELETE: 5751 ret = ixgbe_add_del_ntuple_filter(dev, 5752 (struct rte_eth_ntuple_filter *)arg, 5753 FALSE); 5754 break; 5755 case RTE_ETH_FILTER_GET: 5756 ret = ixgbe_get_ntuple_filter(dev, 5757 (struct rte_eth_ntuple_filter *)arg); 5758 break; 5759 default: 5760 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 5761 ret = -EINVAL; 5762 break; 5763 } 5764 return ret; 5765 } 5766 5767 static inline int 5768 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info, 5769 uint16_t ethertype) 5770 { 5771 int i; 5772 5773 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 5774 if (filter_info->ethertype_filters[i] == ethertype && 5775 (filter_info->ethertype_mask & (1 << i))) 5776 return i; 5777 } 5778 return -1; 5779 } 5780 5781 static inline int 5782 ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info, 5783 uint16_t ethertype) 5784 { 5785 int i; 5786 5787 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 5788 if (!(filter_info->ethertype_mask & (1 << i))) { 5789 filter_info->ethertype_mask |= 1 << i; 5790 filter_info->ethertype_filters[i] = ethertype; 5791 return i; 5792 } 5793 } 5794 return -1; 5795 } 5796 5797 static inline int 5798 ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info, 5799 uint8_t idx) 5800 { 5801 if (idx >= IXGBE_MAX_ETQF_FILTERS) 5802 return -1; 5803 filter_info->ethertype_mask &= ~(1 << idx); 5804 filter_info->ethertype_filters[idx] = 0; 5805 return idx; 5806 } 5807 5808 static int 5809 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, 5810 struct rte_eth_ethertype_filter *filter, 5811 bool add) 5812 { 5813 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5814 struct ixgbe_filter_info *filter_info = 5815 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5816 uint32_t etqf = 0; 5817 uint32_t etqs = 0; 5818 int ret; 5819 5820 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 5821 return -EINVAL; 5822 5823 if (filter->ether_type == ETHER_TYPE_IPv4 || 5824 filter->ether_type == ETHER_TYPE_IPv6) { 5825 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 5826 " ethertype filter.", filter->ether_type); 5827 return -EINVAL; 5828 } 5829 5830 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 5831 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 5832 return -EINVAL; 5833 } 5834 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 5835 PMD_DRV_LOG(ERR, "drop option is unsupported."); 5836 return -EINVAL; 5837 } 5838 5839 ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); 5840 if (ret >= 0 && add) { 5841 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 5842 filter->ether_type); 5843 return -EEXIST; 5844 } 5845 if (ret < 0 && !add) { 5846 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 5847 filter->ether_type); 5848 return -ENOENT; 5849 } 5850 5851 if (add) { 5852 ret = ixgbe_ethertype_filter_insert(filter_info, 5853 filter->ether_type); 5854 if (ret < 0) { 5855 PMD_DRV_LOG(ERR, "ethertype filters are full."); 5856 return -ENOSYS; 5857 } 5858 etqf = IXGBE_ETQF_FILTER_EN; 5859 etqf |= (uint32_t)filter->ether_type; 5860 etqs |= (uint32_t)((filter->queue << 5861 IXGBE_ETQS_RX_QUEUE_SHIFT) & 5862 IXGBE_ETQS_RX_QUEUE); 5863 etqs |= IXGBE_ETQS_QUEUE_EN; 5864 } else { 5865 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret); 5866 if (ret < 0) 5867 return -ENOSYS; 5868 } 5869 IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); 5870 IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); 5871 IXGBE_WRITE_FLUSH(hw); 5872 5873 return 0; 5874 } 5875 5876 static int 5877 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, 5878 struct rte_eth_ethertype_filter *filter) 5879 { 5880 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5881 struct ixgbe_filter_info *filter_info = 5882 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5883 uint32_t etqf, etqs; 5884 int ret; 5885 5886 ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); 5887 if (ret < 0) { 5888 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 5889 filter->ether_type); 5890 return -ENOENT; 5891 } 5892 5893 etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret)); 5894 if (etqf & IXGBE_ETQF_FILTER_EN) { 5895 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret)); 5896 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE; 5897 filter->flags = 0; 5898 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> 5899 IXGBE_ETQS_RX_QUEUE_SHIFT; 5900 return 0; 5901 } 5902 return -ENOENT; 5903 } 5904 5905 /* 5906 * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter. 5907 * @dev: pointer to rte_eth_dev structure 5908 * @filter_op:operation will be taken. 5909 * @arg: a pointer to specific structure corresponding to the filter_op 5910 */ 5911 static int 5912 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, 5913 enum rte_filter_op filter_op, 5914 void *arg) 5915 { 5916 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5917 int ret; 5918 5919 MAC_TYPE_FILTER_SUP(hw->mac.type); 5920 5921 if (filter_op == RTE_ETH_FILTER_NOP) 5922 return 0; 5923 5924 if (arg == NULL) { 5925 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 5926 filter_op); 5927 return -EINVAL; 5928 } 5929 5930 switch (filter_op) { 5931 case RTE_ETH_FILTER_ADD: 5932 ret = ixgbe_add_del_ethertype_filter(dev, 5933 (struct rte_eth_ethertype_filter *)arg, 5934 TRUE); 5935 break; 5936 case RTE_ETH_FILTER_DELETE: 5937 ret = ixgbe_add_del_ethertype_filter(dev, 5938 (struct rte_eth_ethertype_filter *)arg, 5939 FALSE); 5940 break; 5941 case RTE_ETH_FILTER_GET: 5942 ret = ixgbe_get_ethertype_filter(dev, 5943 (struct rte_eth_ethertype_filter *)arg); 5944 break; 5945 default: 5946 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 5947 ret = -EINVAL; 5948 break; 5949 } 5950 return ret; 5951 } 5952 5953 static int 5954 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, 5955 enum rte_filter_type filter_type, 5956 enum rte_filter_op filter_op, 5957 void *arg) 5958 { 5959 int ret = -EINVAL; 5960 5961 switch (filter_type) { 5962 case RTE_ETH_FILTER_NTUPLE: 5963 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg); 5964 break; 5965 case RTE_ETH_FILTER_ETHERTYPE: 5966 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg); 5967 break; 5968 case RTE_ETH_FILTER_SYN: 5969 ret = ixgbe_syn_filter_handle(dev, filter_op, arg); 5970 break; 5971 case RTE_ETH_FILTER_FDIR: 5972 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg); 5973 break; 5974 case RTE_ETH_FILTER_L2_TUNNEL: 5975 ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg); 5976 break; 5977 default: 5978 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", 5979 filter_type); 5980 break; 5981 } 5982 5983 return ret; 5984 } 5985 5986 static u8 * 5987 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw, 5988 u8 **mc_addr_ptr, u32 *vmdq) 5989 { 5990 u8 *mc_addr; 5991 5992 *vmdq = 0; 5993 mc_addr = *mc_addr_ptr; 5994 *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr)); 5995 return mc_addr; 5996 } 5997 5998 static int 5999 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 6000 struct ether_addr *mc_addr_set, 6001 uint32_t nb_mc_addr) 6002 { 6003 struct ixgbe_hw *hw; 6004 u8 *mc_addr_list; 6005 6006 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6007 mc_addr_list = (u8 *)mc_addr_set; 6008 return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, 6009 ixgbe_dev_addr_list_itr, TRUE); 6010 } 6011 6012 static uint64_t 6013 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) 6014 { 6015 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6016 uint64_t systime_cycles; 6017 6018 switch (hw->mac.type) { 6019 case ixgbe_mac_X550: 6020 case ixgbe_mac_X550EM_x: 6021 case ixgbe_mac_X550EM_a: 6022 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ 6023 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6024 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6025 * NSEC_PER_SEC; 6026 break; 6027 default: 6028 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 6029 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 6030 << 32; 6031 } 6032 6033 return systime_cycles; 6034 } 6035 6036 static uint64_t 6037 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6038 { 6039 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6040 uint64_t rx_tstamp_cycles; 6041 6042 switch (hw->mac.type) { 6043 case ixgbe_mac_X550: 6044 case ixgbe_mac_X550EM_x: 6045 case ixgbe_mac_X550EM_a: 6046 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6047 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6048 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6049 * NSEC_PER_SEC; 6050 break; 6051 default: 6052 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6053 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6054 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6055 << 32; 6056 } 6057 6058 return rx_tstamp_cycles; 6059 } 6060 6061 static uint64_t 6062 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6063 { 6064 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6065 uint64_t tx_tstamp_cycles; 6066 6067 switch (hw->mac.type) { 6068 case ixgbe_mac_X550: 6069 case ixgbe_mac_X550EM_x: 6070 case ixgbe_mac_X550EM_a: 6071 /* TXSTMPL stores ns and TXSTMPH stores seconds. */ 6072 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6073 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6074 * NSEC_PER_SEC; 6075 break; 6076 default: 6077 /* TXSTMPL stores ns and TXSTMPH stores seconds. */ 6078 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6079 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6080 << 32; 6081 } 6082 6083 return tx_tstamp_cycles; 6084 } 6085 6086 static void 6087 ixgbe_start_timecounters(struct rte_eth_dev *dev) 6088 { 6089 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6090 struct ixgbe_adapter *adapter = 6091 (struct ixgbe_adapter *)dev->data->dev_private; 6092 struct rte_eth_link link; 6093 uint32_t incval = 0; 6094 uint32_t shift = 0; 6095 6096 /* Get current link speed. */ 6097 memset(&link, 0, sizeof(link)); 6098 ixgbe_dev_link_update(dev, 1); 6099 rte_ixgbe_dev_atomic_read_link_status(dev, &link); 6100 6101 switch (link.link_speed) { 6102 case ETH_SPEED_NUM_100M: 6103 incval = IXGBE_INCVAL_100; 6104 shift = IXGBE_INCVAL_SHIFT_100; 6105 break; 6106 case ETH_SPEED_NUM_1G: 6107 incval = IXGBE_INCVAL_1GB; 6108 shift = IXGBE_INCVAL_SHIFT_1GB; 6109 break; 6110 case ETH_SPEED_NUM_10G: 6111 default: 6112 incval = IXGBE_INCVAL_10GB; 6113 shift = IXGBE_INCVAL_SHIFT_10GB; 6114 break; 6115 } 6116 6117 switch (hw->mac.type) { 6118 case ixgbe_mac_X550: 6119 case ixgbe_mac_X550EM_x: 6120 case ixgbe_mac_X550EM_a: 6121 /* Independent of link speed. */ 6122 incval = 1; 6123 /* Cycles read will be interpreted as ns. */ 6124 shift = 0; 6125 /* Fall-through */ 6126 case ixgbe_mac_X540: 6127 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); 6128 break; 6129 case ixgbe_mac_82599EB: 6130 incval >>= IXGBE_INCVAL_SHIFT_82599; 6131 shift -= IXGBE_INCVAL_SHIFT_82599; 6132 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 6133 (1 << IXGBE_INCPER_SHIFT_82599) | incval); 6134 break; 6135 default: 6136 /* Not supported. */ 6137 return; 6138 } 6139 6140 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 6141 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6142 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6143 6144 adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6145 adapter->systime_tc.cc_shift = shift; 6146 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 6147 6148 adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6149 adapter->rx_tstamp_tc.cc_shift = shift; 6150 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6151 6152 adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6153 adapter->tx_tstamp_tc.cc_shift = shift; 6154 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6155 } 6156 6157 static int 6158 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 6159 { 6160 struct ixgbe_adapter *adapter = 6161 (struct ixgbe_adapter *)dev->data->dev_private; 6162 6163 adapter->systime_tc.nsec += delta; 6164 adapter->rx_tstamp_tc.nsec += delta; 6165 adapter->tx_tstamp_tc.nsec += delta; 6166 6167 return 0; 6168 } 6169 6170 static int 6171 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 6172 { 6173 uint64_t ns; 6174 struct ixgbe_adapter *adapter = 6175 (struct ixgbe_adapter *)dev->data->dev_private; 6176 6177 ns = rte_timespec_to_ns(ts); 6178 /* Set the timecounters to a new value. */ 6179 adapter->systime_tc.nsec = ns; 6180 adapter->rx_tstamp_tc.nsec = ns; 6181 adapter->tx_tstamp_tc.nsec = ns; 6182 6183 return 0; 6184 } 6185 6186 static int 6187 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 6188 { 6189 uint64_t ns, systime_cycles; 6190 struct ixgbe_adapter *adapter = 6191 (struct ixgbe_adapter *)dev->data->dev_private; 6192 6193 systime_cycles = ixgbe_read_systime_cyclecounter(dev); 6194 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 6195 *ts = rte_ns_to_timespec(ns); 6196 6197 return 0; 6198 } 6199 6200 static int 6201 ixgbe_timesync_enable(struct rte_eth_dev *dev) 6202 { 6203 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6204 uint32_t tsync_ctl; 6205 uint32_t tsauxc; 6206 6207 /* Stop the timesync system time. */ 6208 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0); 6209 /* Reset the timesync system time value. */ 6210 IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0); 6211 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0); 6212 6213 /* Enable system time for platforms where it isn't on by default. */ 6214 tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); 6215 tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME; 6216 IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); 6217 6218 ixgbe_start_timecounters(dev); 6219 6220 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 6221 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 6222 (ETHER_TYPE_1588 | 6223 IXGBE_ETQF_FILTER_EN | 6224 IXGBE_ETQF_1588)); 6225 6226 /* Enable timestamping of received PTP packets. */ 6227 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6228 tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED; 6229 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); 6230 6231 /* Enable timestamping of transmitted PTP packets. */ 6232 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6233 tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED; 6234 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); 6235 6236 IXGBE_WRITE_FLUSH(hw); 6237 6238 return 0; 6239 } 6240 6241 static int 6242 ixgbe_timesync_disable(struct rte_eth_dev *dev) 6243 { 6244 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6245 uint32_t tsync_ctl; 6246 6247 /* Disable timestamping of transmitted PTP packets. */ 6248 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6249 tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED; 6250 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); 6251 6252 /* Disable timestamping of received PTP packets. */ 6253 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6254 tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED; 6255 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); 6256 6257 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 6258 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); 6259 6260 /* Stop incrementating the System Time registers. */ 6261 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); 6262 6263 return 0; 6264 } 6265 6266 static int 6267 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 6268 struct timespec *timestamp, 6269 uint32_t flags __rte_unused) 6270 { 6271 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6272 struct ixgbe_adapter *adapter = 6273 (struct ixgbe_adapter *)dev->data->dev_private; 6274 uint32_t tsync_rxctl; 6275 uint64_t rx_tstamp_cycles; 6276 uint64_t ns; 6277 6278 tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6279 if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0) 6280 return -EINVAL; 6281 6282 rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev); 6283 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 6284 *timestamp = rte_ns_to_timespec(ns); 6285 6286 return 0; 6287 } 6288 6289 static int 6290 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 6291 struct timespec *timestamp) 6292 { 6293 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6294 struct ixgbe_adapter *adapter = 6295 (struct ixgbe_adapter *)dev->data->dev_private; 6296 uint32_t tsync_txctl; 6297 uint64_t tx_tstamp_cycles; 6298 uint64_t ns; 6299 6300 tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6301 if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0) 6302 return -EINVAL; 6303 6304 tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev); 6305 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 6306 *timestamp = rte_ns_to_timespec(ns); 6307 6308 return 0; 6309 } 6310 6311 static int 6312 ixgbe_get_reg_length(struct rte_eth_dev *dev) 6313 { 6314 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6315 int count = 0; 6316 int g_ind = 0; 6317 const struct reg_info *reg_group; 6318 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 6319 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 6320 6321 while ((reg_group = reg_set[g_ind++])) 6322 count += ixgbe_regs_group_count(reg_group); 6323 6324 return count; 6325 } 6326 6327 static int 6328 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) 6329 { 6330 int count = 0; 6331 int g_ind = 0; 6332 const struct reg_info *reg_group; 6333 6334 while ((reg_group = ixgbevf_regs[g_ind++])) 6335 count += ixgbe_regs_group_count(reg_group); 6336 6337 return count; 6338 } 6339 6340 static int 6341 ixgbe_get_regs(struct rte_eth_dev *dev, 6342 struct rte_dev_reg_info *regs) 6343 { 6344 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6345 uint32_t *data = regs->data; 6346 int g_ind = 0; 6347 int count = 0; 6348 const struct reg_info *reg_group; 6349 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 6350 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 6351 6352 if (data == NULL) { 6353 regs->length = ixgbe_get_reg_length(dev); 6354 regs->width = sizeof(uint32_t); 6355 return 0; 6356 } 6357 6358 /* Support only full register dump */ 6359 if ((regs->length == 0) || 6360 (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { 6361 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 6362 hw->device_id; 6363 while ((reg_group = reg_set[g_ind++])) 6364 count += ixgbe_read_regs_group(dev, &data[count], 6365 reg_group); 6366 return 0; 6367 } 6368 6369 return -ENOTSUP; 6370 } 6371 6372 static int 6373 ixgbevf_get_regs(struct rte_eth_dev *dev, 6374 struct rte_dev_reg_info *regs) 6375 { 6376 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6377 uint32_t *data = regs->data; 6378 int g_ind = 0; 6379 int count = 0; 6380 const struct reg_info *reg_group; 6381 6382 if (data == NULL) { 6383 regs->length = ixgbevf_get_reg_length(dev); 6384 regs->width = sizeof(uint32_t); 6385 return 0; 6386 } 6387 6388 /* Support only full register dump */ 6389 if ((regs->length == 0) || 6390 (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { 6391 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 6392 hw->device_id; 6393 while ((reg_group = ixgbevf_regs[g_ind++])) 6394 count += ixgbe_read_regs_group(dev, &data[count], 6395 reg_group); 6396 return 0; 6397 } 6398 6399 return -ENOTSUP; 6400 } 6401 6402 static int 6403 ixgbe_get_eeprom_length(struct rte_eth_dev *dev) 6404 { 6405 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6406 6407 /* Return unit is byte count */ 6408 return hw->eeprom.word_size * 2; 6409 } 6410 6411 static int 6412 ixgbe_get_eeprom(struct rte_eth_dev *dev, 6413 struct rte_dev_eeprom_info *in_eeprom) 6414 { 6415 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6416 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 6417 uint16_t *data = in_eeprom->data; 6418 int first, length; 6419 6420 first = in_eeprom->offset >> 1; 6421 length = in_eeprom->length >> 1; 6422 if ((first > hw->eeprom.word_size) || 6423 ((first + length) > hw->eeprom.word_size)) 6424 return -EINVAL; 6425 6426 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 6427 6428 return eeprom->ops.read_buffer(hw, first, length, data); 6429 } 6430 6431 static int 6432 ixgbe_set_eeprom(struct rte_eth_dev *dev, 6433 struct rte_dev_eeprom_info *in_eeprom) 6434 { 6435 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6436 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 6437 uint16_t *data = in_eeprom->data; 6438 int first, length; 6439 6440 first = in_eeprom->offset >> 1; 6441 length = in_eeprom->length >> 1; 6442 if ((first > hw->eeprom.word_size) || 6443 ((first + length) > hw->eeprom.word_size)) 6444 return -EINVAL; 6445 6446 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 6447 6448 return eeprom->ops.write_buffer(hw, first, length, data); 6449 } 6450 6451 uint16_t 6452 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { 6453 switch (mac_type) { 6454 case ixgbe_mac_X550: 6455 case ixgbe_mac_X550EM_x: 6456 case ixgbe_mac_X550EM_a: 6457 return ETH_RSS_RETA_SIZE_512; 6458 case ixgbe_mac_X550_vf: 6459 case ixgbe_mac_X550EM_x_vf: 6460 case ixgbe_mac_X550EM_a_vf: 6461 return ETH_RSS_RETA_SIZE_64; 6462 default: 6463 return ETH_RSS_RETA_SIZE_128; 6464 } 6465 } 6466 6467 uint32_t 6468 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { 6469 switch (mac_type) { 6470 case ixgbe_mac_X550: 6471 case ixgbe_mac_X550EM_x: 6472 case ixgbe_mac_X550EM_a: 6473 if (reta_idx < ETH_RSS_RETA_SIZE_128) 6474 return IXGBE_RETA(reta_idx >> 2); 6475 else 6476 return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2); 6477 case ixgbe_mac_X550_vf: 6478 case ixgbe_mac_X550EM_x_vf: 6479 case ixgbe_mac_X550EM_a_vf: 6480 return IXGBE_VFRETA(reta_idx >> 2); 6481 default: 6482 return IXGBE_RETA(reta_idx >> 2); 6483 } 6484 } 6485 6486 uint32_t 6487 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { 6488 switch (mac_type) { 6489 case ixgbe_mac_X550_vf: 6490 case ixgbe_mac_X550EM_x_vf: 6491 case ixgbe_mac_X550EM_a_vf: 6492 return IXGBE_VFMRQC; 6493 default: 6494 return IXGBE_MRQC; 6495 } 6496 } 6497 6498 uint32_t 6499 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { 6500 switch (mac_type) { 6501 case ixgbe_mac_X550_vf: 6502 case ixgbe_mac_X550EM_x_vf: 6503 case ixgbe_mac_X550EM_a_vf: 6504 return IXGBE_VFRSSRK(i); 6505 default: 6506 return IXGBE_RSSRK(i); 6507 } 6508 } 6509 6510 bool 6511 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { 6512 switch (mac_type) { 6513 case ixgbe_mac_82599_vf: 6514 case ixgbe_mac_X540_vf: 6515 return 0; 6516 default: 6517 return 1; 6518 } 6519 } 6520 6521 static int 6522 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 6523 struct rte_eth_dcb_info *dcb_info) 6524 { 6525 struct ixgbe_dcb_config *dcb_config = 6526 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 6527 struct ixgbe_dcb_tc_config *tc; 6528 uint8_t i, j; 6529 6530 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) 6531 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; 6532 else 6533 dcb_info->nb_tcs = 1; 6534 6535 if (dcb_config->vt_mode) { /* vt is enabled*/ 6536 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 6537 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; 6538 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) 6539 dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; 6540 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { 6541 for (j = 0; j < dcb_info->nb_tcs; j++) { 6542 dcb_info->tc_queue.tc_rxq[i][j].base = 6543 i * dcb_info->nb_tcs + j; 6544 dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1; 6545 dcb_info->tc_queue.tc_txq[i][j].base = 6546 i * dcb_info->nb_tcs + j; 6547 dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1; 6548 } 6549 } 6550 } else { /* vt is disabled*/ 6551 struct rte_eth_dcb_rx_conf *rx_conf = 6552 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 6553 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) 6554 dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; 6555 if (dcb_info->nb_tcs == ETH_4_TCS) { 6556 for (i = 0; i < dcb_info->nb_tcs; i++) { 6557 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; 6558 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 6559 } 6560 dcb_info->tc_queue.tc_txq[0][0].base = 0; 6561 dcb_info->tc_queue.tc_txq[0][1].base = 64; 6562 dcb_info->tc_queue.tc_txq[0][2].base = 96; 6563 dcb_info->tc_queue.tc_txq[0][3].base = 112; 6564 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; 6565 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 6566 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 6567 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 6568 } else if (dcb_info->nb_tcs == ETH_8_TCS) { 6569 for (i = 0; i < dcb_info->nb_tcs; i++) { 6570 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; 6571 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 6572 } 6573 dcb_info->tc_queue.tc_txq[0][0].base = 0; 6574 dcb_info->tc_queue.tc_txq[0][1].base = 32; 6575 dcb_info->tc_queue.tc_txq[0][2].base = 64; 6576 dcb_info->tc_queue.tc_txq[0][3].base = 80; 6577 dcb_info->tc_queue.tc_txq[0][4].base = 96; 6578 dcb_info->tc_queue.tc_txq[0][5].base = 104; 6579 dcb_info->tc_queue.tc_txq[0][6].base = 112; 6580 dcb_info->tc_queue.tc_txq[0][7].base = 120; 6581 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; 6582 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 6583 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 6584 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 6585 dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; 6586 dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; 6587 dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; 6588 dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; 6589 } 6590 } 6591 for (i = 0; i < dcb_info->nb_tcs; i++) { 6592 tc = &dcb_config->tc_config[i]; 6593 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent; 6594 } 6595 return 0; 6596 } 6597 6598 /* Update e-tag ether type */ 6599 static int 6600 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, 6601 uint16_t ether_type) 6602 { 6603 uint32_t etag_etype; 6604 6605 if (hw->mac.type != ixgbe_mac_X550 && 6606 hw->mac.type != ixgbe_mac_X550EM_x && 6607 hw->mac.type != ixgbe_mac_X550EM_a) { 6608 return -ENOTSUP; 6609 } 6610 6611 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 6612 etag_etype &= ~IXGBE_ETAG_ETYPE_MASK; 6613 etag_etype |= ether_type; 6614 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 6615 IXGBE_WRITE_FLUSH(hw); 6616 6617 return 0; 6618 } 6619 6620 /* Config l2 tunnel ether type */ 6621 static int 6622 ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev, 6623 struct rte_eth_l2_tunnel_conf *l2_tunnel) 6624 { 6625 int ret = 0; 6626 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6627 6628 if (l2_tunnel == NULL) 6629 return -EINVAL; 6630 6631 switch (l2_tunnel->l2_tunnel_type) { 6632 case RTE_L2_TUNNEL_TYPE_E_TAG: 6633 ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type); 6634 break; 6635 default: 6636 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 6637 ret = -EINVAL; 6638 break; 6639 } 6640 6641 return ret; 6642 } 6643 6644 /* Enable e-tag tunnel */ 6645 static int 6646 ixgbe_e_tag_enable(struct ixgbe_hw *hw) 6647 { 6648 uint32_t etag_etype; 6649 6650 if (hw->mac.type != ixgbe_mac_X550 && 6651 hw->mac.type != ixgbe_mac_X550EM_x && 6652 hw->mac.type != ixgbe_mac_X550EM_a) { 6653 return -ENOTSUP; 6654 } 6655 6656 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 6657 etag_etype |= IXGBE_ETAG_ETYPE_VALID; 6658 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 6659 IXGBE_WRITE_FLUSH(hw); 6660 6661 return 0; 6662 } 6663 6664 /* Enable l2 tunnel */ 6665 static int 6666 ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev, 6667 enum rte_eth_tunnel_type l2_tunnel_type) 6668 { 6669 int ret = 0; 6670 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6671 6672 switch (l2_tunnel_type) { 6673 case RTE_L2_TUNNEL_TYPE_E_TAG: 6674 ret = ixgbe_e_tag_enable(hw); 6675 break; 6676 default: 6677 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 6678 ret = -EINVAL; 6679 break; 6680 } 6681 6682 return ret; 6683 } 6684 6685 /* Disable e-tag tunnel */ 6686 static int 6687 ixgbe_e_tag_disable(struct ixgbe_hw *hw) 6688 { 6689 uint32_t etag_etype; 6690 6691 if (hw->mac.type != ixgbe_mac_X550 && 6692 hw->mac.type != ixgbe_mac_X550EM_x && 6693 hw->mac.type != ixgbe_mac_X550EM_a) { 6694 return -ENOTSUP; 6695 } 6696 6697 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 6698 etag_etype &= ~IXGBE_ETAG_ETYPE_VALID; 6699 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 6700 IXGBE_WRITE_FLUSH(hw); 6701 6702 return 0; 6703 } 6704 6705 /* Disable l2 tunnel */ 6706 static int 6707 ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev, 6708 enum rte_eth_tunnel_type l2_tunnel_type) 6709 { 6710 int ret = 0; 6711 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6712 6713 switch (l2_tunnel_type) { 6714 case RTE_L2_TUNNEL_TYPE_E_TAG: 6715 ret = ixgbe_e_tag_disable(hw); 6716 break; 6717 default: 6718 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 6719 ret = -EINVAL; 6720 break; 6721 } 6722 6723 return ret; 6724 } 6725 6726 static int 6727 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, 6728 struct rte_eth_l2_tunnel_conf *l2_tunnel) 6729 { 6730 int ret = 0; 6731 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6732 uint32_t i, rar_entries; 6733 uint32_t rar_low, rar_high; 6734 6735 if (hw->mac.type != ixgbe_mac_X550 && 6736 hw->mac.type != ixgbe_mac_X550EM_x && 6737 hw->mac.type != ixgbe_mac_X550EM_a) { 6738 return -ENOTSUP; 6739 } 6740 6741 rar_entries = ixgbe_get_num_rx_addrs(hw); 6742 6743 for (i = 1; i < rar_entries; i++) { 6744 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 6745 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 6746 if ((rar_high & IXGBE_RAH_AV) && 6747 (rar_high & IXGBE_RAH_ADTYPE) && 6748 ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) == 6749 l2_tunnel->tunnel_id)) { 6750 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 6751 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 6752 6753 ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL); 6754 6755 return ret; 6756 } 6757 } 6758 6759 return ret; 6760 } 6761 6762 static int 6763 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, 6764 struct rte_eth_l2_tunnel_conf *l2_tunnel) 6765 { 6766 int ret = 0; 6767 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6768 uint32_t i, rar_entries; 6769 uint32_t rar_low, rar_high; 6770 6771 if (hw->mac.type != ixgbe_mac_X550 && 6772 hw->mac.type != ixgbe_mac_X550EM_x && 6773 hw->mac.type != ixgbe_mac_X550EM_a) { 6774 return -ENOTSUP; 6775 } 6776 6777 /* One entry for one tunnel. Try to remove potential existing entry. */ 6778 ixgbe_e_tag_filter_del(dev, l2_tunnel); 6779 6780 rar_entries = ixgbe_get_num_rx_addrs(hw); 6781 6782 for (i = 1; i < rar_entries; i++) { 6783 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 6784 if (rar_high & IXGBE_RAH_AV) { 6785 continue; 6786 } else { 6787 ixgbe_set_vmdq(hw, i, l2_tunnel->pool); 6788 rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; 6789 rar_low = l2_tunnel->tunnel_id; 6790 6791 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); 6792 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); 6793 6794 return ret; 6795 } 6796 } 6797 6798 PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." 6799 " Please remove a rule before adding a new one."); 6800 return -EINVAL; 6801 } 6802 6803 /* Add l2 tunnel filter */ 6804 static int 6805 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, 6806 struct rte_eth_l2_tunnel_conf *l2_tunnel) 6807 { 6808 int ret = 0; 6809 6810 switch (l2_tunnel->l2_tunnel_type) { 6811 case RTE_L2_TUNNEL_TYPE_E_TAG: 6812 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); 6813 break; 6814 default: 6815 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 6816 ret = -EINVAL; 6817 break; 6818 } 6819 6820 return ret; 6821 } 6822 6823 /* Delete l2 tunnel filter */ 6824 static int 6825 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, 6826 struct rte_eth_l2_tunnel_conf *l2_tunnel) 6827 { 6828 int ret = 0; 6829 6830 switch (l2_tunnel->l2_tunnel_type) { 6831 case RTE_L2_TUNNEL_TYPE_E_TAG: 6832 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel); 6833 break; 6834 default: 6835 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 6836 ret = -EINVAL; 6837 break; 6838 } 6839 6840 return ret; 6841 } 6842 6843 /** 6844 * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter. 6845 * @dev: pointer to rte_eth_dev structure 6846 * @filter_op:operation will be taken. 6847 * @arg: a pointer to specific structure corresponding to the filter_op 6848 */ 6849 static int 6850 ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, 6851 enum rte_filter_op filter_op, 6852 void *arg) 6853 { 6854 int ret = 0; 6855 6856 if (filter_op == RTE_ETH_FILTER_NOP) 6857 return 0; 6858 6859 if (arg == NULL) { 6860 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 6861 filter_op); 6862 return -EINVAL; 6863 } 6864 6865 switch (filter_op) { 6866 case RTE_ETH_FILTER_ADD: 6867 ret = ixgbe_dev_l2_tunnel_filter_add 6868 (dev, 6869 (struct rte_eth_l2_tunnel_conf *)arg); 6870 break; 6871 case RTE_ETH_FILTER_DELETE: 6872 ret = ixgbe_dev_l2_tunnel_filter_del 6873 (dev, 6874 (struct rte_eth_l2_tunnel_conf *)arg); 6875 break; 6876 default: 6877 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 6878 ret = -EINVAL; 6879 break; 6880 } 6881 return ret; 6882 } 6883 6884 static int 6885 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) 6886 { 6887 int ret = 0; 6888 uint32_t ctrl; 6889 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6890 6891 if (hw->mac.type != ixgbe_mac_X550 && 6892 hw->mac.type != ixgbe_mac_X550EM_x && 6893 hw->mac.type != ixgbe_mac_X550EM_a) { 6894 return -ENOTSUP; 6895 } 6896 6897 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 6898 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 6899 if (en) 6900 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG; 6901 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 6902 6903 return ret; 6904 } 6905 6906 /* Enable l2 tunnel forwarding */ 6907 static int 6908 ixgbe_dev_l2_tunnel_forwarding_enable 6909 (struct rte_eth_dev *dev, 6910 enum rte_eth_tunnel_type l2_tunnel_type) 6911 { 6912 int ret = 0; 6913 6914 switch (l2_tunnel_type) { 6915 case RTE_L2_TUNNEL_TYPE_E_TAG: 6916 ret = ixgbe_e_tag_forwarding_en_dis(dev, 1); 6917 break; 6918 default: 6919 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 6920 ret = -EINVAL; 6921 break; 6922 } 6923 6924 return ret; 6925 } 6926 6927 /* Disable l2 tunnel forwarding */ 6928 static int 6929 ixgbe_dev_l2_tunnel_forwarding_disable 6930 (struct rte_eth_dev *dev, 6931 enum rte_eth_tunnel_type l2_tunnel_type) 6932 { 6933 int ret = 0; 6934 6935 switch (l2_tunnel_type) { 6936 case RTE_L2_TUNNEL_TYPE_E_TAG: 6937 ret = ixgbe_e_tag_forwarding_en_dis(dev, 0); 6938 break; 6939 default: 6940 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 6941 ret = -EINVAL; 6942 break; 6943 } 6944 6945 return ret; 6946 } 6947 6948 static int 6949 ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev, 6950 struct rte_eth_l2_tunnel_conf *l2_tunnel, 6951 bool en) 6952 { 6953 int ret = 0; 6954 uint32_t vmtir, vmvir; 6955 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6956 6957 if (l2_tunnel->vf_id >= dev->pci_dev->max_vfs) { 6958 PMD_DRV_LOG(ERR, 6959 "VF id %u should be less than %u", 6960 l2_tunnel->vf_id, 6961 dev->pci_dev->max_vfs); 6962 return -EINVAL; 6963 } 6964 6965 if (hw->mac.type != ixgbe_mac_X550 && 6966 hw->mac.type != ixgbe_mac_X550EM_x && 6967 hw->mac.type != ixgbe_mac_X550EM_a) { 6968 return -ENOTSUP; 6969 } 6970 6971 if (en) 6972 vmtir = l2_tunnel->tunnel_id; 6973 else 6974 vmtir = 0; 6975 6976 IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir); 6977 6978 vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id)); 6979 vmvir &= ~IXGBE_VMVIR_TAGA_MASK; 6980 if (en) 6981 vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT; 6982 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir); 6983 6984 return ret; 6985 } 6986 6987 /* Enable l2 tunnel tag insertion */ 6988 static int 6989 ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev, 6990 struct rte_eth_l2_tunnel_conf *l2_tunnel) 6991 { 6992 int ret = 0; 6993 6994 switch (l2_tunnel->l2_tunnel_type) { 6995 case RTE_L2_TUNNEL_TYPE_E_TAG: 6996 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1); 6997 break; 6998 default: 6999 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7000 ret = -EINVAL; 7001 break; 7002 } 7003 7004 return ret; 7005 } 7006 7007 /* Disable l2 tunnel tag insertion */ 7008 static int 7009 ixgbe_dev_l2_tunnel_insertion_disable 7010 (struct rte_eth_dev *dev, 7011 struct rte_eth_l2_tunnel_conf *l2_tunnel) 7012 { 7013 int ret = 0; 7014 7015 switch (l2_tunnel->l2_tunnel_type) { 7016 case RTE_L2_TUNNEL_TYPE_E_TAG: 7017 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0); 7018 break; 7019 default: 7020 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7021 ret = -EINVAL; 7022 break; 7023 } 7024 7025 return ret; 7026 } 7027 7028 static int 7029 ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev, 7030 bool en) 7031 { 7032 int ret = 0; 7033 uint32_t qde; 7034 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7035 7036 if (hw->mac.type != ixgbe_mac_X550 && 7037 hw->mac.type != ixgbe_mac_X550EM_x && 7038 hw->mac.type != ixgbe_mac_X550EM_a) { 7039 return -ENOTSUP; 7040 } 7041 7042 qde = IXGBE_READ_REG(hw, IXGBE_QDE); 7043 if (en) 7044 qde |= IXGBE_QDE_STRIP_TAG; 7045 else 7046 qde &= ~IXGBE_QDE_STRIP_TAG; 7047 qde &= ~IXGBE_QDE_READ; 7048 qde |= IXGBE_QDE_WRITE; 7049 IXGBE_WRITE_REG(hw, IXGBE_QDE, qde); 7050 7051 return ret; 7052 } 7053 7054 /* Enable l2 tunnel tag stripping */ 7055 static int 7056 ixgbe_dev_l2_tunnel_stripping_enable 7057 (struct rte_eth_dev *dev, 7058 enum rte_eth_tunnel_type l2_tunnel_type) 7059 { 7060 int ret = 0; 7061 7062 switch (l2_tunnel_type) { 7063 case RTE_L2_TUNNEL_TYPE_E_TAG: 7064 ret = ixgbe_e_tag_stripping_en_dis(dev, 1); 7065 break; 7066 default: 7067 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7068 ret = -EINVAL; 7069 break; 7070 } 7071 7072 return ret; 7073 } 7074 7075 /* Disable l2 tunnel tag stripping */ 7076 static int 7077 ixgbe_dev_l2_tunnel_stripping_disable 7078 (struct rte_eth_dev *dev, 7079 enum rte_eth_tunnel_type l2_tunnel_type) 7080 { 7081 int ret = 0; 7082 7083 switch (l2_tunnel_type) { 7084 case RTE_L2_TUNNEL_TYPE_E_TAG: 7085 ret = ixgbe_e_tag_stripping_en_dis(dev, 0); 7086 break; 7087 default: 7088 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7089 ret = -EINVAL; 7090 break; 7091 } 7092 7093 return ret; 7094 } 7095 7096 /* Enable/disable l2 tunnel offload functions */ 7097 static int 7098 ixgbe_dev_l2_tunnel_offload_set 7099 (struct rte_eth_dev *dev, 7100 struct rte_eth_l2_tunnel_conf *l2_tunnel, 7101 uint32_t mask, 7102 uint8_t en) 7103 { 7104 int ret = 0; 7105 7106 if (l2_tunnel == NULL) 7107 return -EINVAL; 7108 7109 ret = -EINVAL; 7110 if (mask & ETH_L2_TUNNEL_ENABLE_MASK) { 7111 if (en) 7112 ret = ixgbe_dev_l2_tunnel_enable( 7113 dev, 7114 l2_tunnel->l2_tunnel_type); 7115 else 7116 ret = ixgbe_dev_l2_tunnel_disable( 7117 dev, 7118 l2_tunnel->l2_tunnel_type); 7119 } 7120 7121 if (mask & ETH_L2_TUNNEL_INSERTION_MASK) { 7122 if (en) 7123 ret = ixgbe_dev_l2_tunnel_insertion_enable( 7124 dev, 7125 l2_tunnel); 7126 else 7127 ret = ixgbe_dev_l2_tunnel_insertion_disable( 7128 dev, 7129 l2_tunnel); 7130 } 7131 7132 if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) { 7133 if (en) 7134 ret = ixgbe_dev_l2_tunnel_stripping_enable( 7135 dev, 7136 l2_tunnel->l2_tunnel_type); 7137 else 7138 ret = ixgbe_dev_l2_tunnel_stripping_disable( 7139 dev, 7140 l2_tunnel->l2_tunnel_type); 7141 } 7142 7143 if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) { 7144 if (en) 7145 ret = ixgbe_dev_l2_tunnel_forwarding_enable( 7146 dev, 7147 l2_tunnel->l2_tunnel_type); 7148 else 7149 ret = ixgbe_dev_l2_tunnel_forwarding_disable( 7150 dev, 7151 l2_tunnel->l2_tunnel_type); 7152 } 7153 7154 return ret; 7155 } 7156 7157 static int 7158 ixgbe_update_vxlan_port(struct ixgbe_hw *hw, 7159 uint16_t port) 7160 { 7161 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port); 7162 IXGBE_WRITE_FLUSH(hw); 7163 7164 return 0; 7165 } 7166 7167 /* There's only one register for VxLAN UDP port. 7168 * So, we cannot add several ports. Will update it. 7169 */ 7170 static int 7171 ixgbe_add_vxlan_port(struct ixgbe_hw *hw, 7172 uint16_t port) 7173 { 7174 if (port == 0) { 7175 PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); 7176 return -EINVAL; 7177 } 7178 7179 return ixgbe_update_vxlan_port(hw, port); 7180 } 7181 7182 /* We cannot delete the VxLAN port. For there's a register for VxLAN 7183 * UDP port, it must have a value. 7184 * So, will reset it to the original value 0. 7185 */ 7186 static int 7187 ixgbe_del_vxlan_port(struct ixgbe_hw *hw, 7188 uint16_t port) 7189 { 7190 uint16_t cur_port; 7191 7192 cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL); 7193 7194 if (cur_port != port) { 7195 PMD_DRV_LOG(ERR, "Port %u does not exist.", port); 7196 return -EINVAL; 7197 } 7198 7199 return ixgbe_update_vxlan_port(hw, 0); 7200 } 7201 7202 /* Add UDP tunneling port */ 7203 static int 7204 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 7205 struct rte_eth_udp_tunnel *udp_tunnel) 7206 { 7207 int ret = 0; 7208 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7209 7210 if (hw->mac.type != ixgbe_mac_X550 && 7211 hw->mac.type != ixgbe_mac_X550EM_x && 7212 hw->mac.type != ixgbe_mac_X550EM_a) { 7213 return -ENOTSUP; 7214 } 7215 7216 if (udp_tunnel == NULL) 7217 return -EINVAL; 7218 7219 switch (udp_tunnel->prot_type) { 7220 case RTE_TUNNEL_TYPE_VXLAN: 7221 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port); 7222 break; 7223 7224 case RTE_TUNNEL_TYPE_GENEVE: 7225 case RTE_TUNNEL_TYPE_TEREDO: 7226 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 7227 ret = -EINVAL; 7228 break; 7229 7230 default: 7231 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7232 ret = -EINVAL; 7233 break; 7234 } 7235 7236 return ret; 7237 } 7238 7239 /* Remove UDP tunneling port */ 7240 static int 7241 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 7242 struct rte_eth_udp_tunnel *udp_tunnel) 7243 { 7244 int ret = 0; 7245 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7246 7247 if (hw->mac.type != ixgbe_mac_X550 && 7248 hw->mac.type != ixgbe_mac_X550EM_x && 7249 hw->mac.type != ixgbe_mac_X550EM_a) { 7250 return -ENOTSUP; 7251 } 7252 7253 if (udp_tunnel == NULL) 7254 return -EINVAL; 7255 7256 switch (udp_tunnel->prot_type) { 7257 case RTE_TUNNEL_TYPE_VXLAN: 7258 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port); 7259 break; 7260 case RTE_TUNNEL_TYPE_GENEVE: 7261 case RTE_TUNNEL_TYPE_TEREDO: 7262 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 7263 ret = -EINVAL; 7264 break; 7265 default: 7266 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7267 ret = -EINVAL; 7268 break; 7269 } 7270 7271 return ret; 7272 } 7273 7274 static void 7275 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) 7276 { 7277 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7278 7279 hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI); 7280 } 7281 7282 static void 7283 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) 7284 { 7285 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7286 7287 hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE); 7288 } 7289 7290 static void ixgbevf_mbx_process(struct rte_eth_dev *dev) 7291 { 7292 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7293 u32 in_msg = 0; 7294 7295 if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) 7296 return; 7297 7298 /* PF reset VF event */ 7299 if (in_msg == IXGBE_PF_CONTROL_MSG) 7300 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET); 7301 } 7302 7303 static int 7304 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) 7305 { 7306 uint32_t eicr; 7307 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7308 struct ixgbe_interrupt *intr = 7309 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 7310 ixgbevf_intr_disable(hw); 7311 7312 /* read-on-clear nic registers here */ 7313 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); 7314 intr->flags = 0; 7315 7316 /* only one misc vector supported - mailbox */ 7317 eicr &= IXGBE_VTEICR_MASK; 7318 if (eicr == IXGBE_MISC_VEC_ID) 7319 intr->flags |= IXGBE_FLAG_MAILBOX; 7320 7321 return 0; 7322 } 7323 7324 static int 7325 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) 7326 { 7327 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7328 struct ixgbe_interrupt *intr = 7329 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 7330 7331 if (intr->flags & IXGBE_FLAG_MAILBOX) { 7332 ixgbevf_mbx_process(dev); 7333 intr->flags &= ~IXGBE_FLAG_MAILBOX; 7334 } 7335 7336 ixgbevf_intr_enable(hw); 7337 7338 return 0; 7339 } 7340 7341 static void 7342 ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, 7343 void *param) 7344 { 7345 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 7346 7347 ixgbevf_dev_interrupt_get_status(dev); 7348 ixgbevf_dev_interrupt_action(dev); 7349 } 7350 7351 DRIVER_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv); 7352 DRIVER_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map); 7353 DRIVER_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv); 7354 DRIVER_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map); 7355