1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <sys/queue.h> 35 #include <stdio.h> 36 #include <errno.h> 37 #include <stdint.h> 38 #include <string.h> 39 #include <unistd.h> 40 #include <stdarg.h> 41 #include <inttypes.h> 42 #include <netinet/in.h> 43 #include <rte_byteorder.h> 44 #include <rte_common.h> 45 #include <rte_cycles.h> 46 47 #include <rte_interrupts.h> 48 #include <rte_log.h> 49 #include <rte_debug.h> 50 #include <rte_pci.h> 51 #include <rte_atomic.h> 52 #include <rte_branch_prediction.h> 53 #include <rte_memory.h> 54 #include <rte_memzone.h> 55 #include <rte_eal.h> 56 #include <rte_alarm.h> 57 #include <rte_ether.h> 58 #include <rte_ethdev.h> 59 #include <rte_atomic.h> 60 #include <rte_malloc.h> 61 #include <rte_random.h> 62 #include <rte_dev.h> 63 64 #include "ixgbe_logs.h" 65 #include "base/ixgbe_api.h" 66 #include "base/ixgbe_vf.h" 67 #include "base/ixgbe_common.h" 68 #include "ixgbe_ethdev.h" 69 #include "ixgbe_bypass.h" 70 #include "ixgbe_rxtx.h" 71 #include "base/ixgbe_type.h" 72 #include "base/ixgbe_phy.h" 73 #include "ixgbe_regs.h" 74 75 /* 76 * High threshold controlling when to start sending XOFF frames. Must be at 77 * least 8 bytes less than receive packet buffer size. This value is in units 78 * of 1024 bytes. 79 */ 80 #define IXGBE_FC_HI 0x80 81 82 /* 83 * Low threshold controlling when to start sending XON frames. This value is 84 * in units of 1024 bytes. 85 */ 86 #define IXGBE_FC_LO 0x40 87 88 /* Default minimum inter-interrupt interval for EITR configuration */ 89 #define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT 0x79E 90 91 /* Timer value included in XOFF frames. */ 92 #define IXGBE_FC_PAUSE 0x680 93 94 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */ 95 #define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */ 96 #define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. 
*/ 97 98 #define IXGBE_MMW_SIZE_DEFAULT 0x4 99 #define IXGBE_MMW_SIZE_JUMBO_FRAME 0x14 100 #define IXGBE_MAX_RING_DESC 4096 /* replicate define from rxtx */ 101 102 /* 103 * Default values for RX/TX configuration 104 */ 105 #define IXGBE_DEFAULT_RX_FREE_THRESH 32 106 #define IXGBE_DEFAULT_RX_PTHRESH 8 107 #define IXGBE_DEFAULT_RX_HTHRESH 8 108 #define IXGBE_DEFAULT_RX_WTHRESH 0 109 110 #define IXGBE_DEFAULT_TX_FREE_THRESH 32 111 #define IXGBE_DEFAULT_TX_PTHRESH 32 112 #define IXGBE_DEFAULT_TX_HTHRESH 0 113 #define IXGBE_DEFAULT_TX_WTHRESH 0 114 #define IXGBE_DEFAULT_TX_RSBIT_THRESH 32 115 116 /* Bit shift and mask */ 117 #define IXGBE_4_BIT_WIDTH (CHAR_BIT / 2) 118 #define IXGBE_4_BIT_MASK RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t) 119 #define IXGBE_8_BIT_WIDTH CHAR_BIT 120 #define IXGBE_8_BIT_MASK UINT8_MAX 121 122 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */ 123 124 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0])) 125 126 #define IXGBE_HKEY_MAX_INDEX 10 127 128 /* Additional timesync values. */ 129 #define NSEC_PER_SEC 1000000000L 130 #define IXGBE_INCVAL_10GB 0x66666666 131 #define IXGBE_INCVAL_1GB 0x40000000 132 #define IXGBE_INCVAL_100 0x50000000 133 #define IXGBE_INCVAL_SHIFT_10GB 28 134 #define IXGBE_INCVAL_SHIFT_1GB 24 135 #define IXGBE_INCVAL_SHIFT_100 21 136 #define IXGBE_INCVAL_SHIFT_82599 7 137 #define IXGBE_INCPER_SHIFT_82599 24 138 139 #define IXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL 140 141 #define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000 142 #define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000 143 #define DEFAULT_ETAG_ETYPE 0x893f 144 #define IXGBE_ETAG_ETYPE 0x00005084 145 #define IXGBE_ETAG_ETYPE_MASK 0x0000ffff 146 #define IXGBE_ETAG_ETYPE_VALID 0x80000000 147 #define IXGBE_RAH_ADTYPE 0x40000000 148 #define IXGBE_RAL_ETAG_FILTER_MASK 0x00003fff 149 #define IXGBE_VMVIR_TAGA_MASK 0x18000000 150 #define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000 151 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */ 152 #define IXGBE_QDE_STRIP_TAG 0x00000004 153 #define IXGBE_VTEICR_MASK 0x07 154 155 enum ixgbevf_xcast_modes { 156 IXGBEVF_XCAST_MODE_NONE = 0, 157 IXGBEVF_XCAST_MODE_MULTI, 158 IXGBEVF_XCAST_MODE_ALLMULTI, 159 }; 160 161 #define IXGBE_EXVET_VET_EXT_SHIFT 16 162 #define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000 163 164 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev); 165 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); 166 static int ixgbe_dev_configure(struct rte_eth_dev *dev); 167 static int ixgbe_dev_start(struct rte_eth_dev *dev); 168 static void ixgbe_dev_stop(struct rte_eth_dev *dev); 169 static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev); 170 static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev); 171 static void ixgbe_dev_close(struct rte_eth_dev *dev); 172 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); 173 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); 174 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); 175 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); 176 static int ixgbe_dev_link_update(struct rte_eth_dev *dev, 177 int wait_to_complete); 178 static void ixgbe_dev_stats_get(struct rte_eth_dev *dev, 179 struct rte_eth_stats *stats); 180 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev, 181 struct rte_eth_xstat *xstats, unsigned n); 182 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, 183 struct rte_eth_xstat *xstats, unsigned n); 184 static void 
ixgbe_dev_stats_reset(struct rte_eth_dev *dev); 185 static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev); 186 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 187 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit); 188 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 189 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit); 190 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, 191 uint16_t queue_id, 192 uint8_t stat_idx, 193 uint8_t is_rx); 194 static void ixgbe_dev_info_get(struct rte_eth_dev *dev, 195 struct rte_eth_dev_info *dev_info); 196 static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev); 197 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev, 198 struct rte_eth_dev_info *dev_info); 199 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 200 201 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev, 202 uint16_t vlan_id, int on); 203 static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 204 enum rte_vlan_type vlan_type, 205 uint16_t tpid_id); 206 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, 207 uint16_t queue, bool on); 208 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, 209 int on); 210 static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); 211 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue); 212 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue); 213 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev); 214 static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev); 215 216 static int ixgbe_dev_led_on(struct rte_eth_dev *dev); 217 static int ixgbe_dev_led_off(struct rte_eth_dev *dev); 218 static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, 219 struct rte_eth_fc_conf *fc_conf); 220 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, 221 struct rte_eth_fc_conf *fc_conf); 222 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, 223 struct rte_eth_pfc_conf *pfc_conf); 224 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 225 struct rte_eth_rss_reta_entry64 *reta_conf, 226 uint16_t reta_size); 227 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 228 struct rte_eth_rss_reta_entry64 *reta_conf, 229 uint16_t reta_size); 230 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev); 231 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev); 232 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); 233 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); 234 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev); 235 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, 236 void *param); 237 static void ixgbe_dev_interrupt_delayed_handler(void *param); 238 static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 239 uint32_t index, uint32_t pool); 240 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); 241 static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, 242 struct ether_addr *mac_addr); 243 static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config); 244 245 /* For Virtual Function support */ 246 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev); 247 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev); 248 static int ixgbevf_dev_configure(struct 
rte_eth_dev *dev); 249 static int ixgbevf_dev_start(struct rte_eth_dev *dev); 250 static void ixgbevf_dev_stop(struct rte_eth_dev *dev); 251 static void ixgbevf_dev_close(struct rte_eth_dev *dev); 252 static void ixgbevf_intr_disable(struct ixgbe_hw *hw); 253 static void ixgbevf_intr_enable(struct ixgbe_hw *hw); 254 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev, 255 struct rte_eth_stats *stats); 256 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); 257 static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, 258 uint16_t vlan_id, int on); 259 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, 260 uint16_t queue, int on); 261 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask); 262 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on); 263 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, 264 uint16_t queue_id); 265 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, 266 uint16_t queue_id); 267 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 268 uint8_t queue, uint8_t msix_vector); 269 static void ixgbevf_configure_msix(struct rte_eth_dev *dev); 270 static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev); 271 static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev); 272 273 /* For Eth VMDQ APIs support */ 274 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct 275 ether_addr * mac_addr, uint8_t on); 276 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on); 277 static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, 278 uint16_t rx_mask, uint8_t on); 279 static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on); 280 static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on); 281 static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, 282 uint64_t pool_mask, uint8_t vlan_on); 283 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev, 284 struct rte_eth_mirror_conf *mirror_conf, 285 uint8_t rule_id, uint8_t on); 286 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, 287 uint8_t rule_id); 288 static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, 289 uint16_t queue_id); 290 static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, 291 uint16_t queue_id); 292 static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 293 uint8_t queue, uint8_t msix_vector); 294 static void ixgbe_configure_msix(struct rte_eth_dev *dev); 295 296 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, 297 uint16_t queue_idx, uint16_t tx_rate); 298 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, 299 uint16_t tx_rate, uint64_t q_msk); 300 301 static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev, 302 struct ether_addr *mac_addr, 303 uint32_t index, uint32_t pool); 304 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); 305 static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, 306 struct ether_addr *mac_addr); 307 static int ixgbe_syn_filter_set(struct rte_eth_dev *dev, 308 struct rte_eth_syn_filter *filter, 309 bool add); 310 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev, 311 struct rte_eth_syn_filter *filter); 312 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev, 313 enum rte_filter_op filter_op, 314 void *arg); 315 static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 316 struct 
ixgbe_5tuple_filter *filter); 317 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 318 struct ixgbe_5tuple_filter *filter); 319 static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, 320 struct rte_eth_ntuple_filter *filter, 321 bool add); 322 static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, 323 enum rte_filter_op filter_op, 324 void *arg); 325 static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, 326 struct rte_eth_ntuple_filter *filter); 327 static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, 328 struct rte_eth_ethertype_filter *filter, 329 bool add); 330 static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, 331 enum rte_filter_op filter_op, 332 void *arg); 333 static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, 334 struct rte_eth_ethertype_filter *filter); 335 static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, 336 enum rte_filter_type filter_type, 337 enum rte_filter_op filter_op, 338 void *arg); 339 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); 340 341 static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 342 struct ether_addr *mc_addr_set, 343 uint32_t nb_mc_addr); 344 static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 345 struct rte_eth_dcb_info *dcb_info); 346 347 static int ixgbe_get_reg_length(struct rte_eth_dev *dev); 348 static int ixgbe_get_regs(struct rte_eth_dev *dev, 349 struct rte_dev_reg_info *regs); 350 static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev); 351 static int ixgbe_get_eeprom(struct rte_eth_dev *dev, 352 struct rte_dev_eeprom_info *eeprom); 353 static int ixgbe_set_eeprom(struct rte_eth_dev *dev, 354 struct rte_dev_eeprom_info *eeprom); 355 356 static int ixgbevf_get_reg_length(struct rte_eth_dev *dev); 357 static int ixgbevf_get_regs(struct rte_eth_dev *dev, 358 struct rte_dev_reg_info *regs); 359 360 static int ixgbe_timesync_enable(struct rte_eth_dev *dev); 361 static int ixgbe_timesync_disable(struct rte_eth_dev *dev); 362 static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 363 struct timespec *timestamp, 364 uint32_t flags); 365 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 366 struct timespec *timestamp); 367 static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); 368 static int ixgbe_timesync_read_time(struct rte_eth_dev *dev, 369 struct timespec *timestamp); 370 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev, 371 const struct timespec *timestamp); 372 static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle, 373 void *param); 374 375 static int ixgbe_dev_l2_tunnel_eth_type_conf 376 (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel); 377 static int ixgbe_dev_l2_tunnel_offload_set 378 (struct rte_eth_dev *dev, 379 struct rte_eth_l2_tunnel_conf *l2_tunnel, 380 uint32_t mask, 381 uint8_t en); 382 static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, 383 enum rte_filter_op filter_op, 384 void *arg); 385 386 static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 387 struct rte_eth_udp_tunnel *udp_tunnel); 388 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 389 struct rte_eth_udp_tunnel *udp_tunnel); 390 391 /* 392 * Define VF Stats MACRO for Non "cleared on read" register 393 */ 394 #define UPDATE_VF_STAT(reg, last, cur) \ 395 { \ 396 uint32_t latest = IXGBE_READ_REG(hw, reg); \ 397 cur += (latest - last) & UINT_MAX; \ 398 last = latest; \ 399 } 400 401 #define 
UPDATE_VF_STAT_36BIT(lsb, msb, last, cur) \ 402 { \ 403 u64 new_lsb = IXGBE_READ_REG(hw, lsb); \ 404 u64 new_msb = IXGBE_READ_REG(hw, msb); \ 405 u64 latest = ((new_msb << 32) | new_lsb); \ 406 cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \ 407 last = latest; \ 408 } 409 410 #define IXGBE_SET_HWSTRIP(h, q) do {\ 411 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 412 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 413 (h)->bitmap[idx] |= 1 << bit;\ 414 } while (0) 415 416 #define IXGBE_CLEAR_HWSTRIP(h, q) do {\ 417 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 418 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 419 (h)->bitmap[idx] &= ~(1 << bit);\ 420 } while (0) 421 422 #define IXGBE_GET_HWSTRIP(h, q, r) do {\ 423 uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ 424 uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ 425 (r) = (h)->bitmap[idx] >> bit & 1;\ 426 } while (0) 427 428 /* 429 * The set of PCI devices this driver supports 430 */ 431 static const struct rte_pci_id pci_id_ixgbe_map[] = { 432 433 #define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, 434 #include "rte_pci_dev_ids.h" 435 436 { .vendor_id = 0, /* sentinel */ }, 437 }; 438 439 440 /* 441 * The set of PCI devices this driver supports (for 82599 VF) 442 */ 443 static const struct rte_pci_id pci_id_ixgbevf_map[] = { 444 445 #define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, 446 #include "rte_pci_dev_ids.h" 447 { .vendor_id = 0, /* sentinel */ }, 448 449 }; 450 451 static const struct rte_eth_desc_lim rx_desc_lim = { 452 .nb_max = IXGBE_MAX_RING_DESC, 453 .nb_min = IXGBE_MIN_RING_DESC, 454 .nb_align = IXGBE_RXD_ALIGN, 455 }; 456 457 static const struct rte_eth_desc_lim tx_desc_lim = { 458 .nb_max = IXGBE_MAX_RING_DESC, 459 .nb_min = IXGBE_MIN_RING_DESC, 460 .nb_align = IXGBE_TXD_ALIGN, 461 }; 462 463 static const struct eth_dev_ops ixgbe_eth_dev_ops = { 464 .dev_configure = ixgbe_dev_configure, 465 .dev_start = ixgbe_dev_start, 466 .dev_stop = ixgbe_dev_stop, 467 .dev_set_link_up = ixgbe_dev_set_link_up, 468 .dev_set_link_down = ixgbe_dev_set_link_down, 469 .dev_close = ixgbe_dev_close, 470 .promiscuous_enable = ixgbe_dev_promiscuous_enable, 471 .promiscuous_disable = ixgbe_dev_promiscuous_disable, 472 .allmulticast_enable = ixgbe_dev_allmulticast_enable, 473 .allmulticast_disable = ixgbe_dev_allmulticast_disable, 474 .link_update = ixgbe_dev_link_update, 475 .stats_get = ixgbe_dev_stats_get, 476 .xstats_get = ixgbe_dev_xstats_get, 477 .stats_reset = ixgbe_dev_stats_reset, 478 .xstats_reset = ixgbe_dev_xstats_reset, 479 .xstats_get_names = ixgbe_dev_xstats_get_names, 480 .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set, 481 .dev_infos_get = ixgbe_dev_info_get, 482 .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, 483 .mtu_set = ixgbe_dev_mtu_set, 484 .vlan_filter_set = ixgbe_vlan_filter_set, 485 .vlan_tpid_set = ixgbe_vlan_tpid_set, 486 .vlan_offload_set = ixgbe_vlan_offload_set, 487 .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set, 488 .rx_queue_start = ixgbe_dev_rx_queue_start, 489 .rx_queue_stop = ixgbe_dev_rx_queue_stop, 490 .tx_queue_start = ixgbe_dev_tx_queue_start, 491 .tx_queue_stop = ixgbe_dev_tx_queue_stop, 492 .rx_queue_setup = ixgbe_dev_rx_queue_setup, 493 .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable, 494 .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable, 495 .rx_queue_release = ixgbe_dev_rx_queue_release, 496 .rx_queue_count = ixgbe_dev_rx_queue_count, 497 
.rx_descriptor_done = ixgbe_dev_rx_descriptor_done, 498 .tx_queue_setup = ixgbe_dev_tx_queue_setup, 499 .tx_queue_release = ixgbe_dev_tx_queue_release, 500 .dev_led_on = ixgbe_dev_led_on, 501 .dev_led_off = ixgbe_dev_led_off, 502 .flow_ctrl_get = ixgbe_flow_ctrl_get, 503 .flow_ctrl_set = ixgbe_flow_ctrl_set, 504 .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set, 505 .mac_addr_add = ixgbe_add_rar, 506 .mac_addr_remove = ixgbe_remove_rar, 507 .mac_addr_set = ixgbe_set_default_mac_addr, 508 .uc_hash_table_set = ixgbe_uc_hash_table_set, 509 .uc_all_hash_table_set = ixgbe_uc_all_hash_table_set, 510 .mirror_rule_set = ixgbe_mirror_rule_set, 511 .mirror_rule_reset = ixgbe_mirror_rule_reset, 512 .set_vf_rx_mode = ixgbe_set_pool_rx_mode, 513 .set_vf_rx = ixgbe_set_pool_rx, 514 .set_vf_tx = ixgbe_set_pool_tx, 515 .set_vf_vlan_filter = ixgbe_set_pool_vlan_filter, 516 .set_queue_rate_limit = ixgbe_set_queue_rate_limit, 517 .set_vf_rate_limit = ixgbe_set_vf_rate_limit, 518 .reta_update = ixgbe_dev_rss_reta_update, 519 .reta_query = ixgbe_dev_rss_reta_query, 520 #ifdef RTE_NIC_BYPASS 521 .bypass_init = ixgbe_bypass_init, 522 .bypass_state_set = ixgbe_bypass_state_store, 523 .bypass_state_show = ixgbe_bypass_state_show, 524 .bypass_event_set = ixgbe_bypass_event_store, 525 .bypass_event_show = ixgbe_bypass_event_show, 526 .bypass_wd_timeout_set = ixgbe_bypass_wd_timeout_store, 527 .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show, 528 .bypass_ver_show = ixgbe_bypass_ver_show, 529 .bypass_wd_reset = ixgbe_bypass_wd_reset, 530 #endif /* RTE_NIC_BYPASS */ 531 .rss_hash_update = ixgbe_dev_rss_hash_update, 532 .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, 533 .filter_ctrl = ixgbe_dev_filter_ctrl, 534 .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, 535 .rxq_info_get = ixgbe_rxq_info_get, 536 .txq_info_get = ixgbe_txq_info_get, 537 .timesync_enable = ixgbe_timesync_enable, 538 .timesync_disable = ixgbe_timesync_disable, 539 .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp, 540 .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp, 541 .get_reg = ixgbe_get_regs, 542 .get_eeprom_length = ixgbe_get_eeprom_length, 543 .get_eeprom = ixgbe_get_eeprom, 544 .set_eeprom = ixgbe_set_eeprom, 545 .get_dcb_info = ixgbe_dev_get_dcb_info, 546 .timesync_adjust_time = ixgbe_timesync_adjust_time, 547 .timesync_read_time = ixgbe_timesync_read_time, 548 .timesync_write_time = ixgbe_timesync_write_time, 549 .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf, 550 .l2_tunnel_offload_set = ixgbe_dev_l2_tunnel_offload_set, 551 .udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add, 552 .udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del, 553 }; 554 555 /* 556 * dev_ops for virtual function, bare necessities for basic vf 557 * operation have been implemented 558 */ 559 static const struct eth_dev_ops ixgbevf_eth_dev_ops = { 560 .dev_configure = ixgbevf_dev_configure, 561 .dev_start = ixgbevf_dev_start, 562 .dev_stop = ixgbevf_dev_stop, 563 .link_update = ixgbe_dev_link_update, 564 .stats_get = ixgbevf_dev_stats_get, 565 .xstats_get = ixgbevf_dev_xstats_get, 566 .stats_reset = ixgbevf_dev_stats_reset, 567 .xstats_reset = ixgbevf_dev_stats_reset, 568 .xstats_get_names = ixgbevf_dev_xstats_get_names, 569 .dev_close = ixgbevf_dev_close, 570 .allmulticast_enable = ixgbevf_dev_allmulticast_enable, 571 .allmulticast_disable = ixgbevf_dev_allmulticast_disable, 572 .dev_infos_get = ixgbevf_dev_info_get, 573 .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, 574 .mtu_set = 
ixgbevf_dev_set_mtu, 575 .vlan_filter_set = ixgbevf_vlan_filter_set, 576 .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set, 577 .vlan_offload_set = ixgbevf_vlan_offload_set, 578 .rx_queue_setup = ixgbe_dev_rx_queue_setup, 579 .rx_queue_release = ixgbe_dev_rx_queue_release, 580 .rx_descriptor_done = ixgbe_dev_rx_descriptor_done, 581 .tx_queue_setup = ixgbe_dev_tx_queue_setup, 582 .tx_queue_release = ixgbe_dev_tx_queue_release, 583 .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable, 584 .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable, 585 .mac_addr_add = ixgbevf_add_mac_addr, 586 .mac_addr_remove = ixgbevf_remove_mac_addr, 587 .set_mc_addr_list = ixgbe_dev_set_mc_addr_list, 588 .rxq_info_get = ixgbe_rxq_info_get, 589 .txq_info_get = ixgbe_txq_info_get, 590 .mac_addr_set = ixgbevf_set_default_mac_addr, 591 .get_reg = ixgbevf_get_regs, 592 .reta_update = ixgbe_dev_rss_reta_update, 593 .reta_query = ixgbe_dev_rss_reta_query, 594 .rss_hash_update = ixgbe_dev_rss_hash_update, 595 .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, 596 }; 597 598 /* store statistics names and its offset in stats structure */ 599 struct rte_ixgbe_xstats_name_off { 600 char name[RTE_ETH_XSTATS_NAME_SIZE]; 601 unsigned offset; 602 }; 603 604 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = { 605 {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)}, 606 {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)}, 607 {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)}, 608 {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)}, 609 {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)}, 610 {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)}, 611 {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)}, 612 {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)}, 613 {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)}, 614 {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)}, 615 {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)}, 616 {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)}, 617 {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)}, 618 {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)}, 619 {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, 620 prc1023)}, 621 {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, 622 prc1522)}, 623 {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)}, 624 {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)}, 625 {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)}, 626 {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)}, 627 {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)}, 628 {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)}, 629 {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)}, 630 {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)}, 631 {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)}, 632 {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)}, 633 {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)}, 634 {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)}, 635 {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)}, 636 {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)}, 637 {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)}, 638 {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, 
ptc511)}, 639 {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats, 640 ptc1023)}, 641 {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats, 642 ptc1522)}, 643 {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)}, 644 {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)}, 645 {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)}, 646 {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)}, 647 648 {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats, 649 fdirustat_add)}, 650 {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats, 651 fdirustat_remove)}, 652 {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats, 653 fdirfstat_fadd)}, 654 {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats, 655 fdirfstat_fremove)}, 656 {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats, 657 fdirmatch)}, 658 {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats, 659 fdirmiss)}, 660 661 {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)}, 662 {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)}, 663 {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, 664 fclast)}, 665 {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)}, 666 {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)}, 667 {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)}, 668 {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)}, 669 {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats, 670 fcoe_noddp)}, 671 {"rx_fcoe_no_direct_data_placement_ext_buff", 672 offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)}, 673 674 {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, 675 lxontxc)}, 676 {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats, 677 lxonrxc)}, 678 {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, 679 lxofftxc)}, 680 {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats, 681 lxoffrxc)}, 682 {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)}, 683 }; 684 685 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \ 686 sizeof(rte_ixgbe_stats_strings[0])) 687 688 /* Per-queue statistics */ 689 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = { 690 {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)}, 691 {"dropped", offsetof(struct ixgbe_hw_stats, mpc)}, 692 {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)}, 693 {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)}, 694 }; 695 696 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \ 697 sizeof(rte_ixgbe_rxq_strings[0])) 698 #define IXGBE_NB_RXQ_PRIO_VALUES 8 699 700 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = { 701 {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)}, 702 {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)}, 703 {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats, 704 pxon2offc)}, 705 }; 706 707 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \ 708 sizeof(rte_ixgbe_txq_strings[0])) 709 #define IXGBE_NB_TXQ_PRIO_VALUES 8 710 711 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = { 712 {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)}, 713 }; 714 715 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \ 716 sizeof(rte_ixgbevf_stats_strings[0])) 717 718 /** 719 * Atomically reads the link status information 
from global 720 * structure rte_eth_dev. 721 * 722 * @param dev 723 * - Pointer to the structure rte_eth_dev to read from. 724 * - Pointer to the buffer to be saved with the link status. 725 * 726 * @return 727 * - On success, zero. 728 * - On failure, negative value. 729 */ 730 static inline int 731 rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev, 732 struct rte_eth_link *link) 733 { 734 struct rte_eth_link *dst = link; 735 struct rte_eth_link *src = &(dev->data->dev_link); 736 737 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 738 *(uint64_t *)src) == 0) 739 return -1; 740 741 return 0; 742 } 743 744 /** 745 * Atomically writes the link status information into global 746 * structure rte_eth_dev. 747 * 748 * @param dev 749 * - Pointer to the structure rte_eth_dev to write to. 750 * - Pointer to the buffer holding the link status to be written. 751 * 752 * @return 753 * - On success, zero. 754 * - On failure, negative value. 755 */ 756 static inline int 757 rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev, 758 struct rte_eth_link *link) 759 { 760 struct rte_eth_link *dst = &(dev->data->dev_link); 761 struct rte_eth_link *src = link; 762 763 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 764 *(uint64_t *)src) == 0) 765 return -1; 766 767 return 0; 768 } 769 770 /* 771 * This function is the same as ixgbe_is_sfp() in base/ixgbe.h. 772 */ 773 static inline int 774 ixgbe_is_sfp(struct ixgbe_hw *hw) 775 { 776 switch (hw->phy.type) { 777 case ixgbe_phy_sfp_avago: 778 case ixgbe_phy_sfp_ftl: 779 case ixgbe_phy_sfp_intel: 780 case ixgbe_phy_sfp_unknown: 781 case ixgbe_phy_sfp_passive_tyco: 782 case ixgbe_phy_sfp_passive_unknown: 783 return 1; 784 default: 785 return 0; 786 } 787 } 788 789 static inline int32_t 790 ixgbe_pf_reset_hw(struct ixgbe_hw *hw) 791 { 792 uint32_t ctrl_ext; 793 int32_t status; 794 795 status = ixgbe_reset_hw(hw); 796 797 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 798 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 799 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 800 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 801 IXGBE_WRITE_FLUSH(hw); 802 803 return status; 804 } 805 806 static inline void 807 ixgbe_enable_intr(struct rte_eth_dev *dev) 808 { 809 struct ixgbe_interrupt *intr = 810 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 811 struct ixgbe_hw *hw = 812 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 813 814 IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask); 815 IXGBE_WRITE_FLUSH(hw); 816 } 817 818 /* 819 * This function is based on ixgbe_disable_intr() in base/ixgbe.h. 820 */ 821 static void 822 ixgbe_disable_intr(struct ixgbe_hw *hw) 823 { 824 PMD_INIT_FUNC_TRACE(); 825 826 if (hw->mac.type == ixgbe_mac_82598EB) { 827 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0); 828 } else { 829 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000); 830 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0); 831 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0); 832 } 833 IXGBE_WRITE_FLUSH(hw); 834 } 835 836 /* 837 * This function resets queue statistics mapping registers. 838 * From Niantic datasheet, Initialization of Statistics section: 839 * "...if software requires the queue counters, the RQSMR and TQSM registers 840 * must be re-programmed following a device reset."
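 * Because of this, the driver keeps a shadow copy of the mappings in the
 * per-port stat_mappings table, and ixgbe_restore_statistics_mapping() below
 * simply re-writes that copy to the RQSMR/TQSM registers.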
841 */ 842 static void 843 ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw) 844 { 845 uint32_t i; 846 847 for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) { 848 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0); 849 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0); 850 } 851 } 852 853 854 static int 855 ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, 856 uint16_t queue_id, 857 uint8_t stat_idx, 858 uint8_t is_rx) 859 { 860 #define QSM_REG_NB_BITS_PER_QMAP_FIELD 8 861 #define NB_QMAP_FIELDS_PER_QSM_REG 4 862 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f 863 864 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 865 struct ixgbe_stat_mapping_registers *stat_mappings = 866 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private); 867 uint32_t qsmr_mask = 0; 868 uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK; 869 uint32_t q_map; 870 uint8_t n, offset; 871 872 if ((hw->mac.type != ixgbe_mac_82599EB) && 873 (hw->mac.type != ixgbe_mac_X540) && 874 (hw->mac.type != ixgbe_mac_X550) && 875 (hw->mac.type != ixgbe_mac_X550EM_x) && 876 (hw->mac.type != ixgbe_mac_X550EM_a)) 877 return -ENOSYS; 878 879 PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d", 880 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", 881 queue_id, stat_idx); 882 883 n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG); 884 if (n >= IXGBE_NB_STAT_MAPPING_REGS) { 885 PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded"); 886 return -EIO; 887 } 888 offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG); 889 890 /* Now clear any previous stat_idx set */ 891 clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset); 892 if (!is_rx) 893 stat_mappings->tqsm[n] &= ~clearing_mask; 894 else 895 stat_mappings->rqsmr[n] &= ~clearing_mask; 896 897 q_map = (uint32_t)stat_idx; 898 q_map &= QMAP_FIELD_RESERVED_BITS_MASK; 899 qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset); 900 if (!is_rx) 901 stat_mappings->tqsm[n] |= qsmr_mask; 902 else 903 stat_mappings->rqsmr[n] |= qsmr_mask; 904 905 PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d", 906 (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", 907 queue_id, stat_idx); 908 PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n, 909 is_rx ? 
stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]); 910 911 /* Now write the mapping in the appropriate register */ 912 if (is_rx) { 913 PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d", 914 stat_mappings->rqsmr[n], n); 915 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]); 916 } else { 917 PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d", 918 stat_mappings->tqsm[n], n); 919 IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]); 920 } 921 return 0; 922 } 923 924 static void 925 ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev) 926 { 927 struct ixgbe_stat_mapping_registers *stat_mappings = 928 IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private); 929 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 930 int i; 931 932 /* write whatever was in stat mapping table to the NIC */ 933 for (i = 0; i < IXGBE_NB_STAT_MAPPING_REGS; i++) { 934 /* rx */ 935 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), stat_mappings->rqsmr[i]); 936 937 /* tx */ 938 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), stat_mappings->tqsm[i]); 939 } 940 } 941 942 static void 943 ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) 944 { 945 uint8_t i; 946 struct ixgbe_dcb_tc_config *tc; 947 uint8_t dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS; 948 949 dcb_config->num_tcs.pg_tcs = dcb_max_tc; 950 dcb_config->num_tcs.pfc_tcs = dcb_max_tc; 951 for (i = 0; i < dcb_max_tc; i++) { 952 tc = &dcb_config->tc_config[i]; 953 tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i; 954 tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 955 (uint8_t)(100/dcb_max_tc + (i & 1)); 956 tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i; 957 tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 958 (uint8_t)(100/dcb_max_tc + (i & 1)); 959 tc->pfc = ixgbe_dcb_pfc_disabled; 960 } 961 962 /* Initialize default user to priority mapping, UPx->TC0 */ 963 tc = &dcb_config->tc_config[0]; 964 tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; 965 tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; 966 for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) { 967 dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100; 968 dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100; 969 } 970 dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal; 971 dcb_config->pfc_mode_enable = false; 972 dcb_config->vt_mode = true; 973 dcb_config->round_robin_enable = false; 974 /* support all DCB capabilities in 82599 */ 975 dcb_config->support.capabilities = 0xFF; 976 977 /*we only support 4 Tcs for X540, X550 */ 978 if (hw->mac.type == ixgbe_mac_X540 || 979 hw->mac.type == ixgbe_mac_X550 || 980 hw->mac.type == ixgbe_mac_X550EM_x || 981 hw->mac.type == ixgbe_mac_X550EM_a) { 982 dcb_config->num_tcs.pg_tcs = 4; 983 dcb_config->num_tcs.pfc_tcs = 4; 984 } 985 } 986 987 /* 988 * Ensure that all locks are released before first NVM or PHY access 989 */ 990 static void 991 ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) 992 { 993 uint16_t mask; 994 995 /* 996 * Phy lock should not fail in this early stage. If this is the case, 997 * it is due to an improper exit of the application. 998 * So force the release of the faulty lock. Release of common lock 999 * is done automatically by swfw_sync function. 
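 * (For illustration: on PCI function 1 the mask computed below is
 * IXGBE_GSSR_PHY0_SM << 1, i.e. the PHY1 semaphore bit.)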
1000 */ 1001 mask = IXGBE_GSSR_PHY0_SM << hw->bus.func; 1002 if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { 1003 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func); 1004 } 1005 ixgbe_release_swfw_semaphore(hw, mask); 1006 1007 /* 1008 * These ones are more tricky since they are common to all ports; but 1009 * swfw_sync retries last long enough (1s) to be almost sure that if 1010 * lock can not be taken it is due to an improper lock of the 1011 * semaphore. 1012 */ 1013 mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM; 1014 if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) { 1015 PMD_DRV_LOG(DEBUG, "SWFW common locks released"); 1016 } 1017 ixgbe_release_swfw_semaphore(hw, mask); 1018 } 1019 1020 /* 1021 * This function is based on code in ixgbe_attach() in base/ixgbe.c. 1022 * It returns 0 on success. 1023 */ 1024 static int 1025 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) 1026 { 1027 struct rte_pci_device *pci_dev; 1028 struct ixgbe_hw *hw = 1029 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1030 struct ixgbe_vfta *shadow_vfta = 1031 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1032 struct ixgbe_hwstrip *hwstrip = 1033 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1034 struct ixgbe_dcb_config *dcb_config = 1035 IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private); 1036 struct ixgbe_filter_info *filter_info = 1037 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); 1038 uint32_t ctrl_ext; 1039 uint16_t csum; 1040 int diag, i; 1041 1042 PMD_INIT_FUNC_TRACE(); 1043 1044 eth_dev->dev_ops = &ixgbe_eth_dev_ops; 1045 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1046 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1047 1048 /* 1049 * For secondary processes, we don't initialise any further as primary 1050 * has already done this work. Only check we don't need a different 1051 * RX and TX function. 1052 */ 1053 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1054 struct ixgbe_tx_queue *txq; 1055 /* TX queue function in primary, set by last queue initialized 1056 * Tx queue may not be initialized by primary process 1057 */ 1058 if (eth_dev->data->tx_queues) { 1059 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1]; 1060 ixgbe_set_tx_function(eth_dev, txq); 1061 } else { 1062 /* Use default TX function if we get here */ 1063 PMD_INIT_LOG(NOTICE, "No TX queues configured yet. 
" 1063 "Using default TX function."); 1064 1065 } 1066 1067 ixgbe_set_rx_function(eth_dev); 1068 1069 return 0; 1070 } 1071 pci_dev = eth_dev->pci_dev; 1072 1073 rte_eth_copy_pci_info(eth_dev, pci_dev); 1074 1075 /* Vendor and Device ID need to be set before init of shared code */ 1076 hw->device_id = pci_dev->id.device_id; 1077 hw->vendor_id = pci_dev->id.vendor_id; 1078 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1079 hw->allow_unsupported_sfp = 1; 1080 1081 /* Initialize the shared code (base driver) */ 1082 #ifdef RTE_NIC_BYPASS 1083 diag = ixgbe_bypass_init_shared_code(hw); 1084 #else 1085 diag = ixgbe_init_shared_code(hw); 1086 #endif /* RTE_NIC_BYPASS */ 1087 1088 if (diag != IXGBE_SUCCESS) { 1089 PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag); 1090 return -EIO; 1091 } 1092 1093 /* pick up the PCI bus settings for reporting later */ 1094 ixgbe_get_bus_info(hw); 1095 1096 /* Unlock any pending hardware semaphore */ 1097 ixgbe_swfw_lock_reset(hw); 1098 1099 /* Initialize DCB configuration*/ 1100 memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config)); 1101 ixgbe_dcb_init(hw, dcb_config); 1102 /* Get Hardware Flow Control setting */ 1103 hw->fc.requested_mode = ixgbe_fc_full; 1104 hw->fc.current_mode = ixgbe_fc_full; 1105 hw->fc.pause_time = IXGBE_FC_PAUSE; 1106 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 1107 hw->fc.low_water[i] = IXGBE_FC_LO; 1108 hw->fc.high_water[i] = IXGBE_FC_HI; 1109 } 1110 hw->fc.send_xon = 1; 1111 1112 /* Make sure we have a good EEPROM before we read from it */ 1113 diag = ixgbe_validate_eeprom_checksum(hw, &csum); 1114 if (diag != IXGBE_SUCCESS) { 1115 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag); 1116 return -EIO; 1117 } 1118 1119 #ifdef RTE_NIC_BYPASS 1120 diag = ixgbe_bypass_init_hw(hw); 1121 #else 1122 diag = ixgbe_init_hw(hw); 1123 #endif /* RTE_NIC_BYPASS */ 1124 1125 /* 1126 * Devices with copper phys will fail to initialise if ixgbe_init_hw() 1127 * is called too soon after the kernel driver unbinding/binding occurs. 1128 * The failure occurs in ixgbe_identify_phy_generic() for all devices, 1129 * but for non-copper devices, ixgbe_identify_sfp_module_generic() is 1130 * also called. See ixgbe_identify_phy_82599(). The reason for the 1131 * failure is not known, and only occurs when virtualisation features 1132 * are disabled in the BIOS. A delay of 100ms was found to be enough by 1133 * trial-and-error, and is doubled to be safe. 1134 */ 1135 if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { 1136 rte_delay_ms(200); 1137 diag = ixgbe_init_hw(hw); 1138 } 1139 1140 if (diag == IXGBE_ERR_EEPROM_VERSION) { 1141 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/" 1142 "LOM. 
Please be aware there may be issues associated " 1143 "with your hardware."); 1144 PMD_INIT_LOG(ERR, "If you are experiencing problems " 1145 "please contact your Intel or hardware representative " 1146 "who provided you with this hardware."); 1147 } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED) 1148 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); 1149 if (diag) { 1150 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag); 1151 return -EIO; 1152 } 1153 1154 /* Reset the hw statistics */ 1155 ixgbe_dev_stats_reset(eth_dev); 1156 1157 /* disable interrupt */ 1158 ixgbe_disable_intr(hw); 1159 1160 /* reset mappings for queue statistics hw counters*/ 1161 ixgbe_reset_qstat_mappings(hw); 1162 1163 /* Allocate memory for storing MAC addresses */ 1164 eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * 1165 hw->mac.num_rar_entries, 0); 1166 if (eth_dev->data->mac_addrs == NULL) { 1167 PMD_INIT_LOG(ERR, 1168 "Failed to allocate %u bytes needed to store " 1169 "MAC addresses", 1170 ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1171 return -ENOMEM; 1172 } 1173 /* Copy the permanent MAC address */ 1174 ether_addr_copy((struct ether_addr *) hw->mac.perm_addr, 1175 ð_dev->data->mac_addrs[0]); 1176 1177 /* Allocate memory for storing hash filter MAC addresses */ 1178 eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * 1179 IXGBE_VMDQ_NUM_UC_MAC, 0); 1180 if (eth_dev->data->hash_mac_addrs == NULL) { 1181 PMD_INIT_LOG(ERR, 1182 "Failed to allocate %d bytes needed to store MAC addresses", 1183 ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); 1184 return -ENOMEM; 1185 } 1186 1187 /* initialize the vfta */ 1188 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1189 1190 /* initialize the hw strip bitmap*/ 1191 memset(hwstrip, 0, sizeof(*hwstrip)); 1192 1193 /* initialize PF if max_vfs not zero */ 1194 ixgbe_pf_host_init(eth_dev); 1195 1196 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 1197 /* let hardware know driver is loaded */ 1198 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 1199 /* Set PF Reset Done bit so PF/VF Mail Ops can work */ 1200 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 1201 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 1202 IXGBE_WRITE_FLUSH(hw); 1203 1204 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 1205 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d", 1206 (int) hw->mac.type, (int) hw->phy.type, 1207 (int) hw->phy.sfp_type); 1208 else 1209 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d", 1210 (int) hw->mac.type, (int) hw->phy.type); 1211 1212 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", 1213 eth_dev->data->port_id, pci_dev->id.vendor_id, 1214 pci_dev->id.device_id); 1215 1216 rte_intr_callback_register(&pci_dev->intr_handle, 1217 ixgbe_dev_interrupt_handler, 1218 (void *)eth_dev); 1219 1220 /* enable uio/vfio intr/eventfd mapping */ 1221 rte_intr_enable(&pci_dev->intr_handle); 1222 1223 /* enable support intr */ 1224 ixgbe_enable_intr(eth_dev); 1225 1226 /* initialize 5tuple filter list */ 1227 TAILQ_INIT(&filter_info->fivetuple_list); 1228 memset(filter_info->fivetuple_mask, 0, 1229 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); 1230 1231 return 0; 1232 } 1233 1234 static int 1235 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) 1236 { 1237 struct rte_pci_device *pci_dev; 1238 struct ixgbe_hw *hw; 1239 1240 PMD_INIT_FUNC_TRACE(); 1241 1242 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1243 return -EPERM; 1244 1245 hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1246 pci_dev = eth_dev->pci_dev; 1247 1248 if (hw->adapter_stopped == 0) 1249 
ixgbe_dev_close(eth_dev); 1250 1251 eth_dev->dev_ops = NULL; 1252 eth_dev->rx_pkt_burst = NULL; 1253 eth_dev->tx_pkt_burst = NULL; 1254 1255 /* Unlock any pending hardware semaphore */ 1256 ixgbe_swfw_lock_reset(hw); 1257 1258 /* disable uio intr before callback unregister */ 1259 rte_intr_disable(&(pci_dev->intr_handle)); 1260 rte_intr_callback_unregister(&(pci_dev->intr_handle), 1261 ixgbe_dev_interrupt_handler, (void *)eth_dev); 1262 1263 /* uninitialize PF if max_vfs not zero */ 1264 ixgbe_pf_host_uninit(eth_dev); 1265 1266 rte_free(eth_dev->data->mac_addrs); 1267 eth_dev->data->mac_addrs = NULL; 1268 1269 rte_free(eth_dev->data->hash_mac_addrs); 1270 eth_dev->data->hash_mac_addrs = NULL; 1271 1272 return 0; 1273 } 1274 1275 /* 1276 * Negotiate mailbox API version with the PF. 1277 * After reset API version is always set to the basic one (ixgbe_mbox_api_10). 1278 * Then we try to negotiate starting with the most recent one. 1279 * If all negotiation attempts fail, then we will proceed with 1280 * the default one (ixgbe_mbox_api_10). 1281 */ 1282 static void 1283 ixgbevf_negotiate_api(struct ixgbe_hw *hw) 1284 { 1285 int32_t i; 1286 1287 /* start with highest supported, proceed down */ 1288 static const enum ixgbe_pfvf_api_rev sup_ver[] = { 1289 ixgbe_mbox_api_12, 1290 ixgbe_mbox_api_11, 1291 ixgbe_mbox_api_10, 1292 }; 1293 1294 for (i = 0; 1295 i != RTE_DIM(sup_ver) && 1296 ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0; 1297 i++) 1298 ; 1299 } 1300 1301 static void 1302 generate_random_mac_addr(struct ether_addr *mac_addr) 1303 { 1304 uint64_t random; 1305 1306 /* Set Organizationally Unique Identifier (OUI) prefix. */ 1307 mac_addr->addr_bytes[0] = 0x00; 1308 mac_addr->addr_bytes[1] = 0x09; 1309 mac_addr->addr_bytes[2] = 0xC0; 1310 /* Force indication of locally assigned MAC address. */ 1311 mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR; 1312 /* Generate the last 3 bytes of the MAC address with a random number. */ 1313 random = rte_rand(); 1314 memcpy(&mac_addr->addr_bytes[3], &random, 3); 1315 } 1316 1317 /* 1318 * Virtual Function device init 1319 */ 1320 static int 1321 eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) 1322 { 1323 int diag; 1324 uint32_t tc, tcs; 1325 struct rte_pci_device *pci_dev; 1326 struct ixgbe_hw *hw = 1327 IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1328 struct ixgbe_vfta *shadow_vfta = 1329 IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); 1330 struct ixgbe_hwstrip *hwstrip = 1331 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); 1332 struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; 1333 1334 PMD_INIT_FUNC_TRACE(); 1335 1336 eth_dev->dev_ops = &ixgbevf_eth_dev_ops; 1337 eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; 1338 eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; 1339 1340 /* for secondary processes, we don't initialise any further as primary 1341 * has already done this work. Only check we don't need a different 1342 * RX function 1343 */ 1344 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1345 struct ixgbe_tx_queue *txq; 1346 /* TX queue function in primary, set by last queue initialized 1347 * Tx queue may not initialized by primary process 1348 */ 1349 if (eth_dev->data->tx_queues) { 1350 txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1]; 1351 ixgbe_set_tx_function(eth_dev, txq); 1352 } else { 1353 /* Use default TX function if we get here */ 1354 PMD_INIT_LOG(NOTICE, 1355 "No TX queues configured yet. 
Using default TX function."); 1356 } 1357 1358 ixgbe_set_rx_function(eth_dev); 1359 1360 return 0; 1361 } 1362 1363 pci_dev = eth_dev->pci_dev; 1364 1365 rte_eth_copy_pci_info(eth_dev, pci_dev); 1366 1367 hw->device_id = pci_dev->id.device_id; 1368 hw->vendor_id = pci_dev->id.vendor_id; 1369 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; 1370 1371 /* initialize the vfta */ 1372 memset(shadow_vfta, 0, sizeof(*shadow_vfta)); 1373 1374 /* initialize the hw strip bitmap*/ 1375 memset(hwstrip, 0, sizeof(*hwstrip)); 1376 1377 /* Initialize the shared code (base driver) */ 1378 diag = ixgbe_init_shared_code(hw); 1379 if (diag != IXGBE_SUCCESS) { 1380 PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag); 1381 return -EIO; 1382 } 1383 1384 /* init_mailbox_params */ 1385 hw->mbx.ops.init_params(hw); 1386 1387 /* Reset the hw statistics */ 1388 ixgbevf_dev_stats_reset(eth_dev); 1389 1390 /* Disable the interrupts for VF */ 1391 ixgbevf_intr_disable(hw); 1392 1393 hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ 1394 diag = hw->mac.ops.reset_hw(hw); 1395 1396 /* 1397 * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when 1398 * the underlying PF driver has not assigned a MAC address to the VF. 1399 * In this case, assign a random MAC address. 1400 */ 1401 if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) { 1402 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1403 return diag; 1404 } 1405 1406 /* negotiate mailbox API version to use with the PF. */ 1407 ixgbevf_negotiate_api(hw); 1408 1409 /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */ 1410 ixgbevf_get_queues(hw, &tcs, &tc); 1411 1412 /* Allocate memory for storing MAC addresses */ 1413 eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN * 1414 hw->mac.num_rar_entries, 0); 1415 if (eth_dev->data->mac_addrs == NULL) { 1416 PMD_INIT_LOG(ERR, 1417 "Failed to allocate %u bytes needed to store " 1418 "MAC addresses", 1419 ETHER_ADDR_LEN * hw->mac.num_rar_entries); 1420 return -ENOMEM; 1421 } 1422 1423 /* Generate a random MAC address, if none was assigned by PF. 
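 * generate_random_mac_addr() above builds it from the 00:09:C0 OUI, sets the
 * locally administered bit in the first byte and fills the last three bytes
 * from rte_rand().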
*/ 1424 if (is_zero_ether_addr(perm_addr)) { 1425 generate_random_mac_addr(perm_addr); 1426 diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1); 1427 if (diag) { 1428 rte_free(eth_dev->data->mac_addrs); 1429 eth_dev->data->mac_addrs = NULL; 1430 return diag; 1431 } 1432 PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF"); 1433 PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address " 1434 "%02x:%02x:%02x:%02x:%02x:%02x", 1435 perm_addr->addr_bytes[0], 1436 perm_addr->addr_bytes[1], 1437 perm_addr->addr_bytes[2], 1438 perm_addr->addr_bytes[3], 1439 perm_addr->addr_bytes[4], 1440 perm_addr->addr_bytes[5]); 1441 } 1442 1443 /* Copy the permanent MAC address */ 1444 ether_addr_copy(perm_addr, ð_dev->data->mac_addrs[0]); 1445 1446 /* reset the hardware with the new settings */ 1447 diag = hw->mac.ops.start_hw(hw); 1448 switch (diag) { 1449 case 0: 1450 break; 1451 1452 default: 1453 PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); 1454 return -EIO; 1455 } 1456 1457 rte_intr_callback_register(&pci_dev->intr_handle, 1458 ixgbevf_dev_interrupt_handler, 1459 (void *)eth_dev); 1460 rte_intr_enable(&pci_dev->intr_handle); 1461 ixgbevf_intr_enable(hw); 1462 1463 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", 1464 eth_dev->data->port_id, pci_dev->id.vendor_id, 1465 pci_dev->id.device_id, "ixgbe_mac_82599_vf"); 1466 1467 return 0; 1468 } 1469 1470 /* Virtual Function device uninit */ 1471 1472 static int 1473 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) 1474 { 1475 struct ixgbe_hw *hw; 1476 struct rte_pci_device *pci_dev = eth_dev->pci_dev; 1477 1478 PMD_INIT_FUNC_TRACE(); 1479 1480 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1481 return -EPERM; 1482 1483 hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); 1484 1485 if (hw->adapter_stopped == 0) 1486 ixgbevf_dev_close(eth_dev); 1487 1488 eth_dev->dev_ops = NULL; 1489 eth_dev->rx_pkt_burst = NULL; 1490 eth_dev->tx_pkt_burst = NULL; 1491 1492 /* Disable the interrupts for VF */ 1493 ixgbevf_intr_disable(hw); 1494 1495 rte_free(eth_dev->data->mac_addrs); 1496 eth_dev->data->mac_addrs = NULL; 1497 1498 rte_intr_disable(&pci_dev->intr_handle); 1499 rte_intr_callback_unregister(&pci_dev->intr_handle, 1500 ixgbevf_dev_interrupt_handler, 1501 (void *)eth_dev); 1502 1503 return 0; 1504 } 1505 1506 static struct eth_driver rte_ixgbe_pmd = { 1507 .pci_drv = { 1508 .name = "rte_ixgbe_pmd", 1509 .id_table = pci_id_ixgbe_map, 1510 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | 1511 RTE_PCI_DRV_DETACHABLE, 1512 }, 1513 .eth_dev_init = eth_ixgbe_dev_init, 1514 .eth_dev_uninit = eth_ixgbe_dev_uninit, 1515 .dev_private_size = sizeof(struct ixgbe_adapter), 1516 }; 1517 1518 /* 1519 * virtual function driver struct 1520 */ 1521 static struct eth_driver rte_ixgbevf_pmd = { 1522 .pci_drv = { 1523 .name = "rte_ixgbevf_pmd", 1524 .id_table = pci_id_ixgbevf_map, 1525 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE, 1526 }, 1527 .eth_dev_init = eth_ixgbevf_dev_init, 1528 .eth_dev_uninit = eth_ixgbevf_dev_uninit, 1529 .dev_private_size = sizeof(struct ixgbe_adapter), 1530 }; 1531 1532 /* 1533 * Driver initialization routine. 1534 * Invoked once at EAL init time. 1535 * Register itself as the [Poll Mode] Driver of PCI IXGBE devices. 
1536 */ 1537 static int 1538 rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused) 1539 { 1540 PMD_INIT_FUNC_TRACE(); 1541 1542 rte_eth_driver_register(&rte_ixgbe_pmd); 1543 return 0; 1544 } 1545 1546 /* 1547 * VF Driver initialization routine. 1548 * Invoked one at EAL init time. 1549 * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices. 1550 */ 1551 static int 1552 rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused) 1553 { 1554 PMD_INIT_FUNC_TRACE(); 1555 1556 rte_eth_driver_register(&rte_ixgbevf_pmd); 1557 return 0; 1558 } 1559 1560 static int 1561 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 1562 { 1563 struct ixgbe_hw *hw = 1564 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1565 struct ixgbe_vfta *shadow_vfta = 1566 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1567 uint32_t vfta; 1568 uint32_t vid_idx; 1569 uint32_t vid_bit; 1570 1571 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 1572 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 1573 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx)); 1574 if (on) 1575 vfta |= vid_bit; 1576 else 1577 vfta &= ~vid_bit; 1578 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta); 1579 1580 /* update local VFTA copy */ 1581 shadow_vfta->vfta[vid_idx] = vfta; 1582 1583 return 0; 1584 } 1585 1586 static void 1587 ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 1588 { 1589 if (on) 1590 ixgbe_vlan_hw_strip_enable(dev, queue); 1591 else 1592 ixgbe_vlan_hw_strip_disable(dev, queue); 1593 } 1594 1595 static int 1596 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, 1597 enum rte_vlan_type vlan_type, 1598 uint16_t tpid) 1599 { 1600 struct ixgbe_hw *hw = 1601 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1602 int ret = 0; 1603 uint32_t reg; 1604 uint32_t qinq; 1605 1606 qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1607 qinq &= IXGBE_DMATXCTL_GDV; 1608 1609 switch (vlan_type) { 1610 case ETH_VLAN_TYPE_INNER: 1611 if (qinq) { 1612 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1613 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1614 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1615 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1616 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1617 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1618 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1619 } else { 1620 ret = -ENOTSUP; 1621 PMD_DRV_LOG(ERR, "Inner type is not supported" 1622 " by single VLAN"); 1623 } 1624 break; 1625 case ETH_VLAN_TYPE_OUTER: 1626 if (qinq) { 1627 /* Only the high 16-bits is valid */ 1628 IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid << 1629 IXGBE_EXVET_VET_EXT_SHIFT); 1630 } else { 1631 reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1632 reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; 1633 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); 1634 reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1635 reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) 1636 | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); 1637 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); 1638 } 1639 1640 break; 1641 default: 1642 ret = -EINVAL; 1643 PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type); 1644 break; 1645 } 1646 1647 return ret; 1648 } 1649 1650 void 1651 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev) 1652 { 1653 struct ixgbe_hw *hw = 1654 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1655 uint32_t vlnctrl; 1656 1657 PMD_INIT_FUNC_TRACE(); 1658 1659 /* Filter Table Disable */ 1660 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1661 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 1662 1663 
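	/*
	 * Worked example for the VFTA indexing used in ixgbe_vlan_filter_set()
	 * above: the 4096 possible VLAN IDs map onto 128 32-bit VFTA registers.
	 * For vlan_id = 100 (0x064):
	 *   vid_idx = (100 >> 5) & 0x7F = 3
	 *   vid_bit = 1 << (100 & 0x1F) = 1 << 4 = 0x10
	 * so bit 4 of VFTA[3] is set or cleared, and the same value is mirrored
	 * into shadow_vfta so it can be replayed by ixgbe_vlan_hw_filter_enable()
	 * below.
	 */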
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1664 } 1665 1666 void 1667 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) 1668 { 1669 struct ixgbe_hw *hw = 1670 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1671 struct ixgbe_vfta *shadow_vfta = 1672 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 1673 uint32_t vlnctrl; 1674 uint16_t i; 1675 1676 PMD_INIT_FUNC_TRACE(); 1677 1678 /* Filter Table Enable */ 1679 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1680 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 1681 vlnctrl |= IXGBE_VLNCTRL_VFE; 1682 1683 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1684 1685 /* write whatever is in local vfta copy */ 1686 for (i = 0; i < IXGBE_VFTA_SIZE; i++) 1687 IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]); 1688 } 1689 1690 static void 1691 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) 1692 { 1693 struct ixgbe_hwstrip *hwstrip = 1694 IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); 1695 struct ixgbe_rx_queue *rxq; 1696 1697 if (queue >= IXGBE_MAX_RX_QUEUE_NUM) 1698 return; 1699 1700 if (on) 1701 IXGBE_SET_HWSTRIP(hwstrip, queue); 1702 else 1703 IXGBE_CLEAR_HWSTRIP(hwstrip, queue); 1704 1705 if (queue >= dev->data->nb_rx_queues) 1706 return; 1707 1708 rxq = dev->data->rx_queues[queue]; 1709 1710 if (on) 1711 rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; 1712 else 1713 rxq->vlan_flags = PKT_RX_VLAN_PKT; 1714 } 1715 1716 static void 1717 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) 1718 { 1719 struct ixgbe_hw *hw = 1720 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1721 uint32_t ctrl; 1722 1723 PMD_INIT_FUNC_TRACE(); 1724 1725 if (hw->mac.type == ixgbe_mac_82598EB) { 1726 /* No queue level support */ 1727 PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); 1728 return; 1729 } 1730 1731 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 1732 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 1733 ctrl &= ~IXGBE_RXDCTL_VME; 1734 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 1735 1736 /* record those setting for HW strip per queue */ 1737 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); 1738 } 1739 1740 static void 1741 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) 1742 { 1743 struct ixgbe_hw *hw = 1744 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1745 uint32_t ctrl; 1746 1747 PMD_INIT_FUNC_TRACE(); 1748 1749 if (hw->mac.type == ixgbe_mac_82598EB) { 1750 /* No queue level supported */ 1751 PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); 1752 return; 1753 } 1754 1755 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 1756 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 1757 ctrl |= IXGBE_RXDCTL_VME; 1758 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 1759 1760 /* record those setting for HW strip per queue */ 1761 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); 1762 } 1763 1764 void 1765 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev) 1766 { 1767 struct ixgbe_hw *hw = 1768 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1769 uint32_t ctrl; 1770 uint16_t i; 1771 struct ixgbe_rx_queue *rxq; 1772 1773 PMD_INIT_FUNC_TRACE(); 1774 1775 if (hw->mac.type == ixgbe_mac_82598EB) { 1776 ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1777 ctrl &= ~IXGBE_VLNCTRL_VME; 1778 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); 1779 } else { 1780 /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ 1781 for (i = 0; i < dev->data->nb_rx_queues; i++) { 1782 rxq = dev->data->rx_queues[i]; 1783 
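			/*
			 * Each queue's VLAN strip setting lives in the VME bit of its
			 * RXDCTL register; the hwstrip bitmap maintained by
			 * ixgbe_vlan_hw_strip_bitmap_set() keeps a software copy of that
			 * state so rxq->vlan_flags stays in sync with the hardware.
			 */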
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
			ctrl &= ~IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);

			/* record this setting for HW strip per queue */
			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
		}
	}
}

void
ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;
	uint16_t i;
	struct ixgbe_rx_queue *rxq;

	PMD_INIT_FUNC_TRACE();

	if (hw->mac.type == ixgbe_mac_82598EB) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		ctrl |= IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
	} else {
		/* Other 10G NICs: VLAN strip can be set up per queue in RXDCTL */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);

			/* record this setting for HW strip per queue */
			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
		}
	}
}

static void
ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	/* DMATXCTL: Generic Double VLAN Disable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
	ctrl &= ~IXGBE_DMATXCTL_GDV;
	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);

	/* CTRL_EXT: Global Double VLAN Disable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl &= ~IXGBE_EXTENDED_VLAN;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);

}

static void
ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;

	PMD_INIT_FUNC_TRACE();

	/* DMATXCTL: Generic Double VLAN Enable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
	ctrl |= IXGBE_DMATXCTL_GDV;
	IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, ctrl);

	/* CTRL_EXT: Global Double VLAN Enable */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl |= IXGBE_EXTENDED_VLAN;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);

	/* Clear pooling mode of PFVTCTL. It's required by X550. */
	if (hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x ||
	    hw->mac.type == ixgbe_mac_X550EM_a) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
		ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
	}

	/*
	 * VET EXT field in the EXVET register = 0x8100 by default
	 * So no need to change.
Same to VT field of DMATXCTL register 1874 */ 1875 } 1876 1877 static void 1878 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) 1879 { 1880 if (mask & ETH_VLAN_STRIP_MASK) { 1881 if (dev->data->dev_conf.rxmode.hw_vlan_strip) 1882 ixgbe_vlan_hw_strip_enable_all(dev); 1883 else 1884 ixgbe_vlan_hw_strip_disable_all(dev); 1885 } 1886 1887 if (mask & ETH_VLAN_FILTER_MASK) { 1888 if (dev->data->dev_conf.rxmode.hw_vlan_filter) 1889 ixgbe_vlan_hw_filter_enable(dev); 1890 else 1891 ixgbe_vlan_hw_filter_disable(dev); 1892 } 1893 1894 if (mask & ETH_VLAN_EXTEND_MASK) { 1895 if (dev->data->dev_conf.rxmode.hw_vlan_extend) 1896 ixgbe_vlan_hw_extend_enable(dev); 1897 else 1898 ixgbe_vlan_hw_extend_disable(dev); 1899 } 1900 } 1901 1902 static void 1903 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) 1904 { 1905 struct ixgbe_hw *hw = 1906 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1907 /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ 1908 uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1909 1910 vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ 1911 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); 1912 } 1913 1914 static int 1915 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) 1916 { 1917 switch (nb_rx_q) { 1918 case 1: 1919 case 2: 1920 RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS; 1921 break; 1922 case 4: 1923 RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS; 1924 break; 1925 default: 1926 return -EINVAL; 1927 } 1928 1929 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q; 1930 RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q; 1931 1932 return 0; 1933 } 1934 1935 static int 1936 ixgbe_check_mq_mode(struct rte_eth_dev *dev) 1937 { 1938 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 1939 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1940 uint16_t nb_rx_q = dev->data->nb_rx_queues; 1941 uint16_t nb_tx_q = dev->data->nb_tx_queues; 1942 1943 if (RTE_ETH_DEV_SRIOV(dev).active != 0) { 1944 /* check multi-queue mode */ 1945 switch (dev_conf->rxmode.mq_mode) { 1946 case ETH_MQ_RX_VMDQ_DCB: 1947 case ETH_MQ_RX_VMDQ_DCB_RSS: 1948 /* DCB/RSS VMDQ in SRIOV mode, not implement yet */ 1949 PMD_INIT_LOG(ERR, "SRIOV active," 1950 " unsupported mq_mode rx %d.", 1951 dev_conf->rxmode.mq_mode); 1952 return -EINVAL; 1953 case ETH_MQ_RX_RSS: 1954 case ETH_MQ_RX_VMDQ_RSS: 1955 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS; 1956 if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) 1957 if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) { 1958 PMD_INIT_LOG(ERR, "SRIOV is active," 1959 " invalid queue number" 1960 " for VMDQ RSS, allowed" 1961 " value are 1, 2 or 4."); 1962 return -EINVAL; 1963 } 1964 break; 1965 case ETH_MQ_RX_VMDQ_ONLY: 1966 case ETH_MQ_RX_NONE: 1967 /* if nothing mq mode configure, use default scheme */ 1968 dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; 1969 if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1) 1970 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1; 1971 break; 1972 default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/ 1973 /* SRIOV only works in VMDq enable mode */ 1974 PMD_INIT_LOG(ERR, "SRIOV is active," 1975 " wrong mq_mode rx %d.", 1976 dev_conf->rxmode.mq_mode); 1977 return -EINVAL; 1978 } 1979 1980 switch (dev_conf->txmode.mq_mode) { 1981 case ETH_MQ_TX_VMDQ_DCB: 1982 /* DCB VMDQ in SRIOV mode, not implement yet */ 1983 PMD_INIT_LOG(ERR, "SRIOV is active," 1984 " unsupported VMDQ mq_mode tx %d.", 1985 dev_conf->txmode.mq_mode); 1986 return -EINVAL; 1987 
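		/*
		 * Illustrative numbers for the SRIOV queue checks above (assuming a
		 * configuration that requested 31 VFs): with max_vfs = 31 and
		 * nb_rx_q = 4, ixgbe_check_vf_rss_rxq_num() selects ETH_32_POOLS,
		 * sets nb_q_per_pool = 4 and places the default pool's first queue
		 * at def_pool_q_idx = 31 * 4 = 124.
		 */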
default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */ 1988 dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY; 1989 break; 1990 } 1991 1992 /* check valid queue number */ 1993 if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || 1994 (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { 1995 PMD_INIT_LOG(ERR, "SRIOV is active," 1996 " nb_rx_q=%d nb_tx_q=%d queue number" 1997 " must be less than or equal to %d.", 1998 nb_rx_q, nb_tx_q, 1999 RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); 2000 return -EINVAL; 2001 } 2002 } else { 2003 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { 2004 PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" 2005 " not supported."); 2006 return -EINVAL; 2007 } 2008 /* check configuration for vmdb+dcb mode */ 2009 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) { 2010 const struct rte_eth_vmdq_dcb_conf *conf; 2011 2012 if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2013 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.", 2014 IXGBE_VMDQ_DCB_NB_QUEUES); 2015 return -EINVAL; 2016 } 2017 conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf; 2018 if (!(conf->nb_queue_pools == ETH_16_POOLS || 2019 conf->nb_queue_pools == ETH_32_POOLS)) { 2020 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2021 " nb_queue_pools must be %d or %d.", 2022 ETH_16_POOLS, ETH_32_POOLS); 2023 return -EINVAL; 2024 } 2025 } 2026 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { 2027 const struct rte_eth_vmdq_dcb_tx_conf *conf; 2028 2029 if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { 2030 PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d", 2031 IXGBE_VMDQ_DCB_NB_QUEUES); 2032 return -EINVAL; 2033 } 2034 conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf; 2035 if (!(conf->nb_queue_pools == ETH_16_POOLS || 2036 conf->nb_queue_pools == ETH_32_POOLS)) { 2037 PMD_INIT_LOG(ERR, "VMDQ+DCB selected," 2038 " nb_queue_pools != %d and" 2039 " nb_queue_pools != %d.", 2040 ETH_16_POOLS, ETH_32_POOLS); 2041 return -EINVAL; 2042 } 2043 } 2044 2045 /* For DCB mode check our configuration before we go further */ 2046 if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { 2047 const struct rte_eth_dcb_rx_conf *conf; 2048 2049 if (nb_rx_q != IXGBE_DCB_NB_QUEUES) { 2050 PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.", 2051 IXGBE_DCB_NB_QUEUES); 2052 return -EINVAL; 2053 } 2054 conf = &dev_conf->rx_adv_conf.dcb_rx_conf; 2055 if (!(conf->nb_tcs == ETH_4_TCS || 2056 conf->nb_tcs == ETH_8_TCS)) { 2057 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2058 " and nb_tcs != %d.", 2059 ETH_4_TCS, ETH_8_TCS); 2060 return -EINVAL; 2061 } 2062 } 2063 2064 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { 2065 const struct rte_eth_dcb_tx_conf *conf; 2066 2067 if (nb_tx_q != IXGBE_DCB_NB_QUEUES) { 2068 PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.", 2069 IXGBE_DCB_NB_QUEUES); 2070 return -EINVAL; 2071 } 2072 conf = &dev_conf->tx_adv_conf.dcb_tx_conf; 2073 if (!(conf->nb_tcs == ETH_4_TCS || 2074 conf->nb_tcs == ETH_8_TCS)) { 2075 PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" 2076 " and nb_tcs != %d.", 2077 ETH_4_TCS, ETH_8_TCS); 2078 return -EINVAL; 2079 } 2080 } 2081 2082 /* 2083 * When DCB/VT is off, maximum number of queues changes, 2084 * except for 82598EB, which remains constant. 
2085 */ 2086 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && 2087 hw->mac.type != ixgbe_mac_82598EB) { 2088 if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) { 2089 PMD_INIT_LOG(ERR, 2090 "Neither VT nor DCB are enabled, " 2091 "nb_tx_q > %d.", 2092 IXGBE_NONE_MODE_TX_NB_QUEUES); 2093 return -EINVAL; 2094 } 2095 } 2096 } 2097 return 0; 2098 } 2099 2100 static int 2101 ixgbe_dev_configure(struct rte_eth_dev *dev) 2102 { 2103 struct ixgbe_interrupt *intr = 2104 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2105 struct ixgbe_adapter *adapter = 2106 (struct ixgbe_adapter *)dev->data->dev_private; 2107 int ret; 2108 2109 PMD_INIT_FUNC_TRACE(); 2110 /* multipe queue mode checking */ 2111 ret = ixgbe_check_mq_mode(dev); 2112 if (ret != 0) { 2113 PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.", 2114 ret); 2115 return ret; 2116 } 2117 2118 /* set flag to update link status after init */ 2119 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2120 2121 /* 2122 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 2123 * allocation or vector Rx preconditions we will reset it. 2124 */ 2125 adapter->rx_bulk_alloc_allowed = true; 2126 adapter->rx_vec_allowed = true; 2127 2128 return 0; 2129 } 2130 2131 static void 2132 ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) 2133 { 2134 struct ixgbe_hw *hw = 2135 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2136 struct ixgbe_interrupt *intr = 2137 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 2138 uint32_t gpie; 2139 2140 /* only set up it on X550EM_X */ 2141 if (hw->mac.type == ixgbe_mac_X550EM_x) { 2142 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 2143 gpie |= IXGBE_SDP0_GPIEN_X550EM_x; 2144 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2145 if (hw->phy.type == ixgbe_phy_x550em_ext_t) 2146 intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x; 2147 } 2148 } 2149 2150 /* 2151 * Configure device link speed and setup link. 2152 * It returns 0 on success. 
2153 */ 2154 static int 2155 ixgbe_dev_start(struct rte_eth_dev *dev) 2156 { 2157 struct ixgbe_hw *hw = 2158 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2159 struct ixgbe_vf_info *vfinfo = 2160 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2161 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; 2162 uint32_t intr_vector = 0; 2163 int err, link_up = 0, negotiate = 0; 2164 uint32_t speed = 0; 2165 int mask = 0; 2166 int status; 2167 uint16_t vf, idx; 2168 uint32_t *link_speeds; 2169 2170 PMD_INIT_FUNC_TRACE(); 2171 2172 /* IXGBE devices don't support: 2173 * - half duplex (checked afterwards for valid speeds) 2174 * - fixed speed: TODO implement 2175 */ 2176 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { 2177 PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fix speed not supported", 2178 dev->data->port_id); 2179 return -EINVAL; 2180 } 2181 2182 /* disable uio/vfio intr/eventfd mapping */ 2183 rte_intr_disable(intr_handle); 2184 2185 /* stop adapter */ 2186 hw->adapter_stopped = 0; 2187 ixgbe_stop_adapter(hw); 2188 2189 /* reinitialize adapter 2190 * this calls reset and start 2191 */ 2192 status = ixgbe_pf_reset_hw(hw); 2193 if (status != 0) 2194 return -1; 2195 hw->mac.ops.start_hw(hw); 2196 hw->mac.get_link_status = true; 2197 2198 /* configure PF module if SRIOV enabled */ 2199 ixgbe_pf_host_configure(dev); 2200 2201 ixgbe_dev_phy_intr_setup(dev); 2202 2203 /* check and configure queue intr-vector mapping */ 2204 if ((rte_intr_cap_multiple(intr_handle) || 2205 !RTE_ETH_DEV_SRIOV(dev).active) && 2206 dev->data->dev_conf.intr_conf.rxq != 0) { 2207 intr_vector = dev->data->nb_rx_queues; 2208 if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) { 2209 PMD_INIT_LOG(ERR, "At most %d intr queues supported", 2210 IXGBE_MAX_INTR_QUEUE_NUM); 2211 return -ENOTSUP; 2212 } 2213 if (rte_intr_efd_enable(intr_handle, intr_vector)) 2214 return -1; 2215 } 2216 2217 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 2218 intr_handle->intr_vec = 2219 rte_zmalloc("intr_vec", 2220 dev->data->nb_rx_queues * sizeof(int), 0); 2221 if (intr_handle->intr_vec == NULL) { 2222 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 2223 " intr_vec\n", dev->data->nb_rx_queues); 2224 return -ENOMEM; 2225 } 2226 } 2227 2228 /* confiugre msix for sleep until rx interrupt */ 2229 ixgbe_configure_msix(dev); 2230 2231 /* initialize transmission unit */ 2232 ixgbe_dev_tx_init(dev); 2233 2234 /* This can fail when allocating mbufs for descriptor rings */ 2235 err = ixgbe_dev_rx_init(dev); 2236 if (err) { 2237 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); 2238 goto error; 2239 } 2240 2241 err = ixgbe_dev_rxtx_start(dev); 2242 if (err < 0) { 2243 PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); 2244 goto error; 2245 } 2246 2247 /* Skip link setup if loopback mode is enabled for 82599. 
*/ 2248 if (hw->mac.type == ixgbe_mac_82599EB && 2249 dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX) 2250 goto skip_link_setup; 2251 2252 if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { 2253 err = hw->mac.ops.setup_sfp(hw); 2254 if (err) 2255 goto error; 2256 } 2257 2258 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2259 /* Turn on the copper */ 2260 ixgbe_set_phy_power(hw, true); 2261 } else { 2262 /* Turn on the laser */ 2263 ixgbe_enable_tx_laser(hw); 2264 } 2265 2266 err = ixgbe_check_link(hw, &speed, &link_up, 0); 2267 if (err) 2268 goto error; 2269 dev->data->dev_link.link_status = link_up; 2270 2271 err = ixgbe_get_link_capabilities(hw, &speed, &negotiate); 2272 if (err) 2273 goto error; 2274 2275 link_speeds = &dev->data->dev_conf.link_speeds; 2276 if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | 2277 ETH_LINK_SPEED_10G)) { 2278 PMD_INIT_LOG(ERR, "Invalid link setting"); 2279 goto error; 2280 } 2281 2282 speed = 0x0; 2283 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) { 2284 speed = (hw->mac.type != ixgbe_mac_82598EB) ? 2285 IXGBE_LINK_SPEED_82599_AUTONEG : 2286 IXGBE_LINK_SPEED_82598_AUTONEG; 2287 } else { 2288 if (*link_speeds & ETH_LINK_SPEED_10G) 2289 speed |= IXGBE_LINK_SPEED_10GB_FULL; 2290 if (*link_speeds & ETH_LINK_SPEED_1G) 2291 speed |= IXGBE_LINK_SPEED_1GB_FULL; 2292 if (*link_speeds & ETH_LINK_SPEED_100M) 2293 speed |= IXGBE_LINK_SPEED_100_FULL; 2294 } 2295 2296 err = ixgbe_setup_link(hw, speed, link_up); 2297 if (err) 2298 goto error; 2299 2300 skip_link_setup: 2301 2302 if (rte_intr_allow_others(intr_handle)) { 2303 /* check if lsc interrupt is enabled */ 2304 if (dev->data->dev_conf.intr_conf.lsc != 0) 2305 ixgbe_dev_lsc_interrupt_setup(dev); 2306 } else { 2307 rte_intr_callback_unregister(intr_handle, 2308 ixgbe_dev_interrupt_handler, 2309 (void *)dev); 2310 if (dev->data->dev_conf.intr_conf.lsc != 0) 2311 PMD_INIT_LOG(INFO, "lsc won't enable because of" 2312 " no intr multiplex\n"); 2313 } 2314 2315 /* check if rxq interrupt is enabled */ 2316 if (dev->data->dev_conf.intr_conf.rxq != 0 && 2317 rte_intr_dp_is_en(intr_handle)) 2318 ixgbe_dev_rxq_interrupt_setup(dev); 2319 2320 /* enable uio/vfio intr/eventfd mapping */ 2321 rte_intr_enable(intr_handle); 2322 2323 /* resume enabled intr since hw reset */ 2324 ixgbe_enable_intr(dev); 2325 2326 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | 2327 ETH_VLAN_EXTEND_MASK; 2328 ixgbe_vlan_offload_set(dev, mask); 2329 2330 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { 2331 /* Enable vlan filtering for VMDq */ 2332 ixgbe_vmdq_vlan_hw_filter_enable(dev); 2333 } 2334 2335 /* Configure DCB hw */ 2336 ixgbe_configure_dcb(dev); 2337 2338 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { 2339 err = ixgbe_fdir_configure(dev); 2340 if (err) 2341 goto error; 2342 } 2343 2344 /* Restore vf rate limit */ 2345 if (vfinfo != NULL) { 2346 for (vf = 0; vf < dev->pci_dev->max_vfs; vf++) 2347 for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) 2348 if (vfinfo[vf].tx_rate[idx] != 0) 2349 ixgbe_set_vf_rate_limit(dev, vf, 2350 vfinfo[vf].tx_rate[idx], 2351 1 << idx); 2352 } 2353 2354 ixgbe_restore_statistics_mapping(dev); 2355 2356 return 0; 2357 2358 error: 2359 PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err); 2360 ixgbe_dev_clear_queues(dev); 2361 return -EIO; 2362 } 2363 2364 /* 2365 * Stop device: disable rx and tx functions to allow for reconfiguring. 
2366 */ 2367 static void 2368 ixgbe_dev_stop(struct rte_eth_dev *dev) 2369 { 2370 struct rte_eth_link link; 2371 struct ixgbe_hw *hw = 2372 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2373 struct ixgbe_vf_info *vfinfo = 2374 *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); 2375 struct ixgbe_filter_info *filter_info = 2376 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 2377 struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next; 2378 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; 2379 int vf; 2380 2381 PMD_INIT_FUNC_TRACE(); 2382 2383 /* disable interrupts */ 2384 ixgbe_disable_intr(hw); 2385 2386 /* reset the NIC */ 2387 ixgbe_pf_reset_hw(hw); 2388 hw->adapter_stopped = 0; 2389 2390 /* stop adapter */ 2391 ixgbe_stop_adapter(hw); 2392 2393 for (vf = 0; vfinfo != NULL && 2394 vf < dev->pci_dev->max_vfs; vf++) 2395 vfinfo[vf].clear_to_send = false; 2396 2397 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2398 /* Turn off the copper */ 2399 ixgbe_set_phy_power(hw, false); 2400 } else { 2401 /* Turn off the laser */ 2402 ixgbe_disable_tx_laser(hw); 2403 } 2404 2405 ixgbe_dev_clear_queues(dev); 2406 2407 /* Clear stored conf */ 2408 dev->data->scattered_rx = 0; 2409 dev->data->lro = 0; 2410 2411 /* Clear recorded link status */ 2412 memset(&link, 0, sizeof(link)); 2413 rte_ixgbe_dev_atomic_write_link_status(dev, &link); 2414 2415 /* Remove all ntuple filters of the device */ 2416 for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list); 2417 p_5tuple != NULL; p_5tuple = p_5tuple_next) { 2418 p_5tuple_next = TAILQ_NEXT(p_5tuple, entries); 2419 TAILQ_REMOVE(&filter_info->fivetuple_list, 2420 p_5tuple, entries); 2421 rte_free(p_5tuple); 2422 } 2423 memset(filter_info->fivetuple_mask, 0, 2424 sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); 2425 2426 if (!rte_intr_allow_others(intr_handle)) 2427 /* resume to the default handler */ 2428 rte_intr_callback_register(intr_handle, 2429 ixgbe_dev_interrupt_handler, 2430 (void *)dev); 2431 2432 /* Clean datapath event and queue/vec mapping */ 2433 rte_intr_efd_disable(intr_handle); 2434 if (intr_handle->intr_vec != NULL) { 2435 rte_free(intr_handle->intr_vec); 2436 intr_handle->intr_vec = NULL; 2437 } 2438 } 2439 2440 /* 2441 * Set device link up: enable tx. 2442 */ 2443 static int 2444 ixgbe_dev_set_link_up(struct rte_eth_dev *dev) 2445 { 2446 struct ixgbe_hw *hw = 2447 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2448 if (hw->mac.type == ixgbe_mac_82599EB) { 2449 #ifdef RTE_NIC_BYPASS 2450 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2451 /* Not suported in bypass mode */ 2452 PMD_INIT_LOG(ERR, "Set link up is not supported " 2453 "by device id 0x%x", hw->device_id); 2454 return -ENOTSUP; 2455 } 2456 #endif 2457 } 2458 2459 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2460 /* Turn on the copper */ 2461 ixgbe_set_phy_power(hw, true); 2462 } else { 2463 /* Turn on the laser */ 2464 ixgbe_enable_tx_laser(hw); 2465 } 2466 2467 return 0; 2468 } 2469 2470 /* 2471 * Set device link down: disable tx. 
2472 */ 2473 static int 2474 ixgbe_dev_set_link_down(struct rte_eth_dev *dev) 2475 { 2476 struct ixgbe_hw *hw = 2477 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2478 if (hw->mac.type == ixgbe_mac_82599EB) { 2479 #ifdef RTE_NIC_BYPASS 2480 if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { 2481 /* Not suported in bypass mode */ 2482 PMD_INIT_LOG(ERR, "Set link down is not supported " 2483 "by device id 0x%x", hw->device_id); 2484 return -ENOTSUP; 2485 } 2486 #endif 2487 } 2488 2489 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 2490 /* Turn off the copper */ 2491 ixgbe_set_phy_power(hw, false); 2492 } else { 2493 /* Turn off the laser */ 2494 ixgbe_disable_tx_laser(hw); 2495 } 2496 2497 return 0; 2498 } 2499 2500 /* 2501 * Reest and stop device. 2502 */ 2503 static void 2504 ixgbe_dev_close(struct rte_eth_dev *dev) 2505 { 2506 struct ixgbe_hw *hw = 2507 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2508 2509 PMD_INIT_FUNC_TRACE(); 2510 2511 ixgbe_pf_reset_hw(hw); 2512 2513 ixgbe_dev_stop(dev); 2514 hw->adapter_stopped = 1; 2515 2516 ixgbe_dev_free_queues(dev); 2517 2518 ixgbe_disable_pcie_master(hw); 2519 2520 /* reprogram the RAR[0] in case user changed it. */ 2521 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 2522 } 2523 2524 static void 2525 ixgbe_read_stats_registers(struct ixgbe_hw *hw, 2526 struct ixgbe_hw_stats *hw_stats, 2527 uint64_t *total_missed_rx, uint64_t *total_qbrc, 2528 uint64_t *total_qprc, uint64_t *total_qprdc) 2529 { 2530 uint32_t bprc, lxon, lxoff, total; 2531 uint32_t delta_gprc = 0; 2532 unsigned i; 2533 /* Workaround for RX byte count not including CRC bytes when CRC 2534 + * strip is enabled. CRC bytes are removed from counters when crc_strip 2535 * is disabled. 2536 + */ 2537 int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & 2538 IXGBE_HLREG0_RXCRCSTRP); 2539 2540 hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 2541 hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); 2542 hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); 2543 hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); 2544 2545 for (i = 0; i < 8; i++) { 2546 uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); 2547 2548 /* global total per queue */ 2549 hw_stats->mpc[i] += mp; 2550 /* Running comprehensive total for stats display */ 2551 *total_missed_rx += hw_stats->mpc[i]; 2552 if (hw->mac.type == ixgbe_mac_82598EB) { 2553 hw_stats->rnbc[i] += 2554 IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 2555 hw_stats->pxonrxc[i] += 2556 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 2557 hw_stats->pxoffrxc[i] += 2558 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 2559 } else { 2560 hw_stats->pxonrxc[i] += 2561 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); 2562 hw_stats->pxoffrxc[i] += 2563 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 2564 hw_stats->pxon2offc[i] += 2565 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); 2566 } 2567 hw_stats->pxontxc[i] += 2568 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 2569 hw_stats->pxofftxc[i] += 2570 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 2571 } 2572 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 2573 uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 2574 uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i)); 2575 uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 2576 2577 delta_gprc += delta_qprc; 2578 2579 hw_stats->qprc[i] += delta_qprc; 2580 hw_stats->qptc[i] += delta_qptc; 2581 2582 hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); 2583 hw_stats->qbrc[i] += 2584 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); 2585 if (crc_strip == 0) 2586 
hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN; 2587 2588 hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); 2589 hw_stats->qbtc[i] += 2590 ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32); 2591 2592 hw_stats->qprdc[i] += delta_qprdc; 2593 *total_qprdc += hw_stats->qprdc[i]; 2594 2595 *total_qprc += hw_stats->qprc[i]; 2596 *total_qbrc += hw_stats->qbrc[i]; 2597 } 2598 hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); 2599 hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); 2600 hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); 2601 2602 /* 2603 * An errata states that gprc actually counts good + missed packets: 2604 * Workaround to set gprc to summated queue packet receives 2605 */ 2606 hw_stats->gprc = *total_qprc; 2607 2608 if (hw->mac.type != ixgbe_mac_82598EB) { 2609 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 2610 hw_stats->gorc += ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); 2611 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 2612 hw_stats->gotc += ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); 2613 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 2614 hw_stats->tor += ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); 2615 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 2616 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); 2617 } else { 2618 hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 2619 hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 2620 /* 82598 only has a counter in the high register */ 2621 hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); 2622 hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); 2623 hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); 2624 } 2625 uint64_t old_tpr = hw_stats->tpr; 2626 2627 hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); 2628 hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); 2629 2630 if (crc_strip == 0) 2631 hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN; 2632 2633 uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); 2634 hw_stats->gptc += delta_gptc; 2635 hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN; 2636 hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN; 2637 2638 /* 2639 * Workaround: mprc hardware is incorrectly counting 2640 * broadcasts, so for now we subtract those. 
2641 */ 2642 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 2643 hw_stats->bprc += bprc; 2644 hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); 2645 if (hw->mac.type == ixgbe_mac_82598EB) 2646 hw_stats->mprc -= bprc; 2647 2648 hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); 2649 hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); 2650 hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); 2651 hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); 2652 hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); 2653 hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); 2654 2655 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); 2656 hw_stats->lxontxc += lxon; 2657 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); 2658 hw_stats->lxofftxc += lxoff; 2659 total = lxon + lxoff; 2660 2661 hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); 2662 hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); 2663 hw_stats->gptc -= total; 2664 hw_stats->mptc -= total; 2665 hw_stats->ptc64 -= total; 2666 hw_stats->gotc -= total * ETHER_MIN_LEN; 2667 2668 hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); 2669 hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); 2670 hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); 2671 hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); 2672 hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); 2673 hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); 2674 hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); 2675 hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); 2676 hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); 2677 hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); 2678 hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); 2679 hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); 2680 hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 2681 hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); 2682 hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); 2683 hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); 2684 /* Only read FCOE on 82599 */ 2685 if (hw->mac.type != ixgbe_mac_82598EB) { 2686 hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); 2687 hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); 2688 hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); 2689 hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 2690 hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 2691 } 2692 2693 /* Flow Director Stats registers */ 2694 hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 2695 hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 2696 } 2697 2698 /* 2699 * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c 2700 */ 2701 static void 2702 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 2703 { 2704 struct ixgbe_hw *hw = 2705 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2706 struct ixgbe_hw_stats *hw_stats = 2707 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2708 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 2709 unsigned i; 2710 2711 total_missed_rx = 0; 2712 total_qbrc = 0; 2713 total_qprc = 0; 2714 total_qprdc = 0; 2715 2716 ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc, 2717 &total_qprc, &total_qprdc); 2718 2719 if (stats == NULL) 2720 return; 2721 2722 /* Fill out the rte_eth_stats statistics structure */ 2723 stats->ipackets = total_qprc; 2724 stats->ibytes = total_qbrc; 2725 stats->opackets = hw_stats->gptc; 2726 stats->obytes = hw_stats->gotc; 2727 2728 for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { 2729 stats->q_ipackets[i] = hw_stats->qprc[i]; 2730 
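		/*
		 * The per-queue values exported here are the software accumulations
		 * kept in ixgbe_hw_stats: the hardware queue counters are read as
		 * deltas and summed into hw_stats by ixgbe_read_stats_registers(),
		 * since those registers clear on read (see ixgbe_dev_stats_reset()).
		 */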
stats->q_opackets[i] = hw_stats->qptc[i]; 2731 stats->q_ibytes[i] = hw_stats->qbrc[i]; 2732 stats->q_obytes[i] = hw_stats->qbtc[i]; 2733 stats->q_errors[i] = hw_stats->qprdc[i]; 2734 } 2735 2736 /* Rx Errors */ 2737 stats->imissed = total_missed_rx; 2738 stats->ierrors = hw_stats->crcerrs + 2739 hw_stats->mspdc + 2740 hw_stats->rlec + 2741 hw_stats->ruc + 2742 hw_stats->roc + 2743 hw_stats->illerrc + 2744 hw_stats->errbc + 2745 hw_stats->rfc + 2746 hw_stats->fccrc + 2747 hw_stats->fclast; 2748 2749 /* Tx Errors */ 2750 stats->oerrors = 0; 2751 } 2752 2753 static void 2754 ixgbe_dev_stats_reset(struct rte_eth_dev *dev) 2755 { 2756 struct ixgbe_hw_stats *stats = 2757 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2758 2759 /* HW registers are cleared on read */ 2760 ixgbe_dev_stats_get(dev, NULL); 2761 2762 /* Reset software totals */ 2763 memset(stats, 0, sizeof(*stats)); 2764 } 2765 2766 /* This function calculates the number of xstats based on the current config */ 2767 static unsigned 2768 ixgbe_xstats_calc_num(void) { 2769 return IXGBE_NB_HW_STATS + 2770 (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + 2771 (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); 2772 } 2773 2774 static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 2775 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit) 2776 { 2777 const unsigned cnt_stats = ixgbe_xstats_calc_num(); 2778 unsigned stat, i, count; 2779 2780 if (xstats_names != NULL) { 2781 count = 0; 2782 2783 /* Note: limit >= cnt_stats checked upstream 2784 * in rte_eth_xstats_names() 2785 */ 2786 2787 /* Extended stats from ixgbe_hw_stats */ 2788 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 2789 snprintf(xstats_names[count].name, 2790 sizeof(xstats_names[count].name), 2791 "%s", 2792 rte_ixgbe_stats_strings[i].name); 2793 count++; 2794 } 2795 2796 /* RX Priority Stats */ 2797 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 2798 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 2799 snprintf(xstats_names[count].name, 2800 sizeof(xstats_names[count].name), 2801 "rx_priority%u_%s", i, 2802 rte_ixgbe_rxq_strings[stat].name); 2803 count++; 2804 } 2805 } 2806 2807 /* TX Priority Stats */ 2808 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 2809 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 2810 snprintf(xstats_names[count].name, 2811 sizeof(xstats_names[count].name), 2812 "tx_priority%u_%s", i, 2813 rte_ixgbe_txq_strings[stat].name); 2814 count++; 2815 } 2816 } 2817 } 2818 return cnt_stats; 2819 } 2820 2821 static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, 2822 struct rte_eth_xstat_name *xstats_names, unsigned limit) 2823 { 2824 unsigned i; 2825 2826 if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) 2827 return -ENOMEM; 2828 2829 if (xstats_names != NULL) 2830 for (i = 0; i < IXGBEVF_NB_XSTATS; i++) 2831 snprintf(xstats_names[i].name, 2832 sizeof(xstats_names[i].name), 2833 "%s", rte_ixgbevf_stats_strings[i].name); 2834 return IXGBEVF_NB_XSTATS; 2835 } 2836 2837 static int 2838 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 2839 unsigned n) 2840 { 2841 struct ixgbe_hw *hw = 2842 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2843 struct ixgbe_hw_stats *hw_stats = 2844 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2845 uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; 2846 unsigned i, stat, count = 0; 2847 2848 count = ixgbe_xstats_calc_num(); 2849 2850 if (n < count) 2851 return count; 2852 2853 total_missed_rx 
= 0; 2854 total_qbrc = 0; 2855 total_qprc = 0; 2856 total_qprdc = 0; 2857 2858 ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc, 2859 &total_qprc, &total_qprdc); 2860 2861 /* If this is a reset xstats is NULL, and we have cleared the 2862 * registers by reading them. 2863 */ 2864 if (!xstats) 2865 return 0; 2866 2867 /* Extended stats from ixgbe_hw_stats */ 2868 count = 0; 2869 for (i = 0; i < IXGBE_NB_HW_STATS; i++) { 2870 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 2871 rte_ixgbe_stats_strings[i].offset); 2872 count++; 2873 } 2874 2875 /* RX Priority Stats */ 2876 for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { 2877 for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { 2878 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 2879 rte_ixgbe_rxq_strings[stat].offset + 2880 (sizeof(uint64_t) * i)); 2881 count++; 2882 } 2883 } 2884 2885 /* TX Priority Stats */ 2886 for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { 2887 for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { 2888 xstats[count].value = *(uint64_t *)(((char *)hw_stats) + 2889 rte_ixgbe_txq_strings[stat].offset + 2890 (sizeof(uint64_t) * i)); 2891 count++; 2892 } 2893 } 2894 return count; 2895 } 2896 2897 static void 2898 ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) 2899 { 2900 struct ixgbe_hw_stats *stats = 2901 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2902 2903 unsigned count = ixgbe_xstats_calc_num(); 2904 2905 /* HW registers are cleared on read */ 2906 ixgbe_dev_xstats_get(dev, NULL, count); 2907 2908 /* Reset software totals */ 2909 memset(stats, 0, sizeof(*stats)); 2910 } 2911 2912 static void 2913 ixgbevf_update_stats(struct rte_eth_dev *dev) 2914 { 2915 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 2916 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 2917 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2918 2919 /* Good Rx packet, include VF loopback */ 2920 UPDATE_VF_STAT(IXGBE_VFGPRC, 2921 hw_stats->last_vfgprc, hw_stats->vfgprc); 2922 2923 /* Good Rx octets, include VF loopback */ 2924 UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 2925 hw_stats->last_vfgorc, hw_stats->vfgorc); 2926 2927 /* Good Tx packet, include VF loopback */ 2928 UPDATE_VF_STAT(IXGBE_VFGPTC, 2929 hw_stats->last_vfgptc, hw_stats->vfgptc); 2930 2931 /* Good Tx octets, include VF loopback */ 2932 UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 2933 hw_stats->last_vfgotc, hw_stats->vfgotc); 2934 2935 /* Rx Multicst Packet */ 2936 UPDATE_VF_STAT(IXGBE_VFMPRC, 2937 hw_stats->last_vfmprc, hw_stats->vfmprc); 2938 } 2939 2940 static int 2941 ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 2942 unsigned n) 2943 { 2944 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 2945 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2946 unsigned i; 2947 2948 if (n < IXGBEVF_NB_XSTATS) 2949 return IXGBEVF_NB_XSTATS; 2950 2951 ixgbevf_update_stats(dev); 2952 2953 if (!xstats) 2954 return 0; 2955 2956 /* Extended stats */ 2957 for (i = 0; i < IXGBEVF_NB_XSTATS; i++) { 2958 xstats[i].value = *(uint64_t *)(((char *)hw_stats) + 2959 rte_ixgbevf_stats_strings[i].offset); 2960 } 2961 2962 return IXGBEVF_NB_XSTATS; 2963 } 2964 2965 static void 2966 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 2967 { 2968 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 2969 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2970 2971 ixgbevf_update_stats(dev); 2972 2973 if (stats 
== NULL) 2974 return; 2975 2976 stats->ipackets = hw_stats->vfgprc; 2977 stats->ibytes = hw_stats->vfgorc; 2978 stats->opackets = hw_stats->vfgptc; 2979 stats->obytes = hw_stats->vfgotc; 2980 } 2981 2982 static void 2983 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) 2984 { 2985 struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 2986 IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); 2987 2988 /* Sync HW register to the last stats */ 2989 ixgbevf_dev_stats_get(dev, NULL); 2990 2991 /* reset HW current stats*/ 2992 hw_stats->vfgprc = 0; 2993 hw_stats->vfgorc = 0; 2994 hw_stats->vfgptc = 0; 2995 hw_stats->vfgotc = 0; 2996 } 2997 2998 static void 2999 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 3000 { 3001 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3002 struct rte_eth_conf *dev_conf = &dev->data->dev_conf; 3003 3004 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3005 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3006 if (RTE_ETH_DEV_SRIOV(dev).active == 0) { 3007 /* 3008 * When DCB/VT is off, maximum number of queues changes, 3009 * except for 82598EB, which remains constant. 3010 */ 3011 if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && 3012 hw->mac.type != ixgbe_mac_82598EB) 3013 dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES; 3014 } 3015 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ 3016 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ 3017 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3018 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3019 dev_info->max_vfs = dev->pci_dev->max_vfs; 3020 if (hw->mac.type == ixgbe_mac_82598EB) 3021 dev_info->max_vmdq_pools = ETH_16_POOLS; 3022 else 3023 dev_info->max_vmdq_pools = ETH_64_POOLS; 3024 dev_info->vmdq_queue_num = dev_info->max_rx_queues; 3025 dev_info->rx_offload_capa = 3026 DEV_RX_OFFLOAD_VLAN_STRIP | 3027 DEV_RX_OFFLOAD_IPV4_CKSUM | 3028 DEV_RX_OFFLOAD_UDP_CKSUM | 3029 DEV_RX_OFFLOAD_TCP_CKSUM; 3030 3031 /* 3032 * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV 3033 * mode. 
3034 */ 3035 if ((hw->mac.type == ixgbe_mac_82599EB || 3036 hw->mac.type == ixgbe_mac_X540) && 3037 !RTE_ETH_DEV_SRIOV(dev).active) 3038 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO; 3039 3040 if (hw->mac.type == ixgbe_mac_X550 || 3041 hw->mac.type == ixgbe_mac_X550EM_x || 3042 hw->mac.type == ixgbe_mac_X550EM_a) 3043 dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; 3044 3045 dev_info->tx_offload_capa = 3046 DEV_TX_OFFLOAD_VLAN_INSERT | 3047 DEV_TX_OFFLOAD_IPV4_CKSUM | 3048 DEV_TX_OFFLOAD_UDP_CKSUM | 3049 DEV_TX_OFFLOAD_TCP_CKSUM | 3050 DEV_TX_OFFLOAD_SCTP_CKSUM | 3051 DEV_TX_OFFLOAD_TCP_TSO; 3052 3053 if (hw->mac.type == ixgbe_mac_X550 || 3054 hw->mac.type == ixgbe_mac_X550EM_x || 3055 hw->mac.type == ixgbe_mac_X550EM_a) 3056 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; 3057 3058 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3059 .rx_thresh = { 3060 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3061 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3062 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3063 }, 3064 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3065 .rx_drop_en = 0, 3066 }; 3067 3068 dev_info->default_txconf = (struct rte_eth_txconf) { 3069 .tx_thresh = { 3070 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 3071 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 3072 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 3073 }, 3074 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 3075 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 3076 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | 3077 ETH_TXQ_FLAGS_NOOFFLOADS, 3078 }; 3079 3080 dev_info->rx_desc_lim = rx_desc_lim; 3081 dev_info->tx_desc_lim = tx_desc_lim; 3082 3083 dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); 3084 dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); 3085 dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; 3086 3087 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; 3088 if (hw->mac.type == ixgbe_mac_X540 || 3089 hw->mac.type == ixgbe_mac_X540_vf || 3090 hw->mac.type == ixgbe_mac_X550 || 3091 hw->mac.type == ixgbe_mac_X550_vf) { 3092 dev_info->speed_capa |= ETH_LINK_SPEED_100M; 3093 } 3094 } 3095 3096 static const uint32_t * 3097 ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) 3098 { 3099 static const uint32_t ptypes[] = { 3100 /* For non-vec functions, 3101 * refers to ixgbe_rxd_pkt_info_to_pkt_type(); 3102 * for vec functions, 3103 * refers to _recv_raw_pkts_vec(). 
3104 */ 3105 RTE_PTYPE_L2_ETHER, 3106 RTE_PTYPE_L3_IPV4, 3107 RTE_PTYPE_L3_IPV4_EXT, 3108 RTE_PTYPE_L3_IPV6, 3109 RTE_PTYPE_L3_IPV6_EXT, 3110 RTE_PTYPE_L4_SCTP, 3111 RTE_PTYPE_L4_TCP, 3112 RTE_PTYPE_L4_UDP, 3113 RTE_PTYPE_TUNNEL_IP, 3114 RTE_PTYPE_INNER_L3_IPV6, 3115 RTE_PTYPE_INNER_L3_IPV6_EXT, 3116 RTE_PTYPE_INNER_L4_TCP, 3117 RTE_PTYPE_INNER_L4_UDP, 3118 RTE_PTYPE_UNKNOWN 3119 }; 3120 3121 if (dev->rx_pkt_burst == ixgbe_recv_pkts || 3122 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || 3123 dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || 3124 dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) 3125 return ptypes; 3126 return NULL; 3127 } 3128 3129 static void 3130 ixgbevf_dev_info_get(struct rte_eth_dev *dev, 3131 struct rte_eth_dev_info *dev_info) 3132 { 3133 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3134 3135 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; 3136 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; 3137 dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ 3138 dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */ 3139 dev_info->max_mac_addrs = hw->mac.num_rar_entries; 3140 dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; 3141 dev_info->max_vfs = dev->pci_dev->max_vfs; 3142 if (hw->mac.type == ixgbe_mac_82598EB) 3143 dev_info->max_vmdq_pools = ETH_16_POOLS; 3144 else 3145 dev_info->max_vmdq_pools = ETH_64_POOLS; 3146 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | 3147 DEV_RX_OFFLOAD_IPV4_CKSUM | 3148 DEV_RX_OFFLOAD_UDP_CKSUM | 3149 DEV_RX_OFFLOAD_TCP_CKSUM; 3150 dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | 3151 DEV_TX_OFFLOAD_IPV4_CKSUM | 3152 DEV_TX_OFFLOAD_UDP_CKSUM | 3153 DEV_TX_OFFLOAD_TCP_CKSUM | 3154 DEV_TX_OFFLOAD_SCTP_CKSUM | 3155 DEV_TX_OFFLOAD_TCP_TSO; 3156 3157 dev_info->default_rxconf = (struct rte_eth_rxconf) { 3158 .rx_thresh = { 3159 .pthresh = IXGBE_DEFAULT_RX_PTHRESH, 3160 .hthresh = IXGBE_DEFAULT_RX_HTHRESH, 3161 .wthresh = IXGBE_DEFAULT_RX_WTHRESH, 3162 }, 3163 .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, 3164 .rx_drop_en = 0, 3165 }; 3166 3167 dev_info->default_txconf = (struct rte_eth_txconf) { 3168 .tx_thresh = { 3169 .pthresh = IXGBE_DEFAULT_TX_PTHRESH, 3170 .hthresh = IXGBE_DEFAULT_TX_HTHRESH, 3171 .wthresh = IXGBE_DEFAULT_TX_WTHRESH, 3172 }, 3173 .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, 3174 .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, 3175 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | 3176 ETH_TXQ_FLAGS_NOOFFLOADS, 3177 }; 3178 3179 dev_info->rx_desc_lim = rx_desc_lim; 3180 dev_info->tx_desc_lim = tx_desc_lim; 3181 } 3182 3183 /* return 0 means link status changed, -1 means not changed */ 3184 static int 3185 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) 3186 { 3187 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3188 struct rte_eth_link link, old; 3189 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 3190 int link_up; 3191 int diag; 3192 3193 link.link_status = ETH_LINK_DOWN; 3194 link.link_speed = 0; 3195 link.link_duplex = ETH_LINK_HALF_DUPLEX; 3196 memset(&old, 0, sizeof(old)); 3197 rte_ixgbe_dev_atomic_read_link_status(dev, &old); 3198 3199 hw->mac.get_link_status = true; 3200 3201 /* check if it needs to wait to complete, if lsc interrupt is enabled */ 3202 if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) 3203 diag = ixgbe_check_link(hw, &link_speed, &link_up, 0); 3204 else 3205 diag = ixgbe_check_link(hw, &link_speed, &link_up, 1); 3206 3207 if (diag 
!= 0) { 3208 link.link_speed = ETH_SPEED_NUM_100M; 3209 link.link_duplex = ETH_LINK_FULL_DUPLEX; 3210 rte_ixgbe_dev_atomic_write_link_status(dev, &link); 3211 if (link.link_status == old.link_status) 3212 return -1; 3213 return 0; 3214 } 3215 3216 if (link_up == 0) { 3217 rte_ixgbe_dev_atomic_write_link_status(dev, &link); 3218 if (link.link_status == old.link_status) 3219 return -1; 3220 return 0; 3221 } 3222 link.link_status = ETH_LINK_UP; 3223 link.link_duplex = ETH_LINK_FULL_DUPLEX; 3224 3225 switch (link_speed) { 3226 default: 3227 case IXGBE_LINK_SPEED_UNKNOWN: 3228 link.link_duplex = ETH_LINK_FULL_DUPLEX; 3229 link.link_speed = ETH_SPEED_NUM_100M; 3230 break; 3231 3232 case IXGBE_LINK_SPEED_100_FULL: 3233 link.link_speed = ETH_SPEED_NUM_100M; 3234 break; 3235 3236 case IXGBE_LINK_SPEED_1GB_FULL: 3237 link.link_speed = ETH_SPEED_NUM_1G; 3238 break; 3239 3240 case IXGBE_LINK_SPEED_10GB_FULL: 3241 link.link_speed = ETH_SPEED_NUM_10G; 3242 break; 3243 } 3244 rte_ixgbe_dev_atomic_write_link_status(dev, &link); 3245 3246 if (link.link_status == old.link_status) 3247 return -1; 3248 3249 return 0; 3250 } 3251 3252 static void 3253 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 3254 { 3255 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3256 uint32_t fctrl; 3257 3258 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3259 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3260 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3261 } 3262 3263 static void 3264 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 3265 { 3266 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3267 uint32_t fctrl; 3268 3269 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3270 fctrl &= (~IXGBE_FCTRL_UPE); 3271 if (dev->data->all_multicast == 1) 3272 fctrl |= IXGBE_FCTRL_MPE; 3273 else 3274 fctrl &= (~IXGBE_FCTRL_MPE); 3275 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3276 } 3277 3278 static void 3279 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) 3280 { 3281 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3282 uint32_t fctrl; 3283 3284 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3285 fctrl |= IXGBE_FCTRL_MPE; 3286 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3287 } 3288 3289 static void 3290 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) 3291 { 3292 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3293 uint32_t fctrl; 3294 3295 if (dev->data->promiscuous == 1) 3296 return; /* must remain in all_multicast mode */ 3297 3298 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3299 fctrl &= (~IXGBE_FCTRL_MPE); 3300 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3301 } 3302 3303 /** 3304 * It clears the interrupt causes and enables the interrupt. 3305 * It will be called once only during nic initialized. 3306 * 3307 * @param dev 3308 * Pointer to struct rte_eth_dev. 3309 * 3310 * @return 3311 * - On success, zero. 3312 * - On failure, a negative value. 3313 */ 3314 static int 3315 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev) 3316 { 3317 struct ixgbe_interrupt *intr = 3318 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 3319 3320 ixgbe_dev_link_status_print(dev); 3321 intr->mask |= IXGBE_EICR_LSC; 3322 3323 return 0; 3324 } 3325 3326 /** 3327 * It clears the interrupt causes and enables the interrupt. 3328 * It will be called once only during nic initialized. 3329 * 3330 * @param dev 3331 * Pointer to struct rte_eth_dev. 3332 * 3333 * @return 3334 * - On success, zero. 3335 * - On failure, a negative value. 
3336 */ 3337 static int 3338 ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) 3339 { 3340 struct ixgbe_interrupt *intr = 3341 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 3342 3343 intr->mask |= IXGBE_EICR_RTX_QUEUE; 3344 3345 return 0; 3346 } 3347 3348 /* 3349 * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update. 3350 * 3351 * @param dev 3352 * Pointer to struct rte_eth_dev. 3353 * 3354 * @return 3355 * - On success, zero. 3356 * - On failure, a negative value. 3357 */ 3358 static int 3359 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) 3360 { 3361 uint32_t eicr; 3362 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3363 struct ixgbe_interrupt *intr = 3364 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 3365 3366 /* clear all cause mask */ 3367 ixgbe_disable_intr(hw); 3368 3369 /* read-on-clear nic registers here */ 3370 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 3371 PMD_DRV_LOG(DEBUG, "eicr %x", eicr); 3372 3373 intr->flags = 0; 3374 3375 /* set flag for async link update */ 3376 if (eicr & IXGBE_EICR_LSC) 3377 intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 3378 3379 if (eicr & IXGBE_EICR_MAILBOX) 3380 intr->flags |= IXGBE_FLAG_MAILBOX; 3381 3382 if (hw->mac.type == ixgbe_mac_X550EM_x && 3383 hw->phy.type == ixgbe_phy_x550em_ext_t && 3384 (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x)) 3385 intr->flags |= IXGBE_FLAG_PHY_INTERRUPT; 3386 3387 return 0; 3388 } 3389 3390 /** 3391 * It gets and then prints the link status. 3392 * 3393 * @param dev 3394 * Pointer to struct rte_eth_dev. 3395 * 3396 * @return 3397 * - On success, zero. 3398 * - On failure, a negative value. 3399 */ 3400 static void 3401 ixgbe_dev_link_status_print(struct rte_eth_dev *dev) 3402 { 3403 struct rte_eth_link link; 3404 3405 memset(&link, 0, sizeof(link)); 3406 rte_ixgbe_dev_atomic_read_link_status(dev, &link); 3407 if (link.link_status) { 3408 PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", 3409 (int)(dev->data->port_id), 3410 (unsigned)link.link_speed, 3411 link.link_duplex == ETH_LINK_FULL_DUPLEX ? 3412 "full-duplex" : "half-duplex"); 3413 } else { 3414 PMD_INIT_LOG(INFO, " Port %d: Link Down", 3415 (int)(dev->data->port_id)); 3416 } 3417 PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d", 3418 dev->pci_dev->addr.domain, 3419 dev->pci_dev->addr.bus, 3420 dev->pci_dev->addr.devid, 3421 dev->pci_dev->addr.function); 3422 } 3423 3424 /* 3425 * It executes link_update after knowing an interrupt occurred. 3426 * 3427 * @param dev 3428 * Pointer to struct rte_eth_dev. 3429 * 3430 * @return 3431 * - On success, zero. 3432 * - On failure, a negative value. 
3433 */ 3434 static int 3435 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) 3436 { 3437 struct ixgbe_interrupt *intr = 3438 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 3439 int64_t timeout; 3440 struct rte_eth_link link; 3441 int intr_enable_delay = false; 3442 struct ixgbe_hw *hw = 3443 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3444 3445 PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); 3446 3447 if (intr->flags & IXGBE_FLAG_MAILBOX) { 3448 ixgbe_pf_mbx_process(dev); 3449 intr->flags &= ~IXGBE_FLAG_MAILBOX; 3450 } 3451 3452 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 3453 ixgbe_handle_lasi(hw); 3454 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 3455 } 3456 3457 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 3458 /* get the link status before link update, for predicting later */ 3459 memset(&link, 0, sizeof(link)); 3460 rte_ixgbe_dev_atomic_read_link_status(dev, &link); 3461 3462 ixgbe_dev_link_update(dev, 0); 3463 3464 /* likely to up */ 3465 if (!link.link_status) 3466 /* handle it 1 sec later, wait it being stable */ 3467 timeout = IXGBE_LINK_UP_CHECK_TIMEOUT; 3468 /* likely to down */ 3469 else 3470 /* handle it 4 sec later, wait it being stable */ 3471 timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; 3472 3473 ixgbe_dev_link_status_print(dev); 3474 3475 intr_enable_delay = true; 3476 } 3477 3478 if (intr_enable_delay) { 3479 if (rte_eal_alarm_set(timeout * 1000, 3480 ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) 3481 PMD_DRV_LOG(ERR, "Error setting alarm"); 3482 } else { 3483 PMD_DRV_LOG(DEBUG, "enable intr immediately"); 3484 ixgbe_enable_intr(dev); 3485 rte_intr_enable(&(dev->pci_dev->intr_handle)); 3486 } 3487 3488 3489 return 0; 3490 } 3491 3492 /** 3493 * Interrupt handler which shall be registered for alarm callback for delayed 3494 * handling specific interrupt to wait for the stable nic state. As the 3495 * NIC interrupt state is not stable for ixgbe after link is just down, 3496 * it needs to wait 4 seconds to get the stable status. 3497 * 3498 * @param handle 3499 * Pointer to interrupt handle. 3500 * @param param 3501 * The address of parameter (struct rte_eth_dev *) regsitered before. 3502 * 3503 * @return 3504 * void 3505 */ 3506 static void 3507 ixgbe_dev_interrupt_delayed_handler(void *param) 3508 { 3509 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 3510 struct ixgbe_interrupt *intr = 3511 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 3512 struct ixgbe_hw *hw = 3513 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3514 uint32_t eicr; 3515 3516 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 3517 if (eicr & IXGBE_EICR_MAILBOX) 3518 ixgbe_pf_mbx_process(dev); 3519 3520 if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { 3521 ixgbe_handle_lasi(hw); 3522 intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; 3523 } 3524 3525 if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 3526 ixgbe_dev_link_update(dev, 0); 3527 intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 3528 ixgbe_dev_link_status_print(dev); 3529 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); 3530 } 3531 3532 PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); 3533 ixgbe_enable_intr(dev); 3534 rte_intr_enable(&(dev->pci_dev->intr_handle)); 3535 } 3536 3537 /** 3538 * Interrupt handler triggered by NIC for handling 3539 * specific interrupt. 3540 * 3541 * @param handle 3542 * Pointer to interrupt handle. 3543 * @param param 3544 * The address of parameter (struct rte_eth_dev *) regsitered before. 
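 * The handler simply latches the pending causes through
 * ixgbe_dev_interrupt_get_status() and then dispatches them through
 * ixgbe_dev_interrupt_action().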
3545 * 3546 * @return 3547 * void 3548 */ 3549 static void 3550 ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, 3551 void *param) 3552 { 3553 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 3554 3555 ixgbe_dev_interrupt_get_status(dev); 3556 ixgbe_dev_interrupt_action(dev); 3557 } 3558 3559 static int 3560 ixgbe_dev_led_on(struct rte_eth_dev *dev) 3561 { 3562 struct ixgbe_hw *hw; 3563 3564 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3565 return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 3566 } 3567 3568 static int 3569 ixgbe_dev_led_off(struct rte_eth_dev *dev) 3570 { 3571 struct ixgbe_hw *hw; 3572 3573 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3574 return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; 3575 } 3576 3577 static int 3578 ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 3579 { 3580 struct ixgbe_hw *hw; 3581 uint32_t mflcn_reg; 3582 uint32_t fccfg_reg; 3583 int rx_pause; 3584 int tx_pause; 3585 3586 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3587 3588 fc_conf->pause_time = hw->fc.pause_time; 3589 fc_conf->high_water = hw->fc.high_water[0]; 3590 fc_conf->low_water = hw->fc.low_water[0]; 3591 fc_conf->send_xon = hw->fc.send_xon; 3592 fc_conf->autoneg = !hw->fc.disable_fc_autoneg; 3593 3594 /* 3595 * Return rx_pause status according to actual setting of 3596 * MFLCN register. 3597 */ 3598 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 3599 if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE)) 3600 rx_pause = 1; 3601 else 3602 rx_pause = 0; 3603 3604 /* 3605 * Return tx_pause status according to actual setting of 3606 * FCCFG register. 3607 */ 3608 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 3609 if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY)) 3610 tx_pause = 1; 3611 else 3612 tx_pause = 0; 3613 3614 if (rx_pause && tx_pause) 3615 fc_conf->mode = RTE_FC_FULL; 3616 else if (rx_pause) 3617 fc_conf->mode = RTE_FC_RX_PAUSE; 3618 else if (tx_pause) 3619 fc_conf->mode = RTE_FC_TX_PAUSE; 3620 else 3621 fc_conf->mode = RTE_FC_NONE; 3622 3623 return 0; 3624 } 3625 3626 static int 3627 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) 3628 { 3629 struct ixgbe_hw *hw; 3630 int err; 3631 uint32_t rx_buf_size; 3632 uint32_t max_high_water; 3633 uint32_t mflcn; 3634 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 3635 ixgbe_fc_none, 3636 ixgbe_fc_rx_pause, 3637 ixgbe_fc_tx_pause, 3638 ixgbe_fc_full 3639 }; 3640 3641 PMD_INIT_FUNC_TRACE(); 3642 3643 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3644 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); 3645 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 3646 3647 /* 3648 * At least reserve one Ethernet frame for watermark 3649 * high_water/low_water in kilo bytes for ixgbe 3650 */ 3651 max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 3652 if ((fc_conf->high_water > max_high_water) || 3653 (fc_conf->high_water < fc_conf->low_water)) { 3654 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 3655 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 3656 return -EINVAL; 3657 } 3658 3659 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode]; 3660 hw->fc.pause_time = fc_conf->pause_time; 3661 hw->fc.high_water[0] = fc_conf->high_water; 3662 hw->fc.low_water[0] = fc_conf->low_water; 3663 hw->fc.send_xon = fc_conf->send_xon; 3664 hw->fc.disable_fc_autoneg = !fc_conf->autoneg; 3665 3666 err = ixgbe_fc_enable(hw); 
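	/*
	 * ixgbe_fc_enable() returns IXGBE_SUCCESS when the requested mode was
	 * programmed and IXGBE_ERR_FC_NOT_NEGOTIATED when flow control could
	 * not be auto-negotiated; both are treated as success below, while any
	 * other code is reported and turned into -EIO.  As a rough worked
	 * example of the watermark check above: if RXPBSIZE(0) reads 0x80000
	 * (a 512 KB Rx packet buffer, the usual non-DCB default), then
	 * max_high_water is (0x80000 - 1518) >> 10 = 510, so high_water and
	 * low_water are given in KB and must stay below that bound.
	 */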
3667 3668 /* Not negotiated is not an error case */ 3669 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) { 3670 3671 /* check if we want to forward MAC frames - driver doesn't have native 3672 * capability to do that, so we'll write the registers ourselves */ 3673 3674 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 3675 3676 /* set or clear MFLCN.PMCF bit depending on configuration */ 3677 if (fc_conf->mac_ctrl_frame_fwd != 0) 3678 mflcn |= IXGBE_MFLCN_PMCF; 3679 else 3680 mflcn &= ~IXGBE_MFLCN_PMCF; 3681 3682 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn); 3683 IXGBE_WRITE_FLUSH(hw); 3684 3685 return 0; 3686 } 3687 3688 PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err); 3689 return -EIO; 3690 } 3691 3692 /** 3693 * ixgbe_pfc_enable_generic - Enable flow control 3694 * @hw: pointer to hardware structure 3695 * @tc_num: traffic class number 3696 * Enable flow control according to the current settings. 3697 */ 3698 static int 3699 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num) 3700 { 3701 int ret_val = 0; 3702 uint32_t mflcn_reg, fccfg_reg; 3703 uint32_t reg; 3704 uint32_t fcrtl, fcrth; 3705 uint8_t i; 3706 uint8_t nb_rx_en; 3707 3708 /* Validate the water mark configuration */ 3709 if (!hw->fc.pause_time) { 3710 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 3711 goto out; 3712 } 3713 3714 /* Low water mark of zero causes XOFF floods */ 3715 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 3716 /* High/Low water can not be 0 */ 3717 if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) { 3718 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 3719 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 3720 goto out; 3721 } 3722 3723 if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { 3724 PMD_INIT_LOG(ERR, "Invalid water mark configuration"); 3725 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; 3726 goto out; 3727 } 3728 } 3729 /* Negotiate the fc mode to use */ 3730 ixgbe_fc_autoneg(hw); 3731 3732 /* Disable any previous flow control settings */ 3733 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 3734 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE); 3735 3736 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 3737 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 3738 3739 switch (hw->fc.current_mode) { 3740 case ixgbe_fc_none: 3741 /* 3742 * If the count of enabled RX Priority Flow control >1, 3743 * and the TX pause can not be disabled 3744 */ 3745 nb_rx_en = 0; 3746 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 3747 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 3748 if (reg & IXGBE_FCRTH_FCEN) 3749 nb_rx_en++; 3750 } 3751 if (nb_rx_en > 1) 3752 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 3753 break; 3754 case ixgbe_fc_rx_pause: 3755 /* 3756 * Rx Flow control is enabled and Tx Flow control is 3757 * disabled by software override. Since there really 3758 * isn't a way to advertise that we are capable of RX 3759 * Pause ONLY, we will advertise that we support both 3760 * symmetric and asymmetric Rx PAUSE. Later, we will 3761 * disable the adapter's ability to send PAUSE frames. 
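 * Note that on this DCB/PFC path Rx pause is enabled through the
 * per-priority MFLCN.RPFCE bit rather than the link-level MFLCN.RFCE bit
 * used for plain 802.3x flow control.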
3762 */ 3763 mflcn_reg |= IXGBE_MFLCN_RPFCE; 3764 /* 3765 * If the count of enabled RX Priority Flow control >1, 3766 * and the TX pause can not be disabled 3767 */ 3768 nb_rx_en = 0; 3769 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { 3770 reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); 3771 if (reg & IXGBE_FCRTH_FCEN) 3772 nb_rx_en++; 3773 } 3774 if (nb_rx_en > 1) 3775 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 3776 break; 3777 case ixgbe_fc_tx_pause: 3778 /* 3779 * Tx Flow control is enabled, and Rx Flow control is 3780 * disabled by software override. 3781 */ 3782 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 3783 break; 3784 case ixgbe_fc_full: 3785 /* Flow control (both Rx and Tx) is enabled by SW override. */ 3786 mflcn_reg |= IXGBE_MFLCN_RPFCE; 3787 fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; 3788 break; 3789 default: 3790 PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); 3791 ret_val = IXGBE_ERR_CONFIG; 3792 goto out; 3793 } 3794 3795 /* Set 802.3x based flow control settings. */ 3796 mflcn_reg |= IXGBE_MFLCN_DPF; 3797 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 3798 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 3799 3800 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 3801 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && 3802 hw->fc.high_water[tc_num]) { 3803 fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE; 3804 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl); 3805 fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN; 3806 } else { 3807 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0); 3808 /* 3809 * In order to prevent Tx hangs when the internal Tx 3810 * switch is enabled we must set the high water mark 3811 * to the maximum FCRTH value. This allows the Tx 3812 * switch to function even under heavy Rx workloads. 
3813 */ 3814 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32; 3815 } 3816 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth); 3817 3818 /* Configure pause time (2 TCs per register) */ 3819 reg = hw->fc.pause_time * 0x00010001; 3820 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) 3821 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); 3822 3823 /* Configure flow control refresh threshold value */ 3824 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); 3825 3826 out: 3827 return ret_val; 3828 } 3829 3830 static int 3831 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) 3832 { 3833 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3834 int32_t ret_val = IXGBE_NOT_IMPLEMENTED; 3835 3836 if (hw->mac.type != ixgbe_mac_82598EB) { 3837 ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); 3838 } 3839 return ret_val; 3840 } 3841 3842 static int 3843 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf) 3844 { 3845 int err; 3846 uint32_t rx_buf_size; 3847 uint32_t max_high_water; 3848 uint8_t tc_num; 3849 uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; 3850 struct ixgbe_hw *hw = 3851 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3852 struct ixgbe_dcb_config *dcb_config = 3853 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 3854 3855 enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { 3856 ixgbe_fc_none, 3857 ixgbe_fc_rx_pause, 3858 ixgbe_fc_tx_pause, 3859 ixgbe_fc_full 3860 }; 3861 3862 PMD_INIT_FUNC_TRACE(); 3863 3864 ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map); 3865 tc_num = map[pfc_conf->priority]; 3866 rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)); 3867 PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); 3868 /* 3869 * At least reserve one Ethernet frame for watermark 3870 * high_water/low_water in kilo bytes for ixgbe 3871 */ 3872 max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; 3873 if ((pfc_conf->fc.high_water > max_high_water) || 3874 (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { 3875 PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); 3876 PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); 3877 return -EINVAL; 3878 } 3879 3880 hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode]; 3881 hw->fc.pause_time = pfc_conf->fc.pause_time; 3882 hw->fc.send_xon = pfc_conf->fc.send_xon; 3883 hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; 3884 hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; 3885 3886 err = ixgbe_dcb_pfc_enable(dev, tc_num); 3887 3888 /* Not negotiated is not an error case */ 3889 if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) 3890 return 0; 3891 3892 PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err); 3893 return -EIO; 3894 } 3895 3896 static int 3897 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, 3898 struct rte_eth_rss_reta_entry64 *reta_conf, 3899 uint16_t reta_size) 3900 { 3901 uint16_t i, sp_reta_size; 3902 uint8_t j, mask; 3903 uint32_t reta, r; 3904 uint16_t idx, shift; 3905 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3906 uint32_t reta_reg; 3907 3908 PMD_INIT_FUNC_TRACE(); 3909 3910 if (!ixgbe_rss_update_sp(hw->mac.type)) { 3911 PMD_DRV_LOG(ERR, "RSS reta update is not supported on this " 3912 "NIC."); 3913 return -ENOTSUP; 3914 } 3915 3916 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 3917 if (reta_size != sp_reta_size) { 3918 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 3919 "(%d) doesn't 
match the number hardware can supported " 3920 "(%d)\n", reta_size, sp_reta_size); 3921 return -EINVAL; 3922 } 3923 3924 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 3925 idx = i / RTE_RETA_GROUP_SIZE; 3926 shift = i % RTE_RETA_GROUP_SIZE; 3927 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 3928 IXGBE_4_BIT_MASK); 3929 if (!mask) 3930 continue; 3931 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 3932 if (mask == IXGBE_4_BIT_MASK) 3933 r = 0; 3934 else 3935 r = IXGBE_READ_REG(hw, reta_reg); 3936 for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) { 3937 if (mask & (0x1 << j)) 3938 reta |= reta_conf[idx].reta[shift + j] << 3939 (CHAR_BIT * j); 3940 else 3941 reta |= r & (IXGBE_8_BIT_MASK << 3942 (CHAR_BIT * j)); 3943 } 3944 IXGBE_WRITE_REG(hw, reta_reg, reta); 3945 } 3946 3947 return 0; 3948 } 3949 3950 static int 3951 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, 3952 struct rte_eth_rss_reta_entry64 *reta_conf, 3953 uint16_t reta_size) 3954 { 3955 uint16_t i, sp_reta_size; 3956 uint8_t j, mask; 3957 uint32_t reta; 3958 uint16_t idx, shift; 3959 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3960 uint32_t reta_reg; 3961 3962 PMD_INIT_FUNC_TRACE(); 3963 sp_reta_size = ixgbe_reta_size_get(hw->mac.type); 3964 if (reta_size != sp_reta_size) { 3965 PMD_DRV_LOG(ERR, "The size of hash lookup table configured " 3966 "(%d) doesn't match the number hardware can supported " 3967 "(%d)\n", reta_size, sp_reta_size); 3968 return -EINVAL; 3969 } 3970 3971 for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { 3972 idx = i / RTE_RETA_GROUP_SIZE; 3973 shift = i % RTE_RETA_GROUP_SIZE; 3974 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 3975 IXGBE_4_BIT_MASK); 3976 if (!mask) 3977 continue; 3978 3979 reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); 3980 reta = IXGBE_READ_REG(hw, reta_reg); 3981 for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { 3982 if (mask & (0x1 << j)) 3983 reta_conf[idx].reta[shift + j] = 3984 ((reta >> (CHAR_BIT * j)) & 3985 IXGBE_8_BIT_MASK); 3986 } 3987 } 3988 3989 return 0; 3990 } 3991 3992 static void 3993 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 3994 uint32_t index, uint32_t pool) 3995 { 3996 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 3997 uint32_t enable_addr = 1; 3998 3999 ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr); 4000 } 4001 4002 static void 4003 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) 4004 { 4005 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4006 4007 ixgbe_clear_rar(hw, index); 4008 } 4009 4010 static void 4011 ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) 4012 { 4013 ixgbe_remove_rar(dev, 0); 4014 4015 ixgbe_add_rar(dev, addr, 0, 0); 4016 } 4017 4018 static int 4019 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 4020 { 4021 uint32_t hlreg0; 4022 uint32_t maxfrs; 4023 struct ixgbe_hw *hw; 4024 struct rte_eth_dev_info dev_info; 4025 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 4026 4027 ixgbe_dev_info_get(dev, &dev_info); 4028 4029 /* check that mtu is within the allowed range */ 4030 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) 4031 return -EINVAL; 4032 4033 /* refuse mtu that requires the support of scattered packets when this 4034 * feature has not been enabled before. 
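 * (Without scattered Rx every frame must fit into a single mbuf, so the
 * new frame size plus two VLAN tags may not exceed the mbuf data room,
 * i.e. min_rx_buf_size - RTE_PKTMBUF_HEADROOM.)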
4035 */ 4036 if (!dev->data->scattered_rx && 4037 (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > 4038 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) 4039 return -EINVAL; 4040 4041 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4042 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 4043 4044 /* switch to jumbo mode if needed */ 4045 if (frame_size > ETHER_MAX_LEN) { 4046 dev->data->dev_conf.rxmode.jumbo_frame = 1; 4047 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 4048 } else { 4049 dev->data->dev_conf.rxmode.jumbo_frame = 0; 4050 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; 4051 } 4052 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 4053 4054 /* update max frame size */ 4055 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 4056 4057 maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); 4058 maxfrs &= 0x0000FFFF; 4059 maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16); 4060 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); 4061 4062 return 0; 4063 } 4064 4065 /* 4066 * Virtual Function operations 4067 */ 4068 static void 4069 ixgbevf_intr_disable(struct ixgbe_hw *hw) 4070 { 4071 PMD_INIT_FUNC_TRACE(); 4072 4073 /* Clear interrupt mask to stop from interrupts being generated */ 4074 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); 4075 4076 IXGBE_WRITE_FLUSH(hw); 4077 } 4078 4079 static void 4080 ixgbevf_intr_enable(struct ixgbe_hw *hw) 4081 { 4082 PMD_INIT_FUNC_TRACE(); 4083 4084 /* VF enable interrupt autoclean */ 4085 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK); 4086 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK); 4087 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); 4088 4089 IXGBE_WRITE_FLUSH(hw); 4090 } 4091 4092 static int 4093 ixgbevf_dev_configure(struct rte_eth_dev *dev) 4094 { 4095 struct rte_eth_conf *conf = &dev->data->dev_conf; 4096 struct ixgbe_adapter *adapter = 4097 (struct ixgbe_adapter *)dev->data->dev_private; 4098 4099 PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", 4100 dev->data->port_id); 4101 4102 /* 4103 * VF has no ability to enable/disable HW CRC 4104 * Keep the persistent behavior the same as Host PF 4105 */ 4106 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC 4107 if (!conf->rxmode.hw_strip_crc) { 4108 PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); 4109 conf->rxmode.hw_strip_crc = 1; 4110 } 4111 #else 4112 if (conf->rxmode.hw_strip_crc) { 4113 PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); 4114 conf->rxmode.hw_strip_crc = 0; 4115 } 4116 #endif 4117 4118 /* 4119 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk 4120 * allocation or vector Rx preconditions we will reset it. 4121 */ 4122 adapter->rx_bulk_alloc_allowed = true; 4123 adapter->rx_vec_allowed = true; 4124 4125 return 0; 4126 } 4127 4128 static int 4129 ixgbevf_dev_start(struct rte_eth_dev *dev) 4130 { 4131 struct ixgbe_hw *hw = 4132 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4133 uint32_t intr_vector = 0; 4134 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; 4135 4136 int err, mask = 0; 4137 4138 PMD_INIT_FUNC_TRACE(); 4139 4140 hw->mac.ops.reset_hw(hw); 4141 hw->mac.get_link_status = true; 4142 4143 /* negotiate mailbox API version to use with the PF. 
*/ 4144 ixgbevf_negotiate_api(hw); 4145 4146 ixgbevf_dev_tx_init(dev); 4147 4148 /* This can fail when allocating mbufs for descriptor rings */ 4149 err = ixgbevf_dev_rx_init(dev); 4150 if (err) { 4151 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err); 4152 ixgbe_dev_clear_queues(dev); 4153 return err; 4154 } 4155 4156 /* Set vfta */ 4157 ixgbevf_set_vfta_all(dev, 1); 4158 4159 /* Set HW strip */ 4160 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | 4161 ETH_VLAN_EXTEND_MASK; 4162 ixgbevf_vlan_offload_set(dev, mask); 4163 4164 ixgbevf_dev_rxtx_start(dev); 4165 4166 /* check and configure queue intr-vector mapping */ 4167 if (dev->data->dev_conf.intr_conf.rxq != 0) { 4168 intr_vector = dev->data->nb_rx_queues; 4169 if (rte_intr_efd_enable(intr_handle, intr_vector)) 4170 return -1; 4171 } 4172 4173 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { 4174 intr_handle->intr_vec = 4175 rte_zmalloc("intr_vec", 4176 dev->data->nb_rx_queues * sizeof(int), 0); 4177 if (intr_handle->intr_vec == NULL) { 4178 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" 4179 " intr_vec\n", dev->data->nb_rx_queues); 4180 return -ENOMEM; 4181 } 4182 } 4183 ixgbevf_configure_msix(dev); 4184 4185 rte_intr_enable(intr_handle); 4186 4187 /* Re-enable interrupt for VF */ 4188 ixgbevf_intr_enable(hw); 4189 4190 return 0; 4191 } 4192 4193 static void 4194 ixgbevf_dev_stop(struct rte_eth_dev *dev) 4195 { 4196 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4197 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; 4198 4199 PMD_INIT_FUNC_TRACE(); 4200 4201 ixgbevf_intr_disable(hw); 4202 4203 hw->adapter_stopped = 1; 4204 ixgbe_stop_adapter(hw); 4205 4206 /* 4207 * Clear what we set, but we still keep shadow_vfta to 4208 * restore after device starts 4209 */ 4210 ixgbevf_set_vfta_all(dev, 0); 4211 4212 /* Clear stored conf */ 4213 dev->data->scattered_rx = 0; 4214 4215 ixgbe_dev_clear_queues(dev); 4216 4217 /* Clean datapath event and queue/vec mapping */ 4218 rte_intr_efd_disable(intr_handle); 4219 if (intr_handle->intr_vec != NULL) { 4220 rte_free(intr_handle->intr_vec); 4221 intr_handle->intr_vec = NULL; 4222 } 4223 } 4224 4225 static void 4226 ixgbevf_dev_close(struct rte_eth_dev *dev) 4227 { 4228 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4229 4230 PMD_INIT_FUNC_TRACE(); 4231 4232 ixgbe_reset_hw(hw); 4233 4234 ixgbevf_dev_stop(dev); 4235 4236 ixgbe_dev_free_queues(dev); 4237 4238 /** 4239 * Remove the VF MAC address ro ensure 4240 * that the VF traffic goes to the PF 4241 * after stop, close and detach of the VF 4242 **/ 4243 ixgbevf_remove_mac_addr(dev, 0); 4244 } 4245 4246 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) 4247 { 4248 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4249 struct ixgbe_vfta *shadow_vfta = 4250 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 4251 int i = 0, j = 0, vfta = 0, mask = 1; 4252 4253 for (i = 0; i < IXGBE_VFTA_SIZE; i++) { 4254 vfta = shadow_vfta->vfta[i]; 4255 if (vfta) { 4256 mask = 1; 4257 for (j = 0; j < 32; j++) { 4258 if (vfta & mask) 4259 ixgbe_set_vfta(hw, (i<<5)+j, 0, 4260 on, false); 4261 mask <<= 1; 4262 } 4263 } 4264 } 4265 4266 } 4267 4268 static int 4269 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) 4270 { 4271 struct ixgbe_hw *hw = 4272 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4273 struct ixgbe_vfta *shadow_vfta = 4274 IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); 4275 uint32_t vid_idx = 
0; 4276 uint32_t vid_bit = 0; 4277 int ret = 0; 4278 4279 PMD_INIT_FUNC_TRACE(); 4280 4281 /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ 4282 ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); 4283 if (ret) { 4284 PMD_INIT_LOG(ERR, "Unable to set VF vlan"); 4285 return ret; 4286 } 4287 vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); 4288 vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); 4289 4290 /* Save what we set and retore it after device reset */ 4291 if (on) 4292 shadow_vfta->vfta[vid_idx] |= vid_bit; 4293 else 4294 shadow_vfta->vfta[vid_idx] &= ~vid_bit; 4295 4296 return 0; 4297 } 4298 4299 static void 4300 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) 4301 { 4302 struct ixgbe_hw *hw = 4303 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4304 uint32_t ctrl; 4305 4306 PMD_INIT_FUNC_TRACE(); 4307 4308 if (queue >= hw->mac.max_rx_queues) 4309 return; 4310 4311 ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); 4312 if (on) 4313 ctrl |= IXGBE_RXDCTL_VME; 4314 else 4315 ctrl &= ~IXGBE_RXDCTL_VME; 4316 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); 4317 4318 ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); 4319 } 4320 4321 static void 4322 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) 4323 { 4324 struct ixgbe_hw *hw = 4325 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4326 uint16_t i; 4327 int on = 0; 4328 4329 /* VF function only support hw strip feature, others are not support */ 4330 if (mask & ETH_VLAN_STRIP_MASK) { 4331 on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip); 4332 4333 for (i = 0; i < hw->mac.max_rx_queues; i++) 4334 ixgbevf_vlan_strip_queue_set(dev, i, on); 4335 } 4336 } 4337 4338 static int 4339 ixgbe_vmdq_mode_check(struct ixgbe_hw *hw) 4340 { 4341 uint32_t reg_val; 4342 4343 /* we only need to do this if VMDq is enabled */ 4344 reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 4345 if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { 4346 PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting"); 4347 return -1; 4348 } 4349 4350 return 0; 4351 } 4352 4353 static uint32_t 4354 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr) 4355 { 4356 uint32_t vector = 0; 4357 4358 switch (hw->mac.mc_filter_type) { 4359 case 0: /* use bits [47:36] of the address */ 4360 vector = ((uc_addr->addr_bytes[4] >> 4) | 4361 (((uint16_t)uc_addr->addr_bytes[5]) << 4)); 4362 break; 4363 case 1: /* use bits [46:35] of the address */ 4364 vector = ((uc_addr->addr_bytes[4] >> 3) | 4365 (((uint16_t)uc_addr->addr_bytes[5]) << 5)); 4366 break; 4367 case 2: /* use bits [45:34] of the address */ 4368 vector = ((uc_addr->addr_bytes[4] >> 2) | 4369 (((uint16_t)uc_addr->addr_bytes[5]) << 6)); 4370 break; 4371 case 3: /* use bits [43:32] of the address */ 4372 vector = ((uc_addr->addr_bytes[4]) | 4373 (((uint16_t)uc_addr->addr_bytes[5]) << 8)); 4374 break; 4375 default: /* Invalid mc_filter_type */ 4376 break; 4377 } 4378 4379 /* vector can only be 12-bits or boundary will be exceeded */ 4380 vector &= 0xFFF; 4381 return vector; 4382 } 4383 4384 static int 4385 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 4386 uint8_t on) 4387 { 4388 uint32_t vector; 4389 uint32_t uta_idx; 4390 uint32_t reg_val; 4391 uint32_t uta_shift; 4392 uint32_t rc; 4393 const uint32_t ixgbe_uta_idx_mask = 0x7F; 4394 const uint32_t ixgbe_uta_bit_shift = 5; 4395 const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1; 4396 const uint32_t bit1 = 0x1; 4397 4398 struct ixgbe_hw *hw = 4399 
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4400 struct ixgbe_uta_info *uta_info = 4401 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 4402 4403 /* The UTA table only exists on 82599 hardware and newer */ 4404 if (hw->mac.type < ixgbe_mac_82599EB) 4405 return -ENOTSUP; 4406 4407 vector = ixgbe_uta_vector(hw, mac_addr); 4408 uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; 4409 uta_shift = vector & ixgbe_uta_bit_mask; 4410 4411 rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0); 4412 if (rc == on) 4413 return 0; 4414 4415 reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx)); 4416 if (on) { 4417 uta_info->uta_in_use++; 4418 reg_val |= (bit1 << uta_shift); 4419 uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift); 4420 } else { 4421 uta_info->uta_in_use--; 4422 reg_val &= ~(bit1 << uta_shift); 4423 uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift); 4424 } 4425 4426 IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val); 4427 4428 if (uta_info->uta_in_use > 0) 4429 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 4430 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 4431 else 4432 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 4433 4434 return 0; 4435 } 4436 4437 static int 4438 ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) 4439 { 4440 int i; 4441 struct ixgbe_hw *hw = 4442 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4443 struct ixgbe_uta_info *uta_info = 4444 IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private); 4445 4446 /* The UTA table only exists on 82599 hardware and newer */ 4447 if (hw->mac.type < ixgbe_mac_82599EB) 4448 return -ENOTSUP; 4449 4450 if (on) { 4451 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 4452 uta_info->uta_shadow[i] = ~0; 4453 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); 4454 } 4455 } else { 4456 for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { 4457 uta_info->uta_shadow[i] = 0; 4458 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 4459 } 4460 } 4461 return 0; 4462 4463 } 4464 4465 uint32_t 4466 ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) 4467 { 4468 uint32_t new_val = orig_val; 4469 4470 if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) 4471 new_val |= IXGBE_VMOLR_AUPE; 4472 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC) 4473 new_val |= IXGBE_VMOLR_ROMPE; 4474 if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC) 4475 new_val |= IXGBE_VMOLR_ROPE; 4476 if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST) 4477 new_val |= IXGBE_VMOLR_BAM; 4478 if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST) 4479 new_val |= IXGBE_VMOLR_MPE; 4480 4481 return new_val; 4482 } 4483 4484 static int 4485 ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, 4486 uint16_t rx_mask, uint8_t on) 4487 { 4488 int val = 0; 4489 4490 struct ixgbe_hw *hw = 4491 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4492 uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); 4493 4494 if (hw->mac.type == ixgbe_mac_82598EB) { 4495 PMD_INIT_LOG(ERR, "setting VF receive mode set should be done" 4496 " on 82599 hardware and newer"); 4497 return -ENOTSUP; 4498 } 4499 if (ixgbe_vmdq_mode_check(hw) < 0) 4500 return -ENOTSUP; 4501 4502 val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val); 4503 4504 if (on) 4505 vmolr |= val; 4506 else 4507 vmolr &= ~val; 4508 4509 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); 4510 4511 return 0; 4512 } 4513 4514 static int 4515 ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) 4516 { 4517 uint32_t reg, addr; 4518 uint32_t val; 4519 const uint8_t bit1 = 0x1; 4520 4521 struct ixgbe_hw *hw = 4522 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 
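	/*
	 * Worked example of the register layout handled below (64 VMDq pools,
	 * 32 pools per PFVFRE register): enabling Rx for pool 40 sets
	 * bit 40 - 32 = 8 in IXGBE_VFRE(1), while pool 5 sets bit 5 in
	 * IXGBE_VFRE(0).
	 */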
4523 4524 if (ixgbe_vmdq_mode_check(hw) < 0) 4525 return -ENOTSUP; 4526 4527 if (pool >= ETH_64_POOLS) 4528 return -EINVAL; 4529 4530 /* for pool >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */ 4531 if (pool >= 32) { 4532 addr = IXGBE_VFRE(1); 4533 val = bit1 << (pool - 32); 4534 } else { 4535 addr = IXGBE_VFRE(0); 4536 val = bit1 << pool; 4537 } 4538 4539 reg = IXGBE_READ_REG(hw, addr); 4540 4541 if (on) 4542 reg |= val; 4543 else 4544 reg &= ~val; 4545 4546 IXGBE_WRITE_REG(hw, addr, reg); 4547 4548 return 0; 4549 } 4550 4551 static int 4552 ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) 4553 { 4554 uint32_t reg, addr; 4555 uint32_t val; 4556 const uint8_t bit1 = 0x1; 4557 4558 struct ixgbe_hw *hw = 4559 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4560 4561 if (ixgbe_vmdq_mode_check(hw) < 0) 4562 return -ENOTSUP; 4563 4564 if (pool >= ETH_64_POOLS) 4565 return -EINVAL; 4566 4567 /* for pool >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */ 4568 if (pool >= 32) { 4569 addr = IXGBE_VFTE(1); 4570 val = bit1 << (pool - 32); 4571 } else { 4572 addr = IXGBE_VFTE(0); 4573 val = bit1 << pool; 4574 } 4575 4576 reg = IXGBE_READ_REG(hw, addr); 4577 4578 if (on) 4579 reg |= val; 4580 else 4581 reg &= ~val; 4582 4583 IXGBE_WRITE_REG(hw, addr, reg); 4584 4585 return 0; 4586 } 4587 4588 static int 4589 ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, 4590 uint64_t pool_mask, uint8_t vlan_on) 4591 { 4592 int ret = 0; 4593 uint16_t pool_idx; 4594 struct ixgbe_hw *hw = 4595 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4596 4597 if (ixgbe_vmdq_mode_check(hw) < 0) 4598 return -ENOTSUP; 4599 for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) { 4600 if (pool_mask & ((uint64_t)(1ULL << pool_idx))) { 4601 ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, 4602 vlan_on, false); 4603 if (ret < 0) 4604 return ret; 4605 } 4606 } 4607 4608 return ret; 4609 } 4610 4611 #define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */ 4612 #define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */ 4613 #define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */ 4614 #define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. 
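 * These MRCTL bits select which traffic a mirror rule matches; they are
 * OR'ed together into the per-rule MRCTL register by
 * ixgbe_mirror_rule_set() below.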
*/ 4615 #define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \ 4616 ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \ 4617 ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN)) 4618 4619 static int 4620 ixgbe_mirror_rule_set(struct rte_eth_dev *dev, 4621 struct rte_eth_mirror_conf *mirror_conf, 4622 uint8_t rule_id, uint8_t on) 4623 { 4624 uint32_t mr_ctl, vlvf; 4625 uint32_t mp_lsb = 0; 4626 uint32_t mv_msb = 0; 4627 uint32_t mv_lsb = 0; 4628 uint32_t mp_msb = 0; 4629 uint8_t i = 0; 4630 int reg_index = 0; 4631 uint64_t vlan_mask = 0; 4632 4633 const uint8_t pool_mask_offset = 32; 4634 const uint8_t vlan_mask_offset = 32; 4635 const uint8_t dst_pool_offset = 8; 4636 const uint8_t rule_mr_offset = 4; 4637 const uint8_t mirror_rule_mask = 0x0F; 4638 4639 struct ixgbe_mirror_info *mr_info = 4640 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); 4641 struct ixgbe_hw *hw = 4642 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4643 uint8_t mirror_type = 0; 4644 4645 if (ixgbe_vmdq_mode_check(hw) < 0) 4646 return -ENOTSUP; 4647 4648 if (rule_id >= IXGBE_MAX_MIRROR_RULES) 4649 return -EINVAL; 4650 4651 if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) { 4652 PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.", 4653 mirror_conf->rule_type); 4654 return -EINVAL; 4655 } 4656 4657 if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { 4658 mirror_type |= IXGBE_MRCTL_VLME; 4659 /* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */ 4660 for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) { 4661 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { 4662 /* search vlan id related pool vlan filter index */ 4663 reg_index = ixgbe_find_vlvf_slot(hw, 4664 mirror_conf->vlan.vlan_id[i], 4665 false); 4666 if (reg_index < 0) 4667 return -EINVAL; 4668 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index)); 4669 if ((vlvf & IXGBE_VLVF_VIEN) && 4670 ((vlvf & IXGBE_VLVF_VLANID_MASK) == 4671 mirror_conf->vlan.vlan_id[i])) 4672 vlan_mask |= (1ULL << reg_index); 4673 else 4674 return -EINVAL; 4675 } 4676 } 4677 4678 if (on) { 4679 mv_lsb = vlan_mask & 0xFFFFFFFF; 4680 mv_msb = vlan_mask >> vlan_mask_offset; 4681 4682 mr_info->mr_conf[rule_id].vlan.vlan_mask = 4683 mirror_conf->vlan.vlan_mask; 4684 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) { 4685 if (mirror_conf->vlan.vlan_mask & (1ULL << i)) 4686 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 4687 mirror_conf->vlan.vlan_id[i]; 4688 } 4689 } else { 4690 mv_lsb = 0; 4691 mv_msb = 0; 4692 mr_info->mr_conf[rule_id].vlan.vlan_mask = 0; 4693 for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) 4694 mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0; 4695 } 4696 } 4697 4698 /* 4699 * if enable pool mirror, write related pool mask register,if disable 4700 * pool mirror, clear PFMRVM register 4701 */ 4702 if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { 4703 mirror_type |= IXGBE_MRCTL_VPME; 4704 if (on) { 4705 mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF; 4706 mp_msb = mirror_conf->pool_mask >> pool_mask_offset; 4707 mr_info->mr_conf[rule_id].pool_mask = 4708 mirror_conf->pool_mask; 4709 4710 } else { 4711 mp_lsb = 0; 4712 mp_msb = 0; 4713 mr_info->mr_conf[rule_id].pool_mask = 0; 4714 } 4715 } 4716 if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT) 4717 mirror_type |= IXGBE_MRCTL_UPME; 4718 if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT) 4719 mirror_type |= IXGBE_MRCTL_DPME; 4720 4721 /* read mirror control register and recalculate it */ 4722 mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id)); 4723 4724 if (on) { 4725 mr_ctl |= mirror_type; 4726 
mr_ctl &= mirror_rule_mask; 4727 mr_ctl |= mirror_conf->dst_pool << dst_pool_offset; 4728 } else 4729 mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask); 4730 4731 mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type; 4732 mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool; 4733 4734 /* write mirrror control register */ 4735 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); 4736 4737 /* write pool mirrror control register */ 4738 if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) { 4739 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); 4740 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), 4741 mp_msb); 4742 } 4743 /* write VLAN mirrror control register */ 4744 if (mirror_conf->rule_type == ETH_MIRROR_VLAN) { 4745 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); 4746 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), 4747 mv_msb); 4748 } 4749 4750 return 0; 4751 } 4752 4753 static int 4754 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) 4755 { 4756 int mr_ctl = 0; 4757 uint32_t lsb_val = 0; 4758 uint32_t msb_val = 0; 4759 const uint8_t rule_mr_offset = 4; 4760 4761 struct ixgbe_hw *hw = 4762 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4763 struct ixgbe_mirror_info *mr_info = 4764 (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); 4765 4766 if (ixgbe_vmdq_mode_check(hw) < 0) 4767 return -ENOTSUP; 4768 4769 memset(&mr_info->mr_conf[rule_id], 0, 4770 sizeof(struct rte_eth_mirror_conf)); 4771 4772 /* clear PFVMCTL register */ 4773 IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); 4774 4775 /* clear pool mask register */ 4776 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val); 4777 IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val); 4778 4779 /* clear vlan mask register */ 4780 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val); 4781 IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val); 4782 4783 return 0; 4784 } 4785 4786 static int 4787 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 4788 { 4789 uint32_t mask; 4790 struct ixgbe_hw *hw = 4791 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4792 4793 mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); 4794 mask |= (1 << IXGBE_MISC_VEC_ID); 4795 RTE_SET_USED(queue_id); 4796 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); 4797 4798 rte_intr_enable(&dev->pci_dev->intr_handle); 4799 4800 return 0; 4801 } 4802 4803 static int 4804 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 4805 { 4806 uint32_t mask; 4807 struct ixgbe_hw *hw = 4808 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4809 4810 mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); 4811 mask &= ~(1 << IXGBE_MISC_VEC_ID); 4812 RTE_SET_USED(queue_id); 4813 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); 4814 4815 return 0; 4816 } 4817 4818 static int 4819 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) 4820 { 4821 uint32_t mask; 4822 struct ixgbe_hw *hw = 4823 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4824 struct ixgbe_interrupt *intr = 4825 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4826 4827 if (queue_id < 16) { 4828 ixgbe_disable_intr(hw); 4829 intr->mask |= (1 << queue_id); 4830 ixgbe_enable_intr(dev); 4831 } else if (queue_id < 32) { 4832 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 4833 mask &= (1 << queue_id); 4834 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 4835 } else if (queue_id < 64) { 4836 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 4837 mask &= (1 << (queue_id - 32)); 4838 IXGBE_WRITE_REG(hw, 
IXGBE_EIMS_EX(1), mask); 4839 } 4840 rte_intr_enable(&dev->pci_dev->intr_handle); 4841 4842 return 0; 4843 } 4844 4845 static int 4846 ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) 4847 { 4848 uint32_t mask; 4849 struct ixgbe_hw *hw = 4850 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4851 struct ixgbe_interrupt *intr = 4852 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 4853 4854 if (queue_id < 16) { 4855 ixgbe_disable_intr(hw); 4856 intr->mask &= ~(1 << queue_id); 4857 ixgbe_enable_intr(dev); 4858 } else if (queue_id < 32) { 4859 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); 4860 mask &= ~(1 << queue_id); 4861 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); 4862 } else if (queue_id < 64) { 4863 mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); 4864 mask &= ~(1 << (queue_id - 32)); 4865 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); 4866 } 4867 4868 return 0; 4869 } 4870 4871 static void 4872 ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 4873 uint8_t queue, uint8_t msix_vector) 4874 { 4875 uint32_t tmp, idx; 4876 4877 if (direction == -1) { 4878 /* other causes */ 4879 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 4880 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 4881 tmp &= ~0xFF; 4882 tmp |= msix_vector; 4883 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); 4884 } else { 4885 /* rx or tx cause */ 4886 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 4887 idx = ((16 * (queue & 1)) + (8 * direction)); 4888 tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 4889 tmp &= ~(0xFF << idx); 4890 tmp |= (msix_vector << idx); 4891 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); 4892 } 4893 } 4894 4895 /** 4896 * set the IVAR registers, mapping interrupt causes to vectors 4897 * @param hw 4898 * pointer to ixgbe_hw struct 4899 * @direction 4900 * 0 for Rx, 1 for Tx, -1 for other causes 4901 * @queue 4902 * queue to map the corresponding interrupt to 4903 * @msix_vector 4904 * the vector to map to the corresponding queue 4905 */ 4906 static void 4907 ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, 4908 uint8_t queue, uint8_t msix_vector) 4909 { 4910 uint32_t tmp, idx; 4911 4912 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 4913 if (hw->mac.type == ixgbe_mac_82598EB) { 4914 if (direction == -1) 4915 direction = 0; 4916 idx = (((direction * 64) + queue) >> 2) & 0x1F; 4917 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); 4918 tmp &= ~(0xFF << (8 * (queue & 0x3))); 4919 tmp |= (msix_vector << (8 * (queue & 0x3))); 4920 IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); 4921 } else if ((hw->mac.type == ixgbe_mac_82599EB) || 4922 (hw->mac.type == ixgbe_mac_X540)) { 4923 if (direction == -1) { 4924 /* other causes */ 4925 idx = ((queue & 1) * 8); 4926 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 4927 tmp &= ~(0xFF << idx); 4928 tmp |= (msix_vector << idx); 4929 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); 4930 } else { 4931 /* rx or tx causes */ 4932 idx = ((16 * (queue & 1)) + (8 * direction)); 4933 tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); 4934 tmp &= ~(0xFF << idx); 4935 tmp |= (msix_vector << idx); 4936 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); 4937 } 4938 } 4939 } 4940 4941 static void 4942 ixgbevf_configure_msix(struct rte_eth_dev *dev) 4943 { 4944 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; 4945 struct ixgbe_hw *hw = 4946 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4947 uint32_t q_idx; 4948 uint32_t vector_idx = IXGBE_MISC_VEC_ID; 4949 4950 /* Configure VF other cause ivar */ 4951 ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); 4952 4953 /* won't configure 
msix register if no mapping is done 4954 * between intr vector and event fd. 4955 */ 4956 if (!rte_intr_dp_is_en(intr_handle)) 4957 return; 4958 4959 /* Configure all RX queues of VF */ 4960 for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { 4961 /* Force all queues to use vector 0, 4962 * as the VF supports only one MSI-X vector (IXGBE_VF_MAXMSIVECTOR = 1) 4963 */ 4964 ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); 4965 intr_handle->intr_vec[q_idx] = vector_idx; 4966 } 4967 } 4968 4969 /** 4970 * Sets up the hardware to properly generate MSI-X interrupts 4971 * @param dev 4972 * Pointer to struct rte_eth_dev. 4973 */ 4974 static void 4975 ixgbe_configure_msix(struct rte_eth_dev *dev) 4976 { 4977 struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; 4978 struct ixgbe_hw *hw = 4979 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 4980 uint32_t queue_id, base = IXGBE_MISC_VEC_ID; 4981 uint32_t vec = IXGBE_MISC_VEC_ID; 4982 uint32_t mask; 4983 uint32_t gpie; 4984 4985 /* won't configure msix register if no mapping is done 4986 * between intr vector and event fd 4987 */ 4988 if (!rte_intr_dp_is_en(intr_handle)) 4989 return; 4990 4991 if (rte_intr_allow_others(intr_handle)) 4992 vec = base = IXGBE_RX_VEC_START; 4993 4994 /* set up GPIE for MSI-X mode */ 4995 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 4996 gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | 4997 IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME; 4998 /* auto-clear and auto-set the corresponding bits in EIMS 4999 * when an MSI-X interrupt is triggered 5000 */ 5001 if (hw->mac.type == ixgbe_mac_82598EB) { 5002 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 5003 } else { 5004 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 5005 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 5006 } 5007 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 5008 5009 /* Populate the IVAR table and set the ITR values to the 5010 * corresponding register.
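 * Rx queues are mapped 1:1 onto the available vectors starting at the
 * base vector; once base + nb_efd - 1 has been reached, the remaining
 * queues share the last vector.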
5011 */ 5012 for (queue_id = 0; queue_id < dev->data->nb_rx_queues; 5013 queue_id++) { 5014 /* by default, 1:1 mapping */ 5015 ixgbe_set_ivar_map(hw, 0, queue_id, vec); 5016 intr_handle->intr_vec[queue_id] = vec; 5017 if (vec < base + intr_handle->nb_efd - 1) 5018 vec++; 5019 } 5020 5021 switch (hw->mac.type) { 5022 case ixgbe_mac_82598EB: 5023 ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, 5024 IXGBE_MISC_VEC_ID); 5025 break; 5026 case ixgbe_mac_82599EB: 5027 case ixgbe_mac_X540: 5028 ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); 5029 break; 5030 default: 5031 break; 5032 } 5033 IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), 5034 IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF); 5035 5036 /* set up to autoclear timer, and the vectors */ 5037 mask = IXGBE_EIMS_ENABLE_MASK; 5038 mask &= ~(IXGBE_EIMS_OTHER | 5039 IXGBE_EIMS_MAILBOX | 5040 IXGBE_EIMS_LSC); 5041 5042 IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); 5043 } 5044 5045 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, 5046 uint16_t queue_idx, uint16_t tx_rate) 5047 { 5048 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5049 uint32_t rf_dec, rf_int; 5050 uint32_t bcnrc_val; 5051 uint16_t link_speed = dev->data->dev_link.link_speed; 5052 5053 if (queue_idx >= hw->mac.max_tx_queues) 5054 return -EINVAL; 5055 5056 if (tx_rate != 0) { 5057 /* Calculate the rate factor values to set */ 5058 rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; 5059 rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; 5060 rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; 5061 5062 bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; 5063 bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & 5064 IXGBE_RTTBCNRC_RF_INT_MASK_M); 5065 bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); 5066 } else { 5067 bcnrc_val = 0; 5068 } 5069 5070 /* 5071 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM 5072 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise 5073 * set as 0x4. 5074 */ 5075 if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) && 5076 (dev->data->dev_conf.rxmode.max_rx_pkt_len >= 5077 IXGBE_MAX_JUMBO_FRAME_SIZE)) 5078 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 5079 IXGBE_MMW_SIZE_JUMBO_FRAME); 5080 else 5081 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 5082 IXGBE_MMW_SIZE_DEFAULT); 5083 5084 /* Set RTTBCNRC of queue X */ 5085 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); 5086 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); 5087 IXGBE_WRITE_FLUSH(hw); 5088 5089 return 0; 5090 } 5091 5092 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, 5093 uint16_t tx_rate, uint64_t q_msk) 5094 { 5095 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5096 struct ixgbe_vf_info *vfinfo = 5097 *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); 5098 uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; 5099 uint32_t queue_stride = 5100 IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; 5101 uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx; 5102 uint32_t queue_end = queue_idx + nb_q_per_pool - 1; 5103 uint16_t total_rate = 0; 5104 5105 if (queue_end >= hw->mac.max_tx_queues) 5106 return -EINVAL; 5107 5108 if (vfinfo != NULL) { 5109 for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) { 5110 if (vf_idx == vf) 5111 continue; 5112 for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); 5113 idx++) 5114 total_rate += vfinfo[vf_idx].tx_rate[idx]; 5115 } 5116 } else 5117 return -EINVAL; 5118 5119 /* Store tx_rate for this vf. 
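 * Only the queues selected by q_msk are updated; total_rate accumulates
 * the requested per-queue rates so that the sum can be rejected below if
 * it would exceed the current link speed.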
*/ 5120 for (idx = 0; idx < nb_q_per_pool; idx++) { 5121 if (((uint64_t)0x1 << idx) & q_msk) { 5122 if (vfinfo[vf].tx_rate[idx] != tx_rate) 5123 vfinfo[vf].tx_rate[idx] = tx_rate; 5124 total_rate += tx_rate; 5125 } 5126 } 5127 5128 if (total_rate > dev->data->dev_link.link_speed) { 5129 /* 5130 * Reset the stored Tx rates of the VF if the total would 5131 * exceed the link speed. 5132 */ 5133 memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); 5134 return -EINVAL; 5135 } 5136 5137 /* Set RTTBCNRC of each queue/pool for vf X */ 5138 for (; queue_idx <= queue_end; queue_idx++) { 5139 if (0x1 & q_msk) 5140 ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); 5141 q_msk = q_msk >> 1; 5142 } 5143 5144 return 0; 5145 } 5146 5147 static void 5148 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, 5149 __attribute__((unused)) uint32_t index, 5150 __attribute__((unused)) uint32_t pool) 5151 { 5152 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5153 int diag; 5154 5155 /* 5156 * On an 82599 VF, re-adding the same MAC address is not an idempotent 5157 * operation. Trap this case to avoid exhausting the [very limited] 5158 * set of PF resources used to store VF MAC addresses. 5159 */ 5160 if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) 5161 return; 5162 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 5163 if (diag == 0) 5164 return; 5165 PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag); 5166 } 5167 5168 static void 5169 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) 5170 { 5171 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5172 struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; 5173 struct ether_addr *mac_addr; 5174 uint32_t i; 5175 int diag; 5176 5177 /* 5178 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does 5179 * not support the deletion of a given MAC address. 5180 * Instead, it requires deleting all MAC addresses and then re-adding 5181 * them, with the exception of the one to be deleted. 5182 */ 5183 (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); 5184 5185 /* 5186 * Re-add all MAC addresses, with the exception of the deleted one 5187 * and of the permanent MAC address.
5188 */ 5189 for (i = 0, mac_addr = dev->data->mac_addrs; 5190 i < hw->mac.num_rar_entries; i++, mac_addr++) { 5191 /* Skip the deleted MAC address */ 5192 if (i == index) 5193 continue; 5194 /* Skip NULL MAC addresses */ 5195 if (is_zero_ether_addr(mac_addr)) 5196 continue; 5197 /* Skip the permanent MAC address */ 5198 if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) 5199 continue; 5200 diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); 5201 if (diag != 0) 5202 PMD_DRV_LOG(ERR, 5203 "Adding again MAC address " 5204 "%02x:%02x:%02x:%02x:%02x:%02x failed " 5205 "diag=%d", 5206 mac_addr->addr_bytes[0], 5207 mac_addr->addr_bytes[1], 5208 mac_addr->addr_bytes[2], 5209 mac_addr->addr_bytes[3], 5210 mac_addr->addr_bytes[4], 5211 mac_addr->addr_bytes[5], 5212 diag); 5213 } 5214 } 5215 5216 static void 5217 ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) 5218 { 5219 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5220 5221 hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); 5222 } 5223 5224 #define MAC_TYPE_FILTER_SUP(type) do {\ 5225 if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\ 5226 (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\ 5227 (type) != ixgbe_mac_X550EM_a)\ 5228 return -ENOTSUP;\ 5229 } while (0) 5230 5231 static int 5232 ixgbe_syn_filter_set(struct rte_eth_dev *dev, 5233 struct rte_eth_syn_filter *filter, 5234 bool add) 5235 { 5236 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5237 uint32_t synqf; 5238 5239 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 5240 return -EINVAL; 5241 5242 synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); 5243 5244 if (add) { 5245 if (synqf & IXGBE_SYN_FILTER_ENABLE) 5246 return -EINVAL; 5247 synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & 5248 IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); 5249 5250 if (filter->hig_pri) 5251 synqf |= IXGBE_SYN_FILTER_SYNQFP; 5252 else 5253 synqf &= ~IXGBE_SYN_FILTER_SYNQFP; 5254 } else { 5255 if (!(synqf & IXGBE_SYN_FILTER_ENABLE)) 5256 return -ENOENT; 5257 synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); 5258 } 5259 IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); 5260 IXGBE_WRITE_FLUSH(hw); 5261 return 0; 5262 } 5263 5264 static int 5265 ixgbe_syn_filter_get(struct rte_eth_dev *dev, 5266 struct rte_eth_syn_filter *filter) 5267 { 5268 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5269 uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); 5270 5271 if (synqf & IXGBE_SYN_FILTER_ENABLE) { 5272 filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 
1 : 0; 5273 filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1); 5274 return 0; 5275 } 5276 return -ENOENT; 5277 } 5278 5279 static int 5280 ixgbe_syn_filter_handle(struct rte_eth_dev *dev, 5281 enum rte_filter_op filter_op, 5282 void *arg) 5283 { 5284 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5285 int ret; 5286 5287 MAC_TYPE_FILTER_SUP(hw->mac.type); 5288 5289 if (filter_op == RTE_ETH_FILTER_NOP) 5290 return 0; 5291 5292 if (arg == NULL) { 5293 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", 5294 filter_op); 5295 return -EINVAL; 5296 } 5297 5298 switch (filter_op) { 5299 case RTE_ETH_FILTER_ADD: 5300 ret = ixgbe_syn_filter_set(dev, 5301 (struct rte_eth_syn_filter *)arg, 5302 TRUE); 5303 break; 5304 case RTE_ETH_FILTER_DELETE: 5305 ret = ixgbe_syn_filter_set(dev, 5306 (struct rte_eth_syn_filter *)arg, 5307 FALSE); 5308 break; 5309 case RTE_ETH_FILTER_GET: 5310 ret = ixgbe_syn_filter_get(dev, 5311 (struct rte_eth_syn_filter *)arg); 5312 break; 5313 default: 5314 PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op); 5315 ret = -EINVAL; 5316 break; 5317 } 5318 5319 return ret; 5320 } 5321 5322 5323 static inline enum ixgbe_5tuple_protocol 5324 convert_protocol_type(uint8_t protocol_value) 5325 { 5326 if (protocol_value == IPPROTO_TCP) 5327 return IXGBE_FILTER_PROTOCOL_TCP; 5328 else if (protocol_value == IPPROTO_UDP) 5329 return IXGBE_FILTER_PROTOCOL_UDP; 5330 else if (protocol_value == IPPROTO_SCTP) 5331 return IXGBE_FILTER_PROTOCOL_SCTP; 5332 else 5333 return IXGBE_FILTER_PROTOCOL_NONE; 5334 } 5335 5336 /* 5337 * add a 5tuple filter 5338 * 5339 * @param 5340 * dev: Pointer to struct rte_eth_dev. 5341 * index: the index the filter allocates. 5342 * filter: ponter to the filter that will be added. 5343 * rx_queue: the queue id the filter assigned to. 5344 * 5345 * @return 5346 * - On success, zero. 5347 * - On failure, a negative value. 5348 */ 5349 static int 5350 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, 5351 struct ixgbe_5tuple_filter *filter) 5352 { 5353 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5354 struct ixgbe_filter_info *filter_info = 5355 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5356 int i, idx, shift; 5357 uint32_t ftqf, sdpqf; 5358 uint32_t l34timir = 0; 5359 uint8_t mask = 0xff; 5360 5361 /* 5362 * look for an unused 5tuple filter index, 5363 * and insert the filter to list. 5364 */ 5365 for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { 5366 idx = i / (sizeof(uint32_t) * NBBY); 5367 shift = i % (sizeof(uint32_t) * NBBY); 5368 if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { 5369 filter_info->fivetuple_mask[idx] |= 1 << shift; 5370 filter->index = i; 5371 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, 5372 filter, 5373 entries); 5374 break; 5375 } 5376 } 5377 if (i >= IXGBE_MAX_FTQF_FILTERS) { 5378 PMD_DRV_LOG(ERR, "5tuple filters are full."); 5379 return -ENOSYS; 5380 } 5381 5382 sdpqf = (uint32_t)(filter->filter_info.dst_port << 5383 IXGBE_SDPQF_DSTPORT_SHIFT); 5384 sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); 5385 5386 ftqf = (uint32_t)(filter->filter_info.proto & 5387 IXGBE_FTQF_PROTOCOL_MASK); 5388 ftqf |= (uint32_t)((filter->filter_info.priority & 5389 IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); 5390 if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. 
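 * For each field, a mask of 0 in the software filter means the field must
 * be compared; this is implemented by clearing the corresponding bit in
 * the FTQF 5-tuple mask, while a set FTQF mask bit turns that field into
 * a wildcard.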
*/ 5391 mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; 5392 if (filter->filter_info.dst_ip_mask == 0) 5393 mask &= IXGBE_FTQF_DEST_ADDR_MASK; 5394 if (filter->filter_info.src_port_mask == 0) 5395 mask &= IXGBE_FTQF_SOURCE_PORT_MASK; 5396 if (filter->filter_info.dst_port_mask == 0) 5397 mask &= IXGBE_FTQF_DEST_PORT_MASK; 5398 if (filter->filter_info.proto_mask == 0) 5399 mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; 5400 ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; 5401 ftqf |= IXGBE_FTQF_POOL_MASK_EN; 5402 ftqf |= IXGBE_FTQF_QUEUE_ENABLE; 5403 5404 IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); 5405 IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); 5406 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); 5407 IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); 5408 5409 l34timir |= IXGBE_L34T_IMIR_RESERVE; 5410 l34timir |= (uint32_t)(filter->queue << 5411 IXGBE_L34T_IMIR_QUEUE_SHIFT); 5412 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); 5413 return 0; 5414 } 5415 5416 /* 5417 * remove a 5tuple filter 5418 * 5419 * @param 5420 * dev: Pointer to struct rte_eth_dev. 5421 * filter: the pointer of the filter will be removed. 5422 */ 5423 static void 5424 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, 5425 struct ixgbe_5tuple_filter *filter) 5426 { 5427 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5428 struct ixgbe_filter_info *filter_info = 5429 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5430 uint16_t index = filter->index; 5431 5432 filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= 5433 ~(1 << (index % (sizeof(uint32_t) * NBBY))); 5434 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); 5435 rte_free(filter); 5436 5437 IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); 5438 IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); 5439 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); 5440 IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); 5441 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); 5442 } 5443 5444 static int 5445 ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 5446 { 5447 struct ixgbe_hw *hw; 5448 uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 5449 5450 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5451 5452 if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN)) 5453 return -EINVAL; 5454 5455 /* refuse mtu that requires the support of scattered packets when this 5456 * feature has not been enabled before. 5457 */ 5458 if (!dev->data->scattered_rx && 5459 (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > 5460 dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) 5461 return -EINVAL; 5462 5463 /* 5464 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU 5465 * request of the version 2.0 of the mailbox API. 5466 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 5467 * of the mailbox API. 
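 * (The value handed to ixgbevf_rlpml_set_vf() below is the max_frame
 * computed above, i.e. the MTU plus 18 bytes of Ethernet header and
 * CRC, so the default 1500-byte MTU is programmed as 1518 bytes.)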
5468 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers 5469 * prior to 3.11.33 which contains the following change: 5470 * "ixgbe: Enable jumbo frames support w/ SR-IOV" 5471 */ 5472 ixgbevf_rlpml_set_vf(hw, max_frame); 5473 5474 /* update max frame size */ 5475 dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame; 5476 return 0; 5477 } 5478 5479 #define MAC_TYPE_FILTER_SUP_EXT(type) do {\ 5480 if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\ 5481 return -ENOTSUP;\ 5482 } while (0) 5483 5484 static inline struct ixgbe_5tuple_filter * 5485 ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, 5486 struct ixgbe_5tuple_filter_info *key) 5487 { 5488 struct ixgbe_5tuple_filter *it; 5489 5490 TAILQ_FOREACH(it, filter_list, entries) { 5491 if (memcmp(key, &it->filter_info, 5492 sizeof(struct ixgbe_5tuple_filter_info)) == 0) { 5493 return it; 5494 } 5495 } 5496 return NULL; 5497 } 5498 5499 /* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/ 5500 static inline int 5501 ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, 5502 struct ixgbe_5tuple_filter_info *filter_info) 5503 { 5504 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || 5505 filter->priority > IXGBE_5TUPLE_MAX_PRI || 5506 filter->priority < IXGBE_5TUPLE_MIN_PRI) 5507 return -EINVAL; 5508 5509 switch (filter->dst_ip_mask) { 5510 case UINT32_MAX: 5511 filter_info->dst_ip_mask = 0; 5512 filter_info->dst_ip = filter->dst_ip; 5513 break; 5514 case 0: 5515 filter_info->dst_ip_mask = 1; 5516 break; 5517 default: 5518 PMD_DRV_LOG(ERR, "invalid dst_ip mask."); 5519 return -EINVAL; 5520 } 5521 5522 switch (filter->src_ip_mask) { 5523 case UINT32_MAX: 5524 filter_info->src_ip_mask = 0; 5525 filter_info->src_ip = filter->src_ip; 5526 break; 5527 case 0: 5528 filter_info->src_ip_mask = 1; 5529 break; 5530 default: 5531 PMD_DRV_LOG(ERR, "invalid src_ip mask."); 5532 return -EINVAL; 5533 } 5534 5535 switch (filter->dst_port_mask) { 5536 case UINT16_MAX: 5537 filter_info->dst_port_mask = 0; 5538 filter_info->dst_port = filter->dst_port; 5539 break; 5540 case 0: 5541 filter_info->dst_port_mask = 1; 5542 break; 5543 default: 5544 PMD_DRV_LOG(ERR, "invalid dst_port mask."); 5545 return -EINVAL; 5546 } 5547 5548 switch (filter->src_port_mask) { 5549 case UINT16_MAX: 5550 filter_info->src_port_mask = 0; 5551 filter_info->src_port = filter->src_port; 5552 break; 5553 case 0: 5554 filter_info->src_port_mask = 1; 5555 break; 5556 default: 5557 PMD_DRV_LOG(ERR, "invalid src_port mask."); 5558 return -EINVAL; 5559 } 5560 5561 switch (filter->proto_mask) { 5562 case UINT8_MAX: 5563 filter_info->proto_mask = 0; 5564 filter_info->proto = 5565 convert_protocol_type(filter->proto); 5566 break; 5567 case 0: 5568 filter_info->proto_mask = 1; 5569 break; 5570 default: 5571 PMD_DRV_LOG(ERR, "invalid protocol mask."); 5572 return -EINVAL; 5573 } 5574 5575 filter_info->priority = (uint8_t)filter->priority; 5576 return 0; 5577 } 5578 5579 /* 5580 * add or delete a ntuple filter 5581 * 5582 * @param 5583 * dev: Pointer to struct rte_eth_dev. 5584 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 5585 * add: if true, add filter, if false, remove filter 5586 * 5587 * @return 5588 * - On success, zero. 5589 * - On failure, a negative value. 
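 *
 * A minimal usage sketch (illustrative only: port_id, the addresses and
 * the queue are made-up values, not defaults of this driver). An
 * application reaches this code through the generic filter API, which
 * lands in ixgbe_ntuple_filter_handle() below:
 *
 *	struct rte_eth_ntuple_filter f = {
 *		.flags = RTE_5TUPLE_FLAGS,
 *		.dst_ip = rte_cpu_to_be_32(0xC0A80001),	(192.168.0.1)
 *		.dst_ip_mask = UINT32_MAX,		(compare dst_ip)
 *		.src_ip_mask = 0,			(ignore src_ip)
 *		.dst_port = rte_cpu_to_be_16(80),
 *		.dst_port_mask = UINT16_MAX,
 *		.src_port_mask = 0,
 *		.proto = IPPROTO_TCP,
 *		.proto_mask = UINT8_MAX,
 *		.priority = 1,
 *		.queue = 4,
 *	};
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
 *				RTE_ETH_FILTER_ADD, &f);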
5590 */ 5591 static int 5592 ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, 5593 struct rte_eth_ntuple_filter *ntuple_filter, 5594 bool add) 5595 { 5596 struct ixgbe_filter_info *filter_info = 5597 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5598 struct ixgbe_5tuple_filter_info filter_5tuple; 5599 struct ixgbe_5tuple_filter *filter; 5600 int ret; 5601 5602 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { 5603 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 5604 return -EINVAL; 5605 } 5606 5607 memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); 5608 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); 5609 if (ret < 0) 5610 return ret; 5611 5612 filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, 5613 &filter_5tuple); 5614 if (filter != NULL && add) { 5615 PMD_DRV_LOG(ERR, "filter exists."); 5616 return -EEXIST; 5617 } 5618 if (filter == NULL && !add) { 5619 PMD_DRV_LOG(ERR, "filter doesn't exist."); 5620 return -ENOENT; 5621 } 5622 5623 if (add) { 5624 filter = rte_zmalloc("ixgbe_5tuple_filter", 5625 sizeof(struct ixgbe_5tuple_filter), 0); 5626 if (filter == NULL) 5627 return -ENOMEM; 5628 (void)rte_memcpy(&filter->filter_info, 5629 &filter_5tuple, 5630 sizeof(struct ixgbe_5tuple_filter_info)); 5631 filter->queue = ntuple_filter->queue; 5632 ret = ixgbe_add_5tuple_filter(dev, filter); 5633 if (ret < 0) { 5634 rte_free(filter); 5635 return ret; 5636 } 5637 } else 5638 ixgbe_remove_5tuple_filter(dev, filter); 5639 5640 return 0; 5641 } 5642 5643 /* 5644 * get a ntuple filter 5645 * 5646 * @param 5647 * dev: Pointer to struct rte_eth_dev. 5648 * ntuple_filter: Pointer to struct rte_eth_ntuple_filter 5649 * 5650 * @return 5651 * - On success, zero. 5652 * - On failure, a negative value. 5653 */ 5654 static int 5655 ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, 5656 struct rte_eth_ntuple_filter *ntuple_filter) 5657 { 5658 struct ixgbe_filter_info *filter_info = 5659 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5660 struct ixgbe_5tuple_filter_info filter_5tuple; 5661 struct ixgbe_5tuple_filter *filter; 5662 int ret; 5663 5664 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { 5665 PMD_DRV_LOG(ERR, "only 5tuple is supported."); 5666 return -EINVAL; 5667 } 5668 5669 memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); 5670 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); 5671 if (ret < 0) 5672 return ret; 5673 5674 filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, 5675 &filter_5tuple); 5676 if (filter == NULL) { 5677 PMD_DRV_LOG(ERR, "filter doesn't exist."); 5678 return -ENOENT; 5679 } 5680 ntuple_filter->queue = filter->queue; 5681 return 0; 5682 } 5683 5684 /* 5685 * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter. 5686 * @dev: pointer to rte_eth_dev structure 5687 * @filter_op:operation will be taken. 5688 * @arg: a pointer to specific structure corresponding to the filter_op 5689 * 5690 * @return 5691 * - On success, zero. 5692 * - On failure, a negative value. 
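 *
 * This is the RTE_ETH_FILTER_NTUPLE entry point reached from
 * ixgbe_dev_filter_ctrl(); MAC_TYPE_FILTER_SUP_EXT restricts it to
 * 82599 and X540 devices.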
5693 */ 5694 static int 5695 ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, 5696 enum rte_filter_op filter_op, 5697 void *arg) 5698 { 5699 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5700 int ret; 5701 5702 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); 5703 5704 if (filter_op == RTE_ETH_FILTER_NOP) 5705 return 0; 5706 5707 if (arg == NULL) { 5708 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 5709 filter_op); 5710 return -EINVAL; 5711 } 5712 5713 switch (filter_op) { 5714 case RTE_ETH_FILTER_ADD: 5715 ret = ixgbe_add_del_ntuple_filter(dev, 5716 (struct rte_eth_ntuple_filter *)arg, 5717 TRUE); 5718 break; 5719 case RTE_ETH_FILTER_DELETE: 5720 ret = ixgbe_add_del_ntuple_filter(dev, 5721 (struct rte_eth_ntuple_filter *)arg, 5722 FALSE); 5723 break; 5724 case RTE_ETH_FILTER_GET: 5725 ret = ixgbe_get_ntuple_filter(dev, 5726 (struct rte_eth_ntuple_filter *)arg); 5727 break; 5728 default: 5729 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 5730 ret = -EINVAL; 5731 break; 5732 } 5733 return ret; 5734 } 5735 5736 static inline int 5737 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info, 5738 uint16_t ethertype) 5739 { 5740 int i; 5741 5742 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 5743 if (filter_info->ethertype_filters[i] == ethertype && 5744 (filter_info->ethertype_mask & (1 << i))) 5745 return i; 5746 } 5747 return -1; 5748 } 5749 5750 static inline int 5751 ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info, 5752 uint16_t ethertype) 5753 { 5754 int i; 5755 5756 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 5757 if (!(filter_info->ethertype_mask & (1 << i))) { 5758 filter_info->ethertype_mask |= 1 << i; 5759 filter_info->ethertype_filters[i] = ethertype; 5760 return i; 5761 } 5762 } 5763 return -1; 5764 } 5765 5766 static inline int 5767 ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info, 5768 uint8_t idx) 5769 { 5770 if (idx >= IXGBE_MAX_ETQF_FILTERS) 5771 return -1; 5772 filter_info->ethertype_mask &= ~(1 << idx); 5773 filter_info->ethertype_filters[idx] = 0; 5774 return idx; 5775 } 5776 5777 static int 5778 ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, 5779 struct rte_eth_ethertype_filter *filter, 5780 bool add) 5781 { 5782 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5783 struct ixgbe_filter_info *filter_info = 5784 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5785 uint32_t etqf = 0; 5786 uint32_t etqs = 0; 5787 int ret; 5788 5789 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) 5790 return -EINVAL; 5791 5792 if (filter->ether_type == ETHER_TYPE_IPv4 || 5793 filter->ether_type == ETHER_TYPE_IPv6) { 5794 PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" 5795 " ethertype filter.", filter->ether_type); 5796 return -EINVAL; 5797 } 5798 5799 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { 5800 PMD_DRV_LOG(ERR, "mac compare is unsupported."); 5801 return -EINVAL; 5802 } 5803 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { 5804 PMD_DRV_LOG(ERR, "drop option is unsupported."); 5805 return -EINVAL; 5806 } 5807 5808 ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); 5809 if (ret >= 0 && add) { 5810 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", 5811 filter->ether_type); 5812 return -EEXIST; 5813 } 5814 if (ret < 0 && !add) { 5815 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 5816 filter->ether_type); 5817 return -ENOENT; 5818 } 5819 5820 if (add) { 5821 ret = ixgbe_ethertype_filter_insert(filter_info, 5822 
filter->ether_type); 5823 if (ret < 0) { 5824 PMD_DRV_LOG(ERR, "ethertype filters are full."); 5825 return -ENOSYS; 5826 } 5827 etqf = IXGBE_ETQF_FILTER_EN; 5828 etqf |= (uint32_t)filter->ether_type; 5829 etqs |= (uint32_t)((filter->queue << 5830 IXGBE_ETQS_RX_QUEUE_SHIFT) & 5831 IXGBE_ETQS_RX_QUEUE); 5832 etqs |= IXGBE_ETQS_QUEUE_EN; 5833 } else { 5834 ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret); 5835 if (ret < 0) 5836 return -ENOSYS; 5837 } 5838 IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); 5839 IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); 5840 IXGBE_WRITE_FLUSH(hw); 5841 5842 return 0; 5843 } 5844 5845 static int 5846 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, 5847 struct rte_eth_ethertype_filter *filter) 5848 { 5849 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5850 struct ixgbe_filter_info *filter_info = 5851 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); 5852 uint32_t etqf, etqs; 5853 int ret; 5854 5855 ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); 5856 if (ret < 0) { 5857 PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", 5858 filter->ether_type); 5859 return -ENOENT; 5860 } 5861 5862 etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret)); 5863 if (etqf & IXGBE_ETQF_FILTER_EN) { 5864 etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret)); 5865 filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE; 5866 filter->flags = 0; 5867 filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> 5868 IXGBE_ETQS_RX_QUEUE_SHIFT; 5869 return 0; 5870 } 5871 return -ENOENT; 5872 } 5873 5874 /* 5875 * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter. 5876 * @dev: pointer to rte_eth_dev structure 5877 * @filter_op:operation will be taken. 5878 * @arg: a pointer to specific structure corresponding to the filter_op 5879 */ 5880 static int 5881 ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, 5882 enum rte_filter_op filter_op, 5883 void *arg) 5884 { 5885 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5886 int ret; 5887 5888 MAC_TYPE_FILTER_SUP(hw->mac.type); 5889 5890 if (filter_op == RTE_ETH_FILTER_NOP) 5891 return 0; 5892 5893 if (arg == NULL) { 5894 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 5895 filter_op); 5896 return -EINVAL; 5897 } 5898 5899 switch (filter_op) { 5900 case RTE_ETH_FILTER_ADD: 5901 ret = ixgbe_add_del_ethertype_filter(dev, 5902 (struct rte_eth_ethertype_filter *)arg, 5903 TRUE); 5904 break; 5905 case RTE_ETH_FILTER_DELETE: 5906 ret = ixgbe_add_del_ethertype_filter(dev, 5907 (struct rte_eth_ethertype_filter *)arg, 5908 FALSE); 5909 break; 5910 case RTE_ETH_FILTER_GET: 5911 ret = ixgbe_get_ethertype_filter(dev, 5912 (struct rte_eth_ethertype_filter *)arg); 5913 break; 5914 default: 5915 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 5916 ret = -EINVAL; 5917 break; 5918 } 5919 return ret; 5920 } 5921 5922 static int 5923 ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, 5924 enum rte_filter_type filter_type, 5925 enum rte_filter_op filter_op, 5926 void *arg) 5927 { 5928 int ret = -EINVAL; 5929 5930 switch (filter_type) { 5931 case RTE_ETH_FILTER_NTUPLE: 5932 ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg); 5933 break; 5934 case RTE_ETH_FILTER_ETHERTYPE: 5935 ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg); 5936 break; 5937 case RTE_ETH_FILTER_SYN: 5938 ret = ixgbe_syn_filter_handle(dev, filter_op, arg); 5939 break; 5940 case RTE_ETH_FILTER_FDIR: 5941 ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg); 5942 break; 5943 case 
RTE_ETH_FILTER_L2_TUNNEL: 5944 ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg); 5945 break; 5946 default: 5947 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", 5948 filter_type); 5949 break; 5950 } 5951 5952 return ret; 5953 } 5954 5955 static u8 * 5956 ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw, 5957 u8 **mc_addr_ptr, u32 *vmdq) 5958 { 5959 u8 *mc_addr; 5960 5961 *vmdq = 0; 5962 mc_addr = *mc_addr_ptr; 5963 *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr)); 5964 return mc_addr; 5965 } 5966 5967 static int 5968 ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, 5969 struct ether_addr *mc_addr_set, 5970 uint32_t nb_mc_addr) 5971 { 5972 struct ixgbe_hw *hw; 5973 u8 *mc_addr_list; 5974 5975 hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5976 mc_addr_list = (u8 *)mc_addr_set; 5977 return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, 5978 ixgbe_dev_addr_list_itr, TRUE); 5979 } 5980 5981 static uint64_t 5982 ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) 5983 { 5984 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 5985 uint64_t systime_cycles; 5986 5987 switch (hw->mac.type) { 5988 case ixgbe_mac_X550: 5989 case ixgbe_mac_X550EM_x: 5990 case ixgbe_mac_X550EM_a: 5991 /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ 5992 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 5993 systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 5994 * NSEC_PER_SEC; 5995 break; 5996 default: 5997 systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); 5998 systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) 5999 << 32; 6000 } 6001 6002 return systime_cycles; 6003 } 6004 6005 static uint64_t 6006 ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6007 { 6008 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6009 uint64_t rx_tstamp_cycles; 6010 6011 switch (hw->mac.type) { 6012 case ixgbe_mac_X550: 6013 case ixgbe_mac_X550EM_x: 6014 case ixgbe_mac_X550EM_a: 6015 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6016 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6017 rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6018 * NSEC_PER_SEC; 6019 break; 6020 default: 6021 /* RXSTMPL stores ns and RXSTMPH stores seconds. */ 6022 rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 6023 rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) 6024 << 32; 6025 } 6026 6027 return rx_tstamp_cycles; 6028 } 6029 6030 static uint64_t 6031 ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) 6032 { 6033 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6034 uint64_t tx_tstamp_cycles; 6035 6036 switch (hw->mac.type) { 6037 case ixgbe_mac_X550: 6038 case ixgbe_mac_X550EM_x: 6039 case ixgbe_mac_X550EM_a: 6040 /* TXSTMPL stores ns and TXSTMPH stores seconds. */ 6041 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6042 tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6043 * NSEC_PER_SEC; 6044 break; 6045 default: 6046 /* TXSTMPL stores ns and TXSTMPH stores seconds. 
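 * That is the X550-class layout handled above; in this default branch,
 * used for the older MACs, TXSTMPL and TXSTMPH are simply the low and
 * high 32 bits of the raw timestamp cycle count and are combined below.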
*/ 6047 tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); 6048 tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) 6049 << 32; 6050 } 6051 6052 return tx_tstamp_cycles; 6053 } 6054 6055 static void 6056 ixgbe_start_timecounters(struct rte_eth_dev *dev) 6057 { 6058 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6059 struct ixgbe_adapter *adapter = 6060 (struct ixgbe_adapter *)dev->data->dev_private; 6061 struct rte_eth_link link; 6062 uint32_t incval = 0; 6063 uint32_t shift = 0; 6064 6065 /* Get current link speed. */ 6066 memset(&link, 0, sizeof(link)); 6067 ixgbe_dev_link_update(dev, 1); 6068 rte_ixgbe_dev_atomic_read_link_status(dev, &link); 6069 6070 switch (link.link_speed) { 6071 case ETH_SPEED_NUM_100M: 6072 incval = IXGBE_INCVAL_100; 6073 shift = IXGBE_INCVAL_SHIFT_100; 6074 break; 6075 case ETH_SPEED_NUM_1G: 6076 incval = IXGBE_INCVAL_1GB; 6077 shift = IXGBE_INCVAL_SHIFT_1GB; 6078 break; 6079 case ETH_SPEED_NUM_10G: 6080 default: 6081 incval = IXGBE_INCVAL_10GB; 6082 shift = IXGBE_INCVAL_SHIFT_10GB; 6083 break; 6084 } 6085 6086 switch (hw->mac.type) { 6087 case ixgbe_mac_X550: 6088 case ixgbe_mac_X550EM_x: 6089 case ixgbe_mac_X550EM_a: 6090 /* Independent of link speed. */ 6091 incval = 1; 6092 /* Cycles read will be interpreted as ns. */ 6093 shift = 0; 6094 /* Fall-through */ 6095 case ixgbe_mac_X540: 6096 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); 6097 break; 6098 case ixgbe_mac_82599EB: 6099 incval >>= IXGBE_INCVAL_SHIFT_82599; 6100 shift -= IXGBE_INCVAL_SHIFT_82599; 6101 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 6102 (1 << IXGBE_INCPER_SHIFT_82599) | incval); 6103 break; 6104 default: 6105 /* Not supported. */ 6106 return; 6107 } 6108 6109 memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); 6110 memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6111 memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); 6112 6113 adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6114 adapter->systime_tc.cc_shift = shift; 6115 adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; 6116 6117 adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6118 adapter->rx_tstamp_tc.cc_shift = shift; 6119 adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6120 6121 adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; 6122 adapter->tx_tstamp_tc.cc_shift = shift; 6123 adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; 6124 } 6125 6126 static int 6127 ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) 6128 { 6129 struct ixgbe_adapter *adapter = 6130 (struct ixgbe_adapter *)dev->data->dev_private; 6131 6132 adapter->systime_tc.nsec += delta; 6133 adapter->rx_tstamp_tc.nsec += delta; 6134 adapter->tx_tstamp_tc.nsec += delta; 6135 6136 return 0; 6137 } 6138 6139 static int 6140 ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) 6141 { 6142 uint64_t ns; 6143 struct ixgbe_adapter *adapter = 6144 (struct ixgbe_adapter *)dev->data->dev_private; 6145 6146 ns = rte_timespec_to_ns(ts); 6147 /* Set the timecounters to a new value. 
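 * As with ixgbe_timesync_adjust_time() above, only the software
 * rte_timecounter state kept in the adapter is updated; the SYSTIM
 * hardware registers themselves are not rewritten here.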
*/ 6148 adapter->systime_tc.nsec = ns; 6149 adapter->rx_tstamp_tc.nsec = ns; 6150 adapter->tx_tstamp_tc.nsec = ns; 6151 6152 return 0; 6153 } 6154 6155 static int 6156 ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) 6157 { 6158 uint64_t ns, systime_cycles; 6159 struct ixgbe_adapter *adapter = 6160 (struct ixgbe_adapter *)dev->data->dev_private; 6161 6162 systime_cycles = ixgbe_read_systime_cyclecounter(dev); 6163 ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); 6164 *ts = rte_ns_to_timespec(ns); 6165 6166 return 0; 6167 } 6168 6169 static int 6170 ixgbe_timesync_enable(struct rte_eth_dev *dev) 6171 { 6172 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6173 uint32_t tsync_ctl; 6174 uint32_t tsauxc; 6175 6176 /* Stop the timesync system time. */ 6177 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0); 6178 /* Reset the timesync system time value. */ 6179 IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0); 6180 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0); 6181 6182 /* Enable system time for platforms where it isn't on by default. */ 6183 tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); 6184 tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME; 6185 IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); 6186 6187 ixgbe_start_timecounters(dev); 6188 6189 /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 6190 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 6191 (ETHER_TYPE_1588 | 6192 IXGBE_ETQF_FILTER_EN | 6193 IXGBE_ETQF_1588)); 6194 6195 /* Enable timestamping of received PTP packets. */ 6196 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6197 tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED; 6198 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); 6199 6200 /* Enable timestamping of transmitted PTP packets. */ 6201 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6202 tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED; 6203 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); 6204 6205 IXGBE_WRITE_FLUSH(hw); 6206 6207 return 0; 6208 } 6209 6210 static int 6211 ixgbe_timesync_disable(struct rte_eth_dev *dev) 6212 { 6213 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6214 uint32_t tsync_ctl; 6215 6216 /* Disable timestamping of transmitted PTP packets. */ 6217 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6218 tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED; 6219 IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); 6220 6221 /* Disable timestamping of received PTP packets. */ 6222 tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6223 tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED; 6224 IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); 6225 6226 /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ 6227 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); 6228 6229 /* Stop incrementating the System Time registers. 
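 * (i.e. stop incrementing them: clearing TIMINCA below freezes SYSTIM,
 * mirroring the TIMINCA write at the start of ixgbe_timesync_enable().)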
*/ 6230 IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); 6231 6232 return 0; 6233 } 6234 6235 static int 6236 ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 6237 struct timespec *timestamp, 6238 uint32_t flags __rte_unused) 6239 { 6240 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6241 struct ixgbe_adapter *adapter = 6242 (struct ixgbe_adapter *)dev->data->dev_private; 6243 uint32_t tsync_rxctl; 6244 uint64_t rx_tstamp_cycles; 6245 uint64_t ns; 6246 6247 tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 6248 if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0) 6249 return -EINVAL; 6250 6251 rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev); 6252 ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); 6253 *timestamp = rte_ns_to_timespec(ns); 6254 6255 return 0; 6256 } 6257 6258 static int 6259 ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 6260 struct timespec *timestamp) 6261 { 6262 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6263 struct ixgbe_adapter *adapter = 6264 (struct ixgbe_adapter *)dev->data->dev_private; 6265 uint32_t tsync_txctl; 6266 uint64_t tx_tstamp_cycles; 6267 uint64_t ns; 6268 6269 tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 6270 if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0) 6271 return -EINVAL; 6272 6273 tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev); 6274 ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); 6275 *timestamp = rte_ns_to_timespec(ns); 6276 6277 return 0; 6278 } 6279 6280 static int 6281 ixgbe_get_reg_length(struct rte_eth_dev *dev) 6282 { 6283 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6284 int count = 0; 6285 int g_ind = 0; 6286 const struct reg_info *reg_group; 6287 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 6288 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 6289 6290 while ((reg_group = reg_set[g_ind++])) 6291 count += ixgbe_regs_group_count(reg_group); 6292 6293 return count; 6294 } 6295 6296 static int 6297 ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) 6298 { 6299 int count = 0; 6300 int g_ind = 0; 6301 const struct reg_info *reg_group; 6302 6303 while ((reg_group = ixgbevf_regs[g_ind++])) 6304 count += ixgbe_regs_group_count(reg_group); 6305 6306 return count; 6307 } 6308 6309 static int 6310 ixgbe_get_regs(struct rte_eth_dev *dev, 6311 struct rte_dev_reg_info *regs) 6312 { 6313 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6314 uint32_t *data = regs->data; 6315 int g_ind = 0; 6316 int count = 0; 6317 const struct reg_info *reg_group; 6318 const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 
6319 ixgbe_regs_mac_82598EB : ixgbe_regs_others; 6320 6321 if (data == NULL) { 6322 regs->length = ixgbe_get_reg_length(dev); 6323 regs->width = sizeof(uint32_t); 6324 return 0; 6325 } 6326 6327 /* Support only full register dump */ 6328 if ((regs->length == 0) || 6329 (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { 6330 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 6331 hw->device_id; 6332 while ((reg_group = reg_set[g_ind++])) 6333 count += ixgbe_read_regs_group(dev, &data[count], 6334 reg_group); 6335 return 0; 6336 } 6337 6338 return -ENOTSUP; 6339 } 6340 6341 static int 6342 ixgbevf_get_regs(struct rte_eth_dev *dev, 6343 struct rte_dev_reg_info *regs) 6344 { 6345 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6346 uint32_t *data = regs->data; 6347 int g_ind = 0; 6348 int count = 0; 6349 const struct reg_info *reg_group; 6350 6351 if (data == NULL) { 6352 regs->length = ixgbevf_get_reg_length(dev); 6353 regs->width = sizeof(uint32_t); 6354 return 0; 6355 } 6356 6357 /* Support only full register dump */ 6358 if ((regs->length == 0) || 6359 (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { 6360 regs->version = hw->mac.type << 24 | hw->revision_id << 16 | 6361 hw->device_id; 6362 while ((reg_group = ixgbevf_regs[g_ind++])) 6363 count += ixgbe_read_regs_group(dev, &data[count], 6364 reg_group); 6365 return 0; 6366 } 6367 6368 return -ENOTSUP; 6369 } 6370 6371 static int 6372 ixgbe_get_eeprom_length(struct rte_eth_dev *dev) 6373 { 6374 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6375 6376 /* Return unit is byte count */ 6377 return hw->eeprom.word_size * 2; 6378 } 6379 6380 static int 6381 ixgbe_get_eeprom(struct rte_eth_dev *dev, 6382 struct rte_dev_eeprom_info *in_eeprom) 6383 { 6384 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6385 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 6386 uint16_t *data = in_eeprom->data; 6387 int first, length; 6388 6389 first = in_eeprom->offset >> 1; 6390 length = in_eeprom->length >> 1; 6391 if ((first > hw->eeprom.word_size) || 6392 ((first + length) > hw->eeprom.word_size)) 6393 return -EINVAL; 6394 6395 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 6396 6397 return eeprom->ops.read_buffer(hw, first, length, data); 6398 } 6399 6400 static int 6401 ixgbe_set_eeprom(struct rte_eth_dev *dev, 6402 struct rte_dev_eeprom_info *in_eeprom) 6403 { 6404 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6405 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 6406 uint16_t *data = in_eeprom->data; 6407 int first, length; 6408 6409 first = in_eeprom->offset >> 1; 6410 length = in_eeprom->length >> 1; 6411 if ((first > hw->eeprom.word_size) || 6412 ((first + length) > hw->eeprom.word_size)) 6413 return -EINVAL; 6414 6415 in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); 6416 6417 return eeprom->ops.write_buffer(hw, first, length, data); 6418 } 6419 6420 uint16_t 6421 ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { 6422 switch (mac_type) { 6423 case ixgbe_mac_X550: 6424 case ixgbe_mac_X550EM_x: 6425 case ixgbe_mac_X550EM_a: 6426 return ETH_RSS_RETA_SIZE_512; 6427 case ixgbe_mac_X550_vf: 6428 case ixgbe_mac_X550EM_x_vf: 6429 case ixgbe_mac_X550EM_a_vf: 6430 return ETH_RSS_RETA_SIZE_64; 6431 default: 6432 return ETH_RSS_RETA_SIZE_128; 6433 } 6434 } 6435 6436 uint32_t 6437 ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { 6438 switch (mac_type) { 6439 case ixgbe_mac_X550: 6440 case ixgbe_mac_X550EM_x: 
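	/*
	 * Each 32-bit RETA register packs four table entries, hence
	 * the ">> 2" below.  On the X550 family the 512-entry table is
	 * split: indices 0-127 live in RETA[], indices 128-511 in
	 * ERETA[].
	 */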
6441 case ixgbe_mac_X550EM_a: 6442 if (reta_idx < ETH_RSS_RETA_SIZE_128) 6443 return IXGBE_RETA(reta_idx >> 2); 6444 else 6445 return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2); 6446 case ixgbe_mac_X550_vf: 6447 case ixgbe_mac_X550EM_x_vf: 6448 case ixgbe_mac_X550EM_a_vf: 6449 return IXGBE_VFRETA(reta_idx >> 2); 6450 default: 6451 return IXGBE_RETA(reta_idx >> 2); 6452 } 6453 } 6454 6455 uint32_t 6456 ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { 6457 switch (mac_type) { 6458 case ixgbe_mac_X550_vf: 6459 case ixgbe_mac_X550EM_x_vf: 6460 case ixgbe_mac_X550EM_a_vf: 6461 return IXGBE_VFMRQC; 6462 default: 6463 return IXGBE_MRQC; 6464 } 6465 } 6466 6467 uint32_t 6468 ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { 6469 switch (mac_type) { 6470 case ixgbe_mac_X550_vf: 6471 case ixgbe_mac_X550EM_x_vf: 6472 case ixgbe_mac_X550EM_a_vf: 6473 return IXGBE_VFRSSRK(i); 6474 default: 6475 return IXGBE_RSSRK(i); 6476 } 6477 } 6478 6479 bool 6480 ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { 6481 switch (mac_type) { 6482 case ixgbe_mac_82599_vf: 6483 case ixgbe_mac_X540_vf: 6484 return 0; 6485 default: 6486 return 1; 6487 } 6488 } 6489 6490 static int 6491 ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, 6492 struct rte_eth_dcb_info *dcb_info) 6493 { 6494 struct ixgbe_dcb_config *dcb_config = 6495 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); 6496 struct ixgbe_dcb_tc_config *tc; 6497 uint8_t i, j; 6498 6499 if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) 6500 dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; 6501 else 6502 dcb_info->nb_tcs = 1; 6503 6504 if (dcb_config->vt_mode) { /* vt is enabled*/ 6505 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = 6506 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; 6507 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) 6508 dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; 6509 for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { 6510 for (j = 0; j < dcb_info->nb_tcs; j++) { 6511 dcb_info->tc_queue.tc_rxq[i][j].base = 6512 i * dcb_info->nb_tcs + j; 6513 dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1; 6514 dcb_info->tc_queue.tc_txq[i][j].base = 6515 i * dcb_info->nb_tcs + j; 6516 dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1; 6517 } 6518 } 6519 } else { /* vt is disabled*/ 6520 struct rte_eth_dcb_rx_conf *rx_conf = 6521 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; 6522 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) 6523 dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; 6524 if (dcb_info->nb_tcs == ETH_4_TCS) { 6525 for (i = 0; i < dcb_info->nb_tcs; i++) { 6526 dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; 6527 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 6528 } 6529 dcb_info->tc_queue.tc_txq[0][0].base = 0; 6530 dcb_info->tc_queue.tc_txq[0][1].base = 64; 6531 dcb_info->tc_queue.tc_txq[0][2].base = 96; 6532 dcb_info->tc_queue.tc_txq[0][3].base = 112; 6533 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; 6534 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 6535 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 6536 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 6537 } else if (dcb_info->nb_tcs == ETH_8_TCS) { 6538 for (i = 0; i < dcb_info->nb_tcs; i++) { 6539 dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; 6540 dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; 6541 } 6542 dcb_info->tc_queue.tc_txq[0][0].base = 0; 6543 dcb_info->tc_queue.tc_txq[0][1].base = 32; 6544 dcb_info->tc_queue.tc_txq[0][2].base = 64; 6545 dcb_info->tc_queue.tc_txq[0][3].base = 80; 6546 dcb_info->tc_queue.tc_txq[0][4].base = 96; 6547 
dcb_info->tc_queue.tc_txq[0][5].base = 104; 6548 dcb_info->tc_queue.tc_txq[0][6].base = 112; 6549 dcb_info->tc_queue.tc_txq[0][7].base = 120; 6550 dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; 6551 dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; 6552 dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; 6553 dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; 6554 dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; 6555 dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; 6556 dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; 6557 dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; 6558 } 6559 } 6560 for (i = 0; i < dcb_info->nb_tcs; i++) { 6561 tc = &dcb_config->tc_config[i]; 6562 dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent; 6563 } 6564 return 0; 6565 } 6566 6567 /* Update e-tag ether type */ 6568 static int 6569 ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, 6570 uint16_t ether_type) 6571 { 6572 uint32_t etag_etype; 6573 6574 if (hw->mac.type != ixgbe_mac_X550 && 6575 hw->mac.type != ixgbe_mac_X550EM_x && 6576 hw->mac.type != ixgbe_mac_X550EM_a) { 6577 return -ENOTSUP; 6578 } 6579 6580 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 6581 etag_etype &= ~IXGBE_ETAG_ETYPE_MASK; 6582 etag_etype |= ether_type; 6583 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 6584 IXGBE_WRITE_FLUSH(hw); 6585 6586 return 0; 6587 } 6588 6589 /* Config l2 tunnel ether type */ 6590 static int 6591 ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev, 6592 struct rte_eth_l2_tunnel_conf *l2_tunnel) 6593 { 6594 int ret = 0; 6595 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6596 6597 if (l2_tunnel == NULL) 6598 return -EINVAL; 6599 6600 switch (l2_tunnel->l2_tunnel_type) { 6601 case RTE_L2_TUNNEL_TYPE_E_TAG: 6602 ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type); 6603 break; 6604 default: 6605 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 6606 ret = -EINVAL; 6607 break; 6608 } 6609 6610 return ret; 6611 } 6612 6613 /* Enable e-tag tunnel */ 6614 static int 6615 ixgbe_e_tag_enable(struct ixgbe_hw *hw) 6616 { 6617 uint32_t etag_etype; 6618 6619 if (hw->mac.type != ixgbe_mac_X550 && 6620 hw->mac.type != ixgbe_mac_X550EM_x && 6621 hw->mac.type != ixgbe_mac_X550EM_a) { 6622 return -ENOTSUP; 6623 } 6624 6625 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 6626 etag_etype |= IXGBE_ETAG_ETYPE_VALID; 6627 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 6628 IXGBE_WRITE_FLUSH(hw); 6629 6630 return 0; 6631 } 6632 6633 /* Enable l2 tunnel */ 6634 static int 6635 ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev, 6636 enum rte_eth_tunnel_type l2_tunnel_type) 6637 { 6638 int ret = 0; 6639 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6640 6641 switch (l2_tunnel_type) { 6642 case RTE_L2_TUNNEL_TYPE_E_TAG: 6643 ret = ixgbe_e_tag_enable(hw); 6644 break; 6645 default: 6646 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 6647 ret = -EINVAL; 6648 break; 6649 } 6650 6651 return ret; 6652 } 6653 6654 /* Disable e-tag tunnel */ 6655 static int 6656 ixgbe_e_tag_disable(struct ixgbe_hw *hw) 6657 { 6658 uint32_t etag_etype; 6659 6660 if (hw->mac.type != ixgbe_mac_X550 && 6661 hw->mac.type != ixgbe_mac_X550EM_x && 6662 hw->mac.type != ixgbe_mac_X550EM_a) { 6663 return -ENOTSUP; 6664 } 6665 6666 etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); 6667 etag_etype &= ~IXGBE_ETAG_ETYPE_VALID; 6668 IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); 6669 IXGBE_WRITE_FLUSH(hw); 6670 6671 return 0; 6672 } 6673 6674 /* Disable l2 tunnel */ 6675 static int 6676 ixgbe_dev_l2_tunnel_disable(struct 
rte_eth_dev *dev, 6677 enum rte_eth_tunnel_type l2_tunnel_type) 6678 { 6679 int ret = 0; 6680 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6681 6682 switch (l2_tunnel_type) { 6683 case RTE_L2_TUNNEL_TYPE_E_TAG: 6684 ret = ixgbe_e_tag_disable(hw); 6685 break; 6686 default: 6687 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 6688 ret = -EINVAL; 6689 break; 6690 } 6691 6692 return ret; 6693 } 6694 6695 static int 6696 ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, 6697 struct rte_eth_l2_tunnel_conf *l2_tunnel) 6698 { 6699 int ret = 0; 6700 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6701 uint32_t i, rar_entries; 6702 uint32_t rar_low, rar_high; 6703 6704 if (hw->mac.type != ixgbe_mac_X550 && 6705 hw->mac.type != ixgbe_mac_X550EM_x && 6706 hw->mac.type != ixgbe_mac_X550EM_a) { 6707 return -ENOTSUP; 6708 } 6709 6710 rar_entries = ixgbe_get_num_rx_addrs(hw); 6711 6712 for (i = 1; i < rar_entries; i++) { 6713 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 6714 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 6715 if ((rar_high & IXGBE_RAH_AV) && 6716 (rar_high & IXGBE_RAH_ADTYPE) && 6717 ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) == 6718 l2_tunnel->tunnel_id)) { 6719 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 6720 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 6721 6722 ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL); 6723 6724 return ret; 6725 } 6726 } 6727 6728 return ret; 6729 } 6730 6731 static int 6732 ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, 6733 struct rte_eth_l2_tunnel_conf *l2_tunnel) 6734 { 6735 int ret = 0; 6736 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6737 uint32_t i, rar_entries; 6738 uint32_t rar_low, rar_high; 6739 6740 if (hw->mac.type != ixgbe_mac_X550 && 6741 hw->mac.type != ixgbe_mac_X550EM_x && 6742 hw->mac.type != ixgbe_mac_X550EM_a) { 6743 return -ENOTSUP; 6744 } 6745 6746 /* One entry for one tunnel. Try to remove potential existing entry. */ 6747 ixgbe_e_tag_filter_del(dev, l2_tunnel); 6748 6749 rar_entries = ixgbe_get_num_rx_addrs(hw); 6750 6751 for (i = 1; i < rar_entries; i++) { 6752 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 6753 if (rar_high & IXGBE_RAH_AV) { 6754 continue; 6755 } else { 6756 ixgbe_set_vmdq(hw, i, l2_tunnel->pool); 6757 rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; 6758 rar_low = l2_tunnel->tunnel_id; 6759 6760 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); 6761 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); 6762 6763 return ret; 6764 } 6765 } 6766 6767 PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." 
6768 " Please remove a rule before adding a new one."); 6769 return -EINVAL; 6770 } 6771 6772 /* Add l2 tunnel filter */ 6773 static int 6774 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, 6775 struct rte_eth_l2_tunnel_conf *l2_tunnel) 6776 { 6777 int ret = 0; 6778 6779 switch (l2_tunnel->l2_tunnel_type) { 6780 case RTE_L2_TUNNEL_TYPE_E_TAG: 6781 ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); 6782 break; 6783 default: 6784 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 6785 ret = -EINVAL; 6786 break; 6787 } 6788 6789 return ret; 6790 } 6791 6792 /* Delete l2 tunnel filter */ 6793 static int 6794 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, 6795 struct rte_eth_l2_tunnel_conf *l2_tunnel) 6796 { 6797 int ret = 0; 6798 6799 switch (l2_tunnel->l2_tunnel_type) { 6800 case RTE_L2_TUNNEL_TYPE_E_TAG: 6801 ret = ixgbe_e_tag_filter_del(dev, l2_tunnel); 6802 break; 6803 default: 6804 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 6805 ret = -EINVAL; 6806 break; 6807 } 6808 6809 return ret; 6810 } 6811 6812 /** 6813 * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter. 6814 * @dev: pointer to rte_eth_dev structure 6815 * @filter_op:operation will be taken. 6816 * @arg: a pointer to specific structure corresponding to the filter_op 6817 */ 6818 static int 6819 ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, 6820 enum rte_filter_op filter_op, 6821 void *arg) 6822 { 6823 int ret = 0; 6824 6825 if (filter_op == RTE_ETH_FILTER_NOP) 6826 return 0; 6827 6828 if (arg == NULL) { 6829 PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", 6830 filter_op); 6831 return -EINVAL; 6832 } 6833 6834 switch (filter_op) { 6835 case RTE_ETH_FILTER_ADD: 6836 ret = ixgbe_dev_l2_tunnel_filter_add 6837 (dev, 6838 (struct rte_eth_l2_tunnel_conf *)arg); 6839 break; 6840 case RTE_ETH_FILTER_DELETE: 6841 ret = ixgbe_dev_l2_tunnel_filter_del 6842 (dev, 6843 (struct rte_eth_l2_tunnel_conf *)arg); 6844 break; 6845 default: 6846 PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); 6847 ret = -EINVAL; 6848 break; 6849 } 6850 return ret; 6851 } 6852 6853 static int 6854 ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) 6855 { 6856 int ret = 0; 6857 uint32_t ctrl; 6858 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6859 6860 if (hw->mac.type != ixgbe_mac_X550 && 6861 hw->mac.type != ixgbe_mac_X550EM_x && 6862 hw->mac.type != ixgbe_mac_X550EM_a) { 6863 return -ENOTSUP; 6864 } 6865 6866 ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); 6867 ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; 6868 if (en) 6869 ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG; 6870 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); 6871 6872 return ret; 6873 } 6874 6875 /* Enable l2 tunnel forwarding */ 6876 static int 6877 ixgbe_dev_l2_tunnel_forwarding_enable 6878 (struct rte_eth_dev *dev, 6879 enum rte_eth_tunnel_type l2_tunnel_type) 6880 { 6881 int ret = 0; 6882 6883 switch (l2_tunnel_type) { 6884 case RTE_L2_TUNNEL_TYPE_E_TAG: 6885 ret = ixgbe_e_tag_forwarding_en_dis(dev, 1); 6886 break; 6887 default: 6888 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 6889 ret = -EINVAL; 6890 break; 6891 } 6892 6893 return ret; 6894 } 6895 6896 /* Disable l2 tunnel forwarding */ 6897 static int 6898 ixgbe_dev_l2_tunnel_forwarding_disable 6899 (struct rte_eth_dev *dev, 6900 enum rte_eth_tunnel_type l2_tunnel_type) 6901 { 6902 int ret = 0; 6903 6904 switch (l2_tunnel_type) { 6905 case RTE_L2_TUNNEL_TYPE_E_TAG: 6906 ret = ixgbe_e_tag_forwarding_en_dis(dev, 0); 6907 break; 6908 default: 6909 PMD_DRV_LOG(ERR, "Invalid tunnel 
type"); 6910 ret = -EINVAL; 6911 break; 6912 } 6913 6914 return ret; 6915 } 6916 6917 static int 6918 ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev, 6919 struct rte_eth_l2_tunnel_conf *l2_tunnel, 6920 bool en) 6921 { 6922 int ret = 0; 6923 uint32_t vmtir, vmvir; 6924 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 6925 6926 if (l2_tunnel->vf_id >= dev->pci_dev->max_vfs) { 6927 PMD_DRV_LOG(ERR, 6928 "VF id %u should be less than %u", 6929 l2_tunnel->vf_id, 6930 dev->pci_dev->max_vfs); 6931 return -EINVAL; 6932 } 6933 6934 if (hw->mac.type != ixgbe_mac_X550 && 6935 hw->mac.type != ixgbe_mac_X550EM_x && 6936 hw->mac.type != ixgbe_mac_X550EM_a) { 6937 return -ENOTSUP; 6938 } 6939 6940 if (en) 6941 vmtir = l2_tunnel->tunnel_id; 6942 else 6943 vmtir = 0; 6944 6945 IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir); 6946 6947 vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id)); 6948 vmvir &= ~IXGBE_VMVIR_TAGA_MASK; 6949 if (en) 6950 vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT; 6951 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir); 6952 6953 return ret; 6954 } 6955 6956 /* Enable l2 tunnel tag insertion */ 6957 static int 6958 ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev, 6959 struct rte_eth_l2_tunnel_conf *l2_tunnel) 6960 { 6961 int ret = 0; 6962 6963 switch (l2_tunnel->l2_tunnel_type) { 6964 case RTE_L2_TUNNEL_TYPE_E_TAG: 6965 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1); 6966 break; 6967 default: 6968 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 6969 ret = -EINVAL; 6970 break; 6971 } 6972 6973 return ret; 6974 } 6975 6976 /* Disable l2 tunnel tag insertion */ 6977 static int 6978 ixgbe_dev_l2_tunnel_insertion_disable 6979 (struct rte_eth_dev *dev, 6980 struct rte_eth_l2_tunnel_conf *l2_tunnel) 6981 { 6982 int ret = 0; 6983 6984 switch (l2_tunnel->l2_tunnel_type) { 6985 case RTE_L2_TUNNEL_TYPE_E_TAG: 6986 ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0); 6987 break; 6988 default: 6989 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 6990 ret = -EINVAL; 6991 break; 6992 } 6993 6994 return ret; 6995 } 6996 6997 static int 6998 ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev, 6999 bool en) 7000 { 7001 int ret = 0; 7002 uint32_t qde; 7003 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7004 7005 if (hw->mac.type != ixgbe_mac_X550 && 7006 hw->mac.type != ixgbe_mac_X550EM_x && 7007 hw->mac.type != ixgbe_mac_X550EM_a) { 7008 return -ENOTSUP; 7009 } 7010 7011 qde = IXGBE_READ_REG(hw, IXGBE_QDE); 7012 if (en) 7013 qde |= IXGBE_QDE_STRIP_TAG; 7014 else 7015 qde &= ~IXGBE_QDE_STRIP_TAG; 7016 qde &= ~IXGBE_QDE_READ; 7017 qde |= IXGBE_QDE_WRITE; 7018 IXGBE_WRITE_REG(hw, IXGBE_QDE, qde); 7019 7020 return ret; 7021 } 7022 7023 /* Enable l2 tunnel tag stripping */ 7024 static int 7025 ixgbe_dev_l2_tunnel_stripping_enable 7026 (struct rte_eth_dev *dev, 7027 enum rte_eth_tunnel_type l2_tunnel_type) 7028 { 7029 int ret = 0; 7030 7031 switch (l2_tunnel_type) { 7032 case RTE_L2_TUNNEL_TYPE_E_TAG: 7033 ret = ixgbe_e_tag_stripping_en_dis(dev, 1); 7034 break; 7035 default: 7036 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7037 ret = -EINVAL; 7038 break; 7039 } 7040 7041 return ret; 7042 } 7043 7044 /* Disable l2 tunnel tag stripping */ 7045 static int 7046 ixgbe_dev_l2_tunnel_stripping_disable 7047 (struct rte_eth_dev *dev, 7048 enum rte_eth_tunnel_type l2_tunnel_type) 7049 { 7050 int ret = 0; 7051 7052 switch (l2_tunnel_type) { 7053 case RTE_L2_TUNNEL_TYPE_E_TAG: 7054 ret = ixgbe_e_tag_stripping_en_dis(dev, 0); 7055 break; 7056 
default: 7057 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7058 ret = -EINVAL; 7059 break; 7060 } 7061 7062 return ret; 7063 } 7064 7065 /* Enable/disable l2 tunnel offload functions */ 7066 static int 7067 ixgbe_dev_l2_tunnel_offload_set 7068 (struct rte_eth_dev *dev, 7069 struct rte_eth_l2_tunnel_conf *l2_tunnel, 7070 uint32_t mask, 7071 uint8_t en) 7072 { 7073 int ret = 0; 7074 7075 if (l2_tunnel == NULL) 7076 return -EINVAL; 7077 7078 ret = -EINVAL; 7079 if (mask & ETH_L2_TUNNEL_ENABLE_MASK) { 7080 if (en) 7081 ret = ixgbe_dev_l2_tunnel_enable( 7082 dev, 7083 l2_tunnel->l2_tunnel_type); 7084 else 7085 ret = ixgbe_dev_l2_tunnel_disable( 7086 dev, 7087 l2_tunnel->l2_tunnel_type); 7088 } 7089 7090 if (mask & ETH_L2_TUNNEL_INSERTION_MASK) { 7091 if (en) 7092 ret = ixgbe_dev_l2_tunnel_insertion_enable( 7093 dev, 7094 l2_tunnel); 7095 else 7096 ret = ixgbe_dev_l2_tunnel_insertion_disable( 7097 dev, 7098 l2_tunnel); 7099 } 7100 7101 if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) { 7102 if (en) 7103 ret = ixgbe_dev_l2_tunnel_stripping_enable( 7104 dev, 7105 l2_tunnel->l2_tunnel_type); 7106 else 7107 ret = ixgbe_dev_l2_tunnel_stripping_disable( 7108 dev, 7109 l2_tunnel->l2_tunnel_type); 7110 } 7111 7112 if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) { 7113 if (en) 7114 ret = ixgbe_dev_l2_tunnel_forwarding_enable( 7115 dev, 7116 l2_tunnel->l2_tunnel_type); 7117 else 7118 ret = ixgbe_dev_l2_tunnel_forwarding_disable( 7119 dev, 7120 l2_tunnel->l2_tunnel_type); 7121 } 7122 7123 return ret; 7124 } 7125 7126 static int 7127 ixgbe_update_vxlan_port(struct ixgbe_hw *hw, 7128 uint16_t port) 7129 { 7130 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port); 7131 IXGBE_WRITE_FLUSH(hw); 7132 7133 return 0; 7134 } 7135 7136 /* There's only one register for VxLAN UDP port. 7137 * So, we cannot add several ports. Will update it. 7138 */ 7139 static int 7140 ixgbe_add_vxlan_port(struct ixgbe_hw *hw, 7141 uint16_t port) 7142 { 7143 if (port == 0) { 7144 PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); 7145 return -EINVAL; 7146 } 7147 7148 return ixgbe_update_vxlan_port(hw, port); 7149 } 7150 7151 /* We cannot delete the VxLAN port. For there's a register for VxLAN 7152 * UDP port, it must have a value. 7153 * So, will reset it to the original value 0. 
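 * (A port can therefore only be "deleted" by passing the value that is
 * currently programmed; the check against VXLANCTRL below enforces this
 * before the register is cleared back to 0.)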
7154 */ 7155 static int 7156 ixgbe_del_vxlan_port(struct ixgbe_hw *hw, 7157 uint16_t port) 7158 { 7159 uint16_t cur_port; 7160 7161 cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL); 7162 7163 if (cur_port != port) { 7164 PMD_DRV_LOG(ERR, "Port %u does not exist.", port); 7165 return -EINVAL; 7166 } 7167 7168 return ixgbe_update_vxlan_port(hw, 0); 7169 } 7170 7171 /* Add UDP tunneling port */ 7172 static int 7173 ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, 7174 struct rte_eth_udp_tunnel *udp_tunnel) 7175 { 7176 int ret = 0; 7177 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7178 7179 if (hw->mac.type != ixgbe_mac_X550 && 7180 hw->mac.type != ixgbe_mac_X550EM_x && 7181 hw->mac.type != ixgbe_mac_X550EM_a) { 7182 return -ENOTSUP; 7183 } 7184 7185 if (udp_tunnel == NULL) 7186 return -EINVAL; 7187 7188 switch (udp_tunnel->prot_type) { 7189 case RTE_TUNNEL_TYPE_VXLAN: 7190 ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port); 7191 break; 7192 7193 case RTE_TUNNEL_TYPE_GENEVE: 7194 case RTE_TUNNEL_TYPE_TEREDO: 7195 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 7196 ret = -EINVAL; 7197 break; 7198 7199 default: 7200 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7201 ret = -EINVAL; 7202 break; 7203 } 7204 7205 return ret; 7206 } 7207 7208 /* Remove UDP tunneling port */ 7209 static int 7210 ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, 7211 struct rte_eth_udp_tunnel *udp_tunnel) 7212 { 7213 int ret = 0; 7214 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7215 7216 if (hw->mac.type != ixgbe_mac_X550 && 7217 hw->mac.type != ixgbe_mac_X550EM_x && 7218 hw->mac.type != ixgbe_mac_X550EM_a) { 7219 return -ENOTSUP; 7220 } 7221 7222 if (udp_tunnel == NULL) 7223 return -EINVAL; 7224 7225 switch (udp_tunnel->prot_type) { 7226 case RTE_TUNNEL_TYPE_VXLAN: 7227 ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port); 7228 break; 7229 case RTE_TUNNEL_TYPE_GENEVE: 7230 case RTE_TUNNEL_TYPE_TEREDO: 7231 PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); 7232 ret = -EINVAL; 7233 break; 7234 default: 7235 PMD_DRV_LOG(ERR, "Invalid tunnel type"); 7236 ret = -EINVAL; 7237 break; 7238 } 7239 7240 return ret; 7241 } 7242 7243 /* ixgbevf_update_xcast_mode - Update Multicast mode 7244 * @hw: pointer to the HW structure 7245 * @netdev: pointer to net device structure 7246 * @xcast_mode: new multicast mode 7247 * 7248 * Updates the Multicast Mode of VF. 
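 *
 * Only mailbox API version 1.2 (ixgbe_mbox_api_12) supports this
 * request; on older API versions -EOPNOTSUPP is returned. The VF
 * allmulticast enable/disable callbacks below use it with
 * IXGBEVF_XCAST_MODE_ALLMULTI and IXGBEVF_XCAST_MODE_NONE.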
7249 */ 7250 static int ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, 7251 int xcast_mode) 7252 { 7253 struct ixgbe_mbx_info *mbx = &hw->mbx; 7254 u32 msgbuf[2]; 7255 s32 err; 7256 7257 switch (hw->api_version) { 7258 case ixgbe_mbox_api_12: 7259 break; 7260 default: 7261 return -EOPNOTSUPP; 7262 } 7263 7264 msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE; 7265 msgbuf[1] = xcast_mode; 7266 7267 err = mbx->ops.write_posted(hw, msgbuf, 2, 0); 7268 if (err) 7269 return err; 7270 7271 err = mbx->ops.read_posted(hw, msgbuf, 2, 0); 7272 if (err) 7273 return err; 7274 7275 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; 7276 if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK)) 7277 return -EPERM; 7278 7279 return 0; 7280 } 7281 7282 static void 7283 ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) 7284 { 7285 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7286 7287 ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI); 7288 } 7289 7290 static void 7291 ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) 7292 { 7293 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7294 7295 ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE); 7296 } 7297 7298 static void ixgbevf_mbx_process(struct rte_eth_dev *dev) 7299 { 7300 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7301 u32 in_msg = 0; 7302 7303 if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) 7304 return; 7305 7306 /* PF reset VF event */ 7307 if (in_msg == IXGBE_PF_CONTROL_MSG) 7308 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET); 7309 } 7310 7311 static int 7312 ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) 7313 { 7314 uint32_t eicr; 7315 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7316 struct ixgbe_interrupt *intr = 7317 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 7318 ixgbevf_intr_disable(hw); 7319 7320 /* read-on-clear nic registers here */ 7321 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); 7322 intr->flags = 0; 7323 7324 /* only one misc vector supported - mailbox */ 7325 eicr &= IXGBE_VTEICR_MASK; 7326 if (eicr == IXGBE_MISC_VEC_ID) 7327 intr->flags |= IXGBE_FLAG_MAILBOX; 7328 7329 return 0; 7330 } 7331 7332 static int 7333 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) 7334 { 7335 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 7336 struct ixgbe_interrupt *intr = 7337 IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); 7338 7339 if (intr->flags & IXGBE_FLAG_MAILBOX) { 7340 ixgbevf_mbx_process(dev); 7341 intr->flags &= ~IXGBE_FLAG_MAILBOX; 7342 } 7343 7344 ixgbevf_intr_enable(hw); 7345 7346 return 0; 7347 } 7348 7349 static void 7350 ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, 7351 void *param) 7352 { 7353 struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 7354 7355 ixgbevf_dev_interrupt_get_status(dev); 7356 ixgbevf_dev_interrupt_action(dev); 7357 } 7358 7359 static struct rte_driver rte_ixgbe_driver = { 7360 .type = PMD_PDEV, 7361 .init = rte_ixgbe_pmd_init, 7362 }; 7363 7364 static struct rte_driver rte_ixgbevf_driver = { 7365 .type = PMD_PDEV, 7366 .init = rte_ixgbevf_pmd_init, 7367 }; 7368 7369 PMD_REGISTER_DRIVER(rte_ixgbe_driver, ixgbe); 7370 DRIVER_REGISTER_PCI_TABLE(ixgbe, pci_id_ixgbe_map); 7371 PMD_REGISTER_DRIVER(rte_ixgbevf_driver, ixgbevf); 7372 DRIVER_REGISTER_PCI_TABLE(ixgbevf, pci_id_ixgbevf_map); 7373
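/*
 * A minimal usage sketch (editor's illustration, not part of the driver:
 * port_id and the 4789 port number are placeholder values and error
 * handling is minimal). The VxLAN UDP port and VF all-multicast paths
 * above are reached through the public ethdev API, e.g.:
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *	if (rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel) != 0)
 *		printf("VxLAN port add failed (X550-class MAC required)\n");
 *	rte_eth_allmulticast_enable(port_id);
 */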