/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#ifndef _IXGBE_ETHDEV_H_
#define _IXGBE_ETHDEV_H_

#include <stdint.h>
#include <sys/queue.h>

#include "base/ixgbe_type.h"
#include "base/ixgbe_dcb.h"
#include "base/ixgbe_dcb_82599.h"
#include "base/ixgbe_dcb_82598.h"
#include "ixgbe_bypass.h"
#ifdef RTE_LIB_SECURITY
#include "ixgbe_ipsec.h"
#endif
#include <rte_flow.h>
#include <rte_time.h>
#include <rte_hash.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_tm_driver.h>

/* bit flags for deferred interrupt work, e.g. a pending link update */
#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
#define IXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
#define IXGBE_FLAG_PHY_INTERRUPT    (uint32_t)(1 << 2)
#define IXGBE_FLAG_MACSEC           (uint32_t)(1 << 3)
#define IXGBE_FLAG_NEED_LINK_CONFIG (uint32_t)(1 << 4)

/*
 * Defines that were not part of ixgbe_type.h as they are not used by the
 * FreeBSD driver.
 */
#define IXGBE_ADVTXD_MAC_1588       0x00080000 /* IEEE1588 Timestamp packet */
#define IXGBE_RXD_STAT_TMST         0x10000    /* Timestamped Packet indication */
#define IXGBE_ADVTXD_TUCMD_L4T_RSV  0x00001800 /* L4 Packet TYPE, resvd */
#define IXGBE_RXDADV_ERR_CKSUM_BIT  30
#define IXGBE_RXDADV_ERR_CKSUM_MSK  3
#define IXGBE_ADVTXD_MACLEN_SHIFT   9          /* Bit shift for l2_len */
#define IXGBE_NB_STAT_MAPPING_REGS  32
#define IXGBE_EXTENDED_VLAN         (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */
#define IXGBE_VFTA_SIZE             128
#define IXGBE_VLAN_TAG_SIZE         4
#define IXGBE_HKEY_MAX_INDEX        10
#define IXGBE_MAX_RX_QUEUE_NUM      128
#define IXGBE_MAX_INTR_QUEUE_NUM    15
#define IXGBE_VMDQ_DCB_NB_QUEUES    IXGBE_MAX_RX_QUEUE_NUM
#define IXGBE_DCB_NB_QUEUES         IXGBE_MAX_RX_QUEUE_NUM
#define IXGBE_NONE_MODE_TX_NB_QUEUES 64

#ifndef NBBY
#define NBBY 8 /* number of bits in a byte */
#endif
#define IXGBE_HWSTRIP_BITMAP_SIZE \
	(IXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))

/* EITR Interval is in 2048ns units for 1G and 10G link */
#define IXGBE_EITR_INTERVAL_UNIT_NS 2048
#define IXGBE_EITR_ITR_INT_SHIFT    3
#define IXGBE_EITR_INTERVAL_US(us) \
	(((us) * 1000 / IXGBE_EITR_INTERVAL_UNIT_NS << IXGBE_EITR_ITR_INT_SHIFT) & \
		IXGBE_EITR_ITR_INT_MASK)

#define IXGBE_QUEUE_ITR_INTERVAL_DEFAULT 500 /* 500us */
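/*
 * Worked example (illustrative): with the defaults above, the 500us queue
 * interval maps into the EITR ITR field as
 *
 *	IXGBE_EITR_INTERVAL_US(500)
 *		= ((500 * 1000 / 2048) << 3) & IXGBE_EITR_ITR_INT_MASK
 *		= (244 << 3) & IXGBE_EITR_ITR_INT_MASK
 *		= 0x7A0
 *
 * IXGBE_EITR_ITR_INT_MASK is assumed to be provided by the base code
 * headers included above.
 */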
*/ 75 76 #define IXGBE_RTTBCNRC_RF_INT_MASK_BASE 0x000003FF 77 #define IXGBE_RTTBCNRC_RF_INT_MASK_M \ 78 (IXGBE_RTTBCNRC_RF_INT_MASK_BASE << IXGBE_RTTBCNRC_RF_INT_SHIFT) 79 80 #define IXGBE_MAX_QUEUE_NUM_PER_VF 8 81 82 #define IXGBE_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */ 83 #define IXGBE_SYN_FILTER_QUEUE 0x000000FE /* syn filter queue field */ 84 #define IXGBE_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field shift */ 85 #define IXGBE_SYN_FILTER_SYNQFP 0x80000000 /* syn filter SYNQFP */ 86 87 #define IXGBE_ETQF_UP 0x00070000 /* ethertype filter priority field */ 88 #define IXGBE_ETQF_SHIFT 16 89 #define IXGBE_ETQF_UP_EN 0x00080000 90 #define IXGBE_ETQF_ETHERTYPE 0x0000FFFF /* ethertype filter ethertype field */ 91 #define IXGBE_ETQF_MAX_PRI 7 92 93 #define IXGBE_SDPQF_DSTPORT 0xFFFF0000 /* dst port field */ 94 #define IXGBE_SDPQF_DSTPORT_SHIFT 16 /* dst port field shift */ 95 #define IXGBE_SDPQF_SRCPORT 0x0000FFFF /* src port field */ 96 97 #define IXGBE_L34T_IMIR_SIZE_BP 0x00001000 98 #define IXGBE_L34T_IMIR_RESERVE 0x00080000 /* bit 13 to 19 must be set to 1000000b. */ 99 #define IXGBE_L34T_IMIR_LLI 0x00100000 100 #define IXGBE_L34T_IMIR_QUEUE 0x0FE00000 101 #define IXGBE_L34T_IMIR_QUEUE_SHIFT 21 102 #define IXGBE_5TUPLE_MAX_PRI 7 103 #define IXGBE_5TUPLE_MIN_PRI 1 104 105 /* The overhead from MTU to max frame size. */ 106 #define IXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) 107 108 /* The max frame size with default MTU */ 109 #define IXGBE_ETH_MAX_LEN (RTE_ETHER_MTU + IXGBE_ETH_OVERHEAD) 110 111 /* bit of VXLAN tunnel type | 7 bits of zeros | 8 bits of zeros*/ 112 #define IXGBE_FDIR_VXLAN_TUNNEL_TYPE 0x8000 113 /* bit of NVGRE tunnel type | 7 bits of zeros | 8 bits of zeros*/ 114 #define IXGBE_FDIR_NVGRE_TUNNEL_TYPE 0x0 115 116 #define IXGBE_RSS_OFFLOAD_ALL ( \ 117 ETH_RSS_IPV4 | \ 118 ETH_RSS_NONFRAG_IPV4_TCP | \ 119 ETH_RSS_NONFRAG_IPV4_UDP | \ 120 ETH_RSS_IPV6 | \ 121 ETH_RSS_NONFRAG_IPV6_TCP | \ 122 ETH_RSS_NONFRAG_IPV6_UDP | \ 123 ETH_RSS_IPV6_EX | \ 124 ETH_RSS_IPV6_TCP_EX | \ 125 ETH_RSS_IPV6_UDP_EX) 126 127 #define IXGBE_VF_IRQ_ENABLE_MASK 3 /* vf irq enable mask */ 128 #define IXGBE_VF_MAXMSIVECTOR 1 129 130 #define IXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET 131 #define IXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET 132 133 #define IXGBE_SECTX_MINSECIFG_MASK 0x0000000F 134 135 #define IXGBE_MACSEC_PNTHRSH 0xFFFFFE00 136 137 #define IXGBE_MAX_FDIR_FILTER_NUM (1024 * 32) 138 #define IXGBE_MAX_L2_TN_FILTER_NUM 128 139 140 #define MAC_TYPE_FILTER_SUP_EXT(type) do {\ 141 if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\ 142 return -ENOTSUP;\ 143 } while (0) 144 145 #define MAC_TYPE_FILTER_SUP(type) do {\ 146 if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\ 147 (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\ 148 (type) != ixgbe_mac_X550EM_a)\ 149 return -ENOTSUP;\ 150 } while (0) 151 152 /* Link speed for X550 auto negotiation */ 153 #define IXGBE_LINK_SPEED_X550_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ 154 IXGBE_LINK_SPEED_1GB_FULL | \ 155 IXGBE_LINK_SPEED_2_5GB_FULL | \ 156 IXGBE_LINK_SPEED_5GB_FULL | \ 157 IXGBE_LINK_SPEED_10GB_FULL) 158 159 /* 160 * Information about the fdir mode. 
/*
 * Information about the fdir mode.
 */
struct ixgbe_hw_fdir_mask {
	uint16_t vlan_tci_mask;
	uint32_t src_ipv4_mask;
	uint32_t dst_ipv4_mask;
	uint16_t src_ipv6_mask;
	uint16_t dst_ipv6_mask;
	uint16_t src_port_mask;
	uint16_t dst_port_mask;
	uint16_t flex_bytes_mask;
	uint8_t  mac_addr_byte_mask;
	uint32_t tunnel_id_mask;
	uint8_t  tunnel_type_mask;
};

struct ixgbe_fdir_filter {
	TAILQ_ENTRY(ixgbe_fdir_filter) entries;
	union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter */
	uint32_t fdirflags; /* drop or forward */
	uint32_t fdirhash;  /* hash value for fdir */
	uint8_t  queue;     /* assigned rx queue */
};

/* list of fdir filters */
TAILQ_HEAD(ixgbe_fdir_filter_list, ixgbe_fdir_filter);

struct ixgbe_fdir_rule {
	struct ixgbe_hw_fdir_mask mask;
	union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter */
	bool b_spec; /* If TRUE, ixgbe_fdir, fdirflags, queue have meaning. */
	bool b_mask; /* If TRUE, mask has meaning. */
	enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
	uint32_t fdirflags; /* drop or forward */
	uint32_t soft_id;   /* a unique value for this rule */
	uint8_t  queue;     /* assigned rx queue */
	uint8_t  flex_bytes_offset;
};

struct ixgbe_hw_fdir_info {
	struct ixgbe_hw_fdir_mask mask;
	uint8_t  flex_bytes_offset;
	uint16_t collision;
	uint16_t free;
	uint16_t maxhash;
	uint8_t  maxlen;
	uint64_t add;
	uint64_t remove;
	uint64_t f_add;
	uint64_t f_remove;
	struct ixgbe_fdir_filter_list fdir_list; /* filter list */
	/* store the pointers of the filters, index is the hash value. */
	struct ixgbe_fdir_filter **hash_map;
	struct rte_hash *hash_handle; /* cuckoo hash handler */
	bool mask_added; /* If the mask was already got from a consistent filter */
};

struct ixgbe_rte_flow_rss_conf {
	struct rte_flow_action_rss conf; /**< RSS parameters. */
	uint8_t key[IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /**< Hash key. */
	uint16_t queue[IXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
};

/* structure for interrupt relative data */
struct ixgbe_interrupt {
	uint32_t flags;
	uint32_t mask;
	/* to save the original mask during delayed handler */
	uint32_t mask_original;
};

struct ixgbe_stat_mapping_registers {
	uint32_t tqsm[IXGBE_NB_STAT_MAPPING_REGS];
	uint32_t rqsmr[IXGBE_NB_STAT_MAPPING_REGS];
};

struct ixgbe_vfta {
	uint32_t vfta[IXGBE_VFTA_SIZE];
};

struct ixgbe_hwstrip {
	uint32_t bitmap[IXGBE_HWSTRIP_BITMAP_SIZE];
};
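/*
 * Usage sketch (illustrative): the hwstrip bitmap above keeps one bit per
 * RX queue, 32 queues per uint32_t word. Marking and testing VLAN strip
 * for queue "q" could look like:
 *
 *	uint32_t idx = q / (sizeof(uint32_t) * NBBY);
 *	uint32_t bit = q % (sizeof(uint32_t) * NBBY);
 *
 *	hwstrip->bitmap[idx] |= 1 << bit;
 *	on = !!(hwstrip->bitmap[idx] & (1 << bit));
 */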
/*
 * VF data which is used by the PF host only
 */
#define IXGBE_MAX_VF_MC_ENTRIES 30
#define IXGBE_MAX_UTA           128

struct ixgbe_uta_info {
	uint8_t  uc_filter_type;
	uint16_t uta_in_use;
	uint32_t uta_shadow[IXGBE_MAX_UTA];
};

struct ixgbe_vf_info {
	uint8_t  vf_mac_addresses[RTE_ETHER_ADDR_LEN];
	uint16_t vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
	uint16_t num_vf_mc_hashes;
	uint16_t default_vf_vlan_id;
	uint16_t vlans_enabled;
	bool clear_to_send;
	uint16_t tx_rate[IXGBE_MAX_QUEUE_NUM_PER_VF];
	uint16_t vlan_count;
	uint8_t  spoofchk_enabled;
	uint8_t  api_version;
	uint16_t switch_domain_id;
	uint16_t xcast_mode;
	uint16_t mac_count;
};

/*
 * Possible l4type of 5tuple filters.
 */
enum ixgbe_5tuple_protocol {
	IXGBE_FILTER_PROTOCOL_TCP = 0,
	IXGBE_FILTER_PROTOCOL_UDP,
	IXGBE_FILTER_PROTOCOL_SCTP,
	IXGBE_FILTER_PROTOCOL_NONE,
};

TAILQ_HEAD(ixgbe_5tuple_filter_list, ixgbe_5tuple_filter);

struct ixgbe_5tuple_filter_info {
	uint32_t dst_ip;
	uint32_t src_ip;
	uint16_t dst_port;
	uint16_t src_port;
	enum ixgbe_5tuple_protocol proto; /* l4 protocol. */
	uint8_t priority; /* seven levels (001b-111b), 111b is highest,
			   * used when more than one filter matches.
			   */
	uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
		src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
		dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
		src_port_mask:1, /* if mask is 1b, do not compare src port. */
		proto_mask:1;    /* if mask is 1b, do not compare protocol. */
};

/* 5tuple filter structure */
struct ixgbe_5tuple_filter {
	TAILQ_ENTRY(ixgbe_5tuple_filter) entries;
	uint16_t index; /* the index of 5tuple filter */
	struct ixgbe_5tuple_filter_info filter_info;
	uint16_t queue; /* rx queue assigned to */
};

#define IXGBE_5TUPLE_ARRAY_SIZE \
	(RTE_ALIGN(IXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
	 (sizeof(uint32_t) * NBBY))

struct ixgbe_ethertype_filter {
	uint16_t ethertype;
	uint32_t etqf;
	uint32_t etqs;
	/**
	 * If this filter is added by configuration,
	 * it should not be removed.
	 */
	bool conf;
};

/*
 * Structure to store filters' info.
 */
struct ixgbe_filter_info {
	uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
	/* store used ethertype filters */
	struct ixgbe_ethertype_filter ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
	/* Bit mask for every used 5tuple filter */
	uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE];
	struct ixgbe_5tuple_filter_list fivetuple_list;
	/* store the SYN filter info */
	uint32_t syn_info;
	/* store the rss filter info */
	struct ixgbe_rte_flow_rss_conf rss_info;
};
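/*
 * Usage sketch (illustrative, not the driver's actual allocator): a free
 * 5tuple slot can be found by scanning fivetuple_mask word by word,
 * mirroring the hwstrip bitmap layout:
 *
 *	int i, idx, shift;
 *
 *	for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
 *		idx = i / (sizeof(uint32_t) * NBBY);
 *		shift = i % (sizeof(uint32_t) * NBBY);
 *		if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
 *			filter_info->fivetuple_mask[idx] |= 1 << shift;
 *			break;
 *		}
 *	}
 */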
struct ixgbe_l2_tn_key {
	enum rte_eth_tunnel_type l2_tn_type;
	uint32_t tn_id;
};

struct ixgbe_l2_tn_filter {
	TAILQ_ENTRY(ixgbe_l2_tn_filter) entries;
	struct ixgbe_l2_tn_key key;
	uint32_t pool;
};

TAILQ_HEAD(ixgbe_l2_tn_filter_list, ixgbe_l2_tn_filter);

struct ixgbe_l2_tn_info {
	struct ixgbe_l2_tn_filter_list l2_tn_list;
	struct ixgbe_l2_tn_filter **hash_map;
	struct rte_hash *hash_handle;
	bool e_tag_en;     /* e-tag enabled */
	bool e_tag_fwd_en; /* e-tag based forwarding enabled */
	uint16_t e_tag_ether_type; /* ether type for e-tag */
};

struct rte_flow {
	enum rte_filter_type filter_type;
	void *rule;
};

struct ixgbe_macsec_setting {
	uint8_t offload_en;
	uint8_t encrypt_en;
	uint8_t replayprotect_en;
};

/*
 * Statistics counters collected by MACsec
 */
struct ixgbe_macsec_stats {
	/* TX port statistics */
	uint64_t out_pkts_untagged;
	uint64_t out_pkts_encrypted;
	uint64_t out_pkts_protected;
	uint64_t out_octets_encrypted;
	uint64_t out_octets_protected;

	/* RX port statistics */
	uint64_t in_pkts_untagged;
	uint64_t in_pkts_badtag;
	uint64_t in_pkts_nosci;
	uint64_t in_pkts_unknownsci;
	uint64_t in_octets_decrypted;
	uint64_t in_octets_validated;

	/* RX SC statistics */
	uint64_t in_pkts_unchecked;
	uint64_t in_pkts_delayed;
	uint64_t in_pkts_late;

	/* RX SA statistics */
	uint64_t in_pkts_ok;
	uint64_t in_pkts_invalid;
	uint64_t in_pkts_notvalid;
	uint64_t in_pkts_unusedsa;
	uint64_t in_pkts_notusingsa;
};

/* The configuration of bandwidth */
struct ixgbe_bw_conf {
	uint8_t tc_num; /* Number of TCs. */
};

/* Struct to store Traffic Manager shaper profile. */
struct ixgbe_tm_shaper_profile {
	TAILQ_ENTRY(ixgbe_tm_shaper_profile) node;
	uint32_t shaper_profile_id;
	uint32_t reference_count;
	struct rte_tm_shaper_params profile;
};

TAILQ_HEAD(ixgbe_shaper_profile_list, ixgbe_tm_shaper_profile);

/* node type of Traffic Manager */
enum ixgbe_tm_node_type {
	IXGBE_TM_NODE_TYPE_PORT,
	IXGBE_TM_NODE_TYPE_TC,
	IXGBE_TM_NODE_TYPE_QUEUE,
	IXGBE_TM_NODE_TYPE_MAX,
};

/* Struct to store Traffic Manager node configuration. */
struct ixgbe_tm_node {
	TAILQ_ENTRY(ixgbe_tm_node) node;
	uint32_t id;
	uint32_t priority;
	uint32_t weight;
	uint32_t reference_count;
	uint16_t no;
	struct ixgbe_tm_node *parent;
	struct ixgbe_tm_shaper_profile *shaper_profile;
	struct rte_tm_node_params params;
};

TAILQ_HEAD(ixgbe_tm_node_list, ixgbe_tm_node);

/* The configuration of Traffic Manager */
struct ixgbe_tm_conf {
	struct ixgbe_shaper_profile_list shaper_profile_list;
	struct ixgbe_tm_node *root; /* root node - port */
	struct ixgbe_tm_node_list tc_list; /* node list for all the TCs */
	struct ixgbe_tm_node_list queue_list; /* node list for all the queues */
	/**
	 * The number of added TC nodes.
	 * It should be no more than the TC number of this port.
	 */
	uint32_t nb_tc_node;
	/**
	 * The number of added queue nodes.
	 * It should be no more than the queue number of this port.
	 */
	uint32_t nb_queue_node;
	/**
	 * This flag indicates whether the application may still change the
	 * TM node configuration. When it is true, the configuration has
	 * been applied to HW and the application should not change it.
	 * As on-the-fly configuration is not supported, the application
	 * should call the hierarchy_commit API when starting the port to
	 * set this flag to true, and the flag should be set back to false
	 * when the port is stopped.
	 */
	bool committed;
};
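/*
 * Usage sketch (illustrative): shaper profiles live on a TAILQ, so lookup
 * by ID is a linear walk; a minimal sketch assuming a populated "tm_conf"
 * pointer and a "shaper_profile_id" to match:
 *
 *	struct ixgbe_tm_shaper_profile *profile;
 *
 *	TAILQ_FOREACH(profile, &tm_conf->shaper_profile_list, node) {
 *		if (profile->shaper_profile_id == shaper_profile_id)
 *			return profile;
 *	}
 *	return NULL;
 */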
/*
 * Structure to store private data for each driver instance (for each port).
 */
struct ixgbe_adapter {
	struct ixgbe_hw             hw;
	struct ixgbe_hw_stats       stats;
	struct ixgbe_macsec_stats   macsec_stats;
	struct ixgbe_macsec_setting macsec_setting;
	struct ixgbe_hw_fdir_info   fdir;
	struct ixgbe_interrupt      intr;
	struct ixgbe_stat_mapping_registers stat_mappings;
	struct ixgbe_vfta           shadow_vfta;
	struct ixgbe_hwstrip        hwstrip;
	struct ixgbe_dcb_config     dcb_config;
	struct ixgbe_vf_info        *vfdata;
	struct ixgbe_uta_info       uta_info;
#ifdef RTE_LIBRTE_IXGBE_BYPASS
	struct ixgbe_bypass_info    bps;
#endif /* RTE_LIBRTE_IXGBE_BYPASS */
	struct ixgbe_filter_info    filter;
	struct ixgbe_l2_tn_info     l2_tn;
	struct ixgbe_bw_conf        bw_conf;
#ifdef RTE_LIB_SECURITY
	struct ixgbe_ipsec          ipsec;
#endif
	bool rx_bulk_alloc_allowed;
	bool rx_vec_allowed;
	struct rte_timecounter      systime_tc;
	struct rte_timecounter      rx_tstamp_tc;
	struct rte_timecounter      tx_tstamp_tc;
	struct ixgbe_tm_conf        tm_conf;

	/* For RSS reta table update */
	uint8_t rss_reta_updated;

	/* Used for VF link sync with PF's physical and logical (by checking
	 * mailbox status) link status.
	 */
	uint8_t pflink_fullchk;
	uint8_t mac_ctrl_frame_fwd;
	rte_atomic32_t link_thread_running;
	pthread_t link_thread_tid;
};

struct ixgbe_vf_representor {
	uint16_t vf_id;
	uint16_t switch_domain_id;
	struct rte_eth_dev *pf_ethdev;
};

int ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);
int ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev);

#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
	(&((struct ixgbe_adapter *)adapter)->hw)

#define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
	(&((struct ixgbe_adapter *)adapter)->stats)

#define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \
	(&((struct ixgbe_adapter *)adapter)->macsec_stats)

#define IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(adapter) \
	(&((struct ixgbe_adapter *)adapter)->macsec_setting)

#define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
	(&((struct ixgbe_adapter *)adapter)->intr)

#define IXGBE_DEV_PRIVATE_TO_FDIR_INFO(adapter) \
	(&((struct ixgbe_adapter *)adapter)->fdir)

#define IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(adapter) \
	(&((struct ixgbe_adapter *)adapter)->stat_mappings)

#define IXGBE_DEV_PRIVATE_TO_VFTA(adapter) \
	(&((struct ixgbe_adapter *)adapter)->shadow_vfta)

#define IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(adapter) \
	(&((struct ixgbe_adapter *)adapter)->hwstrip)

#define IXGBE_DEV_PRIVATE_TO_DCB_CFG(adapter) \
	(&((struct ixgbe_adapter *)adapter)->dcb_config)

#define IXGBE_DEV_PRIVATE_TO_P_VFDATA(adapter) \
	(&((struct ixgbe_adapter *)adapter)->vfdata)

#define IXGBE_DEV_PRIVATE_TO_PFDATA(adapter) \
	(&((struct ixgbe_adapter *)adapter)->mr_data)

#define IXGBE_DEV_PRIVATE_TO_UTA(adapter) \
	(&((struct ixgbe_adapter *)adapter)->uta_info)

#define IXGBE_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
	(&((struct ixgbe_adapter *)adapter)->filter)

#define IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(adapter) \
	(&((struct ixgbe_adapter *)adapter)->l2_tn)

#define IXGBE_DEV_PRIVATE_TO_BW_CONF(adapter) \
	(&((struct ixgbe_adapter *)adapter)->bw_conf)

#define IXGBE_DEV_PRIVATE_TO_TM_CONF(adapter) \
	(&((struct ixgbe_adapter *)adapter)->tm_conf)

#define IXGBE_DEV_PRIVATE_TO_IPSEC(adapter)\
	(&((struct ixgbe_adapter *)adapter)->ipsec)
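/*
 * Usage sketch (illustrative): each accessor macro above casts the generic
 * dev_private pointer to struct ixgbe_adapter and returns the address of
 * one member, e.g. inside a dev op:
 *
 *	struct ixgbe_hw *hw =
 *		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 *	struct ixgbe_interrupt *intr =
 *		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 */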
/*
 * RX/TX function prototypes
 */
void ixgbe_dev_clear_queues(struct rte_eth_dev *dev);

void ixgbe_dev_free_queues(struct rte_eth_dev *dev);

void ixgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

void ixgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

int ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);

int ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);

uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
		uint16_t rx_queue_id);

int ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);

int ixgbe_dev_rx_init(struct rte_eth_dev *dev);

void ixgbe_dev_tx_init(struct rte_eth_dev *dev);

int ixgbe_dev_rxtx_start(struct rte_eth_dev *dev);

int ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo);

void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo);

int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);

void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);

void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);

uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);

uint16_t ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);

uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

uint16_t ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);

int ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);

uint16_t ixgbe_reta_size_get(enum ixgbe_mac_type mac_type);

uint32_t ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx);

uint32_t ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type);

uint32_t ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i);

bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type);

int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
		struct rte_eth_ntuple_filter *filter,
		bool add);
int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
		struct rte_eth_ethertype_filter *filter,
		bool add);
int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
		struct rte_eth_syn_filter *filter,
		bool add);
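/*
 * Usage sketch (illustrative): a typical bring-up order over the
 * prototypes above, with error handling omitted:
 *
 *	ixgbe_dev_rx_queue_setup(dev, 0, nb_rxd, socket_id, &rxconf, mp);
 *	ixgbe_dev_tx_queue_setup(dev, 0, nb_txd, socket_id, &txconf);
 *	ixgbe_dev_rx_init(dev);
 *	ixgbe_dev_tx_init(dev);
 *	ixgbe_dev_rxtx_start(dev);
 *
 * In practice these are reached through the rte_ethdev ops table rather
 * than called directly by applications.
 */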
/**
 * l2 tunnel configuration.
 */
struct ixgbe_l2_tunnel_conf {
	enum rte_eth_tunnel_type l2_tunnel_type;
	uint16_t ether_type; /* ether type in l2 header */
	uint32_t tunnel_id;  /* port tag id for e-tag */
	uint16_t vf_id;      /* VF id for tag insertion */
	uint32_t pool;       /* destination pool for tag based forwarding */
};

int
ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
			       struct ixgbe_l2_tunnel_conf *l2_tunnel,
			       bool restore);
int
ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
			       struct ixgbe_l2_tunnel_conf *l2_tunnel);
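/*
 * Usage sketch (illustrative): adding an E-tag forwarding entry with the
 * structure above; the field values are placeholders, and the
 * RTE_L2_TUNNEL_TYPE_E_TAG enumerator is assumed to come from ethdev:
 *
 *	struct ixgbe_l2_tunnel_conf conf = {
 *		.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *		.tunnel_id = 0x123,
 *		.pool = 1,
 *	};
 *
 *	ret = ixgbe_dev_l2_tunnel_filter_add(dev, &conf, 0);
 */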
void ixgbe_filterlist_init(void);
void ixgbe_filterlist_flush(void);
/*
 * Flow director function prototypes
 */
int ixgbe_fdir_configure(struct rte_eth_dev *dev);
int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
int ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
		uint16_t offset);
int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
		struct ixgbe_fdir_rule *rule,
		bool del, bool update);
void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
		struct rte_eth_fdir_info *fdir_info);
void ixgbe_fdir_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_fdir_stats *fdir_stats);

void ixgbe_configure_dcb(struct rte_eth_dev *dev);

int
ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
		int wait_to_complete, int vf);

/*
 * misc function prototypes
 */
void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);

void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);

void ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev);

int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);

void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev);

void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);

int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev);

uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);

void ixgbe_fdir_filter_restore(struct rte_eth_dev *dev);
int ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);

extern const struct rte_flow_ops ixgbe_flow_ops;

void ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
void ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
void ixgbe_clear_syn_filter(struct rte_eth_dev *dev);
int ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);

int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw);

int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw);

int ixgbe_vt_check(struct ixgbe_hw *hw);
int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
		uint16_t tx_rate, uint64_t q_msk);
bool is_ixgbe_supported(struct rte_eth_dev *dev);
int ixgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
		uint16_t tx_rate);
int ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
		const struct rte_flow_action_rss *in);
int ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
		const struct rte_flow_action_rss *with);
int ixgbe_config_rss_filter(struct rte_eth_dev *dev,
		struct ixgbe_rte_flow_rss_conf *conf, bool add);

void ixgbe_dev_macsec_register_enable(struct rte_eth_dev *dev,
		struct ixgbe_macsec_setting *macsec_setting);

void ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev);

void ixgbe_dev_macsec_setting_save(struct rte_eth_dev *dev,
		struct ixgbe_macsec_setting *macsec_setting);

void ixgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev);

static inline int
ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
			      uint16_t ethertype)
{
	int i;

	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_filters[i].ethertype == ethertype &&
		    (filter_info->ethertype_mask & (1 << i)))
			return i;
	}
	return -1;
}

static inline int
ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
			      struct ixgbe_ethertype_filter *ethertype_filter)
{
	int i;

	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
		if (!(filter_info->ethertype_mask & (1 << i))) {
			filter_info->ethertype_mask |= 1 << i;
			filter_info->ethertype_filters[i].ethertype =
				ethertype_filter->ethertype;
			filter_info->ethertype_filters[i].etqf =
				ethertype_filter->etqf;
			filter_info->ethertype_filters[i].etqs =
				ethertype_filter->etqs;
			filter_info->ethertype_filters[i].conf =
				ethertype_filter->conf;
			return i;
		}
	}
	return -1;
}

static inline int
ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
			      uint8_t idx)
{
	if (idx >= IXGBE_MAX_ETQF_FILTERS)
		return -1;
	filter_info->ethertype_mask &= ~(1 << idx);
	filter_info->ethertype_filters[idx].ethertype = 0;
	filter_info->ethertype_filters[idx].etqf = 0;
	filter_info->ethertype_filters[idx].etqs = 0;
	filter_info->ethertype_filters[idx].conf = FALSE;
	return idx;
}

#endif /* _IXGBE_ETHDEV_H_ */