/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#ifndef _IXGBE_ETHDEV_H_
#define _IXGBE_ETHDEV_H_

#include <stdint.h>
#include <sys/queue.h>

#include "base/ixgbe_type.h"
#include "base/ixgbe_dcb.h"
#include "base/ixgbe_dcb_82599.h"
#include "base/ixgbe_dcb_82598.h"
#include "ixgbe_bypass.h"
#ifdef RTE_LIB_SECURITY
#include "ixgbe_ipsec.h"
#endif
#include <rte_flow.h>
#include <rte_time.h>
#include <rte_hash.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_tm_driver.h>

/* Interrupt flag bits: link update needed, mailbox, PHY, MACsec, link config. */
#define IXGBE_FLAG_NEED_LINK_UPDATE	(uint32_t)(1 << 0)
#define IXGBE_FLAG_MAILBOX		(uint32_t)(1 << 1)
#define IXGBE_FLAG_PHY_INTERRUPT	(uint32_t)(1 << 2)
#define IXGBE_FLAG_MACSEC		(uint32_t)(1 << 3)
#define IXGBE_FLAG_NEED_LINK_CONFIG	(uint32_t)(1 << 4)

/*
 * Defines that were not part of ixgbe_type.h as they are not used by the
 * FreeBSD driver.
 */
#define IXGBE_ADVTXD_MAC_1588		0x00080000 /* IEEE1588 Timestamp packet */
#define IXGBE_RXD_STAT_TMST		0x10000    /* Timestamped Packet indication */
#define IXGBE_ADVTXD_TUCMD_L4T_RSV	0x00001800 /* L4 Packet TYPE, resvd */
#define IXGBE_RXDADV_ERR_CKSUM_BIT	30
#define IXGBE_RXDADV_ERR_CKSUM_MSK	3
#define IXGBE_ADVTXD_MACLEN_SHIFT	9	/* Bit shift for l2_len */
#define IXGBE_NB_STAT_MAPPING_REGS	32
#define IXGBE_EXTENDED_VLAN		(uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */
#define IXGBE_VFTA_SIZE			128
#define IXGBE_HKEY_MAX_INDEX		10
#define IXGBE_MAX_RX_QUEUE_NUM		128
#define IXGBE_MAX_INTR_QUEUE_NUM	15
#define IXGBE_VMDQ_DCB_NB_QUEUES	IXGBE_MAX_RX_QUEUE_NUM
#define IXGBE_DCB_NB_QUEUES		IXGBE_MAX_RX_QUEUE_NUM
#define IXGBE_NONE_MODE_TX_NB_QUEUES	64

#ifndef NBBY
#define NBBY	8	/* number of bits in a byte */
#endif
#define IXGBE_HWSTRIP_BITMAP_SIZE \
	(IXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))

/* EITR Interval is in 2048ns units for 1G and 10G link */
#define IXGBE_EITR_INTERVAL_UNIT_NS	2048
#define IXGBE_EITR_ITR_INT_SHIFT	3
#define IXGBE_EITR_INTERVAL_US(us) \
	(((us) * 1000 / IXGBE_EITR_INTERVAL_UNIT_NS << IXGBE_EITR_ITR_INT_SHIFT) & \
		IXGBE_EITR_ITR_INT_MASK)

#define IXGBE_QUEUE_ITR_INTERVAL_DEFAULT	500 /* 500us */
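/*
 * Worked example (informational only, assuming the 0xFF8 ITR-interval field
 * mask from base/ixgbe_type.h): for the 500us default above,
 *
 *	IXGBE_EITR_INTERVAL_US(500)
 *		= ((500 * 1000 / 2048) << 3) & IXGBE_EITR_ITR_INT_MASK
 *		= (244 << 3) & 0xFF8 = 0x7A0
 *
 * which is the value written into the EITR interval field.
 */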
/* Loopback operation modes */
#define IXGBE_LPBK_NONE		0x0 /* Default value. Loopback is disabled. */
#define IXGBE_LPBK_TX_RX	0x1 /* Tx->Rx loopback operation is enabled. */
/* X540-X550 specific loopback operations */
#define IXGBE_MII_AUTONEG_ENABLE	0x1000 /* Auto-negotiation enable (default = 1) */

#define IXGBE_MAX_JUMBO_FRAME_SIZE	0x2600 /* Maximum Jumbo frame size. */

#define IXGBE_RTTBCNRC_RF_INT_MASK_BASE	0x000003FF
#define IXGBE_RTTBCNRC_RF_INT_MASK_M \
	(IXGBE_RTTBCNRC_RF_INT_MASK_BASE << IXGBE_RTTBCNRC_RF_INT_SHIFT)

#define IXGBE_MAX_QUEUE_NUM_PER_VF	8

#define IXGBE_SYN_FILTER_ENABLE		0x00000001 /* syn filter enable field */
#define IXGBE_SYN_FILTER_QUEUE		0x000000FE /* syn filter queue field */
#define IXGBE_SYN_FILTER_QUEUE_SHIFT	1	   /* syn filter queue field shift */
#define IXGBE_SYN_FILTER_SYNQFP		0x80000000 /* syn filter SYNQFP */

#define IXGBE_ETQF_UP			0x00070000 /* ethertype filter priority field */
#define IXGBE_ETQF_SHIFT		16
#define IXGBE_ETQF_UP_EN		0x00080000
#define IXGBE_ETQF_ETHERTYPE		0x0000FFFF /* ethertype filter ethertype field */
#define IXGBE_ETQF_MAX_PRI		7

#define IXGBE_SDPQF_DSTPORT		0xFFFF0000 /* dst port field */
#define IXGBE_SDPQF_DSTPORT_SHIFT	16	   /* dst port field shift */
#define IXGBE_SDPQF_SRCPORT		0x0000FFFF /* src port field */

#define IXGBE_L34T_IMIR_SIZE_BP		0x00001000
#define IXGBE_L34T_IMIR_RESERVE		0x00080000 /* bit 13 to 19 must be set to 1000000b. */
#define IXGBE_L34T_IMIR_LLI		0x00100000
#define IXGBE_L34T_IMIR_QUEUE		0x0FE00000
#define IXGBE_L34T_IMIR_QUEUE_SHIFT	21
#define IXGBE_5TUPLE_MAX_PRI		7
#define IXGBE_5TUPLE_MIN_PRI		1

/* The overhead from MTU to max frame size. */
#define IXGBE_ETH_OVERHEAD	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)

/* The max frame size with default MTU */
#define IXGBE_ETH_MAX_LEN	(RTE_ETHER_MTU + IXGBE_ETH_OVERHEAD)

/* bit of VXLAN tunnel type | 7 bits of zeros | 8 bits of zeros */
#define IXGBE_FDIR_VXLAN_TUNNEL_TYPE	0x8000
/* bit of NVGRE tunnel type | 7 bits of zeros | 8 bits of zeros */
#define IXGBE_FDIR_NVGRE_TUNNEL_TYPE	0x0

#define IXGBE_RSS_OFFLOAD_ALL ( \
	RTE_ETH_RSS_IPV4 | \
	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
	RTE_ETH_RSS_IPV6 | \
	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
	RTE_ETH_RSS_IPV6_EX | \
	RTE_ETH_RSS_IPV6_TCP_EX | \
	RTE_ETH_RSS_IPV6_UDP_EX)

#define IXGBE_VF_IRQ_ENABLE_MASK	3 /* vf irq enable mask */
#define IXGBE_VF_MAXMSIVECTOR		1

#define IXGBE_MISC_VEC_ID		RTE_INTR_VEC_ZERO_OFFSET
#define IXGBE_RX_VEC_START		RTE_INTR_VEC_RXTX_OFFSET

#define IXGBE_SECTX_MINSECIFG_MASK	0x0000000F

#define IXGBE_MACSEC_PNTHRSH		0xFFFFFE00

#define IXGBE_MAX_FDIR_FILTER_NUM	(1024 * 32)
#define IXGBE_MAX_L2_TN_FILTER_NUM	128

#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
	if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
		return -ENOTSUP;\
} while (0)

#define MAC_TYPE_FILTER_SUP(type) do {\
	if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
		(type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\
		(type) != ixgbe_mac_X550EM_a)\
		return -ENOTSUP;\
} while (0)
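/*
 * Usage sketch (hypothetical caller): these macros return -ENOTSUP from the
 * enclosing function when the MAC type lacks the filter block:
 *
 *	static int example_filter_check(struct ixgbe_hw *hw)
 *	{
 *		MAC_TYPE_FILTER_SUP(hw->mac.type);
 *		return 0;
 *	}
 */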
/* Link speed for X550 auto negotiation */
#define IXGBE_LINK_SPEED_X550_AUTONEG	(IXGBE_LINK_SPEED_100_FULL | \
					 IXGBE_LINK_SPEED_1GB_FULL | \
					 IXGBE_LINK_SPEED_2_5GB_FULL | \
					 IXGBE_LINK_SPEED_5GB_FULL | \
					 IXGBE_LINK_SPEED_10GB_FULL)

/*
 * Information about the fdir mode.
 */
struct ixgbe_hw_fdir_mask {
	uint16_t vlan_tci_mask;
	uint32_t src_ipv4_mask;
	uint32_t dst_ipv4_mask;
	uint16_t src_ipv6_mask;
	uint16_t dst_ipv6_mask;
	uint16_t src_port_mask;
	uint16_t dst_port_mask;
	uint16_t flex_bytes_mask;
	uint8_t  mac_addr_byte_mask;
	uint32_t tunnel_id_mask;
	uint8_t  tunnel_type_mask;
};

struct ixgbe_fdir_filter {
	TAILQ_ENTRY(ixgbe_fdir_filter) entries;
	union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter */
	uint32_t fdirflags; /* drop or forward */
	uint32_t fdirhash; /* hash value for fdir */
	uint8_t queue; /* assigned rx queue */
};

/* list of fdir filters */
TAILQ_HEAD(ixgbe_fdir_filter_list, ixgbe_fdir_filter);

struct ixgbe_fdir_rule {
	struct ixgbe_hw_fdir_mask mask;
	union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter */
	bool b_spec; /* If TRUE, ixgbe_fdir, fdirflags, queue have meaning. */
	bool b_mask; /* If TRUE, mask has meaning. */
	enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
	uint32_t fdirflags; /* drop or forward */
	uint32_t soft_id; /* a unique value for this rule */
	uint8_t queue; /* assigned rx queue */
	uint8_t flex_bytes_offset;
};

struct ixgbe_hw_fdir_info {
	struct ixgbe_hw_fdir_mask mask;
	uint8_t  flex_bytes_offset;
	uint16_t collision;
	uint16_t free;
	uint16_t maxhash;
	uint8_t  maxlen;
	uint64_t add;
	uint64_t remove;
	uint64_t f_add;
	uint64_t f_remove;
	struct ixgbe_fdir_filter_list fdir_list; /* filter list */
	/* store the pointers of the filters, index is the hash value. */
	struct ixgbe_fdir_filter **hash_map;
	struct rte_hash *hash_handle; /* cuckoo hash handle */
	bool mask_added; /* If the mask was already taken from a consistent filter */
};

struct ixgbe_rte_flow_rss_conf {
	struct rte_flow_action_rss conf; /**< RSS parameters. */
	uint8_t key[IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
	uint16_t queue[IXGBE_MAX_RX_QUEUE_NUM]; /**< Queue indices to use. */
};

/* structure for interrupt relative data */
struct ixgbe_interrupt {
	uint32_t flags;
	uint32_t mask;
	/* to save the original mask during the delayed handler */
	uint32_t mask_original;
};

struct ixgbe_stat_mapping_registers {
	uint32_t tqsm[IXGBE_NB_STAT_MAPPING_REGS];
	uint32_t rqsmr[IXGBE_NB_STAT_MAPPING_REGS];
};

struct ixgbe_vfta {
	uint32_t vfta[IXGBE_VFTA_SIZE];
};

struct ixgbe_hwstrip {
	uint32_t bitmap[IXGBE_HWSTRIP_BITMAP_SIZE];
};
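/*
 * Worked example (informational only): both shadow tables are plain bit
 * arrays. For a 12-bit VLAN id v, the shadow bit lives at vfta[v >> 5],
 * bit (v & 0x1F), which is why IXGBE_VFTA_SIZE is 4096 / 32 = 128; for an
 * Rx queue q, the hardware-strip bit lives at bitmap[q / 32], bit (q % 32),
 * matching IXGBE_HWSTRIP_BITMAP_SIZE.
 */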
/*
 * VF data used by the PF host only
 */
#define IXGBE_MAX_VF_MC_ENTRIES		30
#define IXGBE_MAX_UTA			128

struct ixgbe_uta_info {
	uint8_t  uc_filter_type;
	uint16_t uta_in_use;
	uint32_t uta_shadow[IXGBE_MAX_UTA];
};

struct ixgbe_vf_info {
	uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];
	uint16_t vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
	uint16_t num_vf_mc_hashes;
	uint16_t default_vf_vlan_id;
	uint16_t vlans_enabled;
	bool clear_to_send;
	uint16_t tx_rate[IXGBE_MAX_QUEUE_NUM_PER_VF];
	uint16_t vlan_count;
	uint8_t spoofchk_enabled;
	uint8_t api_version;
	uint16_t switch_domain_id;
	uint16_t xcast_mode;
	uint16_t mac_count;
};

/*
 * Possible l4type of 5tuple filters.
 */
enum ixgbe_5tuple_protocol {
	IXGBE_FILTER_PROTOCOL_TCP = 0,
	IXGBE_FILTER_PROTOCOL_UDP,
	IXGBE_FILTER_PROTOCOL_SCTP,
	IXGBE_FILTER_PROTOCOL_NONE,
};

TAILQ_HEAD(ixgbe_5tuple_filter_list, ixgbe_5tuple_filter);

struct ixgbe_5tuple_filter_info {
	uint32_t dst_ip;
	uint32_t src_ip;
	uint16_t dst_port;
	uint16_t src_port;
	enum ixgbe_5tuple_protocol proto; /* l4 protocol. */
	uint8_t priority;	 /* seven levels (001b-111b), 111b is highest,
				  * used when more than one filter matches.
				  */
	uint8_t dst_ip_mask:1,	 /* if mask is 1b, do not compare dst ip. */
		src_ip_mask:1,	 /* if mask is 1b, do not compare src ip. */
		dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
		src_port_mask:1, /* if mask is 1b, do not compare src port. */
		proto_mask:1;	 /* if mask is 1b, do not compare protocol. */
};

/* 5tuple filter structure */
struct ixgbe_5tuple_filter {
	TAILQ_ENTRY(ixgbe_5tuple_filter) entries;
	uint16_t index; /* the index of 5tuple filter */
	struct ixgbe_5tuple_filter_info filter_info;
	uint16_t queue; /* rx queue assigned to */
};

#define IXGBE_5TUPLE_ARRAY_SIZE \
	(RTE_ALIGN(IXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
	 (sizeof(uint32_t) * NBBY))

struct ixgbe_ethertype_filter {
	uint16_t ethertype;
	uint32_t etqf;
	uint32_t etqs;
	/**
	 * If this filter is added by configuration,
	 * it should not be removed.
	 */
	bool conf;
};

/*
 * Structure to store filters' info.
 */
struct ixgbe_filter_info {
	uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
	/* store used ethertype filters */
	struct ixgbe_ethertype_filter ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
	/* Bit mask for every used 5tuple filter */
	uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE];
	struct ixgbe_5tuple_filter_list fivetuple_list;
	/* store the SYN filter info */
	uint32_t syn_info;
	/* store the rss filter info */
	struct ixgbe_rte_flow_rss_conf rss_info;
};

struct ixgbe_l2_tn_key {
	enum rte_eth_tunnel_type l2_tn_type;
	uint32_t tn_id;
};

struct ixgbe_l2_tn_filter {
	TAILQ_ENTRY(ixgbe_l2_tn_filter) entries;
	struct ixgbe_l2_tn_key key;
	uint32_t pool;
};

TAILQ_HEAD(ixgbe_l2_tn_filter_list, ixgbe_l2_tn_filter);

struct ixgbe_l2_tn_info {
	struct ixgbe_l2_tn_filter_list l2_tn_list;
	struct ixgbe_l2_tn_filter **hash_map;
	struct rte_hash *hash_handle;
	bool e_tag_en; /* e-tag enabled */
	bool e_tag_fwd_en; /* e-tag based forwarding enabled */
	uint16_t e_tag_ether_type; /* ether type for e-tag */
};

struct rte_flow {
	enum rte_filter_type filter_type;
	void *rule;
};
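/*
 * Note: this driver-private rte_flow object carries only the filter
 * category and an opaque pointer to the matching rule element kept in the
 * per-type filter lists above (the list management lives in ixgbe_flow.c).
 */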
struct ixgbe_macsec_setting {
	uint8_t offload_en;
	uint8_t encrypt_en;
	uint8_t replayprotect_en;
};

/*
 * Statistics counters collected by MACsec
 */
struct ixgbe_macsec_stats {
	/* TX port statistics */
	uint64_t out_pkts_untagged;
	uint64_t out_pkts_encrypted;
	uint64_t out_pkts_protected;
	uint64_t out_octets_encrypted;
	uint64_t out_octets_protected;

	/* RX port statistics */
	uint64_t in_pkts_untagged;
	uint64_t in_pkts_badtag;
	uint64_t in_pkts_nosci;
	uint64_t in_pkts_unknownsci;
	uint64_t in_octets_decrypted;
	uint64_t in_octets_validated;

	/* RX SC statistics */
	uint64_t in_pkts_unchecked;
	uint64_t in_pkts_delayed;
	uint64_t in_pkts_late;

	/* RX SA statistics */
	uint64_t in_pkts_ok;
	uint64_t in_pkts_invalid;
	uint64_t in_pkts_notvalid;
	uint64_t in_pkts_unusedsa;
	uint64_t in_pkts_notusingsa;
};

/* The configuration of bandwidth */
struct ixgbe_bw_conf {
	uint8_t tc_num; /* Number of TCs. */
};

/* Struct to store Traffic Manager shaper profile. */
struct ixgbe_tm_shaper_profile {
	TAILQ_ENTRY(ixgbe_tm_shaper_profile) node;
	uint32_t shaper_profile_id;
	uint32_t reference_count;
	struct rte_tm_shaper_params profile;
};

TAILQ_HEAD(ixgbe_shaper_profile_list, ixgbe_tm_shaper_profile);

/* node type of Traffic Manager */
enum ixgbe_tm_node_type {
	IXGBE_TM_NODE_TYPE_PORT,
	IXGBE_TM_NODE_TYPE_TC,
	IXGBE_TM_NODE_TYPE_QUEUE,
	IXGBE_TM_NODE_TYPE_MAX,
};

/* Struct to store Traffic Manager node configuration. */
struct ixgbe_tm_node {
	TAILQ_ENTRY(ixgbe_tm_node) node;
	uint32_t id;
	uint32_t priority;
	uint32_t weight;
	uint32_t reference_count;
	uint16_t no;
	struct ixgbe_tm_node *parent;
	struct ixgbe_tm_shaper_profile *shaper_profile;
	struct rte_tm_node_params params;
};

TAILQ_HEAD(ixgbe_tm_node_list, ixgbe_tm_node);

/* The configuration of Traffic Manager */
struct ixgbe_tm_conf {
	struct ixgbe_shaper_profile_list shaper_profile_list;
	struct ixgbe_tm_node *root; /* root node - port */
	struct ixgbe_tm_node_list tc_list; /* node list for all the TCs */
	struct ixgbe_tm_node_list queue_list; /* node list for all the queues */
	/**
	 * The number of added TC nodes.
	 * It should be no more than the TC number of this port.
	 */
	uint32_t nb_tc_node;
	/**
	 * The number of added queue nodes.
	 * It should be no more than the queue number of this port.
	 */
	uint32_t nb_queue_node;
	/**
	 * This flag indicates whether the application may still change the
	 * TM node configuration.
	 * When it is true, the configuration has been applied to the HW and
	 * the application must not change it.
	 * As on-the-fly configuration is not supported, the application
	 * should call the hierarchy_commit API when starting the port, which
	 * sets this flag to true; stopping the port sets it back to false.
	 */
	bool committed;
};
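/*
 * The lists above describe a fixed three-level tree that mirrors
 * enum ixgbe_tm_node_type:
 *
 *	port (root)
 *	`-- TC nodes (nb_tc_node, at most the port's TC count)
 *	    `-- queue nodes (nb_queue_node, at most the port's queue count)
 */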
/*
 * Structure to store private data for each driver instance (for each port).
 */
struct ixgbe_adapter {
	struct ixgbe_hw             hw;
	struct ixgbe_hw_stats       stats;
	struct ixgbe_macsec_stats   macsec_stats;
	struct ixgbe_macsec_setting macsec_setting;
	struct ixgbe_hw_fdir_info   fdir;
	struct ixgbe_interrupt      intr;
	struct ixgbe_stat_mapping_registers stat_mappings;
	struct ixgbe_vfta           shadow_vfta;
	struct ixgbe_hwstrip        hwstrip;
	struct ixgbe_dcb_config     dcb_config;
	struct ixgbe_vf_info        *vfdata;
	struct ixgbe_uta_info       uta_info;
#ifdef RTE_LIBRTE_IXGBE_BYPASS
	struct ixgbe_bypass_info    bps;
#endif /* RTE_LIBRTE_IXGBE_BYPASS */
	struct ixgbe_filter_info    filter;
	struct ixgbe_l2_tn_info     l2_tn;
	struct ixgbe_bw_conf        bw_conf;
#ifdef RTE_LIB_SECURITY
	struct ixgbe_ipsec          ipsec;
#endif
	bool rx_bulk_alloc_allowed;
	bool rx_vec_allowed;
	struct rte_timecounter      systime_tc;
	struct rte_timecounter      rx_tstamp_tc;
	struct rte_timecounter      tx_tstamp_tc;
	struct ixgbe_tm_conf        tm_conf;

	/* For RSS reta table update */
	uint8_t rss_reta_updated;

	/* Used for VF link sync with the PF's physical link and its logical
	 * link (checked via the mailbox status).
	 */
	uint8_t pflink_fullchk;
	uint8_t mac_ctrl_frame_fwd;
	rte_atomic32_t link_thread_running;
	pthread_t link_thread_tid;
};

struct ixgbe_vf_representor {
	uint16_t vf_id;
	uint16_t switch_domain_id;
	struct rte_eth_dev *pf_ethdev;
};

int ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);
int ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev);

#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
	(&((struct ixgbe_adapter *)adapter)->hw)

#define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
	(&((struct ixgbe_adapter *)adapter)->stats)

#define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \
	(&((struct ixgbe_adapter *)adapter)->macsec_stats)

#define IXGBE_DEV_PRIVATE_TO_MACSEC_SETTING(adapter) \
	(&((struct ixgbe_adapter *)adapter)->macsec_setting)

#define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
	(&((struct ixgbe_adapter *)adapter)->intr)

#define IXGBE_DEV_PRIVATE_TO_FDIR_INFO(adapter) \
	(&((struct ixgbe_adapter *)adapter)->fdir)

#define IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(adapter) \
	(&((struct ixgbe_adapter *)adapter)->stat_mappings)

#define IXGBE_DEV_PRIVATE_TO_VFTA(adapter) \
	(&((struct ixgbe_adapter *)adapter)->shadow_vfta)

#define IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(adapter) \
	(&((struct ixgbe_adapter *)adapter)->hwstrip)

#define IXGBE_DEV_PRIVATE_TO_DCB_CFG(adapter) \
	(&((struct ixgbe_adapter *)adapter)->dcb_config)

#define IXGBE_DEV_PRIVATE_TO_P_VFDATA(adapter) \
	(&((struct ixgbe_adapter *)adapter)->vfdata)

#define IXGBE_DEV_PRIVATE_TO_PFDATA(adapter) \
	(&((struct ixgbe_adapter *)adapter)->mr_data)

#define IXGBE_DEV_PRIVATE_TO_UTA(adapter) \
	(&((struct ixgbe_adapter *)adapter)->uta_info)

#define IXGBE_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
	(&((struct ixgbe_adapter *)adapter)->filter)

#define IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(adapter) \
	(&((struct ixgbe_adapter *)adapter)->l2_tn)

#define IXGBE_DEV_PRIVATE_TO_BW_CONF(adapter) \
	(&((struct ixgbe_adapter *)adapter)->bw_conf)

#define IXGBE_DEV_PRIVATE_TO_TM_CONF(adapter) \
	(&((struct ixgbe_adapter *)adapter)->tm_conf)

#define IXGBE_DEV_PRIVATE_TO_IPSEC(adapter)\
	(&((struct ixgbe_adapter *)adapter)->ipsec)
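/*
 * Usage sketch: each accessor above casts a port's opaque private data back
 * to struct ixgbe_adapter and returns a pointer to one member, e.g.
 *
 *	struct ixgbe_hw *hw =
 *		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 */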
/*
 * RX/TX function prototypes
 */
void ixgbe_dev_clear_queues(struct rte_eth_dev *dev);

void ixgbe_dev_free_queues(struct rte_eth_dev *dev);

void ixgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

void ixgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

int ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);

int ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);

uint32_t ixgbe_dev_rx_queue_count(void *rx_queue);

int ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);

int ixgbe_dev_rx_init(struct rte_eth_dev *dev);

void ixgbe_dev_tx_init(struct rte_eth_dev *dev);

int ixgbe_dev_rxtx_start(struct rte_eth_dev *dev);

int ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo);

void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo);

int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);

void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);

void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);

uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);

uint16_t ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);

uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

uint16_t ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);

int ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);

uint16_t ixgbe_reta_size_get(enum ixgbe_mac_type mac_type);

uint32_t ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx);

uint32_t ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type);

uint32_t ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i);

bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type);

int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
		struct rte_eth_ntuple_filter *filter,
		bool add);
int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
		struct rte_eth_ethertype_filter *filter,
		bool add);
int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
		struct rte_eth_syn_filter *filter,
		bool add);
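/*
 * Usage sketch (hypothetical values): steering LLDP frames (ethertype
 * 0x88CC) to Rx queue 0 with the helper above:
 *
 *	struct rte_eth_ethertype_filter f = {
 *		.ether_type = 0x88CC,
 *		.flags = 0,
 *		.queue = 0,
 *	};
 *	int ret = ixgbe_add_del_ethertype_filter(dev, &f, true);
 */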
/**
 * l2 tunnel configuration.
 */
struct ixgbe_l2_tunnel_conf {
	enum rte_eth_tunnel_type l2_tunnel_type;
	uint16_t ether_type; /* ether type in l2 header */
	uint32_t tunnel_id;  /* port tag id for e-tag */
	uint16_t vf_id;      /* VF id for tag insertion */
	uint32_t pool;       /* destination pool for tag based forwarding */
};

int
ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
		struct ixgbe_l2_tunnel_conf *l2_tunnel,
		bool restore);
int
ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
		struct ixgbe_l2_tunnel_conf *l2_tunnel);
void ixgbe_filterlist_init(void);
void ixgbe_filterlist_flush(void);

/*
 * Flow director function prototypes
 */
int ixgbe_fdir_configure(struct rte_eth_dev *dev);
int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
int ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
		uint16_t offset);
int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
		struct ixgbe_fdir_rule *rule,
		bool del, bool update);
void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
		struct rte_eth_fdir_info *fdir_info);
void ixgbe_fdir_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_fdir_stats *fdir_stats);

void ixgbe_configure_dcb(struct rte_eth_dev *dev);

int
ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
		int wait_to_complete, int vf);

/*
 * misc function prototypes
 */
void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);

void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);

void ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev);

int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);

void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev);

void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);

int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev);

uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);

void ixgbe_fdir_filter_restore(struct rte_eth_dev *dev);
int ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);

extern const struct rte_flow_ops ixgbe_flow_ops;

void ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
void ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
void ixgbe_clear_syn_filter(struct rte_eth_dev *dev);
int ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);

int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw);

int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw);

int ixgbe_vt_check(struct ixgbe_hw *hw);
int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
		uint16_t tx_rate, uint64_t q_msk);
bool is_ixgbe_supported(struct rte_eth_dev *dev);
int ixgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
		uint16_t tx_rate);
int ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
		const struct rte_flow_action_rss *in);
int ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
		const struct rte_flow_action_rss *with);
int ixgbe_config_rss_filter(struct rte_eth_dev *dev,
		struct ixgbe_rte_flow_rss_conf *conf, bool add);
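/*
 * Usage sketch (hypothetical 'act' is a const struct rte_flow_action_rss *):
 * one plausible call sequence for the RSS helpers above is to duplicate the
 * action into driver-private storage before programming the hardware:
 *
 *	struct ixgbe_rte_flow_rss_conf rss_conf;
 *
 *	if (ixgbe_rss_conf_init(&rss_conf, act) == 0)
 *		ret = ixgbe_config_rss_filter(dev, &rss_conf, true);
 */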
void ixgbe_dev_macsec_register_enable(struct rte_eth_dev *dev,
		struct ixgbe_macsec_setting *macsec_setting);

void ixgbe_dev_macsec_register_disable(struct rte_eth_dev *dev);

void ixgbe_dev_macsec_setting_save(struct rte_eth_dev *dev,
		struct ixgbe_macsec_setting *macsec_setting);

void ixgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev);

static inline int
ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
		uint16_t ethertype)
{
	int i;

	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_filters[i].ethertype == ethertype &&
		    (filter_info->ethertype_mask & (1 << i)))
			return i;
	}
	return -1;
}

static inline int
ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
		struct ixgbe_ethertype_filter *ethertype_filter)
{
	int i;

	/* Claim the first unused slot and copy the filter into it. */
	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
		if (!(filter_info->ethertype_mask & (1 << i))) {
			filter_info->ethertype_mask |= 1 << i;
			filter_info->ethertype_filters[i].ethertype =
				ethertype_filter->ethertype;
			filter_info->ethertype_filters[i].etqf =
				ethertype_filter->etqf;
			filter_info->ethertype_filters[i].etqs =
				ethertype_filter->etqs;
			filter_info->ethertype_filters[i].conf =
				ethertype_filter->conf;
			return i;
		}
	}
	return -1;
}

static inline int
ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
		uint8_t idx)
{
	if (idx >= IXGBE_MAX_ETQF_FILTERS)
		return -1;
	filter_info->ethertype_mask &= ~(1 << idx);
	filter_info->ethertype_filters[idx].ethertype = 0;
	filter_info->ethertype_filters[idx].etqf = 0;
	filter_info->ethertype_filters[idx].etqs = 0;
	filter_info->ethertype_filters[idx].conf = FALSE;
	return idx;
}

#endif /* _IXGBE_ETHDEV_H_ */