1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #ifndef _IXGBE_ETHDEV_H_ 35 #define _IXGBE_ETHDEV_H_ 36 #include "base/ixgbe_type.h" 37 #include "base/ixgbe_dcb.h" 38 #include "base/ixgbe_dcb_82599.h" 39 #include "base/ixgbe_dcb_82598.h" 40 #include "ixgbe_bypass.h" 41 #ifdef RTE_LIBRTE_SECURITY 42 #include "ixgbe_ipsec.h" 43 #endif 44 #include <rte_time.h> 45 #include <rte_hash.h> 46 #include <rte_pci.h> 47 #include <rte_bus_pci.h> 48 #include <rte_tm_driver.h> 49 50 /* need update link, bit flag */ 51 #define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0) 52 #define IXGBE_FLAG_MAILBOX (uint32_t)(1 << 1) 53 #define IXGBE_FLAG_PHY_INTERRUPT (uint32_t)(1 << 2) 54 #define IXGBE_FLAG_MACSEC (uint32_t)(1 << 3) 55 #define IXGBE_FLAG_NEED_LINK_CONFIG (uint32_t)(1 << 4) 56 57 /* 58 * Defines that were not part of ixgbe_type.h as they are not used by the 59 * FreeBSD driver. 60 */ 61 #define IXGBE_ADVTXD_MAC_1588 0x00080000 /* IEEE1588 Timestamp packet */ 62 #define IXGBE_RXD_STAT_TMST 0x10000 /* Timestamped Packet indication */ 63 #define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* L4 Packet TYPE, resvd */ 64 #define IXGBE_RXDADV_ERR_CKSUM_BIT 30 65 #define IXGBE_RXDADV_ERR_CKSUM_MSK 3 66 #define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Bit shift for l2_len */ 67 #define IXGBE_NB_STAT_MAPPING_REGS 32 68 #define IXGBE_EXTENDED_VLAN (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */ 69 #define IXGBE_VFTA_SIZE 128 70 #define IXGBE_VLAN_TAG_SIZE 4 71 #define IXGBE_MAX_RX_QUEUE_NUM 128 72 #define IXGBE_MAX_INTR_QUEUE_NUM 15 73 #define IXGBE_VMDQ_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM 74 #define IXGBE_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM 75 #define IXGBE_NONE_MODE_TX_NB_QUEUES 64 76 77 #ifndef NBBY 78 #define NBBY 8 /* number of bits in a byte */ 79 #endif 80 #define IXGBE_HWSTRIP_BITMAP_SIZE (IXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY)) 81 82 /* EITR Interval is in 2048ns uinits for 1G and 10G link */ 83 #define IXGBE_EITR_INTERVAL_UNIT_NS 2048 84 #define IXGBE_EITR_ITR_INT_SHIFT 3 85 #define IXGBE_EITR_INTERVAL_US(us) \ 86 (((us) * 1000 / IXGBE_EITR_INTERVAL_UNIT_NS << IXGBE_EITR_ITR_INT_SHIFT) & \ 87 IXGBE_EITR_ITR_INT_MASK) 88 89 90 /* Loopback operation modes */ 91 /* 82599 specific loopback operation types */ 92 #define IXGBE_LPBK_82599_NONE 0x0 /* Default value. Loopback is disabled. */ 93 #define IXGBE_LPBK_82599_TX_RX 0x1 /* Tx->Rx loopback operation is enabled. */ 94 95 #define IXGBE_MAX_JUMBO_FRAME_SIZE 0x2600 /* Maximum Jumbo frame size. */ 96 97 #define IXGBE_RTTBCNRC_RF_INT_MASK_BASE 0x000003FF 98 #define IXGBE_RTTBCNRC_RF_INT_MASK_M \ 99 (IXGBE_RTTBCNRC_RF_INT_MASK_BASE << IXGBE_RTTBCNRC_RF_INT_SHIFT) 100 101 #define IXGBE_MAX_QUEUE_NUM_PER_VF 8 102 103 #define IXGBE_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */ 104 #define IXGBE_SYN_FILTER_QUEUE 0x000000FE /* syn filter queue field */ 105 #define IXGBE_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field shift */ 106 #define IXGBE_SYN_FILTER_SYNQFP 0x80000000 /* syn filter SYNQFP */ 107 108 #define IXGBE_ETQF_UP 0x00070000 /* ethertype filter priority field */ 109 #define IXGBE_ETQF_SHIFT 16 110 #define IXGBE_ETQF_UP_EN 0x00080000 111 #define IXGBE_ETQF_ETHERTYPE 0x0000FFFF /* ethertype filter ethertype field */ 112 #define IXGBE_ETQF_MAX_PRI 7 113 114 #define IXGBE_SDPQF_DSTPORT 0xFFFF0000 /* dst port field */ 115 #define IXGBE_SDPQF_DSTPORT_SHIFT 16 /* dst port field shift */ 116 #define IXGBE_SDPQF_SRCPORT 0x0000FFFF /* src port field */ 117 118 #define IXGBE_L34T_IMIR_SIZE_BP 0x00001000 119 #define IXGBE_L34T_IMIR_RESERVE 0x00080000 /* bit 13 to 19 must be set to 1000000b. */ 120 #define IXGBE_L34T_IMIR_LLI 0x00100000 121 #define IXGBE_L34T_IMIR_QUEUE 0x0FE00000 122 #define IXGBE_L34T_IMIR_QUEUE_SHIFT 21 123 #define IXGBE_5TUPLE_MAX_PRI 7 124 #define IXGBE_5TUPLE_MIN_PRI 1 125 126 #define IXGBE_RSS_OFFLOAD_ALL ( \ 127 ETH_RSS_IPV4 | \ 128 ETH_RSS_NONFRAG_IPV4_TCP | \ 129 ETH_RSS_NONFRAG_IPV4_UDP | \ 130 ETH_RSS_IPV6 | \ 131 ETH_RSS_NONFRAG_IPV6_TCP | \ 132 ETH_RSS_NONFRAG_IPV6_UDP | \ 133 ETH_RSS_IPV6_EX | \ 134 ETH_RSS_IPV6_TCP_EX | \ 135 ETH_RSS_IPV6_UDP_EX) 136 137 #define IXGBE_VF_IRQ_ENABLE_MASK 3 /* vf irq enable mask */ 138 #define IXGBE_VF_MAXMSIVECTOR 1 139 140 #define IXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET 141 #define IXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET 142 143 #define IXGBE_SECTX_MINSECIFG_MASK 0x0000000F 144 145 #define IXGBE_MACSEC_PNTHRSH 0xFFFFFE00 146 147 #define IXGBE_MAX_FDIR_FILTER_NUM (1024 * 32) 148 #define IXGBE_MAX_L2_TN_FILTER_NUM 128 149 150 #define MAC_TYPE_FILTER_SUP_EXT(type) do {\ 151 if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\ 152 return -ENOTSUP;\ 153 } while (0) 154 155 #define MAC_TYPE_FILTER_SUP(type) do {\ 156 if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\ 157 (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\ 158 (type) != ixgbe_mac_X550EM_a)\ 159 return -ENOTSUP;\ 160 } while (0) 161 162 /* Link speed for X550 auto negotiation */ 163 #define IXGBE_LINK_SPEED_X550_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ 164 IXGBE_LINK_SPEED_1GB_FULL | \ 165 IXGBE_LINK_SPEED_2_5GB_FULL | \ 166 IXGBE_LINK_SPEED_5GB_FULL | \ 167 IXGBE_LINK_SPEED_10GB_FULL) 168 169 /* 170 * Information about the fdir mode. 171 */ 172 struct ixgbe_hw_fdir_mask { 173 uint16_t vlan_tci_mask; 174 uint32_t src_ipv4_mask; 175 uint32_t dst_ipv4_mask; 176 uint16_t src_ipv6_mask; 177 uint16_t dst_ipv6_mask; 178 uint16_t src_port_mask; 179 uint16_t dst_port_mask; 180 uint16_t flex_bytes_mask; 181 uint8_t mac_addr_byte_mask; 182 uint32_t tunnel_id_mask; 183 uint8_t tunnel_type_mask; 184 }; 185 186 struct ixgbe_fdir_filter { 187 TAILQ_ENTRY(ixgbe_fdir_filter) entries; 188 union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/ 189 uint32_t fdirflags; /* drop or forward */ 190 uint32_t fdirhash; /* hash value for fdir */ 191 uint8_t queue; /* assigned rx queue */ 192 }; 193 194 /* list of fdir filters */ 195 TAILQ_HEAD(ixgbe_fdir_filter_list, ixgbe_fdir_filter); 196 197 struct ixgbe_fdir_rule { 198 struct ixgbe_hw_fdir_mask mask; 199 union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/ 200 bool b_spec; /* If TRUE, ixgbe_fdir, fdirflags, queue have meaning. */ 201 bool b_mask; /* If TRUE, mask has meaning. */ 202 enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */ 203 uint32_t fdirflags; /* drop or forward */ 204 uint32_t soft_id; /* an unique value for this rule */ 205 uint8_t queue; /* assigned rx queue */ 206 uint8_t flex_bytes_offset; 207 }; 208 209 struct ixgbe_hw_fdir_info { 210 struct ixgbe_hw_fdir_mask mask; 211 uint8_t flex_bytes_offset; 212 uint16_t collision; 213 uint16_t free; 214 uint16_t maxhash; 215 uint8_t maxlen; 216 uint64_t add; 217 uint64_t remove; 218 uint64_t f_add; 219 uint64_t f_remove; 220 struct ixgbe_fdir_filter_list fdir_list; /* filter list*/ 221 /* store the pointers of the filters, index is the hash value. */ 222 struct ixgbe_fdir_filter **hash_map; 223 struct rte_hash *hash_handle; /* cuckoo hash handler */ 224 bool mask_added; /* If already got mask from consistent filter */ 225 }; 226 227 /* structure for interrupt relative data */ 228 struct ixgbe_interrupt { 229 uint32_t flags; 230 uint32_t mask; 231 /*to save original mask during delayed handler */ 232 uint32_t mask_original; 233 }; 234 235 struct ixgbe_stat_mapping_registers { 236 uint32_t tqsm[IXGBE_NB_STAT_MAPPING_REGS]; 237 uint32_t rqsmr[IXGBE_NB_STAT_MAPPING_REGS]; 238 }; 239 240 struct ixgbe_vfta { 241 uint32_t vfta[IXGBE_VFTA_SIZE]; 242 }; 243 244 struct ixgbe_hwstrip { 245 uint32_t bitmap[IXGBE_HWSTRIP_BITMAP_SIZE]; 246 }; 247 248 /* 249 * VF data which used by PF host only 250 */ 251 #define IXGBE_MAX_VF_MC_ENTRIES 30 252 #define IXGBE_MAX_MR_RULE_ENTRIES 4 /* number of mirroring rules supported */ 253 #define IXGBE_MAX_UTA 128 254 255 struct ixgbe_uta_info { 256 uint8_t uc_filter_type; 257 uint16_t uta_in_use; 258 uint32_t uta_shadow[IXGBE_MAX_UTA]; 259 }; 260 261 #define IXGBE_MAX_MIRROR_RULES 4 /* Maximum nb. of mirror rules. */ 262 263 struct ixgbe_mirror_info { 264 struct rte_eth_mirror_conf mr_conf[IXGBE_MAX_MIRROR_RULES]; 265 /**< store PF mirror rules configuration*/ 266 }; 267 268 struct ixgbe_vf_info { 269 uint8_t vf_mac_addresses[ETHER_ADDR_LEN]; 270 uint16_t vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; 271 uint16_t num_vf_mc_hashes; 272 uint16_t default_vf_vlan_id; 273 uint16_t vlans_enabled; 274 bool clear_to_send; 275 uint16_t tx_rate[IXGBE_MAX_QUEUE_NUM_PER_VF]; 276 uint16_t vlan_count; 277 uint8_t spoofchk_enabled; 278 uint8_t api_version; 279 }; 280 281 /* 282 * Possible l4type of 5tuple filters. 283 */ 284 enum ixgbe_5tuple_protocol { 285 IXGBE_FILTER_PROTOCOL_TCP = 0, 286 IXGBE_FILTER_PROTOCOL_UDP, 287 IXGBE_FILTER_PROTOCOL_SCTP, 288 IXGBE_FILTER_PROTOCOL_NONE, 289 }; 290 291 TAILQ_HEAD(ixgbe_5tuple_filter_list, ixgbe_5tuple_filter); 292 293 struct ixgbe_5tuple_filter_info { 294 uint32_t dst_ip; 295 uint32_t src_ip; 296 uint16_t dst_port; 297 uint16_t src_port; 298 enum ixgbe_5tuple_protocol proto; /* l4 protocol. */ 299 uint8_t priority; /* seven levels (001b-111b), 111b is highest, 300 used when more than one filter matches. */ 301 uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */ 302 src_ip_mask:1, /* if mask is 1b, do not compare src ip. */ 303 dst_port_mask:1, /* if mask is 1b, do not compare dst port. */ 304 src_port_mask:1, /* if mask is 1b, do not compare src port. */ 305 proto_mask:1; /* if mask is 1b, do not compare protocol. */ 306 }; 307 308 /* 5tuple filter structure */ 309 struct ixgbe_5tuple_filter { 310 TAILQ_ENTRY(ixgbe_5tuple_filter) entries; 311 uint16_t index; /* the index of 5tuple filter */ 312 struct ixgbe_5tuple_filter_info filter_info; 313 uint16_t queue; /* rx queue assigned to */ 314 }; 315 316 #define IXGBE_5TUPLE_ARRAY_SIZE \ 317 (RTE_ALIGN(IXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \ 318 (sizeof(uint32_t) * NBBY)) 319 320 struct ixgbe_ethertype_filter { 321 uint16_t ethertype; 322 uint32_t etqf; 323 uint32_t etqs; 324 /** 325 * If this filter is added by configuration, 326 * it should not be removed. 327 */ 328 bool conf; 329 }; 330 331 /* 332 * Structure to store filters' info. 333 */ 334 struct ixgbe_filter_info { 335 uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */ 336 /* store used ethertype filters*/ 337 struct ixgbe_ethertype_filter ethertype_filters[IXGBE_MAX_ETQF_FILTERS]; 338 /* Bit mask for every used 5tuple filter */ 339 uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE]; 340 struct ixgbe_5tuple_filter_list fivetuple_list; 341 /* store the SYN filter info */ 342 uint32_t syn_info; 343 }; 344 345 struct ixgbe_l2_tn_key { 346 enum rte_eth_tunnel_type l2_tn_type; 347 uint32_t tn_id; 348 }; 349 350 struct ixgbe_l2_tn_filter { 351 TAILQ_ENTRY(ixgbe_l2_tn_filter) entries; 352 struct ixgbe_l2_tn_key key; 353 uint32_t pool; 354 }; 355 356 TAILQ_HEAD(ixgbe_l2_tn_filter_list, ixgbe_l2_tn_filter); 357 358 struct ixgbe_l2_tn_info { 359 struct ixgbe_l2_tn_filter_list l2_tn_list; 360 struct ixgbe_l2_tn_filter **hash_map; 361 struct rte_hash *hash_handle; 362 bool e_tag_en; /* e-tag enabled */ 363 bool e_tag_fwd_en; /* e-tag based forwarding enabled */ 364 bool e_tag_ether_type; /* ether type for e-tag */ 365 }; 366 367 struct rte_flow { 368 enum rte_filter_type filter_type; 369 void *rule; 370 }; 371 372 /* 373 * Statistics counters collected by the MACsec 374 */ 375 struct ixgbe_macsec_stats { 376 /* TX port statistics */ 377 uint64_t out_pkts_untagged; 378 uint64_t out_pkts_encrypted; 379 uint64_t out_pkts_protected; 380 uint64_t out_octets_encrypted; 381 uint64_t out_octets_protected; 382 383 /* RX port statistics */ 384 uint64_t in_pkts_untagged; 385 uint64_t in_pkts_badtag; 386 uint64_t in_pkts_nosci; 387 uint64_t in_pkts_unknownsci; 388 uint64_t in_octets_decrypted; 389 uint64_t in_octets_validated; 390 391 /* RX SC statistics */ 392 uint64_t in_pkts_unchecked; 393 uint64_t in_pkts_delayed; 394 uint64_t in_pkts_late; 395 396 /* RX SA statistics */ 397 uint64_t in_pkts_ok; 398 uint64_t in_pkts_invalid; 399 uint64_t in_pkts_notvalid; 400 uint64_t in_pkts_unusedsa; 401 uint64_t in_pkts_notusingsa; 402 }; 403 404 /* The configuration of bandwidth */ 405 struct ixgbe_bw_conf { 406 uint8_t tc_num; /* Number of TCs. */ 407 }; 408 409 /* Struct to store Traffic Manager shaper profile. */ 410 struct ixgbe_tm_shaper_profile { 411 TAILQ_ENTRY(ixgbe_tm_shaper_profile) node; 412 uint32_t shaper_profile_id; 413 uint32_t reference_count; 414 struct rte_tm_shaper_params profile; 415 }; 416 417 TAILQ_HEAD(ixgbe_shaper_profile_list, ixgbe_tm_shaper_profile); 418 419 /* node type of Traffic Manager */ 420 enum ixgbe_tm_node_type { 421 IXGBE_TM_NODE_TYPE_PORT, 422 IXGBE_TM_NODE_TYPE_TC, 423 IXGBE_TM_NODE_TYPE_QUEUE, 424 IXGBE_TM_NODE_TYPE_MAX, 425 }; 426 427 /* Struct to store Traffic Manager node configuration. */ 428 struct ixgbe_tm_node { 429 TAILQ_ENTRY(ixgbe_tm_node) node; 430 uint32_t id; 431 uint32_t priority; 432 uint32_t weight; 433 uint32_t reference_count; 434 uint16_t no; 435 struct ixgbe_tm_node *parent; 436 struct ixgbe_tm_shaper_profile *shaper_profile; 437 struct rte_tm_node_params params; 438 }; 439 440 TAILQ_HEAD(ixgbe_tm_node_list, ixgbe_tm_node); 441 442 /* The configuration of Traffic Manager */ 443 struct ixgbe_tm_conf { 444 struct ixgbe_shaper_profile_list shaper_profile_list; 445 struct ixgbe_tm_node *root; /* root node - port */ 446 struct ixgbe_tm_node_list tc_list; /* node list for all the TCs */ 447 struct ixgbe_tm_node_list queue_list; /* node list for all the queues */ 448 /** 449 * The number of added TC nodes. 450 * It should be no more than the TC number of this port. 451 */ 452 uint32_t nb_tc_node; 453 /** 454 * The number of added queue nodes. 455 * It should be no more than the queue number of this port. 456 */ 457 uint32_t nb_queue_node; 458 /** 459 * This flag is used to check if APP can change the TM node 460 * configuration. 461 * When it's true, means the configuration is applied to HW, 462 * APP should not change the configuration. 463 * As we don't support on-the-fly configuration, when starting 464 * the port, APP should call the hierarchy_commit API to set this 465 * flag to true. When stopping the port, this flag should be set 466 * to false. 467 */ 468 bool committed; 469 }; 470 471 /* 472 * Structure to store private data for each driver instance (for each port). 473 */ 474 struct ixgbe_adapter { 475 struct ixgbe_hw hw; 476 struct ixgbe_hw_stats stats; 477 struct ixgbe_macsec_stats macsec_stats; 478 struct ixgbe_hw_fdir_info fdir; 479 struct ixgbe_interrupt intr; 480 struct ixgbe_stat_mapping_registers stat_mappings; 481 struct ixgbe_vfta shadow_vfta; 482 struct ixgbe_hwstrip hwstrip; 483 struct ixgbe_dcb_config dcb_config; 484 struct ixgbe_mirror_info mr_data; 485 struct ixgbe_vf_info *vfdata; 486 struct ixgbe_uta_info uta_info; 487 #ifdef RTE_LIBRTE_IXGBE_BYPASS 488 struct ixgbe_bypass_info bps; 489 #endif /* RTE_LIBRTE_IXGBE_BYPASS */ 490 struct ixgbe_filter_info filter; 491 struct ixgbe_l2_tn_info l2_tn; 492 struct ixgbe_bw_conf bw_conf; 493 #ifdef RTE_LIBRTE_SECURITY 494 struct ixgbe_ipsec ipsec; 495 #endif 496 bool rx_bulk_alloc_allowed; 497 bool rx_vec_allowed; 498 struct rte_timecounter systime_tc; 499 struct rte_timecounter rx_tstamp_tc; 500 struct rte_timecounter tx_tstamp_tc; 501 struct ixgbe_tm_conf tm_conf; 502 }; 503 504 #define IXGBE_DEV_PRIVATE_TO_HW(adapter)\ 505 (&((struct ixgbe_adapter *)adapter)->hw) 506 507 #define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \ 508 (&((struct ixgbe_adapter *)adapter)->stats) 509 510 #define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \ 511 (&((struct ixgbe_adapter *)adapter)->macsec_stats) 512 513 #define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \ 514 (&((struct ixgbe_adapter *)adapter)->intr) 515 516 #define IXGBE_DEV_PRIVATE_TO_FDIR_INFO(adapter) \ 517 (&((struct ixgbe_adapter *)adapter)->fdir) 518 519 #define IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(adapter) \ 520 (&((struct ixgbe_adapter *)adapter)->stat_mappings) 521 522 #define IXGBE_DEV_PRIVATE_TO_VFTA(adapter) \ 523 (&((struct ixgbe_adapter *)adapter)->shadow_vfta) 524 525 #define IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(adapter) \ 526 (&((struct ixgbe_adapter *)adapter)->hwstrip) 527 528 #define IXGBE_DEV_PRIVATE_TO_DCB_CFG(adapter) \ 529 (&((struct ixgbe_adapter *)adapter)->dcb_config) 530 531 #define IXGBE_DEV_PRIVATE_TO_P_VFDATA(adapter) \ 532 (&((struct ixgbe_adapter *)adapter)->vfdata) 533 534 #define IXGBE_DEV_PRIVATE_TO_PFDATA(adapter) \ 535 (&((struct ixgbe_adapter *)adapter)->mr_data) 536 537 #define IXGBE_DEV_PRIVATE_TO_UTA(adapter) \ 538 (&((struct ixgbe_adapter *)adapter)->uta_info) 539 540 #define IXGBE_DEV_PRIVATE_TO_FILTER_INFO(adapter) \ 541 (&((struct ixgbe_adapter *)adapter)->filter) 542 543 #define IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(adapter) \ 544 (&((struct ixgbe_adapter *)adapter)->l2_tn) 545 546 #define IXGBE_DEV_PRIVATE_TO_BW_CONF(adapter) \ 547 (&((struct ixgbe_adapter *)adapter)->bw_conf) 548 549 #define IXGBE_DEV_PRIVATE_TO_TM_CONF(adapter) \ 550 (&((struct ixgbe_adapter *)adapter)->tm_conf) 551 552 #define IXGBE_DEV_PRIVATE_TO_IPSEC(adapter)\ 553 (&((struct ixgbe_adapter *)adapter)->ipsec) 554 555 /* 556 * RX/TX function prototypes 557 */ 558 void ixgbe_dev_clear_queues(struct rte_eth_dev *dev); 559 560 void ixgbe_dev_free_queues(struct rte_eth_dev *dev); 561 562 void ixgbe_dev_rx_queue_release(void *rxq); 563 564 void ixgbe_dev_tx_queue_release(void *txq); 565 566 int ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, 567 uint16_t nb_rx_desc, unsigned int socket_id, 568 const struct rte_eth_rxconf *rx_conf, 569 struct rte_mempool *mb_pool); 570 571 int ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, 572 uint16_t nb_tx_desc, unsigned int socket_id, 573 const struct rte_eth_txconf *tx_conf); 574 575 uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, 576 uint16_t rx_queue_id); 577 578 int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset); 579 580 int ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset); 581 int ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset); 582 583 int ixgbe_dev_rx_init(struct rte_eth_dev *dev); 584 585 void ixgbe_dev_tx_init(struct rte_eth_dev *dev); 586 587 int ixgbe_dev_rxtx_start(struct rte_eth_dev *dev); 588 589 int ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); 590 591 int ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); 592 593 int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); 594 595 int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); 596 597 void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 598 struct rte_eth_rxq_info *qinfo); 599 600 void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 601 struct rte_eth_txq_info *qinfo); 602 603 int ixgbevf_dev_rx_init(struct rte_eth_dev *dev); 604 605 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev); 606 607 void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev); 608 609 uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 610 uint16_t nb_pkts); 611 612 uint16_t ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, 613 uint16_t nb_pkts); 614 615 uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, 616 struct rte_mbuf **rx_pkts, uint16_t nb_pkts); 617 uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, 618 struct rte_mbuf **rx_pkts, uint16_t nb_pkts); 619 620 uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 621 uint16_t nb_pkts); 622 623 uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, 624 uint16_t nb_pkts); 625 626 uint16_t ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 627 uint16_t nb_pkts); 628 629 int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev, 630 struct rte_eth_rss_conf *rss_conf); 631 632 int ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 633 struct rte_eth_rss_conf *rss_conf); 634 635 uint16_t ixgbe_reta_size_get(enum ixgbe_mac_type mac_type); 636 637 uint32_t ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx); 638 639 uint32_t ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type); 640 641 uint32_t ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i); 642 643 bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type); 644 645 int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, 646 struct rte_eth_ntuple_filter *filter, 647 bool add); 648 int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, 649 struct rte_eth_ethertype_filter *filter, 650 bool add); 651 int ixgbe_syn_filter_set(struct rte_eth_dev *dev, 652 struct rte_eth_syn_filter *filter, 653 bool add); 654 int 655 ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, 656 struct rte_eth_l2_tunnel_conf *l2_tunnel, 657 bool restore); 658 int 659 ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, 660 struct rte_eth_l2_tunnel_conf *l2_tunnel); 661 void ixgbe_filterlist_init(void); 662 void ixgbe_filterlist_flush(void); 663 /* 664 * Flow director function prototypes 665 */ 666 int ixgbe_fdir_configure(struct rte_eth_dev *dev); 667 int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev); 668 int ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev, 669 uint16_t offset); 670 int ixgbe_fdir_filter_program(struct rte_eth_dev *dev, 671 struct ixgbe_fdir_rule *rule, 672 bool del, bool update); 673 674 void ixgbe_configure_dcb(struct rte_eth_dev *dev); 675 676 /* 677 * misc function prototypes 678 */ 679 void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev); 680 681 void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev); 682 683 void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev); 684 685 void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev); 686 687 void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev); 688 689 void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev); 690 691 void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev); 692 693 int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev); 694 695 uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val); 696 697 int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev, 698 enum rte_filter_op filter_op, void *arg); 699 void ixgbe_fdir_filter_restore(struct rte_eth_dev *dev); 700 int ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev); 701 702 extern const struct rte_flow_ops ixgbe_flow_ops; 703 704 void ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev); 705 void ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev); 706 void ixgbe_clear_syn_filter(struct rte_eth_dev *dev); 707 int ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev); 708 709 int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw); 710 711 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw); 712 713 int ixgbe_vt_check(struct ixgbe_hw *hw); 714 int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, 715 uint16_t tx_rate, uint64_t q_msk); 716 bool is_ixgbe_supported(struct rte_eth_dev *dev); 717 int ixgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops); 718 void ixgbe_tm_conf_init(struct rte_eth_dev *dev); 719 void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev); 720 int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx, 721 uint16_t tx_rate); 722 723 static inline int 724 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info, 725 uint16_t ethertype) 726 { 727 int i; 728 729 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 730 if (filter_info->ethertype_filters[i].ethertype == ethertype && 731 (filter_info->ethertype_mask & (1 << i))) 732 return i; 733 } 734 return -1; 735 } 736 737 static inline int 738 ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info, 739 struct ixgbe_ethertype_filter *ethertype_filter) 740 { 741 int i; 742 743 for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { 744 if (!(filter_info->ethertype_mask & (1 << i))) { 745 filter_info->ethertype_mask |= 1 << i; 746 filter_info->ethertype_filters[i].ethertype = 747 ethertype_filter->ethertype; 748 filter_info->ethertype_filters[i].etqf = 749 ethertype_filter->etqf; 750 filter_info->ethertype_filters[i].etqs = 751 ethertype_filter->etqs; 752 filter_info->ethertype_filters[i].conf = 753 ethertype_filter->conf; 754 return i; 755 } 756 } 757 return -1; 758 } 759 760 static inline int 761 ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info, 762 uint8_t idx) 763 { 764 if (idx >= IXGBE_MAX_ETQF_FILTERS) 765 return -1; 766 filter_info->ethertype_mask &= ~(1 << idx); 767 filter_info->ethertype_filters[idx].ethertype = 0; 768 filter_info->ethertype_filters[idx].etqf = 0; 769 filter_info->ethertype_filters[idx].etqs = 0; 770 filter_info->ethertype_filters[idx].etqs = FALSE; 771 return idx; 772 } 773 774 #endif /* _IXGBE_ETHDEV_H_ */ 775