/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#ifndef _TXGBE_ETHDEV_H_
#define _TXGBE_ETHDEV_H_

#include <stdint.h>

#include "base/txgbe.h"
#include "txgbe_ptypes.h"
#ifdef RTE_LIB_SECURITY
#include "txgbe_ipsec.h"
#endif
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_time.h>
#include <rte_ethdev.h>
#include <rte_ethdev_core.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_bus_pci.h>
#include <rte_tm_driver.h>

/* need update link, bit flag */
#define TXGBE_FLAG_NEED_LINK_UPDATE	(uint32_t)(1 << 0)
#define TXGBE_FLAG_MAILBOX		(uint32_t)(1 << 1)
#define TXGBE_FLAG_PHY_INTERRUPT	(uint32_t)(1 << 2)
#define TXGBE_FLAG_MACSEC		(uint32_t)(1 << 3)
#define TXGBE_FLAG_NEED_LINK_CONFIG	(uint32_t)(1 << 4)

/*
 * Defines that were not part of txgbe_type.h as they are not used by the
 * FreeBSD driver.
 */
#define TXGBE_VFTA_SIZE 128
#define TXGBE_VLAN_TAG_SIZE 4
#define TXGBE_HKEY_MAX_INDEX 10
/* Default value of Max Rx Queue */
#define TXGBE_MAX_RX_QUEUE_NUM	128
#define TXGBE_VMDQ_DCB_NB_QUEUES	TXGBE_MAX_RX_QUEUE_NUM

#ifndef NBBY
#define NBBY	8	/* number of bits in a byte */
#endif
#define TXGBE_HWSTRIP_BITMAP_SIZE \
	(TXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))

#define TXGBE_QUEUE_ITR_INTERVAL_DEFAULT	500 /* 500us */

#define TXGBE_MAX_QUEUE_NUM_PER_VF	8

#define TXGBE_5TUPLE_MAX_PRI	7
#define TXGBE_5TUPLE_MIN_PRI	1

#define TXGBE_RSS_OFFLOAD_ALL ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_IPV6_EX | \
	ETH_RSS_IPV6_TCP_EX | \
	ETH_RSS_IPV6_UDP_EX)

#define TXGBE_MISC_VEC_ID	RTE_INTR_VEC_ZERO_OFFSET
#define TXGBE_RX_VEC_START	RTE_INTR_VEC_RXTX_OFFSET

#define TXGBE_MAX_FDIR_FILTER_NUM	(1024 * 32)
#define TXGBE_MAX_L2_TN_FILTER_NUM	128

/*
 * Information about the fdir mode.
 */
struct txgbe_hw_fdir_mask {
	uint16_t vlan_tci_mask;
	uint32_t src_ipv4_mask;
	uint32_t dst_ipv4_mask;
	uint16_t src_ipv6_mask;
	uint16_t dst_ipv6_mask;
	uint16_t src_port_mask;
	uint16_t dst_port_mask;
	uint16_t flex_bytes_mask;
	uint8_t  mac_addr_byte_mask;
	uint32_t tunnel_id_mask;
	uint8_t  tunnel_type_mask;
};

struct txgbe_fdir_filter {
	TAILQ_ENTRY(txgbe_fdir_filter) entries;
	struct txgbe_atr_input input; /* key of fdir filter */
	uint32_t fdirflags; /* drop or forward */
	uint32_t fdirhash; /* hash value for fdir */
	uint8_t queue; /* assigned rx queue */
};

/* list of fdir filters */
TAILQ_HEAD(txgbe_fdir_filter_list, txgbe_fdir_filter);

struct txgbe_fdir_rule {
	struct txgbe_hw_fdir_mask mask;
	struct txgbe_atr_input input; /* key of fdir filter */
	bool b_spec; /* If TRUE, input, fdirflags, queue have meaning. */
	bool b_mask; /* If TRUE, mask has meaning. */
	enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
	uint32_t fdirflags; /* drop or forward */
	uint32_t soft_id; /* a unique value for this rule */
	uint8_t queue; /* assigned rx queue */
	uint8_t flex_bytes_offset;
};

struct txgbe_hw_fdir_info {
	struct txgbe_hw_fdir_mask mask;
	uint8_t  flex_bytes_offset;
	uint16_t collision;
	uint16_t free;
	uint16_t maxhash;
	uint8_t  maxlen;
	uint64_t add;
	uint64_t remove;
	uint64_t f_add;
	uint64_t f_remove;
	struct txgbe_fdir_filter_list fdir_list; /* filter list */
	/* store the pointers of the filters, index is the hash value. */
	struct txgbe_fdir_filter **hash_map;
	struct rte_hash *hash_handle; /* cuckoo hash handler */
	bool mask_added; /* If already got mask from consistent filter */
};

struct txgbe_rte_flow_rss_conf {
	struct rte_flow_action_rss conf; /**< RSS parameters. */
	uint8_t key[TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
	uint16_t queue[TXGBE_MAX_RX_QUEUE_NUM]; /**< Queue indices to use. */
};

/* structure for interrupt relative data */
struct txgbe_interrupt {
	uint32_t flags;
	uint32_t mask_misc;
	/* to save original mask during delayed handler */
	uint32_t mask_misc_orig;
	uint32_t mask[2];
};

#define TXGBE_NB_STAT_MAPPING	32
#define QSM_REG_NB_BITS_PER_QMAP_FIELD	8
#define NB_QMAP_FIELDS_PER_QSM_REG	4
#define QMAP_FIELD_RESERVED_BITS_MASK	0x0f
struct txgbe_stat_mappings {
	uint32_t tqsm[TXGBE_NB_STAT_MAPPING];
	uint32_t rqsm[TXGBE_NB_STAT_MAPPING];
};

struct txgbe_vfta {
	uint32_t vfta[TXGBE_VFTA_SIZE];
};

struct txgbe_hwstrip {
	uint32_t bitmap[TXGBE_HWSTRIP_BITMAP_SIZE];
};

/*
 * VF data which is used by the PF host only
 */
#define TXGBE_MAX_VF_MC_ENTRIES	30

struct txgbe_uta_info {
	uint8_t  uc_filter_type;
	uint16_t uta_in_use;
	uint32_t uta_shadow[TXGBE_MAX_UTA];
};

#define TXGBE_MAX_MIRROR_RULES 4 /* Maximum nb. of mirror rules. */

struct txgbe_mirror_info {
	struct rte_eth_mirror_conf mr_conf[TXGBE_MAX_MIRROR_RULES];
	/* store PF mirror rules configuration */
};

struct txgbe_vf_info {
	uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];
	uint16_t vf_mc_hashes[TXGBE_MAX_VF_MC_ENTRIES];
	uint16_t num_vf_mc_hashes;
	bool clear_to_send;
	uint16_t tx_rate[TXGBE_MAX_QUEUE_NUM_PER_VF];
	uint16_t vlan_count;
	uint8_t api_version;
	uint16_t switch_domain_id;
	uint16_t xcast_mode;
	uint16_t mac_count;
};

TAILQ_HEAD(txgbe_5tuple_filter_list, txgbe_5tuple_filter);

struct txgbe_5tuple_filter_info {
	uint32_t dst_ip;
	uint32_t src_ip;
	uint16_t dst_port;
	uint16_t src_port;
	enum txgbe_5tuple_protocol proto; /* l4 protocol. */
	uint8_t priority; /* seven levels (001b-111b), 111b is highest,
			   * used when more than one filter matches.
			   */
	uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
		src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
		dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
		src_port_mask:1, /* if mask is 1b, do not compare src port. */
		proto_mask:1;    /* if mask is 1b, do not compare protocol. */
};

/* 5tuple filter structure */
struct txgbe_5tuple_filter {
	TAILQ_ENTRY(txgbe_5tuple_filter) entries;
	uint16_t index; /* the index of 5tuple filter */
	struct txgbe_5tuple_filter_info filter_info;
	uint16_t queue; /* rx queue assigned to */
};

#define TXGBE_5TUPLE_ARRAY_SIZE \
	(RTE_ALIGN(TXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
	 (sizeof(uint32_t) * NBBY))

struct txgbe_ethertype_filter {
	uint16_t ethertype;
	uint32_t etqf;
	uint32_t etqs;
	/**
	 * If this filter is added by configuration,
	 * it should not be removed.
	 */
	bool conf;
};

/*
 * Structure to store filters' info.
 */
struct txgbe_filter_info {
	uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
	/* store used ethertype filters */
	struct txgbe_ethertype_filter ethertype_filters[TXGBE_ETF_ID_MAX];
	/* Bit mask for every used 5tuple filter */
	uint32_t fivetuple_mask[TXGBE_5TUPLE_ARRAY_SIZE];
	struct txgbe_5tuple_filter_list fivetuple_list;
	/* store the SYN filter info */
	uint32_t syn_info;
	/* store the rss filter info */
	struct txgbe_rte_flow_rss_conf rss_info;
};

struct txgbe_l2_tn_key {
	enum rte_eth_tunnel_type l2_tn_type;
	uint32_t tn_id;
};

struct txgbe_l2_tn_filter {
	TAILQ_ENTRY(txgbe_l2_tn_filter) entries;
	struct txgbe_l2_tn_key key;
	uint32_t pool;
};

TAILQ_HEAD(txgbe_l2_tn_filter_list, txgbe_l2_tn_filter);

struct txgbe_l2_tn_info {
	struct txgbe_l2_tn_filter_list l2_tn_list;
	struct txgbe_l2_tn_filter **hash_map;
	struct rte_hash *hash_handle;
	bool e_tag_en; /* e-tag enabled */
	bool e_tag_fwd_en; /* e-tag based forwarding enabled */
	uint16_t e_tag_ether_type; /* ether type for e-tag */
};

struct rte_flow {
	enum rte_filter_type filter_type;
	void *rule;
};

/* The configuration of bandwidth */
struct txgbe_bw_conf {
	uint8_t tc_num; /* Number of TCs. */
};

/* Struct to store Traffic Manager shaper profile. */
struct txgbe_tm_shaper_profile {
	TAILQ_ENTRY(txgbe_tm_shaper_profile) node;
	uint32_t shaper_profile_id;
	uint32_t reference_count;
	struct rte_tm_shaper_params profile;
};

TAILQ_HEAD(txgbe_shaper_profile_list, txgbe_tm_shaper_profile);

/* node type of Traffic Manager */
enum txgbe_tm_node_type {
	TXGBE_TM_NODE_TYPE_PORT,
	TXGBE_TM_NODE_TYPE_TC,
	TXGBE_TM_NODE_TYPE_QUEUE,
	TXGBE_TM_NODE_TYPE_MAX,
};

/* Struct to store Traffic Manager node configuration. */
struct txgbe_tm_node {
	TAILQ_ENTRY(txgbe_tm_node) node;
	uint32_t id;
	uint32_t priority;
	uint32_t weight;
	uint32_t reference_count;
	uint16_t no;
	struct txgbe_tm_node *parent;
	struct txgbe_tm_shaper_profile *shaper_profile;
	struct rte_tm_node_params params;
};

TAILQ_HEAD(txgbe_tm_node_list, txgbe_tm_node);
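
/*
 * The three node types above are the hierarchy this driver is built to
 * model through the rte_tm API: one PORT node at the root, TC nodes as
 * its children and QUEUE nodes as leaves.  An illustrative shape (the
 * numbers are only an example) is:
 *
 *	port 0 (root)
 *	+-- TC 0
 *	|   +-- queue 0 ... queue 7
 *	+-- TC 1
 *	    +-- queue 8 ...
 *
 * txgbe_tm_conf below keeps the root pointer plus one flat list per
 * level (tc_list, queue_list), presumably so a node can be found by id
 * without walking the tree.
 */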

/* The configuration of Traffic Manager */
struct txgbe_tm_conf {
	struct txgbe_shaper_profile_list shaper_profile_list;
	struct txgbe_tm_node *root; /* root node - port */
	struct txgbe_tm_node_list tc_list; /* node list for all the TCs */
	struct txgbe_tm_node_list queue_list; /* node list for all the queues */
	/**
	 * The number of added TC nodes.
	 * It should be no more than the TC number of this port.
	 */
	uint32_t nb_tc_node;
	/**
	 * The number of added queue nodes.
	 * It should be no more than the queue number of this port.
	 */
	uint32_t nb_queue_node;
	/**
	 * This flag is used to check if the APP can change the TM node
	 * configuration.
	 * When it is true, the configuration has been applied to the HW
	 * and the APP should not change it.
	 * As on-the-fly configuration is not supported, the APP should
	 * call the hierarchy_commit API when starting the port to set
	 * this flag to true. When stopping the port, this flag should
	 * be set back to false.
	 */
	bool committed;
};

/*
 * Structure to store private data for each driver instance (for each port).
 */
struct txgbe_adapter {
	struct txgbe_hw hw;
	struct txgbe_hw_stats stats;
	struct txgbe_hw_fdir_info fdir;
	struct txgbe_interrupt intr;
	struct txgbe_stat_mappings stat_mappings;
	struct txgbe_vfta shadow_vfta;
	struct txgbe_hwstrip hwstrip;
	struct txgbe_dcb_config dcb_config;
	struct txgbe_mirror_info mr_data;
	struct txgbe_vf_info *vfdata;
	struct txgbe_uta_info uta_info;
	struct txgbe_filter_info filter;
	struct txgbe_l2_tn_info l2_tn;
	struct txgbe_bw_conf bw_conf;
#ifdef RTE_LIB_SECURITY
	struct txgbe_ipsec ipsec;
#endif
	bool rx_bulk_alloc_allowed;
	struct rte_timecounter systime_tc;
	struct rte_timecounter rx_tstamp_tc;
	struct rte_timecounter tx_tstamp_tc;
	struct txgbe_tm_conf tm_conf;

	/* For RSS reta table update */
	uint8_t rss_reta_updated;
};

#define TXGBE_DEV_ADAPTER(dev) \
	((struct txgbe_adapter *)(dev)->data->dev_private)

#define TXGBE_DEV_HW(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->hw)

#define TXGBE_DEV_STATS(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->stats)

#define TXGBE_DEV_INTR(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->intr)

#define TXGBE_DEV_FDIR(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->fdir)

#define TXGBE_DEV_STAT_MAPPINGS(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->stat_mappings)

#define TXGBE_DEV_VFTA(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->shadow_vfta)

#define TXGBE_DEV_HWSTRIP(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->hwstrip)

#define TXGBE_DEV_DCB_CONFIG(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->dcb_config)

#define TXGBE_DEV_VFDATA(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->vfdata)

#define TXGBE_DEV_MR_INFO(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->mr_data)

#define TXGBE_DEV_UTA_INFO(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->uta_info)

#define TXGBE_DEV_FILTER(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->filter)

#define TXGBE_DEV_L2_TN(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->l2_tn)

#define TXGBE_DEV_BW_CONF(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->bw_conf)

#define TXGBE_DEV_TM_CONF(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->tm_conf)

#define TXGBE_DEV_IPSEC(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->ipsec)
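
/*
 * Usage sketch for the accessor macros above: every ethdev callback
 * receives a struct rte_eth_dev and reaches the per-port private data
 * through dev->data->dev_private.  The helper below is only an
 * illustration (the name is hypothetical, it is not part of the
 * driver); real callbacks follow the same pattern.
 */
static inline bool
txgbe_example_link_update_pending(struct rte_eth_dev *dev)
{
	/* Either take the whole private block ... */
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	/* ... or a single member; TXGBE_DEV_INTR(dev) is &adapter->intr. */
	struct txgbe_interrupt *intr = &adapter->intr;

	return (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) != 0;
}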

/*
 * RX/TX function prototypes
 */
void txgbe_dev_clear_queues(struct rte_eth_dev *dev);

void txgbe_dev_free_queues(struct rte_eth_dev *dev);

void txgbe_dev_rx_queue_release(void *rxq);

void txgbe_dev_tx_queue_release(void *txq);

int txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);

int txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);

uint32_t txgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
		uint16_t rx_queue_id);

int txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);

int txgbe_dev_rx_init(struct rte_eth_dev *dev);

void txgbe_dev_tx_init(struct rte_eth_dev *dev);

int txgbe_dev_rxtx_start(struct rte_eth_dev *dev);

void txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
void txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
void txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
void txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);

int txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

void txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo);

void txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo);

uint16_t txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);

uint16_t txgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);

uint16_t txgbe_recv_pkts_lro_single_alloc(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

uint16_t txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

uint16_t txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

uint16_t txgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

int txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);

int txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);

bool txgbe_rss_update_sp(enum txgbe_mac_type mac_type);

int txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter,
			bool add);
int txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add);
int txgbe_syn_filter_set(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter,
			bool add);

/**
 * l2 tunnel configuration.
 */
struct txgbe_l2_tunnel_conf {
	enum rte_eth_tunnel_type l2_tunnel_type;
	uint16_t ether_type; /* ether type in l2 header */
	uint32_t tunnel_id; /* port tag id for e-tag */
	uint16_t vf_id; /* VF id for tag insertion */
	uint32_t pool; /* destination pool for tag based forwarding */
};

int
txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
			       struct txgbe_l2_tunnel_conf *l2_tunnel,
			       bool restore);
int
txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
			       struct txgbe_l2_tunnel_conf *l2_tunnel);
void txgbe_filterlist_init(void);
void txgbe_filterlist_flush(void);

void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
			uint8_t queue, uint8_t msix_vector);

/*
 * Flow director function prototypes
 */
int txgbe_fdir_configure(struct rte_eth_dev *dev);
int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
int txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
				    uint16_t offset);
int txgbe_fdir_filter_program(struct rte_eth_dev *dev,
			      struct txgbe_fdir_rule *rule,
			      bool del, bool update);

void txgbe_configure_pb(struct rte_eth_dev *dev);
void txgbe_configure_port(struct rte_eth_dev *dev);
void txgbe_configure_dcb(struct rte_eth_dev *dev);

int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
		int wait_to_complete);
int txgbe_pf_host_init(struct rte_eth_dev *eth_dev);

void txgbe_pf_host_uninit(struct rte_eth_dev *eth_dev);

void txgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);

int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);

uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);

void txgbe_fdir_filter_restore(struct rte_eth_dev *dev);
int txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);

extern const struct rte_flow_ops txgbe_flow_ops;

void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
void txgbe_clear_syn_filter(struct rte_eth_dev *dev);
int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);

int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
			    uint16_t tx_rate, uint64_t q_msk);
int txgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
void txgbe_tm_conf_init(struct rte_eth_dev *dev);
void txgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
			       uint16_t tx_rate);
int txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
			const struct rte_flow_action_rss *in);
int txgbe_action_rss_same(const struct rte_flow_action_rss *comp,
			  const struct rte_flow_action_rss *with);
int txgbe_config_rss_filter(struct rte_eth_dev *dev,
		struct txgbe_rte_flow_rss_conf *conf, bool add);

/* Return the index of the slot already holding @ethertype, or -1. */
static inline int
txgbe_ethertype_filter_lookup(struct txgbe_filter_info *filter_info,
			      uint16_t ethertype)
{
	int i;

	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
		if (filter_info->ethertype_filters[i].ethertype == ethertype &&
		    (filter_info->ethertype_mask & (1 << i)))
			return i;
	}
	return -1;
}

/* Store the filter in the first free slot; return its index, or -1 if full. */
static inline int
txgbe_ethertype_filter_insert(struct txgbe_filter_info *filter_info,
			      struct txgbe_ethertype_filter *ethertype_filter)
{
	int i;

	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
		if (filter_info->ethertype_mask & (1 << i))
			continue;

		filter_info->ethertype_mask |= 1 << i;
		filter_info->ethertype_filters[i].ethertype =
			ethertype_filter->ethertype;
		filter_info->ethertype_filters[i].etqf =
			ethertype_filter->etqf;
		filter_info->ethertype_filters[i].etqs =
			ethertype_filter->etqs;
		filter_info->ethertype_filters[i].conf =
			ethertype_filter->conf;
		break;
	}
	return (i < TXGBE_ETF_ID_MAX ? i : -1);
}

/* Clear slot @idx and mark it free again; return idx, or -1 if out of range. */
static inline int
txgbe_ethertype_filter_remove(struct txgbe_filter_info *filter_info,
			      uint8_t idx)
{
	if (idx >= TXGBE_ETF_ID_MAX)
		return -1;
	filter_info->ethertype_mask &= ~(1 << idx);
	filter_info->ethertype_filters[idx].ethertype = 0;
	filter_info->ethertype_filters[idx].etqf = 0;
	filter_info->ethertype_filters[idx].etqs = 0;
	filter_info->ethertype_filters[idx].conf = FALSE;
	return idx;
}
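
/*
 * Usage sketch for the three helpers above (illustrative only, the
 * function name below is hypothetical and not part of the driver):
 * a typical add path looks the ethertype up first, refuses duplicates,
 * then claims a free slot in the software table.
 */
static inline int
txgbe_example_add_ethertype(struct txgbe_filter_info *filter_info,
			    uint16_t ethertype, uint32_t etqf, uint32_t etqs)
{
	struct txgbe_ethertype_filter filter;

	/* Skip if this ethertype is already programmed. */
	if (txgbe_ethertype_filter_lookup(filter_info, ethertype) >= 0)
		return -1;

	filter.ethertype = ethertype;
	filter.etqf = etqf;
	filter.etqs = etqs;
	filter.conf = FALSE;

	/* Returns the claimed slot index, or -1 when all TXGBE_ETF_ID_MAX
	 * slots are in use.  On a later hardware programming failure the
	 * slot would be released with txgbe_ethertype_filter_remove().
	 */
	return txgbe_ethertype_filter_insert(filter_info, &filter);
}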

#ifdef RTE_LIB_SECURITY
int txgbe_ipsec_ctx_create(struct rte_eth_dev *dev);
#endif

/* High threshold controlling when to start sending XOFF frames. */
#define TXGBE_FC_XOFF_HITH	128 /* KB */
/* Low threshold controlling when to start sending XON frames. */
#define TXGBE_FC_XON_LOTH	64 /* KB */

/* Timer value included in XOFF frames. */
#define TXGBE_FC_PAUSE_TIME 0x680

#define TXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define TXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define TXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

/*
 * Default values for RX/TX configuration
 */
#define TXGBE_DEFAULT_RX_FREE_THRESH  32
#define TXGBE_DEFAULT_RX_PTHRESH      8
#define TXGBE_DEFAULT_RX_HTHRESH      8
#define TXGBE_DEFAULT_RX_WTHRESH      0

#define TXGBE_DEFAULT_TX_FREE_THRESH  32
#define TXGBE_DEFAULT_TX_PTHRESH      32
#define TXGBE_DEFAULT_TX_HTHRESH      0
#define TXGBE_DEFAULT_TX_WTHRESH      0

/* Additional timesync values. */
#define NSEC_PER_SEC             1000000000L
#define TXGBE_INCVAL_10GB        0xCCCCCC
#define TXGBE_INCVAL_1GB         0x800000
#define TXGBE_INCVAL_100         0xA00000
#define TXGBE_INCVAL_10          0xC7F380
#define TXGBE_INCVAL_FPGA        0x800000
#define TXGBE_INCVAL_SHIFT_10GB  20
#define TXGBE_INCVAL_SHIFT_1GB   18
#define TXGBE_INCVAL_SHIFT_100   15
#define TXGBE_INCVAL_SHIFT_10    12
#define TXGBE_INCVAL_SHIFT_FPGA  17

#define TXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL

/* store statistics names and their offsets in the stats structure */
struct rte_txgbe_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

const uint32_t *txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
int txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
int txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
int txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
void txgbe_dev_setup_link_alarm_handler(void *param);
void txgbe_read_stats_registers(struct txgbe_hw *hw,
				struct txgbe_hw_stats *hw_stats);

void txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
void txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
void txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev);
void txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
		uint16_t queue, bool on);
void txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
		int mask);

#endif /* _TXGBE_ETHDEV_H_ */