/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#ifndef _HNS3_ETHDEV_H_
#define _HNS3_ETHDEV_H_

#include <pthread.h>
#include <ethdev_driver.h>
#include <rte_byteorder.h>
#include <rte_io.h>
#include <rte_spinlock.h>

#include "hns3_cmd.h"
#include "hns3_mbx.h"
#include "hns3_rss.h"
#include "hns3_fdir.h"
#include "hns3_stats.h"
#include "hns3_tm.h"
#include "hns3_flow.h"

/* Vendor ID */
#define PCI_VENDOR_ID_HUAWEI	0x19e5

/* Device IDs */
#define HNS3_DEV_ID_GE			0xA220
#define HNS3_DEV_ID_25GE		0xA221
#define HNS3_DEV_ID_25GE_RDMA		0xA222
#define HNS3_DEV_ID_50GE_RDMA		0xA224
#define HNS3_DEV_ID_100G_RDMA_MACSEC	0xA226
#define HNS3_DEV_ID_200G_RDMA		0xA228
#define HNS3_DEV_ID_100G_VF		0xA22E
#define HNS3_DEV_ID_100G_RDMA_PFC_VF	0xA22F

/* PCI Config offsets */
#define HNS3_PCI_REVISION_ID		0x08
#define HNS3_PCI_REVISION_ID_LEN	1

#define PCI_REVISION_ID_HIP08_B		0x21
#define PCI_REVISION_ID_HIP09_A		0x30

#define HNS3_PF_FUNC_ID			0
#define HNS3_1ST_VF_FUNC_ID		1

#define HNS3_DEFAULT_PORT_CONF_BURST_SIZE	32
#define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM	1

#define HNS3_SW_SHIFT_AND_DISCARD_MODE	0
#define HNS3_HW_SHIFT_AND_DISCARD_MODE	1

#define HNS3_UNLIMIT_PROMISC_MODE	0
#define HNS3_LIMIT_PROMISC_MODE		1

#define HNS3_SPECIAL_PORT_SW_CKSUM_MODE	0
#define HNS3_SPECIAL_PORT_HW_CKSUM_MODE	1

#define HNS3_UC_MACADDR_NUM		128
#define HNS3_VF_UC_MACADDR_NUM		48
#define HNS3_MC_MACADDR_NUM		128

#define HNS3_MAX_BD_SIZE		65535
#define HNS3_MAX_NON_TSO_BD_PER_PKT	8
#define HNS3_MAX_TSO_BD_PER_PKT		63
#define HNS3_MAX_FRAME_LEN		9728
#define HNS3_DEFAULT_RX_BUF_LEN		2048
#define HNS3_MAX_BD_PAYLEN		(1024 * 1024 - 1)
#define HNS3_MAX_TSO_HDR_SIZE		512
#define HNS3_MAX_TSO_HDR_BD_NUM		3
#define HNS3_MAX_LRO_SIZE		64512

#define HNS3_ETH_OVERHEAD \
	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN * 2)
#define HNS3_PKTLEN_TO_MTU(pktlen)	((pktlen) - HNS3_ETH_OVERHEAD)
#define HNS3_MAX_MTU	(HNS3_MAX_FRAME_LEN - HNS3_ETH_OVERHEAD)
#define HNS3_DEFAULT_MTU		1500UL
#define HNS3_DEFAULT_FRAME_LEN		(HNS3_DEFAULT_MTU + HNS3_ETH_OVERHEAD)
#define HNS3_HIP08_MIN_TX_PKT_LEN	33
#define HNS3_HIP09_MIN_TX_PKT_LEN	9

#define HNS3_BITS_PER_BYTE	8

#define HNS3_4_TCS		4
#define HNS3_8_TCS		8

#define HNS3_MAX_PF_NUM		8
#define HNS3_UMV_TBL_SIZE	3072
#define HNS3_DEFAULT_UMV_SPACE_PER_PF \
	(HNS3_UMV_TBL_SIZE / HNS3_MAX_PF_NUM)

#define HNS3_PF_CFG_BLOCK_SIZE	32
#define HNS3_PF_CFG_DESC_NUM \
	(HNS3_PF_CFG_BLOCK_SIZE / HNS3_CFG_RD_LEN_BYTES)

#define HNS3_DEFAULT_ENABLE_PFC_NUM	0

#define HNS3_INTR_UNREG_FAIL_RETRY_CNT	5
#define HNS3_INTR_UNREG_FAIL_DELAY_MS	500

#define HNS3_QUIT_RESET_CNT		10
#define HNS3_QUIT_RESET_DELAY_MS	100

#define HNS3_POLL_RESPONE_MS	1

#define HNS3_MAX_USER_PRIO	8
#define HNS3_PG_NUM		4
enum hns3_fc_mode {
	HNS3_FC_NONE,
	HNS3_FC_RX_PAUSE,
	HNS3_FC_TX_PAUSE,
	HNS3_FC_FULL,
	HNS3_FC_DEFAULT
};

#define HNS3_SCH_MODE_SP	0
#define HNS3_SCH_MODE_DWRR	1
struct hns3_pg_info {
	uint8_t pg_id;
	uint8_t pg_sch_mode;	/* 0: sp; 1: dwrr */
	uint8_t tc_bit_map;
	uint32_t bw_limit;
	uint8_t tc_dwrr[HNS3_MAX_TC_NUM];
};
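
/*
 * Illustrative configuration (a sketch only, not driver code): one priority
 * group scheduling four TCs with equal DWRR weights. The concrete values
 * below are hypothetical.
 *
 *	struct hns3_pg_info pg = {
 *		.pg_id = 0,
 *		.pg_sch_mode = HNS3_SCH_MODE_DWRR,
 *		.tc_bit_map = 0x0F,		// TC0..TC3 belong to this PG
 *		.bw_limit = 100000,		// hypothetical bandwidth limit
 *		.tc_dwrr = { 25, 25, 25, 25 },	// per-TC DWRR weights
 *	};
 */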

struct hns3_tc_info {
	uint8_t tc_id;
	uint8_t tc_sch_mode;	/* 0: sp; 1: dwrr */
	uint8_t pgid;
	uint32_t bw_limit;
	uint8_t up_to_tc_map;	/* user priority mapping on the TC */
};

struct hns3_dcb_info {
	uint8_t num_tc;
	uint8_t num_pg;		/* It must be 1 for vNET-based scheduling */
	uint8_t pg_dwrr[HNS3_PG_NUM];
	uint8_t prio_tc[HNS3_MAX_USER_PRIO];
	struct hns3_pg_info pg_info[HNS3_PG_NUM];
	struct hns3_tc_info tc_info[HNS3_MAX_TC_NUM];
	uint8_t hw_pfc_map;	/* Whether packet drop is allowed on each TC */
	uint8_t pfc_en;		/* Whether PFC is enabled for each user priority */
};

enum hns3_fc_status {
	HNS3_FC_STATUS_NONE,
	HNS3_FC_STATUS_MAC_PAUSE,
	HNS3_FC_STATUS_PFC,
};

struct hns3_tc_queue_info {
	uint16_t tqp_offset;	/* TQP offset from base TQP */
	uint16_t tqp_count;	/* Total TQPs */
	uint8_t tc;		/* TC index */
	bool enable;		/* Whether this TC is enabled */
};

struct hns3_cfg {
	uint8_t tc_num;
	uint16_t rss_size_max;
	uint8_t phy_addr;
	uint8_t media_type;
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
	uint8_t default_speed;
	uint32_t numa_node_map;
	uint8_t speed_ability;
	uint16_t umv_space;
};

struct hns3_set_link_speed_cfg {
	uint32_t speed;
	uint8_t duplex  : 1;
	uint8_t autoneg : 1;
};

/* mac media type */
enum hns3_media_type {
	HNS3_MEDIA_TYPE_UNKNOWN,
	HNS3_MEDIA_TYPE_FIBER,
	HNS3_MEDIA_TYPE_COPPER,
	HNS3_MEDIA_TYPE_BACKPLANE,
	HNS3_MEDIA_TYPE_NONE,
};

#define HNS3_DEFAULT_QUERY	0
#define HNS3_ACTIVE_QUERY	1

struct hns3_mac {
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
	uint8_t media_type;
	uint8_t phy_addr;
	uint8_t link_duplex  : 1; /* RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
	uint8_t link_autoneg : 1; /* RTE_ETH_LINK_[AUTONEG/FIXED] */
	uint8_t link_status  : 1; /* RTE_ETH_LINK_[DOWN/UP] */
	uint32_t link_speed;	/* RTE_ETH_SPEED_NUM_ */
	/*
	 * Some firmware versions support only the SFP speed query. In addition
	 * to the SFP speed query, some firmware supports the query of the speed
	 * capability, auto-negotiation capability, and FEC mode, which can be
	 * selected by the 'query_type' field in the HNS3_OPC_GET_SFP_INFO CMD.
	 * This field is used to record the SFP information query mode.
	 * Value range:
	 *	HNS3_DEFAULT_QUERY/HNS3_ACTIVE_QUERY
	 *
	 * - HNS3_DEFAULT_QUERY
	 * The speed obtained is from the SFP. When the queried speed changes,
	 * the MAC speed needs to be reconfigured.
	 *
	 * - HNS3_ACTIVE_QUERY
	 * The speed obtained is from the MAC, so the driver does not need to
	 * reconfigure the MAC speed. In addition, more information, such as
	 * the speed capability, auto-negotiation capability and FEC mode, can
	 * be obtained by the HNS3_OPC_GET_SFP_INFO CMD.
	 */
	uint8_t query_type;
	uint32_t supported_speed;	/* supported speed for current media type */
	uint32_t advertising;		/* advertised capability in the local part */
	uint32_t lp_advertising;	/* advertised capability in the link partner */
	uint8_t support_autoneg;
};

struct hns3_fake_queue_data {
	void **rx_queues;	/* Array of pointers to fake RX queues. */
	void **tx_queues;	/* Array of pointers to fake TX queues. */
	uint16_t nb_fake_rx_queues;	/* Number of fake RX queues. */
	uint16_t nb_fake_tx_queues;	/* Number of fake TX queues. */
};

#define HNS3_PORT_BASE_VLAN_DISABLE	0
#define HNS3_PORT_BASE_VLAN_ENABLE	1
struct hns3_port_base_vlan_config {
	uint16_t state;
	uint16_t pvid;
};

/* Primary process maintains driver state in main thread.
 *
 * +---------------+
 * | UNINITIALIZED |<-----------+
 * +---------------+            |
 *     |.eth_dev_init           |.eth_dev_uninit
 *     V                        |
 * +---------------+------------+
 * |  INITIALIZED  |
 * +---------------+<-----------<---------------+
 *     |.dev_configure          |               |
 *     V                        |failed         |
 * +---------------+------------+               |
 * |  CONFIGURING  |                            |
 * +---------------+----+                       |
 *     |success         |                       |
 *     |                |               +---------------+
 *     |                |               |    CLOSING    |
 *     |                |               +---------------+
 *     |                |                       ^
 *     V                |.dev_configure         |
 * +---------------+----+                       |.dev_close
 * |   CONFIGURED  |----------------------------+
 * +---------------+<-----------+
 *     |.dev_start              |
 *     V                        |
 * +---------------+            |
 * |    STARTING   |------------^
 * +---------------+ failed     |
 *     |success                 |
 *     |                +---------------+
 *     |                |   STOPPING    |
 *     |                +---------------+
 *     |                        ^
 *     V                        |.dev_stop
 * +---------------+------------+
 * |    STARTED    |
 * +---------------+
 */
enum hns3_adapter_state {
	HNS3_NIC_UNINITIALIZED = 0,
	HNS3_NIC_INITIALIZED,
	HNS3_NIC_CONFIGURING,
	HNS3_NIC_CONFIGURED,
	HNS3_NIC_STARTING,
	HNS3_NIC_STARTED,
	HNS3_NIC_STOPPING,
	HNS3_NIC_CLOSING,
	HNS3_NIC_CLOSED,
	HNS3_NIC_REMOVED,
	HNS3_NIC_NSTATES
};

/* Reset stages, executed in order */
enum hns3_reset_stage {
	/* Stop query services, stop transceiver, disable MAC */
	RESET_STAGE_DOWN,
	/* Clear reset completion flags, disable sending commands */
	RESET_STAGE_PREWAIT,
	/* Inform IMP to start resetting */
	RESET_STAGE_REQ_HW_RESET,
	/* Waiting for hardware reset to complete */
	RESET_STAGE_WAIT,
	/* Reinitialize hardware */
	RESET_STAGE_DEV_INIT,
	/* Restore user settings and enable MAC */
	RESET_STAGE_RESTORE,
	/* Restart query services, start transceiver */
	RESET_STAGE_DONE,
	/* Not in reset state */
	RESET_STAGE_NONE,
};

enum hns3_reset_level {
	HNS3_FLR_RESET,     /* A VF performs an FLR reset */
	HNS3_VF_FUNC_RESET, /* A VF performs a function reset */

	/*
	 * All VFs under a PF perform a function reset.
	 * The kernel PF driver uses a mailbox message to inform the DPDK VF
	 * to reset, so the value of this reset level must match the one
	 * defined in the kernel driver.
	 */
	HNS3_VF_PF_FUNC_RESET = 2,

	/*
	 * All VFs under a PF perform an FLR reset.
	 * The kernel PF driver uses a mailbox message to inform the DPDK VF
	 * to reset, so the value of this reset level must match the one
	 * defined in the kernel driver.
	 *
	 * According to the PCIe protocol, an FLR to a PF resets the PF state
	 * as well as the SR-IOV extended capability, including VF Enable,
	 * which means that the VFs no longer exist.
	 *
	 * During a PF FLR, the register state of the VF is not reliable, so
	 * the VF's driver should not access the registers of the VF device.
	 */
	HNS3_VF_FULL_RESET,

	/* All VFs under the rootport perform a global or IMP reset */
	HNS3_VF_RESET,

	/*
	 * The enumeration values of HNS3_FUNC_RESET/HNS3_GLOBAL_RESET/
	 * HNS3_IMP_RESET/HNS3_NONE_RESET are also used by the firmware and
	 * can not be changed.
	 */

	HNS3_FUNC_RESET = 5,	/* A PF function reset */

	/* All PFs under the rootport perform a global reset */
	HNS3_GLOBAL_RESET,
	HNS3_IMP_RESET,	/* All PFs under the rootport perform an IMP reset */
	HNS3_NONE_RESET,
	HNS3_MAX_RESET
};

enum hns3_wait_result {
	HNS3_WAIT_UNKNOWN,
	HNS3_WAIT_REQUEST,
	HNS3_WAIT_SUCCESS,
	HNS3_WAIT_TIMEOUT
};

#define HNS3_RESET_SYNC_US	100000

struct hns3_reset_stats {
	uint64_t request_cnt;	/* Total number of reset requests */
	uint64_t global_cnt;	/* Total number of GLOBAL resets */
	uint64_t imp_cnt;	/* Total number of IMP resets */
	uint64_t exec_cnt;	/* Total number of resets executed */
	uint64_t success_cnt;	/* Total number of successful resets */
	uint64_t fail_cnt;	/* Total number of failed resets */
	uint64_t merge_cnt;	/* Total number of resets merged into a higher-level reset */
};

typedef bool (*check_completion_func)(struct hns3_hw *hw);

struct hns3_wait_data {
	void *hns;
	uint64_t end_ms;
	uint64_t interval;
	int16_t count;
	enum hns3_wait_result result;
	check_completion_func check_completion;
};

struct hns3_reset_ops {
	void (*reset_service)(void *arg);
	int (*stop_service)(struct hns3_adapter *hns);
	int (*prepare_reset)(struct hns3_adapter *hns);
	int (*wait_hardware_ready)(struct hns3_adapter *hns);
	int (*reinit_dev)(struct hns3_adapter *hns);
	int (*restore_conf)(struct hns3_adapter *hns);
	int (*start_service)(struct hns3_adapter *hns);
};
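
/*
 * Illustrative wiring of the reset callbacks (a sketch only; the function
 * and variable names below are hypothetical, not driver symbols):
 *
 *	static const struct hns3_reset_ops foo_reset_ops = {
 *		.reset_service = foo_reset_service,
 *		.stop_service = foo_stop_service,
 *		.prepare_reset = foo_prepare_reset,
 *		.wait_hardware_ready = foo_wait_hardware_ready,
 *		.reinit_dev = foo_reinit_dev,
 *		.restore_conf = foo_restore_conf,
 *		.start_service = foo_start_service,
 *	};
 *
 *	hw->reset.ops = &foo_reset_ops;
 */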

enum hns3_schedule {
	SCHEDULE_NONE,
	SCHEDULE_PENDING,
	SCHEDULE_REQUESTED,
	SCHEDULE_DEFERRED,
};

struct hns3_reset_data {
	enum hns3_reset_stage stage;
	uint16_t schedule;
	/* Reset flag, covering the entire reset process */
	uint16_t resetting;
	/* Used to disable sending cmds during reset */
	uint16_t disable_cmd;
	/* The reset level being processed */
	enum hns3_reset_level level;
	/* Reset level set, each bit represents a reset level */
	uint64_t pending;
	/* Request reset level set, from interrupt or mailbox */
	uint64_t request;
	int attempts;	/* Reset failure retry */
	int retries;	/* Timeout failure retry in reset_post */
	/*
	 * At the time of a global or IMP reset, commands cannot be sent to
	 * stop the Tx/Rx queues. The Tx/Rx queues may still access mbufs
	 * during the reset process, so these mbufs must be released after the
	 * reset is completed. The mbuf_deferred_free flag marks whether the
	 * mbufs need to be released.
	 */
	bool mbuf_deferred_free;
	struct timeval start_time;
	struct hns3_reset_stats stats;
	const struct hns3_reset_ops *ops;
	struct hns3_wait_data *wait_data;
};

struct hns3_hw_ops {
	int (*add_mc_mac_addr)(struct hns3_hw *hw,
				struct rte_ether_addr *mac_addr);
	int (*del_mc_mac_addr)(struct hns3_hw *hw,
				struct rte_ether_addr *mac_addr);
	int (*add_uc_mac_addr)(struct hns3_hw *hw,
				struct rte_ether_addr *mac_addr);
	int (*del_uc_mac_addr)(struct hns3_hw *hw,
				struct rte_ether_addr *mac_addr);
	int (*bind_ring_with_vector)(struct hns3_hw *hw, uint16_t vector_id,
				bool en, enum hns3_ring_type queue_type,
				uint16_t queue_id);
};

#define HNS3_INTR_MAPPING_VEC_RSV_ONE	0
#define HNS3_INTR_MAPPING_VEC_ALL	1

#define HNS3_INTR_COALESCE_GL_UINT_2US	0
#define HNS3_INTR_COALESCE_GL_UINT_1US	1

#define HNS3_INTR_QL_NONE		0

struct hns3_queue_intr {
	/*
	 * interrupt mapping mode.
	 * value range:
	 *	HNS3_INTR_MAPPING_VEC_RSV_ONE/HNS3_INTR_MAPPING_VEC_ALL
	 *
	 * - HNS3_INTR_MAPPING_VEC_RSV_ONE
	 * For some versions of the hardware network engine, because of a
	 * hardware constraint, the PMD needs to clear the mapping relationship
	 * configuration by binding all queues to the last interrupt vector
	 * and reserving that vector. This method reduces the maximum number
	 * of queues when upper applications call the rte_eth_dev_configure
	 * API function to enable Rx interrupts.
	 *
	 * - HNS3_INTR_MAPPING_VEC_ALL
	 * The PMD can map/unmap all interrupt vectors with queues when
	 * Rx interrupts are enabled.
	 */
	uint8_t mapping_mode;
	/*
	 * The unit of the GL (gap limiter) configuration for interrupt
	 * coalescing of the queue's interrupt.
	 * value range:
	 *	HNS3_INTR_COALESCE_GL_UINT_2US/HNS3_INTR_COALESCE_GL_UINT_1US
	 */
	uint8_t gl_unit;
	/* The max QL (quantity limiter) value */
	uint16_t int_ql_max;
};

#define HNS3_TSO_SW_CAL_PSEUDO_H_CSUM	0
#define HNS3_TSO_HW_CAL_PSEUDO_H_CSUM	1

#define HNS3_PKTS_DROP_STATS_MODE1	0
#define HNS3_PKTS_DROP_STATS_MODE2	1

struct hns3_hw {
	struct rte_eth_dev_data *data;
	void *io_base;
	uint8_t revision;	/* PCI revision, low byte of class word */
	struct hns3_cmq cmq;
	struct hns3_mbx_resp_status mbx_resp;	/* mailbox response */
	struct hns3_mac mac;
	/*
	 * This flag indicates that the dev_set_link_down() API has been
	 * called; it is cleared by dev_set_link_up() or dev_start().
	 */
	bool set_link_down;
	unsigned int secondary_cnt;	/* Number of secondary processes initialized. */
	struct hns3_tqp_stats tqp_stats;
	/* Includes MAC stats, Rx stats and Tx stats */
	struct hns3_mac_stats mac_stats;
	uint32_t mac_stats_reg_num;
	struct hns3_rx_missed_stats imissed_stats;
	uint64_t oerror_stats;
	uint32_t fw_version;
	uint16_t pf_vf_if_version;	/* version of communication interface */

	uint16_t num_msi;
	uint16_t total_tqps_num;	/* total task queue pairs of this PF */
	uint16_t tqps_num;		/* num task queue pairs of this function */
	uint16_t intr_tqps_num;		/* num queue pairs mapping interrupt */
	uint16_t rss_size_max;		/* HW defined max RSS task queue */
	uint16_t rx_buf_len;		/* hold min hardware rx buf len */
	uint32_t mng_entry_num;		/* number of manager table entries */
	uint32_t mac_entry_num;		/* number of mac-vlan table entries */

	struct rte_ether_addr mc_addrs[HNS3_MC_MACADDR_NUM];
	int mc_addrs_num;		/* Multicast mac addresses number */

	/* The configuration info of RSS */
	struct hns3_rss_conf rss_info;
	bool rss_dis_flag;	/* disable rss flag. true: disable, false: enable */
	uint16_t rss_ind_tbl_size;
	uint16_t rss_key_size;

	uint8_t num_tc;		/* Total number of enabled TCs */
	uint8_t hw_tc_map;
	enum hns3_fc_mode requested_fc_mode;	/* FC mode requested by user */
	struct hns3_dcb_info dcb_info;
	enum hns3_fc_status current_fc_status;	/* current flow control status */
	struct hns3_tc_queue_info tc_queue[HNS3_MAX_TC_NUM];
	uint16_t used_rx_queues;
	uint16_t used_tx_queues;

	/* Maximum of the configured Rx and Tx queue numbers from the user */
	uint16_t cfg_max_queues;
	struct hns3_fake_queue_data fkq_data;	/* fake queue data */
	uint16_t alloc_rss_size;	/* RX queue number per TC */
	uint16_t tx_qnum_per_tc;	/* TX queue number per TC */

	uint32_t capability;
	uint32_t max_tm_rate;
	/*
	 * The minimum length of the packet supported by hardware in the Tx
	 * direction.
	 */
	uint32_t min_tx_pkt_len;

	struct hns3_queue_intr intr;
	/*
	 * tso mode.
	 * value range:
	 *	HNS3_TSO_SW_CAL_PSEUDO_H_CSUM/HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
	 *
	 * - HNS3_TSO_SW_CAL_PSEUDO_H_CSUM
	 * In this mode, because of a hardware constraint, the driver software
	 * needs to erase the L4 length value in the TCP pseudo header and
	 * recalculate the TCP pseudo header checksum of packets that need
	 * TSO.
	 *
	 * - HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
	 * In this mode, the hardware can recalculate the TCP pseudo header
	 * checksum of packets that need TSO, so the driver software does not
	 * need to recalculate it.
	 */
	uint8_t tso_mode;
	/*
	 * vlan mode.
	 * value range:
	 *	HNS3_SW_SHIFT_AND_DISCARD_MODE/HNS3_HW_SHIFT_AND_DISCARD_MODE
	 *
	 * - HNS3_SW_SHIFT_AND_DISCARD_MODE
	 * For some versions of the hardware network engine, because of a
	 * hardware limitation, the PMD needs to detect the PVID status to
	 * work with the hardware to implement PVID-related functions. For
	 * example, the driver needs to discard the stripped PVID tag to
	 * ensure the PVID is not reported to the mbuf, and to shift the
	 * inserted VLAN tag to avoid the port based VLAN covering it.
	 *
	 * - HNS3_HW_SHIFT_AND_DISCARD_MODE
	 * The PMD does not need to handle PVID-related functions in the I/O
	 * path. The hardware adjusts the order of the port based VLAN tag and
	 * the BD VLAN tag automatically, and VLAN tags stripped by the PVID
	 * are invisible to the driver. In this mode, hns3 is also able to
	 * send multi-layer VLAN packets when the hardware VLAN insert offload
	 * is enabled.
	 */
	uint8_t vlan_mode;
	/*
	 * promisc mode.
	 * value range:
	 *	HNS3_UNLIMIT_PROMISC_MODE/HNS3_LIMIT_PROMISC_MODE
	 *
	 * - HNS3_UNLIMIT_PROMISC_MODE
	 * In this mode, Tx unicast promisc is also configured when promiscuous
	 * mode is set, so the driver can receive all the ingress and outgoing
	 * traffic; in other words, all the ingress packets plus all the
	 * packets sent from the PF and other VFs on the same physical port.
	 *
	 * - HNS3_LIMIT_PROMISC_MODE
	 * In this mode, Tx unicast promisc is shut down when promiscuous mode
	 * is set, so the driver only receives all the ingress traffic. The
	 * packets sent from the PF and other VFs on the same physical port
	 * won't be copied to the function which has set promiscuous mode.
	 */
	uint8_t promisc_mode;

	/*
	 * drop_stats_mode mode.
	 * value range:
	 *	HNS3_PKTS_DROP_STATS_MODE1/HNS3_PKTS_DROP_STATS_MODE2
	 *
	 * - HNS3_PKTS_DROP_STATS_MODE1
	 * This mode is for Kunpeng920. In this mode, port level imissed stats
	 * are supported and only include the RPU drop stats.
	 *
	 * - HNS3_PKTS_DROP_STATS_MODE2
	 * This mode is for Kunpeng930. In this mode, imissed stats and oerrors
	 * stats are supported, and the imissed stats are at the function
	 * level. They include the RPU drop stats in a VF, and both the RPU
	 * drop stats and the SSU drop stats in a PF. The oerrors stats are
	 * also supported in the PF.
	 */
	uint8_t drop_stats_mode;

	uint8_t max_non_tso_bd_num;	/* max BD number of one non-TSO packet */
	/*
	 * udp checksum mode.
	 * value range:
	 *	HNS3_SPECIAL_PORT_HW_CKSUM_MODE/HNS3_SPECIAL_PORT_SW_CKSUM_MODE
	 *
	 * - HNS3_SPECIAL_PORT_SW_CKSUM_MODE
	 * In this mode, the hardware can not compute the checksum for special
	 * UDP ports like 4789, 4790 and 6081 for non-tunnel UDP packets and
	 * UDP tunnel packets without the RTE_MBUF_F_TX_TUNNEL_MASK in the
	 * mbuf. So the PMD needs to compute the checksum for these packets to
	 * avoid a checksum error.
	 *
	 * - HNS3_SPECIAL_PORT_HW_CKSUM_MODE
	 * In this mode, the hardware does not have the preceding problems and
	 * can directly calculate the checksum of these UDP packets.
	 */
	uint8_t udp_cksum_mode;

	struct hns3_port_base_vlan_config port_base_vlan_cfg;

	pthread_mutex_t flows_lock;	/* rte_flow ops lock */
	struct hns3_fdir_rule_list flow_fdir_list;	/* flow fdir rule list */
	struct hns3_rss_filter_list flow_rss_list;	/* flow RSS rule list */
	struct hns3_flow_mem_list flow_list;

	struct hns3_hw_ops ops;

	/*
	 * PMD setup and configuration are not thread safe. Since they are not
	 * performance sensitive, it is better to guarantee thread safety with
	 * a device level lock. Adapter control operations which change the
	 * adapter state should acquire the lock.
	 */
	rte_spinlock_t lock;
	enum hns3_adapter_state adapter_state;
	struct hns3_reset_data reset;
};
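
/*
 * Illustrative locking pattern for a control-path operation (a sketch only;
 * the function name below is hypothetical, not a driver symbol):
 *
 *	static int
 *	foo_control_op(struct rte_eth_dev *dev)
 *	{
 *		struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 *
 *		rte_spinlock_lock(&hw->lock);
 *		// ... update adapter_state / hardware configuration ...
 *		rte_spinlock_unlock(&hw->lock);
 *
 *		return 0;
 *	}
 */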

#define HNS3_FLAG_TC_BASE_SCH_MODE	1
#define HNS3_FLAG_VNET_BASE_SCH_MODE	2

/* vlan entry information. */
struct hns3_user_vlan_table {
	LIST_ENTRY(hns3_user_vlan_table) next;
	bool hd_tbl_status;
	uint16_t vlan_id;
};

/* Vlan tag configuration for RX direction */
struct hns3_rx_vtag_cfg {
	bool rx_vlan_offload_en;	/* Whether Rx VLAN offload is enabled */
	bool strip_tag1_en;		/* Whether to strip the inner VLAN tag */
	bool strip_tag2_en;		/* Whether to strip the outer VLAN tag */
	/*
	 * If strip_tag_en is enabled, this bit decides whether to map the
	 * VLAN tag to the descriptor.
	 */
	bool strip_tag1_discard_en;
	bool strip_tag2_discard_en;
	/*
	 * If this bit is enabled, only the inner/outer priority is mapped to
	 * the descriptor and the VLAN tag is always 0.
	 */
	bool vlan1_vlan_prionly;
	bool vlan2_vlan_prionly;
};

/* Vlan tag configuration for TX direction */
struct hns3_tx_vtag_cfg {
	bool accept_tag1;	/* Whether to accept tag1 packets from the host */
	bool accept_untag1;	/* Whether to accept untag1 packets from the host */
	bool accept_tag2;
	bool accept_untag2;
	bool insert_tag1_en;	/* Whether to insert the outer VLAN tag */
	bool insert_tag2_en;	/* Whether to insert the inner VLAN tag */
	/*
	 * In shift mode, the hardware shifts the sequence of the port based
	 * VLAN and the BD VLAN.
	 */
	bool tag_shift_mode_en;	/* hw shifts the vlan tag automatically */
	uint16_t default_tag1;	/* The default outer vlan tag to insert */
	uint16_t default_tag2;	/* The default inner vlan tag to insert */
};

struct hns3_vtag_cfg {
	struct hns3_rx_vtag_cfg rx_vcfg;
	struct hns3_tx_vtag_cfg tx_vcfg;
};

/* Request types for IPC. */
enum hns3_mp_req_type {
	HNS3_MP_REQ_START_RXTX = 1,
	HNS3_MP_REQ_STOP_RXTX,
	HNS3_MP_REQ_START_TX,
	HNS3_MP_REQ_STOP_TX,
	HNS3_MP_REQ_MAX
};

/* Parameters for IPC. */
struct hns3_mp_param {
	enum hns3_mp_req_type type;
	int port_id;
	int result;
};

/* Request timeout for IPC. */
#define HNS3_MP_REQ_TIMEOUT_SEC	5

/* Key string for IPC. */
#define HNS3_MP_NAME	"net_hns3_mp"

#define HNS3_L2TBL_NUM	4
#define HNS3_L3TBL_NUM	16
#define HNS3_L4TBL_NUM	16
#define HNS3_OL2TBL_NUM	4
#define HNS3_OL3TBL_NUM	16
#define HNS3_OL4TBL_NUM	16
#define HNS3_PTYPE_NUM	256

struct hns3_ptype_table {
	/*
	 * The following fields are used to calculate the packet type from the
	 * L3_ID/L4_ID/OL3_ID/OL4_ID of the Rx descriptor.
	 */
	uint32_t l3table[HNS3_L3TBL_NUM];
	uint32_t l4table[HNS3_L4TBL_NUM];
	uint32_t inner_l3table[HNS3_L3TBL_NUM];
	uint32_t inner_l4table[HNS3_L4TBL_NUM];
	uint32_t ol3table[HNS3_OL3TBL_NUM];
	uint32_t ol4table[HNS3_OL4TBL_NUM];

	/*
	 * The following field is used to calculate the packet type from the
	 * PTYPE of the Rx descriptor. It is only used when the firmware
	 * reports the HNS3_CAPS_RXD_ADV_LAYOUT_B capability and the driver
	 * has enabled it.
	 */
	uint32_t ptype[HNS3_PTYPE_NUM] __rte_cache_aligned;
};
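
/*
 * Illustrative lookup (a sketch only; the index variables below are
 * hypothetical values parsed from the Rx descriptor): for a non-tunnel
 * packet, the IDs select entries of the tables above, which are combined
 * into the mbuf packet type.
 *
 *	mbuf->packet_type = tbl->l3table[l3id] | tbl->l4table[l4id];
 */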

#define HNS3_FIXED_MAX_TQP_NUM_MODE	0
#define HNS3_FLEX_MAX_TQP_NUM_MODE	1

struct hns3_pf {
	struct hns3_adapter *adapter;
	bool is_main_pf;
	uint16_t func_num;	/* num functions of this pf, include pf and vfs */

	/*
	 * tqp_config mode
	 * tqp_config_mode value range:
	 *	HNS3_FIXED_MAX_TQP_NUM_MODE,
	 *	HNS3_FLEX_MAX_TQP_NUM_MODE
	 *
	 * - HNS3_FIXED_MAX_TQP_NUM_MODE
	 * There is a limitation on the number of PF interrupts available on
	 * some versions of the network engines. In this case, the maximum
	 * queue number of the PF can not be greater than the number of
	 * interrupts, such as a PF of a network engine with revision_id 0x21.
	 * So the maximum number of queues must be fixed.
	 *
	 * - HNS3_FLEX_MAX_TQP_NUM_MODE
	 * In this mode, the maximum queue number of the PF has no such
	 * constraint and comes from the macro RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF
	 * in the config file. Users can modify the macro according to their
	 * own application scenarios, which is more flexible to use.
	 */
	uint8_t tqp_config_mode;

	uint32_t pkt_buf_size;	/* Total pf buf size for tx/rx */
	uint32_t tx_buf_size;	/* Tx buffer size for each TC */
	uint32_t dv_buf_size;	/* Dv buffer size for each TC */

	uint16_t mps;		/* Max packet size */

	uint8_t tx_sch_mode;
	uint8_t tc_max;		/* max number of TCs the driver supports */
	uint8_t local_max_tc;	/* max number of local TCs */
	uint8_t pfc_max;
	uint16_t pause_time;
	bool support_fc_autoneg;	/* support FC autonegotiation */
	bool support_multi_tc_pause;

	uint16_t wanted_umv_size;
	uint16_t max_umv_size;
	uint16_t used_umv_size;

	bool support_sfp_query;
	uint32_t fec_mode;	/* current FEC mode for ethdev */

	bool ptp_enable;

	/* Stores the timestamp of the last received packet on the device */
	uint64_t rx_timestamp;

	struct hns3_vtag_cfg vtag_config;
	LIST_HEAD(vlan_tbl, hns3_user_vlan_table) vlan_list;

	struct hns3_fdir_info fdir;	/* flow director info */
	LIST_HEAD(counters, hns3_flow_counter) flow_counters;

	struct hns3_tm_conf tm_conf;
};

enum {
	HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED,
	HNS3_PF_PUSH_LSC_CAP_SUPPORTED,
	HNS3_PF_PUSH_LSC_CAP_UNKNOWN
};

struct hns3_vf {
	struct hns3_adapter *adapter;

	/* Whether the PF supports pushing link status changes to the VF */
	uint16_t pf_push_lsc_cap;

	/*
	 * Even if the PF supports pushing link status changes, the VF still
	 * needs to send requests to get the link status in some cases (such
	 * as the reset recovery stage), so req_link_info_cnt is used to limit
	 * the maximum number of requests.
	 */
	uint16_t req_link_info_cnt;

	uint16_t poll_job_started;	/* whether poll job is started */
};

struct hns3_adapter {
	struct hns3_hw hw;

	/* Specific for PF or VF */
	bool is_vf;	/* false - PF, true - VF */
	union {
		struct hns3_pf pf;
		struct hns3_vf vf;
	};

	uint32_t rx_func_hint;
	uint32_t tx_func_hint;

	uint64_t dev_caps_mask;
	uint16_t mbx_time_limit_ms;	/* wait time for mbx message */

	struct hns3_ptype_table ptype_tbl __rte_cache_aligned;
};

#define HNS3_DEVARG_RX_FUNC_HINT	"rx_func_hint"
#define HNS3_DEVARG_TX_FUNC_HINT	"tx_func_hint"

#define HNS3_DEVARG_DEV_CAPS_MASK	"dev_caps_mask"

#define HNS3_DEVARG_MBX_TIME_LIMIT_MS	"mbx_time_limit_ms"

enum {
	HNS3_DEV_SUPPORT_DCB_B,
	HNS3_DEV_SUPPORT_COPPER_B,
	HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
	HNS3_DEV_SUPPORT_PTP_B,
	HNS3_DEV_SUPPORT_TX_PUSH_B,
	HNS3_DEV_SUPPORT_INDEP_TXRX_B,
	HNS3_DEV_SUPPORT_STASH_B,
	HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
	HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B,
	HNS3_DEV_SUPPORT_RAS_IMP_B,
	HNS3_DEV_SUPPORT_TM_B,
	HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B,
};

#define hns3_dev_get_support(hw, _name) \
	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_##_name##_B)

#define HNS3_DEV_PRIVATE_TO_HW(adapter) \
	(&((struct hns3_adapter *)adapter)->hw)
#define HNS3_DEV_PRIVATE_TO_PF(adapter) \
	(&((struct hns3_adapter *)adapter)->pf)
#define HNS3_DEV_PRIVATE_TO_VF(adapter) \
	(&((struct hns3_adapter *)adapter)->vf)
#define HNS3_DEV_HW_TO_ADAPTER(hw) \
	container_of(hw, struct hns3_adapter, hw)

static inline struct hns3_pf *HNS3_DEV_HW_TO_PF(struct hns3_hw *hw)
{
	struct hns3_adapter *adapter = HNS3_DEV_HW_TO_ADAPTER(hw);
	return &adapter->pf;
}

static inline struct hns3_vf *HNS3_DEV_HW_TO_VF(struct hns3_hw *hw)
{
	struct hns3_adapter *adapter = HNS3_DEV_HW_TO_ADAPTER(hw);
	return &adapter->vf;
}

#define hns3_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= (~(mask)); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)
#define hns3_get_field(origin, mask, shift) \
	(((origin) & (mask)) >> (shift))
#define hns3_set_bit(origin, shift, val) \
	hns3_set_field((origin), (0x1UL << (shift)), (shift), (val))
#define hns3_get_bit(origin, shift) \
	hns3_get_field((origin), (0x1UL << (shift)), (shift))

#define hns3_gen_field_val(mask, shift, val) (((val) << (shift)) & (mask))
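
/*
 * Illustrative example (not used by the driver): a sketch of how the
 * field/bit macros above manipulate a register or descriptor word. The
 * mask, shift and values below are hypothetical.
 */
static inline uint32_t
hns3_example_field_usage(void)
{
	uint32_t word = 0;

	/* Write 0x5 into the 4-bit field at bits [11:8]: word == 0x500. */
	hns3_set_field(word, 0xF00, 8, 0x5);
	/* Set bit 0: word == 0x501. */
	hns3_set_bit(word, 0, 1);
	/* hns3_gen_field_val(0xF00, 8, 0x5) builds the same 0x500 in place. */

	/* Read the field back: returns 0x5. */
	return hns3_get_field(word, 0xF00, 8);
}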

/*
 * upper_32_bits - return bits 32-63 of a number
 * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */
#define upper_32_bits(n)	((uint32_t)(((n) >> 16) >> 16))

/* lower_32_bits - return bits 0-31 of a number */
#define lower_32_bits(n)	((uint32_t)(n))

#define BIT(nr)		(1UL << (nr))

#define BIT_ULL(x)	(1ULL << (x))

#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))
#define rounddown(x, y)	((x) - ((x) % (y)))

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/*
 * The hns3 network engine always accesses registers in little-endian mode,
 * so the driver must call rte_cpu_to_le_32 to convert data to little-endian
 * before writing a register and rte_le_to_cpu_32 to convert data after
 * reading from a register.
 *
 * The driver encapsulates the data conversion in the register read/write
 * helpers below:
 *	hns3_write_reg
 *	hns3_write_reg_opt
 *	hns3_read_reg
 * Therefore, when calling these functions, conversion is not required again.
 */
static inline void hns3_write_reg(void *base, uint32_t reg, uint32_t value)
{
	rte_write32(rte_cpu_to_le_32(value),
		    (volatile void *)((char *)base + reg));
}

/*
 * This optimized register-write function saves one address addition; it is
 * used in the '.rx_pkt_burst' and '.tx_pkt_burst' ops implementations.
 */
static inline void hns3_write_reg_opt(volatile void *addr, uint32_t value)
{
	rte_write32(rte_cpu_to_le_32(value), addr);
}

static inline uint32_t hns3_read_reg(void *base, uint32_t reg)
{
	uint32_t read_val = rte_read32((volatile void *)((char *)base + reg));
	return rte_le_to_cpu_32(read_val);
}

#define hns3_write_dev(a, reg, value) \
	hns3_write_reg((a)->io_base, (reg), (value))

#define hns3_read_dev(a, reg) \
	hns3_read_reg((a)->io_base, (reg))

#define NEXT_ITEM_OF_ACTION(act, actions, index) \
	do { \
		act = (actions) + (index); \
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
			(index)++; \
			act = actions + index; \
		} \
	} while (0)

static inline uint64_t
hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr)
{
	uint64_t res;

	res = (__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr)) != 0;
	return res;
}

static inline void
hns3_atomic_set_bit(unsigned int nr, volatile uint64_t *addr)
{
	__atomic_fetch_or(addr, (1UL << nr), __ATOMIC_RELAXED);
}

static inline void
hns3_atomic_clear_bit(unsigned int nr, volatile uint64_t *addr)
{
	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
}

static inline int64_t
hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)
{
	uint64_t mask = (1UL << nr);

	return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask;
}
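
/*
 * Illustrative example (not used by the driver): a sketch of how the atomic
 * bit helpers above operate on a 64-bit reset-level bitmap such as
 * hns3_reset_data.pending. The local variable below is hypothetical.
 */
static inline bool
hns3_example_atomic_bit_usage(void)
{
	uint64_t pending = 0;

	/* Mark a function-level reset as pending. */
	hns3_atomic_set_bit(HNS3_FUNC_RESET, &pending);

	/* Returns a non-zero value because the bit was just set. */
	if (!hns3_atomic_test_bit(HNS3_FUNC_RESET, &pending))
		return false;

	/* Atomically consume the request: clears the bit, reports old value. */
	return hns3_test_and_clear_bit(HNS3_FUNC_RESET, &pending) != 0;
}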

int
hns3_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
uint32_t hns3_get_speed_capa(struct hns3_hw *hw);

int hns3_buffer_alloc(struct hns3_hw *hw);
bool hns3_is_reset_pending(struct hns3_adapter *hns);
bool hns3vf_is_reset_pending(struct hns3_adapter *hns);
void hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query);
void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
			       uint32_t link_speed, uint8_t link_duplex);
void hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported);

int hns3_restore_ptp(struct hns3_adapter *hns);
int hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev,
					struct rte_eth_conf *conf);
int hns3_ptp_init(struct hns3_hw *hw);
int hns3_timesync_enable(struct rte_eth_dev *dev);
int hns3_timesync_disable(struct rte_eth_dev *dev);
int hns3_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
				    struct timespec *timestamp,
				    uint32_t flags __rte_unused);
int hns3_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
				    struct timespec *timestamp);
int hns3_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts);
int hns3_timesync_write_time(struct rte_eth_dev *dev,
			     const struct timespec *ts);
int hns3_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
int hns3_eth_dev_priv_dump(struct rte_eth_dev *dev, FILE *file);

static inline bool
is_reset_pending(struct hns3_adapter *hns)
{
	bool ret;

	if (hns->is_vf)
		ret = hns3vf_is_reset_pending(hns);
	else
		ret = hns3_is_reset_pending(hns);
	return ret;
}

#endif /* _HNS3_ETHDEV_H_ */