/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CNXK_ETHDEV_H__
#define __CNXK_ETHDEV_H__

#include <math.h>
#include <stdint.h>

#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>
#include <rte_mtr_driver.h>
#include <rte_security.h>
#include <rte_security_driver.h>
#include <rte_tailq.h>
#include <rte_time.h>
#include <rte_tm_driver.h>

#include "roc_api.h"

#define CNXK_ETH_DEV_PMD_VERSION "1.0"

/* Used for struct cnxk_eth_dev::flags */
#define CNXK_LINK_CFG_IN_PROGRESS_F BIT_ULL(0)

/* VLAN tag inserted by NIX_TX_VTAG_ACTION.
 * In Tx, space is always reserved for this in FRS.
 */
#define CNXK_NIX_MAX_VTAG_INS	   2
#define CNXK_NIX_MAX_VTAG_ACT_SIZE (4 * CNXK_NIX_MAX_VTAG_INS)

/* ETH_HLEN + ETH_FCS + 2 * VLAN_HLEN */
#define CNXK_NIX_L2_OVERHEAD (RTE_ETHER_HDR_LEN + \
			      RTE_ETHER_CRC_LEN + \
			      CNXK_NIX_MAX_VTAG_ACT_SIZE)

#define CNXK_NIX_RX_MIN_DESC	    16
#define CNXK_NIX_RX_MIN_DESC_ALIGN  16
#define CNXK_NIX_RX_NB_SEG_MAX	    6
#define CNXK_NIX_RX_DEFAULT_RING_SZ 4096
/* Max supported SQB count */
#define CNXK_NIX_TX_MAX_SQB 512

/* If PTP is enabled, an additional SEND MEM DESC is required, which
 * takes 2 words; hence a maximum of 7 IOVA addresses is possible.
 */
#if defined(RTE_LIBRTE_IEEE1588)
#define CNXK_NIX_TX_NB_SEG_MAX 7
#else
#define CNXK_NIX_TX_NB_SEG_MAX 9
#endif

#define CNXK_NIX_TX_MSEG_SG_DWORDS \
	((RTE_ALIGN_MUL_CEIL(CNXK_NIX_TX_NB_SEG_MAX, 3) / 3) + \
	 CNXK_NIX_TX_NB_SEG_MAX)

#define CNXK_NIX_RSS_L3_L4_SRC_DST \
	(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY | \
	 RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)

#define CNXK_NIX_RSS_OFFLOAD \
	(RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | \
	 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_TUNNEL | \
	 RTE_ETH_RSS_L2_PAYLOAD | CNXK_NIX_RSS_L3_L4_SRC_DST | \
	 RTE_ETH_RSS_LEVEL_MASK | RTE_ETH_RSS_C_VLAN)

#define CNXK_NIX_TX_OFFLOAD_CAPA \
	(RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE | \
	 RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
	 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
	 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
	 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_TSO | \
	 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
	 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
	 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_SECURITY)

#define CNXK_NIX_RX_OFFLOAD_CAPA \
	(RTE_ETH_RX_OFFLOAD_CHECKSUM | RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
	 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_SCATTER | \
	 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_RSS_HASH | \
	 RTE_ETH_RX_OFFLOAD_TIMESTAMP | RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
	 RTE_ETH_RX_OFFLOAD_SECURITY)

#define RSS_IPV4_ENABLE \
	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
	 RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
	 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)

#define RSS_IPV6_ENABLE \
	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
	 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)

#define RSS_IPV6_EX_ENABLE \
	(RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | RTE_ETH_RSS_IPV6_UDP_EX)

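/*
 * Illustrative example (not part of the driver API): the groups above are
 * ORed together to form an ethdev rss_hf mask, e.g. hashing on all IPv4 and
 * IPv6 flow types:
 *
 *	uint64_t rss_hf = RSS_IPV4_ENABLE | RSS_IPV6_ENABLE | RSS_IPV6_EX_ENABLE;
 *
 * Any such mask is expected to be a subset of CNXK_NIX_RSS_OFFLOAD.
 */
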
#define RSS_MAX_LEVELS 3

#define RSS_IPV4_INDEX 0
#define RSS_IPV6_INDEX 1
#define RSS_TCP_INDEX  2
#define RSS_UDP_INDEX  3
#define RSS_SCTP_INDEX 4
#define RSS_DMAC_INDEX 5

/* Default mark value used when none is provided. */
#define CNXK_FLOW_ACTION_FLAG_DEFAULT 0xffff

/* Default cycle counter mask */
#define CNXK_CYCLECOUNTER_MASK	    0xffffffffffffffffULL
#define CNXK_NIX_TIMESYNC_RX_OFFSET 8

#define PTYPE_NON_TUNNEL_WIDTH	  16
#define PTYPE_TUNNEL_WIDTH	  12
#define PTYPE_NON_TUNNEL_ARRAY_SZ BIT(PTYPE_NON_TUNNEL_WIDTH)
#define PTYPE_TUNNEL_ARRAY_SZ	  BIT(PTYPE_TUNNEL_WIDTH)
#define PTYPE_ARRAY_SZ \
	((PTYPE_NON_TUNNEL_ARRAY_SZ + PTYPE_TUNNEL_ARRAY_SZ) * sizeof(uint16_t))

/* NIX_RX_PARSE_S's ERRCODE + ERRLEV (12 bits) */
#define ERRCODE_ERRLEN_WIDTH 12
#define ERR_ARRAY_SZ	     ((BIT(ERRCODE_ERRLEN_WIDTH)) * sizeof(uint32_t))

/* Fastpath lookup */
#define CNXK_NIX_FASTPATH_LOOKUP_MEM "cnxk_nix_fastpath_lookup_mem"

#define CNXK_NIX_UDP_TUN_BITMASK \
	((1ull << (RTE_MBUF_F_TX_TUNNEL_VXLAN >> 45)) | \
	 (1ull << (RTE_MBUF_F_TX_TUNNEL_GENEVE >> 45)))

/* Subtype from inline outbound error event */
#define CNXK_ETHDEV_SEC_OUTB_EV_SUB 0xFFUL

/* SPI is carried in the lower 20 bits of the tag */
#define CNXK_ETHDEV_SPI_TAG_MASK 0xFFFFFUL

#define CNXK_NIX_PFC_CHAN_COUNT 16

#define CNXK_TM_MARK_VLAN_DEI BIT_ULL(0)
#define CNXK_TM_MARK_IP_DSCP  BIT_ULL(1)
#define CNXK_TM_MARK_IP_ECN   BIT_ULL(2)

#define CNXK_TM_MARK_MASK \
	(CNXK_TM_MARK_VLAN_DEI | CNXK_TM_MARK_IP_DSCP | CNXK_TM_MARK_IP_ECN)

#define CNXK_TX_MARK_FMT_MASK (0xFFFFFFFFFFFFull)

struct cnxk_fc_cfg {
	enum rte_eth_fc_mode mode;
	uint8_t rx_pause;
	uint8_t tx_pause;
};

struct cnxk_pfc_cfg {
	struct cnxk_fc_cfg fc_cfg;
	uint16_t class_en;
	uint16_t pause_time;
	uint8_t rx_tc;
	uint8_t rx_qid;
	uint8_t tx_tc;
	uint8_t tx_qid;
};

struct cnxk_eth_qconf {
	union {
		struct rte_eth_txconf tx;
		struct rte_eth_rxconf rx;
	} conf;
	struct rte_mempool *mp;
	uint16_t nb_desc;
	uint8_t valid;
};

struct cnxk_timesync_info {
	uint8_t rx_ready;
	uint64_t rx_tstamp;
	uint64_t rx_tstamp_dynflag;
	int tstamp_dynfield_offset;
	rte_iova_t tx_tstamp_iova;
	uint64_t *tx_tstamp;
} __plt_cache_aligned;

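/*
 * Illustrative sketch (assumes 'tstamp' points to a populated
 * struct cnxk_timesync_info and 'mbuf' is a received packet): the Rx
 * timestamp lives in a dynamic mbuf field at tstamp_dynfield_offset,
 * see cnxk_nix_timestamp_dynfield() further below:
 *
 *	if (mbuf->ol_flags & tstamp->rx_tstamp_dynflag)
 *		rte_mbuf_timestamp_t rx_ts =
 *			*cnxk_nix_timestamp_dynfield(mbuf, tstamp);
 */
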
struct cnxk_meter_node {
#define MAX_PRV_MTR_NODES 10
	TAILQ_ENTRY(cnxk_meter_node) next;
	/**< Pointer to the next flow meter structure. */
	uint32_t id; /**< User meter id. */
	struct cnxk_mtr_profile_node *profile;
	struct cnxk_mtr_policy_node *policy;
	uint32_t bpf_id; /**< HW meter id. */
	uint32_t rq_num;
	uint32_t *rq_id;
	uint16_t level;
	uint32_t prev_id[MAX_PRV_MTR_NODES]; /**< Prev meter id for chaining */
	uint32_t prev_cnt;
	uint32_t next_id; /**< Next meter id for chaining */
	bool is_prev;
	bool is_next;
	struct rte_mtr_params params;
	struct roc_nix_bpf_objs profs;
	bool is_used;
	uint32_t ref_cnt;
};

struct action_rss {
	enum rte_eth_hash_function func;
	uint32_t level;
	uint64_t types;
	uint32_t key_len;
	uint32_t queue_num;
	uint8_t *key;
	uint16_t *queue;
};

struct policy_actions {
	uint32_t action_fate;
	union {
		uint16_t queue;
		uint32_t mtr_id;
		struct action_rss *rss_desc;
	};
};

struct cnxk_mtr_policy_node {
	TAILQ_ENTRY(cnxk_mtr_policy_node) next;
	/**< Pointer to the next flow meter structure. */
	uint32_t id;	 /**< Policy id. */
	uint32_t mtr_id; /**< Meter id. */
	struct rte_mtr_meter_policy_params policy;
	struct policy_actions actions[RTE_COLORS];
	uint32_t ref_cnt;
};

struct cnxk_mtr_profile_node {
	TAILQ_ENTRY(cnxk_mtr_profile_node) next;
	struct rte_mtr_meter_profile profile; /**< Profile detail. */
	uint32_t ref_cnt;		      /**< Use count. */
	uint32_t id;			      /**< Profile id. */
};

TAILQ_HEAD(cnxk_mtr_profiles, cnxk_mtr_profile_node);
TAILQ_HEAD(cnxk_mtr_policy, cnxk_mtr_policy_node);
TAILQ_HEAD(cnxk_mtr, cnxk_meter_node);

/* Security session private data */
struct cnxk_eth_sec_sess {
	/* List entry */
	TAILQ_ENTRY(cnxk_eth_sec_sess) entry;

	/* Inbound SA is from NIX_RX_IPSEC_SA_BASE,
	 * outbound SA from roc_nix_inl_outb_sa_base_get()
	 */
	void *sa;

	/* SA index */
	uint32_t sa_idx;

	/* SPI */
	uint32_t spi;

	/* Back pointer to session */
	struct rte_security_session *sess;

	/* Inbound */
	bool inb;

	/* Inbound session on inl dev */
	bool inl_dev;
};

TAILQ_HEAD(cnxk_eth_sec_sess_list, cnxk_eth_sec_sess);

/* Inbound security data */
struct cnxk_eth_dev_sec_inb {
	/* IPSec inbound max SPI */
	uint16_t max_spi;

	/* Using inbound with inline device */
	bool inl_dev;

	/* Device argument to disable inline device usage for inb */
	bool no_inl_dev;

	/* Active sessions */
	uint16_t nb_sess;

	/* List of sessions */
	struct cnxk_eth_sec_sess_list list;

	/* DPTR for WRITE_SA microcode op */
	void *sa_dptr;

	/* Lock to synchronize sa setup/release */
	rte_spinlock_t lock;
};

/* Outbound security data */
struct cnxk_eth_dev_sec_outb {
	/* IPSec outbound max SA */
	uint16_t max_sa;

	/* Per CPT LF descriptor count */
	uint32_t nb_desc;

	/* SA Bitmap */
	struct plt_bitmap *sa_bmap;

	/* SA bitmap memory */
	void *sa_bmap_mem;

	/* SA base */
	uint64_t sa_base;

	/* CPT LF base */
	struct roc_cpt_lf *lf_base;

	/* Crypto queues => CPT lf count */
	uint16_t nb_crypto_qs;

	/* Active sessions */
	uint16_t nb_sess;

	/* List of sessions */
	struct cnxk_eth_sec_sess_list list;

	/* DPTR for WRITE_SA microcode op */
	void *sa_dptr;

	/* Lock to synchronize sa setup/release */
	rte_spinlock_t lock;
};

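/*
 * Illustrative sketch (not the driver's implementation): both security
 * contexts keep their active sessions on a TAILQ protected by 'lock'.
 * Assuming 'dev' is the struct cnxk_eth_dev defined below, a lookup by SPI
 * such as cnxk_eth_sec_sess_get_by_spi() (declared later) could be
 * structured as:
 *
 *	struct cnxk_eth_sec_sess *s;
 *
 *	rte_spinlock_lock(&dev->inb.lock);
 *	TAILQ_FOREACH(s, &dev->inb.list, entry)
 *		if (s->spi == spi)
 *			break;
 *	rte_spinlock_unlock(&dev->inb.lock);
 */
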
struct cnxk_eth_dev {
	/* ROC NIX */
	struct roc_nix nix;

	/* ROC NPC */
	struct roc_npc npc;

	/* ROC RQs, SQs and CQs */
	struct roc_nix_rq *rqs;
	struct roc_nix_sq *sqs;
	struct roc_nix_cq *cqs;

	/* Configured queue count */
	uint16_t nb_rxq;
	uint16_t nb_txq;
	uint16_t nb_rxq_sso;
	uint8_t configured;

	/* Max MAC filter entries */
	uint8_t dmac_filter_count;
	uint8_t max_mac_entries;
	bool dmac_filter_enable;

	uint16_t flags;
	uint8_t ptype_disable;
	bool scalar_ena;
	bool tx_mark;
	bool ptp_en;
	bool rx_mark_update; /* Enable/Disable mark update to mbuf */

	/* Pointer back to rte */
	struct rte_eth_dev *eth_dev;

	/* HW capabilities / Limitations */
	union {
		struct {
			uint64_t cq_min_4k : 1;
			uint64_t ipsecd_drop_re_dis : 1;
			uint64_t vec_drop_re_dis : 1;
		};
		uint64_t hwcap;
	};

	/* Rx and Tx offload capabilities */
	uint64_t rx_offload_capa;
	uint64_t tx_offload_capa;
	uint32_t speed_capa;
	/* Configured Rx and Tx offloads */
	uint64_t rx_offloads;
	uint64_t tx_offloads;
	/* Platform specific offload flags */
	uint16_t rx_offload_flags;
	uint16_t tx_offload_flags;

	/* ETHDEV RSS HF bitmask */
	uint64_t ethdev_rss_hf;

	/* Saved qconf before lf realloc */
	struct cnxk_eth_qconf *tx_qconf;
	struct cnxk_eth_qconf *rx_qconf;

	/* Flow control configuration */
	uint16_t pfc_tc_sq_map[CNXK_NIX_PFC_CHAN_COUNT];
	struct cnxk_pfc_cfg pfc_cfg;
	struct cnxk_fc_cfg fc_cfg;

	/* PTP Counters */
	struct cnxk_timesync_info tstamp;
	struct rte_timecounter systime_tc;
	struct rte_timecounter rx_tstamp_tc;
	struct rte_timecounter tx_tstamp_tc;
	double clk_freq_mult;
	uint64_t clk_delta;

	/* Ingress policer */
	enum roc_nix_bpf_color precolor_tbl[ROC_NIX_BPF_PRE_COLOR_MAX];
	struct cnxk_mtr_profiles mtr_profiles;
	struct cnxk_mtr_policy mtr_policy;
	struct cnxk_mtr mtr;

	/* Rx burst for cleanup (only primary) */
	eth_rx_burst_t rx_pkt_burst_no_offload;

	/* Default mac address */
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];

	/* LSO Tunnel format indices */
	uint64_t lso_tun_fmt;

	/* Per queue statistics counters */
	uint32_t txq_stat_map[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint32_t rxq_stat_map[RTE_ETHDEV_QUEUE_STAT_CNTRS];

	/* Security data */
	struct cnxk_eth_dev_sec_inb inb;
	struct cnxk_eth_dev_sec_outb outb;

	/* Reassembly dynfield/flag offsets */
	int reass_dynfield_off;
	int reass_dynflag_bit;
};

struct cnxk_eth_rxq_sp {
	struct cnxk_eth_dev *dev;
	struct cnxk_eth_qconf qconf;
	uint16_t qid;
} __plt_cache_aligned;

struct cnxk_eth_txq_sp {
	struct cnxk_eth_dev *dev;
	struct cnxk_eth_qconf qconf;
	uint16_t qid;
} __plt_cache_aligned;

static inline struct cnxk_eth_dev *
cnxk_eth_pmd_priv(const struct rte_eth_dev *eth_dev)
{
	return eth_dev->data->dev_private;
}

static inline struct cnxk_eth_rxq_sp *
cnxk_eth_rxq_to_sp(void *__rxq)
{
	return ((struct cnxk_eth_rxq_sp *)__rxq) - 1;
}

static inline struct cnxk_eth_txq_sp *
cnxk_eth_txq_to_sp(void *__txq)
{
	return ((struct cnxk_eth_txq_sp *)__txq) - 1;
}

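/*
 * Illustrative note (layout assumed from the helpers above, not a separate
 * interface): the slow path context is placed immediately before the fast
 * path queue structure handed to the datapath, so recovering it is plain
 * pointer arithmetic:
 *
 *	struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
 *	struct cnxk_eth_dev *dev = rxq_sp->dev;
 */
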
/* Common ethdev ops */
extern struct eth_dev_ops cnxk_eth_dev_ops;

/* Common flow ops */
extern struct rte_flow_ops cnxk_flow_ops;

/* Common security ops */
extern struct rte_security_ops cnxk_eth_sec_ops;

/* Common tm ops */
extern struct rte_tm_ops cnxk_tm_ops;

/* Ops */
int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
		   struct rte_pci_device *pci_dev);
int cnxk_nix_remove(struct rte_pci_device *pci_dev);
int cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
int cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
				    struct rte_ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr);
int cnxk_nix_mac_addr_add(struct rte_eth_dev *eth_dev,
			  struct rte_ether_addr *addr, uint32_t index,
			  uint32_t pool);
void cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index);
int cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev,
			  struct rte_ether_addr *addr);
int cnxk_nix_promisc_enable(struct rte_eth_dev *eth_dev);
int cnxk_nix_promisc_disable(struct rte_eth_dev *eth_dev);
int cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev);
int cnxk_nix_allmulticast_disable(struct rte_eth_dev *eth_dev);
int cnxk_nix_info_get(struct rte_eth_dev *eth_dev,
		      struct rte_eth_dev_info *dev_info);
int cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
			       struct rte_eth_burst_mode *mode);
int cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
			       struct rte_eth_burst_mode *mode);
int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			   struct rte_eth_fc_conf *fc_conf);
int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			   struct rte_eth_fc_conf *fc_conf);
int cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
					     struct rte_eth_pfc_queue_conf *pfc_conf);
int cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
					       struct rte_eth_pfc_queue_info *pfc_info);
int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
			     struct rte_eth_dev_module_info *modinfo);
int cnxk_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
			       struct rte_dev_eeprom_info *info);
int cnxk_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
				  uint16_t rx_queue_id);
int cnxk_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
				   uint16_t rx_queue_id);
int cnxk_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool);
int cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt);
int cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
			  const struct rte_flow_ops **ops);
int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			    uint16_t nb_desc, uint16_t fp_tx_q_sz,
			    const struct rte_eth_txconf *tx_conf);
int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			    uint16_t nb_desc, uint16_t fp_rx_q_sz,
			    const struct rte_eth_rxconf *rx_conf,
			    struct rte_mempool *mp);
int cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid);
int cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid);
int cnxk_nix_dev_start(struct rte_eth_dev *eth_dev);

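/*
 * Illustrative call sequence (standard ethdev API usage, not driver
 * internals): the ops above are reached indirectly through rte_ethdev calls
 * made by the application, e.g.:
 *
 *	rte_eth_dev_configure(port, nb_rxq, nb_txq, &conf);	   // cnxk_nix_configure
 *	rte_eth_rx_queue_setup(port, 0, 512, rte_socket_id(),
 *			       &rxconf, mp);			   // cnxk_nix_rx_queue_setup
 *	rte_eth_dev_start(port);				   // cnxk_nix_dev_start
 */
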
int cnxk_nix_timesync_enable(struct rte_eth_dev *eth_dev);
int cnxk_nix_timesync_disable(struct rte_eth_dev *eth_dev);
int cnxk_nix_timesync_read_rx_timestamp(struct rte_eth_dev *eth_dev,
					struct timespec *timestamp,
					uint32_t flags);
int cnxk_nix_timesync_read_tx_timestamp(struct rte_eth_dev *eth_dev,
					struct timespec *timestamp);
int cnxk_nix_timesync_read_time(struct rte_eth_dev *eth_dev,
				struct timespec *ts);
int cnxk_nix_timesync_write_time(struct rte_eth_dev *eth_dev,
				 const struct timespec *ts);
int cnxk_nix_timesync_adjust_time(struct rte_eth_dev *eth_dev, int64_t delta);
int cnxk_nix_tsc_convert(struct cnxk_eth_dev *dev);
int cnxk_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *clock);

uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);
int cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *ops);
int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
				     uint16_t queue_idx, uint16_t tx_rate);
int cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
			      int mark_yellow, int mark_red,
			      struct rte_tm_error *error);
int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
			    int mark_yellow, int mark_red,
			    struct rte_tm_error *error);
int cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
			     int mark_yellow, int mark_red,
			     struct rte_tm_error *error);

/* MTR */
int cnxk_nix_mtr_ops_get(struct rte_eth_dev *dev, void *ops);

/* RSS */
uint32_t cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
				uint8_t rss_level);
int cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size);
int cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
int cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
			     struct rte_eth_rss_conf *rss_conf);
int cnxk_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_conf *rss_conf);

/* Link */
void cnxk_nix_toggle_flag_link_cfg(struct cnxk_eth_dev *dev, bool set);
void cnxk_eth_dev_link_status_cb(struct roc_nix *nix,
				 struct roc_nix_link_info *link);
void cnxk_eth_dev_link_status_get_cb(struct roc_nix *nix,
				     struct roc_nix_link_info *link);
int cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete);
int cnxk_nix_queue_stats_mapping(struct rte_eth_dev *dev, uint16_t queue_id,
				 uint8_t stat_idx, uint8_t is_rx);
int cnxk_nix_stats_reset(struct rte_eth_dev *dev);
int cnxk_nix_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
int cnxk_nix_xstats_get(struct rte_eth_dev *eth_dev,
			struct rte_eth_xstat *xstats, unsigned int n);
int cnxk_nix_xstats_get_names(struct rte_eth_dev *eth_dev,
			      struct rte_eth_xstat_name *xstats_names,
			      unsigned int limit);
int cnxk_nix_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
				    const uint64_t *ids,
				    struct rte_eth_xstat_name *xstats_names,
				    unsigned int limit);
int cnxk_nix_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids,
			      uint64_t *values, unsigned int n);
int cnxk_nix_xstats_reset(struct rte_eth_dev *eth_dev);
int cnxk_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
			    size_t fw_size);
void cnxk_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
			   struct rte_eth_rxq_info *qinfo);
void cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
			   struct rte_eth_txq_info *qinfo);

/* Queue status */
int cnxk_nix_rx_descriptor_status(void *rxq, uint16_t offset);
int cnxk_nix_tx_descriptor_status(void *txq, uint16_t offset);
uint32_t cnxk_nix_rx_queue_count(void *rxq);

/* Lookup configuration */
const uint32_t *cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev);
void *cnxk_nix_fastpath_lookup_mem_get(void);

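/*
 * Layout implied by the size constants above and by cnxk_nix_sa_base_get()
 * below (illustrative, byte offsets from the lookup mem base):
 *
 *	[0, PTYPE_ARRAY_SZ)				ptype tables
 *	[PTYPE_ARRAY_SZ, PTYPE_ARRAY_SZ + ERR_ARRAY_SZ)	Rx error code table
 *	[PTYPE_ARRAY_SZ + ERR_ARRAY_SZ, ...)		per-port inline SA bases
 */
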
/* Devargs */
int cnxk_ethdev_parse_devargs(struct rte_devargs *devargs,
			      struct cnxk_eth_dev *dev);

/* Debug */
int cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev,
			 struct rte_dev_reg_info *regs);

/* Security */
int cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p);
int cnxk_eth_outb_sa_idx_put(struct cnxk_eth_dev *dev, uint32_t idx);
int cnxk_nix_lookup_mem_sa_base_set(struct cnxk_eth_dev *dev);
int cnxk_nix_lookup_mem_sa_base_clear(struct cnxk_eth_dev *dev);
__rte_internal
int cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev);
struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev,
						       uint32_t spi, bool inb);
struct cnxk_eth_sec_sess *
cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
			      struct rte_security_session *sess);

/* Other private functions */
int nix_recalc_mtu(struct rte_eth_dev *eth_dev);
int nix_mtr_validate(struct rte_eth_dev *dev, uint32_t id);
int nix_mtr_policy_act_get(struct rte_eth_dev *eth_dev, uint32_t id,
			   struct cnxk_mtr_policy_node **policy);
int nix_mtr_rq_update(struct rte_eth_dev *eth_dev, uint32_t id,
		      uint32_t queue_num, const uint16_t *queue);
int nix_mtr_chain_update(struct rte_eth_dev *eth_dev, uint32_t cur_id,
			 uint32_t prev_id, uint32_t next_id);
int nix_mtr_chain_reset(struct rte_eth_dev *eth_dev, uint32_t cur_id);
struct cnxk_meter_node *nix_get_mtr(struct rte_eth_dev *eth_dev,
				    uint32_t cur_id);
int nix_mtr_level_update(struct rte_eth_dev *eth_dev, uint32_t id,
			 uint32_t level);
int nix_mtr_capabilities_init(struct rte_eth_dev *eth_dev);
int nix_mtr_configure(struct rte_eth_dev *eth_dev, uint32_t id);
int nix_mtr_connect(struct rte_eth_dev *eth_dev, uint32_t id);
int nix_mtr_destroy(struct rte_eth_dev *eth_dev, uint32_t id,
		    struct rte_mtr_error *error);
int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
				  uint32_t *prev_id, uint32_t *next_id,
				  struct cnxk_mtr_policy_node *policy,
				  int *tree_level);
int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
				     struct cnxk_pfc_cfg *conf);

/* Inlines */
static __rte_always_inline uint64_t
cnxk_pktmbuf_detach(struct rte_mbuf *m)
{
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len;
	struct rte_mbuf *md;
	uint16_t priv_size;
	uint16_t refcount;

	/* Update refcount of direct mbuf */
	md = rte_mbuf_from_indirect(m);
	refcount = rte_mbuf_refcnt_update(md, -1);

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	buf_len = rte_pktmbuf_data_room_size(mp);

	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;
	m->next = NULL;
	m->nb_segs = 1;

	/* Now indirect mbuf is safe to free */
	rte_pktmbuf_free(m);

	if (refcount == 0) {
		rte_mbuf_refcnt_set(md, 1);
		md->data_len = 0;
		md->ol_flags = 0;
		md->next = NULL;
		md->nb_segs = 1;
		return 0;
	} else {
		return 1;
	}
}

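/*
 * Illustrative note (a sketch of typical use, not the exact Tx path):
 * cnxk_nix_prefree_seg() below is invoked per Tx segment. A return value of
 * 0 means the buffer may be released back to its mempool once transmission
 * completes; 1 means another reference still holds it and it must be left
 * untouched, e.g.:
 *
 *	if (!cnxk_nix_prefree_seg(seg))
 *		; // buffer ownership passes along with the descriptor
 */
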
0; 722 } else if (rte_mbuf_refcnt_update(m, -1) == 0) { 723 if (!RTE_MBUF_DIRECT(m)) 724 return cnxk_pktmbuf_detach(m); 725 726 rte_mbuf_refcnt_set(m, 1); 727 m->next = NULL; 728 m->nb_segs = 1; 729 return 0; 730 } 731 732 /* Mbuf is having refcount more than 1 so need not to be freed */ 733 return 1; 734 } 735 736 static inline rte_mbuf_timestamp_t * 737 cnxk_nix_timestamp_dynfield(struct rte_mbuf *mbuf, 738 struct cnxk_timesync_info *info) 739 { 740 return RTE_MBUF_DYNFIELD(mbuf, info->tstamp_dynfield_offset, 741 rte_mbuf_timestamp_t *); 742 } 743 744 static __rte_always_inline uintptr_t 745 cnxk_nix_sa_base_get(uint16_t port, const void *lookup_mem) 746 { 747 uintptr_t sa_base_tbl; 748 749 sa_base_tbl = (uintptr_t)lookup_mem; 750 sa_base_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ; 751 return *((const uintptr_t *)sa_base_tbl + port); 752 } 753 754 #endif /* __CNXK_ETHDEV_H__ */ 755