/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2014-2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017 NXP
 *
 */
#ifndef __DPAA_ETHDEV_H__
#define __DPAA_ETHDEV_H__

/* System headers */
#include <stdbool.h>
#include <rte_ethdev_driver.h>
#include <rte_event_eth_rx_adapter.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <of.h>
#include <netcfg.h>

#define MAX_DPAA_CORES			4
/* Bytes of hardware annotation prepended to each received frame */
#define DPAA_MBUF_HW_ANNOTATION		64
/* Bytes reserved for the frame-descriptor pass-through area (PTA) */
#define DPAA_FD_PTA_SIZE		64

/* Both annotation areas are carved out of the mbuf headroom on RX, so the
 * configured headroom must be large enough to hold them.
 */
#if (DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM
#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM"
#endif

/* mbuf->seqn will be used to store event entry index for
 * driver specific usage. For parallel mode queues, invalid
 * index will be set and for atomic mode queues, valid value
 * ranging from 1 to 16.
 */
#define DPAA_INVALID_MBUF_SEQN	0

/* we will re-use the HEADROOM for annotation in RX */
#define DPAA_HW_BUF_RESERVE	0
#define DPAA_PACKET_LAYOUT_ALIGN	64

/* Alignment to use for cpu-local structs to avoid coherency problems. */
#define MAX_CACHELINE			64

/* Maximum receive frame length supported by the hardware */
#define DPAA_MAX_RX_PKT_LEN	10240

#define DPAA_SGT_MAX_ENTRIES	16 /* maximum number of entries in SG Table */

/* RX queue tail drop threshold (CGR Based) in frame count */
#define CGR_RX_PERFQ_THRESH	256

/* Maximum MAC filters for a MEMAC (8), including the primary MAC address */
#define DPAA_MAX_MAC_FILTER	(MEMAC_NUM_OF_PADDRS + 1)

/* Maximum number of slots available in TX ring */
#define DPAA_TX_BURST_SIZE	7

/* Optimal burst size for RX and TX as default */
#define DPAA_DEF_RX_BURST_SIZE	7
#define DPAA_DEF_TX_BURST_SIZE	DPAA_TX_BURST_SIZE

#ifndef VLAN_TAG_SIZE
#define VLAN_TAG_SIZE	4 /**< Vlan Header Length */
#endif

/* PCD (Parse-Classify-Distribute) frame queues */
#define DPAA_PCD_FQID_START		0x400
#define DPAA_PCD_FQID_MULTIPLIER	0x100
#define DPAA_DEFAULT_NUM_PCD_QUEUES	1
#define DPAA_MAX_NUM_PCD_QUEUES		4

/* Work-queue priorities used for the per-interface frame queues */
#define DPAA_IF_TX_PRIORITY		3
#define DPAA_IF_RX_PRIORITY		0
#define DPAA_IF_DEBUG_PRIORITY		7

/* Dequeue stashing configuration for the RX frame queues */
#define DPAA_IF_RX_ANNOTATION_STASH	1
#define DPAA_IF_RX_DATA_STASH		1
#define DPAA_IF_RX_CONTEXT_STASH	0

/* Each "debug" FQ is represented by one of these; indexes debug_queues[] */
#define DPAA_DEBUG_FQ_RX_ERROR	0
#define DPAA_DEBUG_FQ_TX_ERROR	1

/* RSS hash protocols this PMD can distribute on */
#define DPAA_RSS_OFFLOAD_ALL ( \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_NONFRAG_IPV6_SCTP)

/* TX checksum offload mbuf flags handled by this PMD */
#define DPAA_TX_CKSUM_OFFLOAD_MASK ( \
	PKT_TX_IP_CKSUM | \
	PKT_TX_TCP_CKSUM | \
	PKT_TX_UDP_CKSUM)

/* DPAA Frame descriptor macros */

#define DPAA_FD_CMD_FCO		0x80000000
/**< Frame queue Context Override */
#define DPAA_FD_CMD_RPD		0x40000000
/**< Read Prepended Data */
#define DPAA_FD_CMD_UPD		0x20000000
/**< Update Prepended Data */
#define DPAA_FD_CMD_DTC		0x10000000
/**< Do IP/TCP/UDP Checksum */
/* NOTE(review): DCL4C intentionally shares the bit value of DTC — presumably
 * the same bit means "do checksum" on TX and "checksum not done" on RX status;
 * confirm against the hardware reference manual before changing either.
 */
#define DPAA_FD_CMD_DCL4C	0x10000000
/**< Didn't calculate L4 Checksum */
#define DPAA_FD_CMD_CFQ		0x00ffffff
/**< Confirmation Frame Queue */

/* Each network interface is represented by one of these */
struct dpaa_if {
	int valid;			/**< non-zero once the interface is set up */
	char *name;			/**< interface name */
	const struct fm_eth_port_cfg *cfg;	/**< FMAN port configuration */
	struct qman_fq *rx_queues;	/**< array of nb_rx_queues RX frame queues */
	struct qman_cgr *cgr_rx;	/**< RX congestion groups (CGR tail drop) */
	struct qman_fq *tx_queues;	/**< array of nb_tx_queues TX frame queues */
	struct qman_fq debug_queues[2];	/**< indexed by DPAA_DEBUG_FQ_* */
	uint16_t nb_rx_queues;		/**< number of configured RX queues */
	uint16_t nb_tx_queues;		/**< number of configured TX queues */
	uint32_t ifid;			/**< interface index */
	struct fman_if *fif;		/**< underlying FMAN interface */
	struct dpaa_bp_info *bp_info;	/**< buffer pool backing RX */
	struct rte_eth_fc_conf *fc_conf;	/**< flow control configuration */
};

/* Hardware statistics counters.
 * NOTE(review): the named reserved gaps (res_x/res01d0/res0210/res0258)
 * suggest this struct mirrors the MEMAC statistics register map — do not
 * reorder or resize fields; confirm offsets against the SoC reference manual.
 */
struct dpaa_if_stats {
	/* Rx Statistics Counter */
	uint64_t reoct;		/**<Rx Eth Octets Counter */
	uint64_t roct;		/**<Rx Octet Counters */
	uint64_t raln;		/**<Rx Alignment Error Counter */
	uint64_t rxpf;		/**<Rx valid Pause Frame */
	uint64_t rfrm;		/**<Rx Frame counter */
	uint64_t rfcs;		/**<Rx frame check seq error */
	uint64_t rvlan;		/**<Rx Vlan Frame Counter */
	uint64_t rerr;		/**<Rx Frame error */
	uint64_t ruca;		/**<Rx Unicast */
	uint64_t rmca;		/**<Rx Multicast */
	uint64_t rbca;		/**<Rx Broadcast */
	uint64_t rdrp;		/**<Rx Dropped Packet */
	uint64_t rpkt;		/**<Rx packet */
	uint64_t rund;		/**<Rx undersized packets */
	uint32_t res_x[14];	/* reserved */
	uint64_t rovr;		/**<Rx oversized but good */
	uint64_t rjbr;		/**<Rx oversized with bad csum */
	uint64_t rfrg;		/**<Rx fragment Packet */
	uint64_t rcnp;		/**<Rx control packets (0x8808 */
	uint64_t rdrntp;	/**<Rx dropped due to FIFO overflow */
	uint32_t res01d0[12];	/* reserved */
	/* Tx Statistics Counter */
	uint64_t teoct;		/**<Tx eth octets */
	uint64_t toct;		/**<Tx Octets */
	uint32_t res0210[2];	/* reserved */
	uint64_t txpf;		/**<Tx valid pause frame */
	uint64_t tfrm;		/**<Tx frame counter */
	uint64_t tfcs;		/**<Tx FCS error */
	uint64_t tvlan;		/**<Tx Vlan Frame */
	uint64_t terr;		/**<Tx frame error */
	uint64_t tuca;		/**<Tx Unicast */
	uint64_t tmca;		/**<Tx Multicast */
	uint64_t tbca;		/**<Tx Broadcast */
	uint32_t res0258[2];	/* reserved */
	uint64_t tpkt;		/**<Tx Packet */
	uint64_t tund;		/**<Tx Undersized */
};

/* Attach an ethdev RX queue to an event RX adapter: the queue is bound to
 * QMan channel ch_id and serviced via the dpaa_rx_cb_* callbacks below.
 * Returns 0 on success, negative on failure.
 */
int
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		u16 ch_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

/* Detach an ethdev RX queue previously attached with
 * dpaa_eth_eventq_attach(). Returns 0 on success, negative on failure.
 */
int
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id);

/* QMan DQRR callback for RX queues attached in parallel event mode */
enum qman_cb_dqrr_result
dpaa_rx_cb_parallel(void *event,
		    struct qman_portal *qm __always_unused,
		    struct qman_fq *fq,
		    const struct qm_dqrr_entry *dqrr,
		    void **bufs);
/* QMan DQRR callback for RX queues attached in atomic event mode */
enum qman_cb_dqrr_result
dpaa_rx_cb_atomic(void *event,
		  struct qman_portal *qm __always_unused,
		  struct qman_fq *fq,
		  const struct qm_dqrr_entry *dqrr,
		  void **bufs);

#endif