xref: /f-stack/dpdk/drivers/net/dpaa/dpaa_ethdev.h (revision 84bcae25)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2014-2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017 NXP
5  *
6  */
7 #ifndef __DPAA_ETHDEV_H__
8 #define __DPAA_ETHDEV_H__
9 
10 /* System headers */
11 #include <stdbool.h>
12 #include <rte_ethdev_driver.h>
13 #include <rte_event_eth_rx_adapter.h>
14 
15 #include <fsl_usd.h>
16 #include <fsl_qman.h>
17 #include <fsl_bman.h>
18 #include <of.h>
19 #include <netcfg.h>
20 
/* Sizes (bytes) of the hardware annotation area and the frame-descriptor
 * pass-through area that precede the packet data in each buffer.
 */
#define DPAA_MBUF_HW_ANNOTATION		64
#define DPAA_FD_PTA_SIZE		64

/* mbuf->seqn will be used to store event entry index for
 * driver specific usage. For parallel mode queues, invalid
 * index will be set and for atomic mode queues, valid value
 * ranging from 1 to 16.
 */
#define DPAA_INVALID_MBUF_SEQN  0

/* we will re-use the HEADROOM for annotation in RX */
#define DPAA_HW_BUF_RESERVE	0
#define DPAA_PACKET_LAYOUT_ALIGN	64

/* Alignment to use for cpu-local structs to avoid coherency problems. */
#define MAX_CACHELINE			64

/* Maximum Rx frame length accepted by the driver (bytes) */
#define DPAA_MAX_RX_PKT_LEN  10240

#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */

/* RX queue tail drop threshold (CGR Based) in frame count */
#define CGR_RX_PERFQ_THRESH 256

/* Max MAC filters for a memac port, including the primary MAC address */
#define DPAA_MAX_MAC_FILTER (MEMAC_NUM_OF_PADDRS + 1)

/* Maximum number of slots available in TX ring */
#define DPAA_TX_BURST_SIZE	7

/* Optimal burst size for RX and TX as default */
#define DPAA_DEF_RX_BURST_SIZE 7
#define DPAA_DEF_TX_BURST_SIZE DPAA_TX_BURST_SIZE

#ifndef VLAN_TAG_SIZE
#define VLAN_TAG_SIZE   4 /**< VLAN header length in bytes */
#endif

/* PCD frame queues: base FQID, per-port FQID spacing, and the
 * default/maximum number of distribution queues per interface.
 */
#define DPAA_PCD_FQID_START		0x400
#define DPAA_PCD_FQID_MULTIPLIER	0x100
#define DPAA_DEFAULT_NUM_PCD_QUEUES	1
#define DPAA_MAX_NUM_PCD_QUEUES		4

/* Priorities assigned to the Tx, Rx, and debug frame queues */
#define DPAA_IF_TX_PRIORITY		3
#define DPAA_IF_RX_PRIORITY		0
#define DPAA_IF_DEBUG_PRIORITY		7

/* Stashing configuration for Rx FQ dequeues (annotation/data/context) */
#define DPAA_IF_RX_ANNOTATION_STASH	1
#define DPAA_IF_RX_DATA_STASH		1
#define DPAA_IF_RX_CONTEXT_STASH		0

/* Each "debug" FQ is represented by one of these; they index
 * struct dpaa_if::debug_queues[].
 */
#define DPAA_DEBUG_FQ_RX_ERROR   0
#define DPAA_DEBUG_FQ_TX_ERROR   1

/* RSS hash protocol types supported by this driver */
#define DPAA_RSS_OFFLOAD_ALL ( \
	ETH_RSS_IP | \
	ETH_RSS_UDP | \
	ETH_RSS_TCP | \
	ETH_RSS_SCTP)

/* mbuf ol_flags that request hardware Tx checksum offload */
#define DPAA_TX_CKSUM_OFFLOAD_MASK (             \
		PKT_TX_IP_CKSUM |                \
		PKT_TX_TCP_CKSUM |               \
		PKT_TX_UDP_CKSUM)

/* DPAA Frame descriptor macros */

#define DPAA_FD_CMD_FCO			0x80000000
/**< Frame queue Context Override */
#define DPAA_FD_CMD_RPD			0x40000000
/**< Read Prepended Data */
#define DPAA_FD_CMD_UPD			0x20000000
/**< Update Prepended Data */
#define DPAA_FD_CMD_DTC			0x10000000
/**< Do IP/TCP/UDP Checksum */
#define DPAA_FD_CMD_DCL4C		0x10000000
/**< Didn't calculate L4 Checksum.
 * NOTE(review): intentionally shares the bit position of DPAA_FD_CMD_DTC —
 * presumably a Tx-command vs. Rx-status overlay of the same field; confirm
 * against the FMan hardware documentation.
 */
#define DPAA_FD_CMD_CFQ			0x00ffffff
/**< Confirmation Frame Queue */
102 
/* Each network interface is represented by one of these */
struct dpaa_if {
	int valid;			/**< Non-zero when this entry is in use */
	char *name;			/**< Interface name */
	const struct fm_eth_port_cfg *cfg; /**< FMan ethernet port configuration */
	struct qman_fq *rx_queues;	/**< Array of nb_rx_queues Rx frame queues */
	struct qman_cgr *cgr_rx;	/**< Per-Rx-queue CGRs for tail drop (CGR_RX_PERFQ_THRESH) */
	struct qman_fq *tx_queues;	/**< Array of nb_tx_queues Tx frame queues */
	struct qman_fq debug_queues[2];	/**< Error FQs, indexed by DPAA_DEBUG_FQ_RX_ERROR/TX_ERROR */
	uint16_t nb_rx_queues;		/**< Number of Rx queues configured */
	uint16_t nb_tx_queues;		/**< Number of Tx queues configured */
	uint32_t ifid;			/**< Interface id */
	struct fman_if *fif;		/**< Underlying FMan interface */
	struct dpaa_bp_info *bp_info;	/**< Buffer pool backing this interface */
	struct rte_eth_fc_conf *fc_conf; /**< Flow control configuration */
};
119 
/* Hardware statistics counters.
 * NOTE(review): the field order and the res_* reserved gaps appear to
 * mirror the memac statistics register map at fixed offsets — do not
 * reorder, resize, or remove fields; confirm against the hardware
 * reference manual.
 */
struct dpaa_if_stats {
	/* Rx Statistics Counter */
	uint64_t reoct;		/**<Rx Eth Octets Counter */
	uint64_t roct;		/**<Rx Octet Counters */
	uint64_t raln;		/**<Rx Alignment Error Counter */
	uint64_t rxpf;		/**<Rx valid Pause Frame */
	uint64_t rfrm;		/**<Rx Frame counter */
	uint64_t rfcs;		/**<Rx frame check seq error */
	uint64_t rvlan;		/**<Rx Vlan Frame Counter */
	uint64_t rerr;		/**<Rx Frame error */
	uint64_t ruca;		/**<Rx Unicast */
	uint64_t rmca;		/**<Rx Multicast */
	uint64_t rbca;		/**<Rx Broadcast */
	uint64_t rdrp;		/**<Rx Dropped Packet */
	uint64_t rpkt;		/**<Rx packet */
	uint64_t rund;		/**<Rx undersized packets */
	uint32_t res_x[14];	/**<Reserved (register-map gap) */
	uint64_t rovr;		/**<Rx oversized but good */
	uint64_t rjbr;		/**<Rx oversized with bad csum */
	uint64_t rfrg;		/**<Rx fragment Packet */
	uint64_t rcnp;		/**<Rx control packets (ethertype 0x8808) */
	uint64_t rdrntp;	/**<Rx dropped due to FIFO overflow */
	uint32_t res01d0[12];	/**<Reserved (register-map gap) */
	/* Tx Statistics Counter */
	uint64_t teoct;		/**<Tx eth octets */
	uint64_t toct;		/**<Tx Octets */
	uint32_t res0210[2];	/**<Reserved (register-map gap) */
	uint64_t txpf;		/**<Tx valid pause frame */
	uint64_t tfrm;		/**<Tx frame counter */
	uint64_t tfcs;		/**<Tx FCS error */
	uint64_t tvlan;		/**<Tx Vlan Frame */
	uint64_t terr;		/**<Tx frame error */
	uint64_t tuca;		/**<Tx Unicast */
	uint64_t tmca;		/**<Tx Multicast */
	uint64_t tbca;		/**<Tx Broadcast */
	uint32_t res0258[2];	/**<Reserved (register-map gap) */
	uint64_t tpkt;		/**<Tx Packet */
	uint64_t tund;		/**<Tx Undersized */
};
159 
/**
 * Attach Rx queue @eth_rx_queue_id of @dev to an event Rx adapter,
 * delivering dequeued frames to QMan channel @ch_id according to
 * @queue_conf.
 *
 * @return 0 on success, negative value on failure.
 *   NOTE(review): return convention inferred from DPDK practice —
 *   confirm against the definition in dpaa_ethdev.c.
 */
int
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
		int eth_rx_queue_id,
		u16 ch_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);

/**
 * Detach Rx queue @eth_rx_queue_id of @dev from its event Rx adapter,
 * undoing a prior dpaa_eth_eventq_attach().
 */
int
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
			   int eth_rx_queue_id);

/* DQRR dequeue callbacks installed on event-driven Rx queues: one for
 * parallel-mode queues and one for atomic-mode queues (see the
 * mbuf->seqn note above DPAA_INVALID_MBUF_SEQN for how the two modes
 * mark event entry indices).
 */
enum qman_cb_dqrr_result
dpaa_rx_cb_parallel(void *event,
		    struct qman_portal *qm __always_unused,
		    struct qman_fq *fq,
		    const struct qm_dqrr_entry *dqrr,
		    void **bufs);
enum qman_cb_dqrr_result
dpaa_rx_cb_atomic(void *event,
		  struct qman_portal *qm __always_unused,
		  struct qman_fq *fq,
		  const struct qm_dqrr_entry *dqrr,
		  void **bufs);
182 
183 #endif
184