xref: /f-stack/dpdk/drivers/net/ena/ena_ethdev.h (revision 2d9fd380)
14418919fSjohnjiang /* SPDX-License-Identifier: BSD-3-Clause
2*2d9fd380Sjfb8856606  * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
3a9643ea8Slogwang  * All rights reserved.
4a9643ea8Slogwang  */
5a9643ea8Slogwang 
6a9643ea8Slogwang #ifndef _ENA_ETHDEV_H_
7a9643ea8Slogwang #define _ENA_ETHDEV_H_
8a9643ea8Slogwang 
9d30ea906Sjfb8856606 #include <rte_cycles.h>
10a9643ea8Slogwang #include <rte_pci.h>
112bfe3f2eSlogwang #include <rte_bus_pci.h>
12d30ea906Sjfb8856606 #include <rte_timer.h>
13a9643ea8Slogwang 
14a9643ea8Slogwang #include "ena_com.h"
15a9643ea8Slogwang 
/* PCI BAR indices used by the device: registers and (optional LLQ) memory. */
#define ENA_REGS_BAR	0
#define ENA_MEM_BAR	2

#define ENA_MAX_NUM_QUEUES	128
#define ENA_MIN_FRAME_LEN	64
#define ENA_NAME_MAX_LEN	20
#define ENA_PKT_MAX_BUFS	17
#define ENA_RX_BUF_MIN_SIZE	1400
#define ENA_DEFAULT_RING_SIZE	1024

#define ENA_MIN_MTU		128

#define ENA_MMIO_DISABLE_REG_READ	BIT(0)

/* Watchdog: device is considered hung if no keep-alive arrives in this time. */
#define ENA_WD_TIMEOUT_SEC	3
#define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz())

/* While processing submitted and completed descriptors (rx and tx path
 * respectively) in a loop it is desired to:
 *  - perform batch submissions while populating submission queue
 *  - avoid blocking transmission of other packets during cleanup phase
 * Hence the utilization ratio of 1/8 of a queue size or max value if the size
 * of the ring is very big - like 8k Rx rings.
 */
#define ENA_REFILL_THRESH_DIVIDER      8
#define ENA_REFILL_THRESH_PACKET       256

/* Advance a ring index by 1 (or by n), wrapping with the ring's
 * power-of-two mask.
 */
#define ENA_IDX_NEXT_MASKED(idx, mask) (((idx) + 1) & (mask))
#define ENA_IDX_ADD_MASKED(idx, n, mask) (((idx) + (n)) & (mask))

struct ena_adapter;
47a9643ea8Slogwang 
/* Discriminates whether an ena_ring instance is an Rx or a Tx queue. */
enum ena_ring_type {
	ENA_RING_TYPE_RX = 1,
	ENA_RING_TYPE_TX = 2,
};
52a9643ea8Slogwang 
53a9643ea8Slogwang struct ena_tx_buffer {
54a9643ea8Slogwang 	struct rte_mbuf *mbuf;
55a9643ea8Slogwang 	unsigned int tx_descs;
56a9643ea8Slogwang 	unsigned int num_of_bufs;
57a9643ea8Slogwang 	struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
58a9643ea8Slogwang };
59a9643ea8Slogwang 
60*2d9fd380Sjfb8856606 /* Rx buffer holds only pointer to the mbuf - may be expanded in the future */
61*2d9fd380Sjfb8856606 struct ena_rx_buffer {
62*2d9fd380Sjfb8856606 	struct rte_mbuf *mbuf;
63*2d9fd380Sjfb8856606 	struct ena_com_buf ena_buf;
64*2d9fd380Sjfb8856606 };
65*2d9fd380Sjfb8856606 
664418919fSjohnjiang struct ena_calc_queue_size_ctx {
674418919fSjohnjiang 	struct ena_com_dev_get_features_ctx *get_feat_ctx;
684418919fSjohnjiang 	struct ena_com_dev *ena_dev;
69*2d9fd380Sjfb8856606 	u32 max_rx_queue_size;
70*2d9fd380Sjfb8856606 	u32 max_tx_queue_size;
714418919fSjohnjiang 	u16 max_tx_sgl_size;
724418919fSjohnjiang 	u16 max_rx_sgl_size;
734418919fSjohnjiang };
744418919fSjohnjiang 
754418919fSjohnjiang struct ena_stats_tx {
764418919fSjohnjiang 	u64 cnt;
774418919fSjohnjiang 	u64 bytes;
784418919fSjohnjiang 	u64 prepare_ctx_err;
794418919fSjohnjiang 	u64 linearize;
804418919fSjohnjiang 	u64 linearize_failed;
814418919fSjohnjiang 	u64 tx_poll;
824418919fSjohnjiang 	u64 doorbells;
834418919fSjohnjiang 	u64 bad_req_id;
844418919fSjohnjiang 	u64 available_desc;
854418919fSjohnjiang };
864418919fSjohnjiang 
874418919fSjohnjiang struct ena_stats_rx {
884418919fSjohnjiang 	u64 cnt;
894418919fSjohnjiang 	u64 bytes;
904418919fSjohnjiang 	u64 refill_partial;
914418919fSjohnjiang 	u64 bad_csum;
924418919fSjohnjiang 	u64 mbuf_alloc_fail;
934418919fSjohnjiang 	u64 bad_desc_num;
944418919fSjohnjiang 	u64 bad_req_id;
954418919fSjohnjiang };
964418919fSjohnjiang 
97a9643ea8Slogwang struct ena_ring {
98a9643ea8Slogwang 	u16 next_to_use;
99a9643ea8Slogwang 	u16 next_to_clean;
100a9643ea8Slogwang 
101a9643ea8Slogwang 	enum ena_ring_type type;
102a9643ea8Slogwang 	enum ena_admin_placement_policy_type tx_mem_queue_type;
103d30ea906Sjfb8856606 	/* Holds the empty requests for TX/RX OOO completions */
104d30ea906Sjfb8856606 	union {
105a9643ea8Slogwang 		uint16_t *empty_tx_reqs;
106d30ea906Sjfb8856606 		uint16_t *empty_rx_reqs;
107d30ea906Sjfb8856606 	};
108d30ea906Sjfb8856606 
109a9643ea8Slogwang 	union {
110a9643ea8Slogwang 		struct ena_tx_buffer *tx_buffer_info; /* contex of tx packet */
111*2d9fd380Sjfb8856606 		struct ena_rx_buffer *rx_buffer_info; /* contex of rx packet */
112a9643ea8Slogwang 	};
113d30ea906Sjfb8856606 	struct rte_mbuf **rx_refill_buffer;
114a9643ea8Slogwang 	unsigned int ring_size; /* number of tx/rx_buffer_info's entries */
115*2d9fd380Sjfb8856606 	unsigned int size_mask;
116a9643ea8Slogwang 
117a9643ea8Slogwang 	struct ena_com_io_cq *ena_com_io_cq;
118a9643ea8Slogwang 	struct ena_com_io_sq *ena_com_io_sq;
119a9643ea8Slogwang 
120a9643ea8Slogwang 	struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS]
121a9643ea8Slogwang 						__rte_cache_aligned;
122a9643ea8Slogwang 
123a9643ea8Slogwang 	struct rte_mempool *mb_pool;
124a9643ea8Slogwang 	unsigned int port_id;
125a9643ea8Slogwang 	unsigned int id;
126a9643ea8Slogwang 	/* Max length PMD can push to device for LLQ */
127a9643ea8Slogwang 	uint8_t tx_max_header_size;
128a9643ea8Slogwang 	int configured;
1294418919fSjohnjiang 
1304418919fSjohnjiang 	uint8_t *push_buf_intermediate_buf;
1314418919fSjohnjiang 
132a9643ea8Slogwang 	struct ena_adapter *adapter;
133d30ea906Sjfb8856606 	uint64_t offloads;
134d30ea906Sjfb8856606 	u16 sgl_size;
1354418919fSjohnjiang 
136*2d9fd380Sjfb8856606 	bool disable_meta_caching;
137*2d9fd380Sjfb8856606 
1384418919fSjohnjiang 	union {
1394418919fSjohnjiang 		struct ena_stats_rx rx_stats;
1404418919fSjohnjiang 		struct ena_stats_tx tx_stats;
1414418919fSjohnjiang 	};
1424418919fSjohnjiang 
1434b05018fSfengbojiang 	unsigned int numa_socket_id;
144a9643ea8Slogwang } __rte_cache_aligned;
145a9643ea8Slogwang 
/* Lifecycle states of the adapter. */
enum ena_adapter_state {
	ENA_ADAPTER_STATE_FREE    = 0,
	ENA_ADAPTER_STATE_INIT    = 1,
	ENA_ADAPTER_STATE_RUNNING = 2,
	ENA_ADAPTER_STATE_STOPPED = 3,
	ENA_ADAPTER_STATE_CONFIG  = 4,
	ENA_ADAPTER_STATE_CLOSED  = 5,
};
154a9643ea8Slogwang 
155a9643ea8Slogwang struct ena_driver_stats {
156a9643ea8Slogwang 	rte_atomic64_t ierrors;
157a9643ea8Slogwang 	rte_atomic64_t oerrors;
158a9643ea8Slogwang 	rte_atomic64_t rx_nombuf;
159*2d9fd380Sjfb8856606 	u64 rx_drops;
160a9643ea8Slogwang };
161a9643ea8Slogwang 
162a9643ea8Slogwang struct ena_stats_dev {
163a9643ea8Slogwang 	u64 wd_expired;
1644418919fSjohnjiang 	u64 dev_start;
1654418919fSjohnjiang 	u64 dev_stop;
166*2d9fd380Sjfb8856606 	/*
167*2d9fd380Sjfb8856606 	 * Tx drops cannot be reported as the driver statistic, because DPDK
168*2d9fd380Sjfb8856606 	 * rte_eth_stats structure isn't providing appropriate field for that.
169*2d9fd380Sjfb8856606 	 * As a workaround it is being published as an extended statistic.
170*2d9fd380Sjfb8856606 	 */
171*2d9fd380Sjfb8856606 	u64 tx_drops;
172*2d9fd380Sjfb8856606 };
173*2d9fd380Sjfb8856606 
/* ENI (Elastic Network Interface) traffic-shaping counters. */
struct ena_stats_eni {
	/*
	 * The number of packets shaped due to inbound aggregate BW
	 * allowance being exceeded
	 */
	uint64_t bw_in_allowance_exceeded;
	/*
	 * The number of packets shaped due to outbound aggregate BW
	 * allowance being exceeded
	 */
	uint64_t bw_out_allowance_exceeded;
	/* The number of packets shaped due to PPS allowance being exceeded */
	uint64_t pps_allowance_exceeded;
	/*
	 * The number of packets shaped due to connection tracking
	 * allowance being exceeded and leading to failure in establishment
	 * of new connections
	 */
	uint64_t conntrack_allowance_exceeded;
	/*
	 * The number of packets shaped due to linklocal packet rate
	 * allowance being exceeded
	 */
	uint64_t linklocal_allowance_exceeded;
};
199a9643ea8Slogwang 
/* Offload capabilities reported by the device. */
struct ena_offloads {
	bool tso4_supported;    /* TCP segmentation offload over IPv4 */
	bool tx_csum_supported; /* Tx checksum offload */
	bool rx_csum_supported; /* Rx checksum offload */
};
205a9643ea8Slogwang 
206a9643ea8Slogwang /* board specific private data structure */
207a9643ea8Slogwang struct ena_adapter {
208a9643ea8Slogwang 	/* OS defined structs */
209a9643ea8Slogwang 	struct rte_pci_device *pdev;
210a9643ea8Slogwang 	struct rte_eth_dev_data *rte_eth_dev_data;
211a9643ea8Slogwang 	struct rte_eth_dev *rte_dev;
212a9643ea8Slogwang 
213a9643ea8Slogwang 	struct ena_com_dev ena_dev __rte_cache_aligned;
214a9643ea8Slogwang 
215a9643ea8Slogwang 	/* TX */
216a9643ea8Slogwang 	struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
217*2d9fd380Sjfb8856606 	u32 max_tx_ring_size;
218d30ea906Sjfb8856606 	u16 max_tx_sgl_size;
219a9643ea8Slogwang 
220a9643ea8Slogwang 	/* RX */
221a9643ea8Slogwang 	struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
222*2d9fd380Sjfb8856606 	u32 max_rx_ring_size;
2234418919fSjohnjiang 	u16 max_rx_sgl_size;
224a9643ea8Slogwang 
225*2d9fd380Sjfb8856606 	u32 max_num_io_queues;
226a9643ea8Slogwang 	u16 max_mtu;
2274418919fSjohnjiang 	struct ena_offloads offloads;
228a9643ea8Slogwang 
229*2d9fd380Sjfb8856606 	/* The admin queue isn't protected by the lock and is used to
230*2d9fd380Sjfb8856606 	 * retrieve statistics from the device. As there is no guarantee that
231*2d9fd380Sjfb8856606 	 * application won't try to get statistics from multiple threads, it is
232*2d9fd380Sjfb8856606 	 * safer to lock the queue to avoid admin queue failure.
233*2d9fd380Sjfb8856606 	 */
234*2d9fd380Sjfb8856606 	rte_spinlock_t admin_lock;
235*2d9fd380Sjfb8856606 
236a9643ea8Slogwang 	int id_number;
237a9643ea8Slogwang 	char name[ENA_NAME_MAX_LEN];
2384418919fSjohnjiang 	u8 mac_addr[RTE_ETHER_ADDR_LEN];
239a9643ea8Slogwang 
240a9643ea8Slogwang 	void *regs;
241a9643ea8Slogwang 	void *dev_mem_base;
242a9643ea8Slogwang 
243a9643ea8Slogwang 	struct ena_driver_stats *drv_stats;
244a9643ea8Slogwang 	enum ena_adapter_state state;
245a9643ea8Slogwang 
246d30ea906Sjfb8856606 	uint64_t tx_supported_offloads;
247d30ea906Sjfb8856606 	uint64_t tx_selected_offloads;
248d30ea906Sjfb8856606 	uint64_t rx_supported_offloads;
249d30ea906Sjfb8856606 	uint64_t rx_selected_offloads;
250d30ea906Sjfb8856606 
251d30ea906Sjfb8856606 	bool link_status;
252d30ea906Sjfb8856606 
253d30ea906Sjfb8856606 	enum ena_regs_reset_reason_types reset_reason;
254d30ea906Sjfb8856606 
255d30ea906Sjfb8856606 	struct rte_timer timer_wd;
256d30ea906Sjfb8856606 	uint64_t timestamp_wd;
257d30ea906Sjfb8856606 	uint64_t keep_alive_timeout;
258d30ea906Sjfb8856606 
2594418919fSjohnjiang 	struct ena_stats_dev dev_stats;
260*2d9fd380Sjfb8856606 	struct ena_stats_eni eni_stats;
2614418919fSjohnjiang 
262d30ea906Sjfb8856606 	bool trigger_reset;
263d30ea906Sjfb8856606 
264d30ea906Sjfb8856606 	bool wd_state;
265*2d9fd380Sjfb8856606 
266*2d9fd380Sjfb8856606 	bool use_large_llq_hdr;
267a9643ea8Slogwang };
268a9643ea8Slogwang 
269a9643ea8Slogwang #endif /* _ENA_ETHDEV_H_ */
270