/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_net.h>
#include <rte_kvargs.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR	2
#define DRV_MODULE_VER_MINOR	2
#define DRV_MODULE_VER_SUBMINOR	0

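/*
 * Device I/O queues are interleaved: a TX queue gets the even hardware
 * queue id 2*q and its RX counterpart the odd id 2*q + 1.
 */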
#define ENA_IO_TXQ_IDX(q)	(2 * (q))
#define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
/* Reverse version of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)	(((q) - 1) / 2)
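/*
 * The device reports 64-bit counters as {high, low} 32-bit halves;
 * __MERGE_64B_H_L() recombines them into a single 64-bit value.
 */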
#define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift))

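/*
 * The upper nibble of the TCP data_off field holds the header length in
 * 32-bit words; shifting by 4 extracts that word count for the Tx meta.
 */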
#define GET_L4_HDR_LEN(mbuf)					\
	((rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *,	\
		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)

#define ENA_RX_RSS_TABLE_LOG_SIZE	7
#define ENA_RX_RSS_TABLE_SIZE	(1 << ENA_RX_RSS_TABLE_LOG_SIZE)
#define ENA_HASH_KEY_SIZE	40
#define ETH_GSTRING_LEN	32

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

#define ENA_MIN_RING_DESC	128

enum ethtool_stringset {
	ETH_SS_TEST = 0,
	ETH_SS_STATS,
};

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

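/*
 * A stat table entry pairs the stringified field name with its byte
 * offset inside the corresponding stats struct, so the xstats callbacks
 * can read any counter generically from its offset.
 */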
#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_ENI_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, eni)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

/* Device arguments */
#define ENA_DEVARG_LARGE_LLQ_HDR "large_llq_hdr"

/*
 * Each rte_memzone must have a unique name.
 * To satisfy this, count the number of allocations and append the counter
 * to the name.
 */
rte_atomic32_t ena_alloc_cnt;

static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(dev_start),
	ENA_STAT_GLOBAL_ENTRY(dev_stop),
	ENA_STAT_GLOBAL_ENTRY(tx_drops),
};

static const struct ena_stats ena_stats_eni_strings[] = {
	ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(pps_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(linearize),
	ENA_STAT_TX_ENTRY(linearize_failed),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(available_desc),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(refill_partial),
	ENA_STAT_RX_ENTRY(bad_csum),
	ENA_STAT_RX_ENTRY(mbuf_alloc_fail),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_ENI	ARRAY_SIZE(ena_stats_eni_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)

#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
			DEV_TX_OFFLOAD_UDP_CKSUM |\
			DEV_TX_OFFLOAD_IPV4_CKSUM |\
			DEV_TX_OFFLOAD_TCP_TSO)
#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
		       PKT_TX_IP_CKSUM |\
		       PKT_TX_TCP_SEG)

/** Vendor ID used by Amazon devices */
#define PCI_VENDOR_ID_AMAZON 0x1D0F
/** Amazon devices */
#define PCI_DEVICE_ID_ENA_VF		0xEC20
#define PCI_DEVICE_ID_ENA_VF_RSERV0	0xEC21

#define ENA_TX_OFFLOAD_MASK (\
	PKT_TX_L4_MASK | \
	PKT_TX_IPV6 | \
	PKT_TX_IPV4 | \
	PKT_TX_IP_CKSUM | \
	PKT_TX_TCP_SEG)

#define ENA_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)

static const struct rte_pci_id pci_id_ena_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF_RSERV0) },
	{ .device_id = 0 },
};

static struct ena_aenq_handlers aenq_handlers;

static int ena_device_init(struct ena_com_dev *ena_dev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state);
static int ena_dev_configure(struct rte_eth_dev *dev);
static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
	struct ena_tx_buffer *tx_info,
	struct rte_mbuf *mbuf,
	void **push_header,
	uint16_t *header_len);
static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf);
static void ena_tx_cleanup(struct ena_ring *tx_ring);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_txconf *tx_conf);
static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			      uint16_t nb_desc, unsigned int socket_id,
			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp);
static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len);
static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
				    struct ena_com_rx_buf_info *ena_bufs,
				    uint32_t descs,
				    uint16_t *next_to_clean,
				    uint8_t offset);
static uint16_t eth_ena_recv_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
				  struct rte_mbuf *mbuf, uint16_t id);
static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter,
			   bool disable_meta_caching);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
static int ena_stop(struct rte_eth_dev *dev);
static int ena_close(struct rte_eth_dev *dev);
static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
static void ena_rx_queue_release(void *queue);
static void ena_tx_queue_release(void *queue);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ena_create_io_queue(struct ena_ring *ring);
static void ena_queue_stop(struct ena_ring *ring);
static void ena_queue_stop_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static int ena_queue_start(struct ena_ring *ring);
static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
static int ena_infos_get(struct rte_eth_dev *dev,
			 struct rte_eth_dev_info *dev_info);
static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static void ena_interrupt_handler_rte(void *cb_arg);
static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
static void ena_destroy_device(struct rte_eth_dev *eth_dev);
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev);
static int ena_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int n);
static int ena_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *stats,
			  unsigned int n);
static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
				const uint64_t *ids,
				uint64_t *values,
				unsigned int n);
static int ena_process_bool_devarg(const char *key,
				   const char *value,
				   void *opaque);
static int ena_parse_devargs(struct ena_adapter *adapter,
			     struct rte_devargs *devargs);
static int ena_copy_eni_stats(struct ena_adapter *adapter);

static const struct eth_dev_ops ena_dev_ops = {
	.dev_configure = ena_dev_configure,
	.dev_infos_get = ena_infos_get,
	.rx_queue_setup = ena_rx_queue_setup,
	.tx_queue_setup = ena_tx_queue_setup,
	.dev_start = ena_start,
	.dev_stop = ena_stop,
	.link_update = ena_link_update,
	.stats_get = ena_stats_get,
	.xstats_get_names = ena_xstats_get_names,
	.xstats_get = ena_xstats_get,
	.xstats_get_by_id = ena_xstats_get_by_id,
	.mtu_set = ena_mtu_set,
	.rx_queue_release = ena_rx_queue_release,
	.tx_queue_release = ena_tx_queue_release,
	.dev_close = ena_close,
	.dev_reset = ena_dev_reset,
	.reta_update = ena_rss_reta_update,
	.reta_query = ena_rss_reta_query,
};

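/*
 * The RSS key is generated lazily on the first call and then cached, so
 * every port in the process is configured with the same random key.
 */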
void ena_rss_key_fill(void *key, size_t size)
{
	static bool key_generated;
	static uint8_t default_key[ENA_HASH_KEY_SIZE];
	size_t i;

	RTE_ASSERT(size <= ENA_HASH_KEY_SIZE);

	if (!key_generated) {
		for (i = 0; i < ENA_HASH_KEY_SIZE; ++i)
			default_key[i] = rte_rand() & 0xff;
		key_generated = true;
	}

	rte_memcpy(key, default_key, size);
}

static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_rx_ctx *ena_rx_ctx)
{
	uint64_t ol_flags = 0;
	uint32_t packet_type = 0;

	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
		packet_type |= RTE_PTYPE_L4_TCP;
	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
		packet_type |= RTE_PTYPE_L4_UDP;

	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
		packet_type |= RTE_PTYPE_L3_IPV4;
		if (unlikely(ena_rx_ctx->l3_csum_err))
			ol_flags |= PKT_RX_IP_CKSUM_BAD;
		else
			ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	} else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
		packet_type |= RTE_PTYPE_L3_IPV6;
	}

	if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag)
		ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
	else if (unlikely(ena_rx_ctx->l4_csum_err))
		ol_flags |= PKT_RX_L4_CKSUM_BAD;
	else
		ol_flags |= PKT_RX_L4_CKSUM_GOOD;

	mbuf->ol_flags = ol_flags;
	mbuf->packet_type = packet_type;
}

static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
				       struct ena_com_tx_ctx *ena_tx_ctx,
				       uint64_t queue_offloads,
				       bool disable_meta_caching)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
	    (queue_offloads & QUEUE_OFFLOADS)) {
		/* check if TSO is required */
		if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
			ena_tx_ctx->tso_enable = true;

			ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
		}

		/* check if L3 checksum is needed */
		if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
			ena_tx_ctx->l3_csum_enable = true;

		if (mbuf->ol_flags & PKT_TX_IPV6) {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
		} else {
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

			/* set don't fragment (DF) flag */
			if (mbuf->packet_type &
				(RTE_PTYPE_L4_NONFRAG
				 | RTE_PTYPE_INNER_L4_NONFRAG))
				ena_tx_ctx->df = true;
		}

		/* check if L4 checksum is needed */
		if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
		    (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
			ena_tx_ctx->l4_csum_enable = true;
		} else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
				PKT_TX_UDP_CKSUM) &&
				(queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
			ena_tx_ctx->l4_csum_enable = true;
		} else {
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
			ena_tx_ctx->l4_csum_enable = false;
		}

		ena_meta->mss = mbuf->tso_segsz;
		ena_meta->l3_hdr_len = mbuf->l3_len;
		ena_meta->l3_hdr_offset = mbuf->l2_len;

		ena_tx_ctx->meta_valid = true;
	} else if (disable_meta_caching) {
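		/* With meta caching disabled, the device expects valid
		 * meta in every Tx descriptor, so pass explicitly zeroed
		 * meta instead of omitting it.
		 */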
		memset(ena_meta, 0, sizeof(*ena_meta));
		ena_tx_ctx->meta_valid = true;
	} else {
		ena_tx_ctx->meta_valid = false;
	}
}

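/*
 * A req_id outside of the ring size means the completion descriptor is
 * corrupted; schedule a device reset instead of indexing out of bounds.
 */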
static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	PMD_DRV_LOG(ERR, "Invalid rx req_id: %hu\n", req_id);

	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	rx_ring->adapter->trigger_reset = true;
	++rx_ring->rx_stats.bad_req_id;

	return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->mbuf))
			return 0;
	}

	if (tx_info)
		PMD_DRV_LOG(ERR, "tx_info doesn't have valid mbuf\n");
	else
		PMD_DRV_LOG(ERR, "Invalid req_id: %hu\n", req_id);

	/* Trigger device reset */
	++tx_ring->tx_stats.bad_req_id;
	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	tx_ring->adapter->trigger_reset = true;
	return -EFAULT;
}

static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_DPDK;
	host_info->kernel_ver = RTE_VERSION;
	strlcpy((char *)host_info->kernel_ver_str, rte_version(),
		sizeof(host_info->kernel_ver_str));
	host_info->os_dist = RTE_VERSION;
	strlcpy((char *)host_info->os_dist_str, rte_version(),
		sizeof(host_info->os_dist_str));
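	/* Pack major.minor.subminor into a single 32-bit version field. */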
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR <<
			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
	host_info->num_cpus = rte_lcore_count();

	host_info->driver_supported_features =
		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK;

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}

/* This function calculates the number of xstats based on the current config */
static unsigned int ena_xstats_calc_num(struct rte_eth_dev *dev)
{
	return ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENI +
		(dev->data->nb_tx_queues * ENA_STATS_ARRAY_TX) +
		(dev->data->nb_rx_queues * ENA_STATS_ARRAY_RX);
}

static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_xstats_calc_num(adapter->rte_dev);

	/* allocate 32 bytes for each string and 64bit for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
	if (rc) {
		PMD_DRV_LOG(ERR, "Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(&adapter->ena_dev);
	if (rc) {
		if (rc == -ENA_COM_UNSUPPORTED)
			PMD_DRV_LOG(WARNING, "Cannot set host attributes\n");
		else
			PMD_DRV_LOG(ERR, "Cannot set host attributes\n");

		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(&adapter->ena_dev);
}

static int ena_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ena_adapter *adapter = dev->data->dev_private;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
		ret = ena_stop(dev);
	adapter->state = ENA_ADAPTER_STATE_CLOSED;

	ena_rx_queue_release_all(dev);
	ena_tx_queue_release_all(dev);

	rte_free(adapter->drv_stats);
	adapter->drv_stats = NULL;

	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     ena_interrupt_handler_rte,
				     adapter);

	/*
	 * MAC is not allocated dynamically. Setting it to NULL prevents
	 * rte_eth_dev_release_port() from trying to release it.
	 */
	dev->data->mac_addrs = NULL;

	return ret;
}

static int
ena_dev_reset(struct rte_eth_dev *dev)
{
	int rc = 0;

	ena_destroy_device(dev);
	rc = eth_ena_dev_init(dev);
	if (rc)
		PMD_INIT_LOG(CRIT, "Cannot initialize device");

	return rc;
}

static int ena_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc, i;
	u16 entry_value;
	int conf_idx;
	int idx;

	if ((reta_size == 0) || (reta_conf == NULL))
		return -EINVAL;

	if (reta_size > ENA_RX_RSS_TABLE_SIZE) {
		PMD_DRV_LOG(WARNING,
			"indirection table %d is bigger than supported (%d)\n",
			reta_size, ENA_RX_RSS_TABLE_SIZE);
		return -EINVAL;
	}

	for (i = 0 ; i < reta_size ; i++) {
		/* each reta_conf is for 64 entries.
		 * to support 128 we use 2 conf of 64
		 */
		conf_idx = i / RTE_RETA_GROUP_SIZE;
		idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
			entry_value =
				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);

			rc = ena_com_indirect_table_fill_entry(ena_dev,
							       i,
							       entry_value);
			if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
				PMD_DRV_LOG(ERR,
					"Cannot fill indirect table\n");
				return rc;
			}
		}
	}

	rte_spinlock_lock(&adapter->admin_lock);
	rc = ena_com_indirect_table_set(ena_dev);
	rte_spinlock_unlock(&adapter->admin_lock);
	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
		PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n");
		return rc;
	}

	PMD_DRV_LOG(DEBUG, "%s(): RSS configured %d entries for port %d\n",
		__func__, reta_size, adapter->rte_dev->data->port_id);

	return 0;
}

/* Query redirection table. */
static int ena_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;
	int i;
	u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
	int reta_conf_idx;
	int reta_idx;

	if (reta_size == 0 || reta_conf == NULL ||
	    (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
		return -EINVAL;

	rte_spinlock_lock(&adapter->admin_lock);
	rc = ena_com_indirect_table_get(ena_dev, indirect_table);
	rte_spinlock_unlock(&adapter->admin_lock);
	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
		PMD_DRV_LOG(ERR, "cannot get indirect table\n");
		return -ENOTSUP;
	}

	for (i = 0 ; i < reta_size ; i++) {
		reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
		reta_idx = i % RTE_RETA_GROUP_SIZE;
		if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
			reta_conf[reta_conf_idx].reta[reta_idx] =
				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
	}

	return 0;
}

static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues;
	int rc, i;
	u32 val;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc)) {
		PMD_DRV_LOG(ERR, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = i % nb_rx_queues;
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
			PMD_DRV_LOG(ERR, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		PMD_DRV_LOG(INFO, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		PMD_DRV_LOG(INFO, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
		PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n");
		goto err_fill_indir;
	}
	PMD_DRV_LOG(DEBUG, "RSS configured for port %d\n",
		adapter->rte_dev->data->port_id);

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:

	return rc;
}

static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
	int nb_queues = dev->data->nb_rx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_rx_queue_release(queues[i]);
}

static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
	struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;
	int nb_queues = dev->data->nb_tx_queues;
	int i;

	for (i = 0; i < nb_queues; i++)
		ena_tx_queue_release(queues[i]);
}

static void ena_rx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;

	/* Free ring resources */
	if (ring->rx_buffer_info)
		rte_free(ring->rx_buffer_info);
	ring->rx_buffer_info = NULL;

	if (ring->rx_refill_buffer)
		rte_free(ring->rx_refill_buffer);
	ring->rx_refill_buffer = NULL;

	if (ring->empty_rx_reqs)
		rte_free(ring->empty_rx_reqs);
	ring->empty_rx_reqs = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "RX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_tx_queue_release(void *queue)
{
	struct ena_ring *ring = (struct ena_ring *)queue;

	/* Free ring resources */
	if (ring->push_buf_intermediate_buf)
		rte_free(ring->push_buf_intermediate_buf);

	if (ring->tx_buffer_info)
		rte_free(ring->tx_buffer_info);

	if (ring->empty_tx_reqs)
		rte_free(ring->empty_tx_reqs);

	ring->empty_tx_reqs = NULL;
	ring->tx_buffer_info = NULL;
	ring->push_buf_intermediate_buf = NULL;

	ring->configured = 0;

	PMD_DRV_LOG(NOTICE, "TX Queue %d:%d released\n",
		ring->port_id, ring->id);
}

static void ena_rx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_rx_buffer *rx_info = &ring->rx_buffer_info[i];

		if (rx_info->mbuf) {
			rte_mbuf_raw_free(rx_info->mbuf);
			rx_info->mbuf = NULL;
		}
	}
}

static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->ring_size; ++i) {
		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];

		if (tx_buf->mbuf)
			rte_pktmbuf_free(tx_buf->mbuf);
	}
}

static int ena_link_update(struct rte_eth_dev *dev,
			   __rte_unused int wait_to_complete)
{
	struct rte_eth_link *link = &dev->data->dev_link;
	struct ena_adapter *adapter = dev->data->dev_private;

	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
	link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;

	return 0;
}

static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_ring *queues = NULL;
	int nb_queues;
	int i = 0;
	int rc = 0;

	if (ring_type == ENA_RING_TYPE_RX) {
		queues = adapter->rx_ring;
		nb_queues = dev->data->nb_rx_queues;
	} else {
		queues = adapter->tx_ring;
		nb_queues = dev->data->nb_tx_queues;
	}
	for (i = 0; i < nb_queues; i++) {
		if (queues[i].configured) {
			if (ring_type == ENA_RING_TYPE_RX) {
				ena_assert_msg(
					dev->data->rx_queues[i] == &queues[i],
					"Inconsistent state of rx queues\n");
			} else {
				ena_assert_msg(
					dev->data->tx_queues[i] == &queues[i],
					"Inconsistent state of tx queues\n");
			}

			rc = ena_queue_start(&queues[i]);

			if (rc) {
				PMD_INIT_LOG(ERR,
					     "failed to start queue %d type(%d)",
					     i, ring_type);
				goto err;
			}
		}
	}

	return 0;

err:
	while (i--)
		if (queues[i].configured)
			ena_queue_stop(&queues[i]);

	return rc;
}

static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = adapter->max_mtu;

	if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_JUMBO_FRAME)
		max_frame_len =
			adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len;

	return max_frame_len;
}

static int ena_check_valid_conf(struct ena_adapter *adapter)
{
	uint32_t max_frame_len = ena_get_mtu_conf(adapter);

	if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
		PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
			"max mtu: %d, min mtu: %d",
			max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	return 0;
}

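/*
 * Derive the usable queue depths from the device capabilities: take the
 * smaller of the CQ/SQ limits (and the LLQ depth when Tx descriptors are
 * pushed to device memory), then round down to a power of two.
 */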
static int
ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
		       bool use_large_llq_hdr)
{
	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
	struct ena_com_dev *ena_dev = ctx->ena_dev;
	uint32_t max_tx_queue_size;
	uint32_t max_rx_queue_size;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
		max_rx_queue_size = RTE_MIN(max_queue_ext->max_rx_cq_depth,
			max_queue_ext->max_rx_sq_depth);
		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queue_ext->max_tx_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queue_ext->max_per_packet_tx_descs);
	} else {
		struct ena_admin_queue_feature_desc *max_queues =
			&ctx->get_feat_ctx->max_queues;
		max_rx_queue_size = RTE_MIN(max_queues->max_cq_depth,
			max_queues->max_sq_depth);
		max_tx_queue_size = max_queues->max_cq_depth;

		if (ena_dev->tx_mem_queue_type ==
		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				llq->max_llq_depth);
		} else {
			max_tx_queue_size = RTE_MIN(max_tx_queue_size,
				max_queues->max_sq_depth);
		}

		ctx->max_rx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_rx_descs);
		ctx->max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
			max_queues->max_packet_tx_descs);
	}

	/* Round down to the nearest power of 2 */
	max_rx_queue_size = rte_align32prevpow2(max_rx_queue_size);
	max_tx_queue_size = rte_align32prevpow2(max_tx_queue_size);

	if (use_large_llq_hdr) {
		if ((llq->entry_size_ctrl_supported &
		     ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
		    (ena_dev->tx_mem_queue_type ==
		     ENA_ADMIN_PLACEMENT_POLICY_DEV)) {
			max_tx_queue_size /= 2;
			PMD_INIT_LOG(INFO,
				"Forcing large headers and decreasing maximum TX queue size to %d\n",
				max_tx_queue_size);
		} else {
			PMD_INIT_LOG(ERR,
				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
		}
	}

	if (unlikely(max_rx_queue_size == 0 || max_tx_queue_size == 0)) {
		PMD_INIT_LOG(ERR, "Invalid queue size");
		return -EFAULT;
	}

	ctx->max_tx_queue_size = max_tx_queue_size;
	ctx->max_rx_queue_size = max_rx_queue_size;

	return 0;
}

static void ena_stats_restart(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter = dev->data->dev_private;

	rte_atomic64_init(&adapter->drv_stats->ierrors);
	rte_atomic64_init(&adapter->drv_stats->oerrors);
	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
	adapter->drv_stats->rx_drops = 0;
}

static int ena_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats)
{
	struct ena_admin_basic_stats ena_stats;
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;
	int i;
	int max_rings_stats;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -ENOTSUP;

	memset(&ena_stats, 0, sizeof(ena_stats));

	rte_spinlock_lock(&adapter->admin_lock);
	rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
	rte_spinlock_unlock(&adapter->admin_lock);
	if (unlikely(rc)) {
		PMD_DRV_LOG(ERR, "Could not retrieve statistics from ENA\n");
		return rc;
	}

	/* Set of basic statistics from ENA */
	stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high,
					  ena_stats.rx_pkts_low);
	stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high,
					  ena_stats.tx_pkts_low);
	stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high,
					ena_stats.rx_bytes_low);
	stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high,
					ena_stats.tx_bytes_low);

	/* Driver related stats */
	stats->imissed = adapter->drv_stats->rx_drops;
	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
	stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);

	max_rings_stats = RTE_MIN(dev->data->nb_rx_queues,
		RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < max_rings_stats; ++i) {
		struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats;

		stats->q_ibytes[i] = rx_stats->bytes;
		stats->q_ipackets[i] = rx_stats->cnt;
		stats->q_errors[i] = rx_stats->bad_desc_num +
			rx_stats->bad_req_id;
	}

	max_rings_stats = RTE_MIN(dev->data->nb_tx_queues,
		RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < max_rings_stats; ++i) {
		struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats;

		stats->q_obytes[i] = tx_stats->bytes;
		stats->q_opackets[i] = tx_stats->cnt;
	}

	return 0;
}

static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev;
	int rc = 0;

	ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
	ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
	adapter = dev->data->dev_private;

	ena_dev = &adapter->ena_dev;
	ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");

	if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
		PMD_DRV_LOG(ERR,
			"Invalid MTU setting. new_mtu: %d "
			"max mtu: %d min mtu: %d\n",
			mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
		return -EINVAL;
	}

	rc = ena_com_set_dev_mtu(ena_dev, mtu);
	if (rc)
		PMD_DRV_LOG(ERR, "Could not set MTU: %d\n", mtu);
	else
		PMD_DRV_LOG(NOTICE, "Set MTU: %d\n", mtu);

	return rc;
}

static int ena_start(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	uint64_t ticks;
	int rc = 0;

	rc = ena_check_valid_conf(adapter);
	if (rc)
		return rc;

	rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX);
	if (rc)
		return rc;

	rc = ena_queue_start_all(dev, ENA_RING_TYPE_TX);
	if (rc)
		goto err_start_tx;

	if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
	    ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
		rc = ena_rss_init_default(adapter);
		if (rc)
			goto err_rss_init;
	}

	ena_stats_restart(dev);

	adapter->timestamp_wd = rte_get_timer_cycles();
	adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;

	ticks = rte_get_timer_hz();
	rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
			ena_timer_wd_callback, adapter);

	++adapter->dev_stats.dev_start;
	adapter->state = ENA_ADAPTER_STATE_RUNNING;

	return 0;

err_rss_init:
	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
err_start_tx:
	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);
	return rc;
}

static int ena_stop(struct rte_eth_dev *dev)
{
	struct ena_adapter *adapter = dev->data->dev_private;
	struct ena_com_dev *ena_dev = &adapter->ena_dev;
	int rc;

	rte_timer_stop_sync(&adapter->timer_wd);
	ena_queue_stop_all(dev, ENA_RING_TYPE_TX);
	ena_queue_stop_all(dev, ENA_RING_TYPE_RX);

	if (adapter->trigger_reset) {
		rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
		if (rc)
			PMD_DRV_LOG(ERR, "Device reset failed rc=%d\n", rc);
	}

	++adapter->dev_stats.dev_stop;
	adapter->state = ENA_ADAPTER_STATE_STOPPED;
	dev->data->dev_started = 0;

	return 0;
}

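/*
 * Create the device-side I/O queue backing this ring and fetch the SQ/CQ
 * handles used by the datapath; the empty_*_reqs arrays start out as the
 * identity map of free request ids.
 */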
ena_create_io_queue(struct ena_ring * ring)1129d30ea906Sjfb8856606 static int ena_create_io_queue(struct ena_ring *ring)
1130d30ea906Sjfb8856606 {
1131d30ea906Sjfb8856606 struct ena_adapter *adapter;
1132d30ea906Sjfb8856606 struct ena_com_dev *ena_dev;
1133d30ea906Sjfb8856606 struct ena_com_create_io_ctx ctx =
1134d30ea906Sjfb8856606 /* policy set to _HOST just to satisfy icc compiler */
1135d30ea906Sjfb8856606 { ENA_ADMIN_PLACEMENT_POLICY_HOST,
1136d30ea906Sjfb8856606 0, 0, 0, 0, 0 };
1137d30ea906Sjfb8856606 uint16_t ena_qid;
1138d30ea906Sjfb8856606 unsigned int i;
1139d30ea906Sjfb8856606 int rc;
1140d30ea906Sjfb8856606
1141d30ea906Sjfb8856606 adapter = ring->adapter;
1142d30ea906Sjfb8856606 ena_dev = &adapter->ena_dev;
1143d30ea906Sjfb8856606
1144d30ea906Sjfb8856606 if (ring->type == ENA_RING_TYPE_TX) {
1145d30ea906Sjfb8856606 ena_qid = ENA_IO_TXQ_IDX(ring->id);
1146d30ea906Sjfb8856606 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1147d30ea906Sjfb8856606 ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1148d30ea906Sjfb8856606 for (i = 0; i < ring->ring_size; i++)
1149d30ea906Sjfb8856606 ring->empty_tx_reqs[i] = i;
1150d30ea906Sjfb8856606 } else {
1151d30ea906Sjfb8856606 ena_qid = ENA_IO_RXQ_IDX(ring->id);
1152d30ea906Sjfb8856606 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1153d30ea906Sjfb8856606 for (i = 0; i < ring->ring_size; i++)
1154d30ea906Sjfb8856606 ring->empty_rx_reqs[i] = i;
1155d30ea906Sjfb8856606 }
11560c6bd470Sfengbojiang ctx.queue_size = ring->ring_size;
1157d30ea906Sjfb8856606 ctx.qid = ena_qid;
1158d30ea906Sjfb8856606 ctx.msix_vector = -1; /* interrupts not used */
11594b05018fSfengbojiang ctx.numa_node = ring->numa_socket_id;
1160d30ea906Sjfb8856606
1161d30ea906Sjfb8856606 rc = ena_com_create_io_queue(ena_dev, &ctx);
1162d30ea906Sjfb8856606 if (rc) {
11634418919fSjohnjiang PMD_DRV_LOG(ERR,
1164d30ea906Sjfb8856606 "failed to create io queue #%d (qid:%d) rc: %d\n",
1165d30ea906Sjfb8856606 ring->id, ena_qid, rc);
1166d30ea906Sjfb8856606 return rc;
1167d30ea906Sjfb8856606 }
1168d30ea906Sjfb8856606
1169d30ea906Sjfb8856606 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1170d30ea906Sjfb8856606 &ring->ena_com_io_sq,
1171d30ea906Sjfb8856606 &ring->ena_com_io_cq);
1172d30ea906Sjfb8856606 if (rc) {
11734418919fSjohnjiang PMD_DRV_LOG(ERR,
1174d30ea906Sjfb8856606 "Failed to get io queue handlers. queue num %d rc: %d\n",
1175d30ea906Sjfb8856606 ring->id, rc);
1176d30ea906Sjfb8856606 ena_com_destroy_io_queue(ena_dev, ena_qid);
1177d30ea906Sjfb8856606 return rc;
1178d30ea906Sjfb8856606 }
1179d30ea906Sjfb8856606
1180d30ea906Sjfb8856606 if (ring->type == ENA_RING_TYPE_TX)
1181d30ea906Sjfb8856606 ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);
1182d30ea906Sjfb8856606
1183d30ea906Sjfb8856606 return 0;
1184d30ea906Sjfb8856606 }
1185d30ea906Sjfb8856606
11861646932aSjfb8856606 static void ena_queue_stop(struct ena_ring *ring)
1187d30ea906Sjfb8856606 {
11881646932aSjfb8856606 struct ena_com_dev *ena_dev = &ring->adapter->ena_dev;
1189d30ea906Sjfb8856606
11901646932aSjfb8856606 if (ring->type == ENA_RING_TYPE_RX) {
11911646932aSjfb8856606 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(ring->id));
11921646932aSjfb8856606 ena_rx_queue_release_bufs(ring);
11931646932aSjfb8856606 } else {
11941646932aSjfb8856606 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(ring->id));
11951646932aSjfb8856606 ena_tx_queue_release_bufs(ring);
1196d30ea906Sjfb8856606 }
1197d30ea906Sjfb8856606 }
1198d30ea906Sjfb8856606
11991646932aSjfb8856606 static void ena_queue_stop_all(struct rte_eth_dev *dev,
12001646932aSjfb8856606 enum ena_ring_type ring_type)
12011646932aSjfb8856606 {
12024b05018fSfengbojiang struct ena_adapter *adapter = dev->data->dev_private;
12031646932aSjfb8856606 struct ena_ring *queues = NULL;
12041646932aSjfb8856606 uint16_t nb_queues, i;
12051646932aSjfb8856606
12061646932aSjfb8856606 if (ring_type == ENA_RING_TYPE_RX) {
12071646932aSjfb8856606 queues = adapter->rx_ring;
12081646932aSjfb8856606 nb_queues = dev->data->nb_rx_queues;
12091646932aSjfb8856606 } else {
12101646932aSjfb8856606 queues = adapter->tx_ring;
12111646932aSjfb8856606 nb_queues = dev->data->nb_tx_queues;
12121646932aSjfb8856606 }
12131646932aSjfb8856606
12141646932aSjfb8856606 for (i = 0; i < nb_queues; ++i)
12151646932aSjfb8856606 if (queues[i].configured)
12161646932aSjfb8856606 ena_queue_stop(&queues[i]);
12171646932aSjfb8856606 }
12181646932aSjfb8856606
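/*
 * Brings a configured ring to a usable state. For RX rings the queue is
 * also pre-filled with ring_size - 1 buffers; one slot is left unused,
 * as is usual for rings where a full queue must stay distinguishable
 * from an empty one.
 */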
12191646932aSjfb8856606 static int ena_queue_start(struct ena_ring *ring)
1220a9643ea8Slogwang {
12212bfe3f2eSlogwang int rc, bufs_num;
1222a9643ea8Slogwang
1223a9643ea8Slogwang ena_assert_msg(ring->configured == 1,
12241646932aSjfb8856606 "Trying to start unconfigured queue\n");
1225a9643ea8Slogwang
1226d30ea906Sjfb8856606 rc = ena_create_io_queue(ring);
1227d30ea906Sjfb8856606 if (rc) {
12284418919fSjohnjiang PMD_INIT_LOG(ERR, "Failed to create IO queue!");
1229d30ea906Sjfb8856606 return rc;
1230d30ea906Sjfb8856606 }
1231d30ea906Sjfb8856606
1232a9643ea8Slogwang ring->next_to_clean = 0;
1233a9643ea8Slogwang ring->next_to_use = 0;
1234a9643ea8Slogwang
12354418919fSjohnjiang if (ring->type == ENA_RING_TYPE_TX) {
12364418919fSjohnjiang ring->tx_stats.available_desc =
1237*2d9fd380Sjfb8856606 ena_com_free_q_entries(ring->ena_com_io_sq);
1238a9643ea8Slogwang return 0;
12394418919fSjohnjiang }
1240a9643ea8Slogwang
12412bfe3f2eSlogwang bufs_num = ring->ring_size - 1;
12422bfe3f2eSlogwang rc = ena_populate_rx_queue(ring, bufs_num);
12432bfe3f2eSlogwang if (rc != bufs_num) {
12441646932aSjfb8856606 ena_com_destroy_io_queue(&ring->adapter->ena_dev,
12451646932aSjfb8856606 ENA_IO_RXQ_IDX(ring->id));
12462bfe3f2eSlogwang 		PMD_INIT_LOG(ERR, "Failed to populate rx ring!");
1247d30ea906Sjfb8856606 return ENA_COM_FAULT;
1248a9643ea8Slogwang }
1249a9643ea8Slogwang
1250a9643ea8Slogwang return 0;
1251a9643ea8Slogwang }
1252a9643ea8Slogwang
1253a9643ea8Slogwang static int ena_tx_queue_setup(struct rte_eth_dev *dev,
1254a9643ea8Slogwang uint16_t queue_idx,
1255a9643ea8Slogwang uint16_t nb_desc,
12564b05018fSfengbojiang unsigned int socket_id,
1257d30ea906Sjfb8856606 const struct rte_eth_txconf *tx_conf)
1258a9643ea8Slogwang {
1259a9643ea8Slogwang struct ena_ring *txq = NULL;
12604b05018fSfengbojiang struct ena_adapter *adapter = dev->data->dev_private;
1261a9643ea8Slogwang unsigned int i;
1262a9643ea8Slogwang
1263a9643ea8Slogwang txq = &adapter->tx_ring[queue_idx];
1264a9643ea8Slogwang
1265a9643ea8Slogwang if (txq->configured) {
12664418919fSjohnjiang PMD_DRV_LOG(CRIT,
1267a9643ea8Slogwang "API violation. Queue %d is already configured\n",
1268a9643ea8Slogwang queue_idx);
1269d30ea906Sjfb8856606 return ENA_COM_FAULT;
1270a9643ea8Slogwang }
1271a9643ea8Slogwang
12722bfe3f2eSlogwang if (!rte_is_power_of_2(nb_desc)) {
12734418919fSjohnjiang PMD_DRV_LOG(ERR,
12744418919fSjohnjiang "Unsupported size of TX queue: %d is not a power of 2.\n",
12752bfe3f2eSlogwang nb_desc);
12762bfe3f2eSlogwang return -EINVAL;
12772bfe3f2eSlogwang }
12782bfe3f2eSlogwang
1279*2d9fd380Sjfb8856606 if (nb_desc > adapter->max_tx_ring_size) {
12804418919fSjohnjiang PMD_DRV_LOG(ERR,
1281a9643ea8Slogwang "Unsupported size of TX queue (max size: %d)\n",
1282*2d9fd380Sjfb8856606 adapter->max_tx_ring_size);
1283a9643ea8Slogwang return -EINVAL;
1284a9643ea8Slogwang }
1285a9643ea8Slogwang
12864418919fSjohnjiang if (nb_desc == RTE_ETH_DEV_FALLBACK_TX_RINGSIZE)
1287*2d9fd380Sjfb8856606 nb_desc = adapter->max_tx_ring_size;
12884418919fSjohnjiang
1289a9643ea8Slogwang txq->port_id = dev->data->port_id;
1290a9643ea8Slogwang txq->next_to_clean = 0;
1291a9643ea8Slogwang txq->next_to_use = 0;
1292a9643ea8Slogwang txq->ring_size = nb_desc;
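	/* nb_desc was verified to be a power of 2 above, so ring indices
	 * can be advanced with a cheap AND mask (see ENA_IDX_NEXT_MASKED
	 * in the datapath) instead of a modulo.
	 */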
1293*2d9fd380Sjfb8856606 txq->size_mask = nb_desc - 1;
12944b05018fSfengbojiang txq->numa_socket_id = socket_id;
1295a9643ea8Slogwang
1296a9643ea8Slogwang txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
1297a9643ea8Slogwang sizeof(struct ena_tx_buffer) *
1298a9643ea8Slogwang txq->ring_size,
1299a9643ea8Slogwang RTE_CACHE_LINE_SIZE);
1300a9643ea8Slogwang if (!txq->tx_buffer_info) {
13014418919fSjohnjiang PMD_DRV_LOG(ERR, "failed to alloc mem for tx buffer info\n");
1302a9643ea8Slogwang return -ENOMEM;
1303a9643ea8Slogwang }
1304a9643ea8Slogwang
1305a9643ea8Slogwang txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
1306a9643ea8Slogwang sizeof(u16) * txq->ring_size,
1307a9643ea8Slogwang RTE_CACHE_LINE_SIZE);
1308a9643ea8Slogwang if (!txq->empty_tx_reqs) {
13094418919fSjohnjiang PMD_DRV_LOG(ERR, "failed to alloc mem for tx reqs\n");
1310a9643ea8Slogwang rte_free(txq->tx_buffer_info);
1311a9643ea8Slogwang return -ENOMEM;
1312a9643ea8Slogwang }
1313d30ea906Sjfb8856606
13144418919fSjohnjiang txq->push_buf_intermediate_buf =
13154418919fSjohnjiang rte_zmalloc("txq->push_buf_intermediate_buf",
13164418919fSjohnjiang txq->tx_max_header_size,
13174418919fSjohnjiang RTE_CACHE_LINE_SIZE);
13184418919fSjohnjiang if (!txq->push_buf_intermediate_buf) {
13194418919fSjohnjiang PMD_DRV_LOG(ERR, "failed to alloc push buff for LLQ\n");
13204418919fSjohnjiang rte_free(txq->tx_buffer_info);
13214418919fSjohnjiang rte_free(txq->empty_tx_reqs);
13224418919fSjohnjiang return -ENOMEM;
13234418919fSjohnjiang }
13244418919fSjohnjiang
1325a9643ea8Slogwang for (i = 0; i < txq->ring_size; i++)
1326a9643ea8Slogwang txq->empty_tx_reqs[i] = i;
1327a9643ea8Slogwang
1328d30ea906Sjfb8856606 if (tx_conf != NULL) {
1329d30ea906Sjfb8856606 txq->offloads =
1330d30ea906Sjfb8856606 tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1331d30ea906Sjfb8856606 }
1332a9643ea8Slogwang /* Store pointer to this queue in upper layer */
1333a9643ea8Slogwang txq->configured = 1;
1334a9643ea8Slogwang dev->data->tx_queues[queue_idx] = txq;
1335d30ea906Sjfb8856606
1336d30ea906Sjfb8856606 return 0;
1337a9643ea8Slogwang }
1338a9643ea8Slogwang
1339a9643ea8Slogwang static int ena_rx_queue_setup(struct rte_eth_dev *dev,
1340a9643ea8Slogwang uint16_t queue_idx,
1341a9643ea8Slogwang uint16_t nb_desc,
13424b05018fSfengbojiang unsigned int socket_id,
1343a9643ea8Slogwang __rte_unused const struct rte_eth_rxconf *rx_conf,
1344a9643ea8Slogwang struct rte_mempool *mp)
1345a9643ea8Slogwang {
13464b05018fSfengbojiang struct ena_adapter *adapter = dev->data->dev_private;
1347a9643ea8Slogwang struct ena_ring *rxq = NULL;
1348*2d9fd380Sjfb8856606 size_t buffer_size;
1349d30ea906Sjfb8856606 int i;
1350a9643ea8Slogwang
1351a9643ea8Slogwang rxq = &adapter->rx_ring[queue_idx];
1352a9643ea8Slogwang if (rxq->configured) {
13534418919fSjohnjiang PMD_DRV_LOG(CRIT,
1354a9643ea8Slogwang "API violation. Queue %d is already configured\n",
1355a9643ea8Slogwang queue_idx);
1356d30ea906Sjfb8856606 return ENA_COM_FAULT;
1357a9643ea8Slogwang }
1358a9643ea8Slogwang
13594418919fSjohnjiang if (nb_desc == RTE_ETH_DEV_FALLBACK_RX_RINGSIZE)
1360*2d9fd380Sjfb8856606 nb_desc = adapter->max_rx_ring_size;
13614418919fSjohnjiang
13622bfe3f2eSlogwang if (!rte_is_power_of_2(nb_desc)) {
13634418919fSjohnjiang PMD_DRV_LOG(ERR,
13644418919fSjohnjiang "Unsupported size of RX queue: %d is not a power of 2.\n",
13652bfe3f2eSlogwang nb_desc);
13662bfe3f2eSlogwang return -EINVAL;
13672bfe3f2eSlogwang }
13682bfe3f2eSlogwang
1369*2d9fd380Sjfb8856606 if (nb_desc > adapter->max_rx_ring_size) {
13704418919fSjohnjiang PMD_DRV_LOG(ERR,
1371a9643ea8Slogwang "Unsupported size of RX queue (max size: %d)\n",
1372*2d9fd380Sjfb8856606 adapter->max_rx_ring_size);
1373*2d9fd380Sjfb8856606 return -EINVAL;
1374*2d9fd380Sjfb8856606 }
1375*2d9fd380Sjfb8856606
1376*2d9fd380Sjfb8856606 	/* ENA doesn't support buffers smaller than 1400 bytes */
1377*2d9fd380Sjfb8856606 buffer_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
1378*2d9fd380Sjfb8856606 if (buffer_size < ENA_RX_BUF_MIN_SIZE) {
1379*2d9fd380Sjfb8856606 PMD_DRV_LOG(ERR,
1380*2d9fd380Sjfb8856606 "Unsupported size of RX buffer: %zu (min size: %d)\n",
1381*2d9fd380Sjfb8856606 buffer_size, ENA_RX_BUF_MIN_SIZE);
1382a9643ea8Slogwang return -EINVAL;
1383a9643ea8Slogwang }
1384a9643ea8Slogwang
1385a9643ea8Slogwang rxq->port_id = dev->data->port_id;
1386a9643ea8Slogwang rxq->next_to_clean = 0;
1387a9643ea8Slogwang rxq->next_to_use = 0;
1388a9643ea8Slogwang rxq->ring_size = nb_desc;
1389*2d9fd380Sjfb8856606 rxq->size_mask = nb_desc - 1;
13904b05018fSfengbojiang rxq->numa_socket_id = socket_id;
1391a9643ea8Slogwang rxq->mb_pool = mp;
1392a9643ea8Slogwang
1393a9643ea8Slogwang rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
1394*2d9fd380Sjfb8856606 sizeof(struct ena_rx_buffer) * nb_desc,
1395a9643ea8Slogwang RTE_CACHE_LINE_SIZE);
1396a9643ea8Slogwang if (!rxq->rx_buffer_info) {
13974418919fSjohnjiang PMD_DRV_LOG(ERR, "failed to alloc mem for rx buffer info\n");
1398a9643ea8Slogwang return -ENOMEM;
1399a9643ea8Slogwang }
1400a9643ea8Slogwang
1401d30ea906Sjfb8856606 rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
1402d30ea906Sjfb8856606 sizeof(struct rte_mbuf *) * nb_desc,
1403d30ea906Sjfb8856606 RTE_CACHE_LINE_SIZE);
1404d30ea906Sjfb8856606
1405d30ea906Sjfb8856606 if (!rxq->rx_refill_buffer) {
14064418919fSjohnjiang PMD_DRV_LOG(ERR, "failed to alloc mem for rx refill buffer\n");
1407d30ea906Sjfb8856606 rte_free(rxq->rx_buffer_info);
1408d30ea906Sjfb8856606 rxq->rx_buffer_info = NULL;
1409d30ea906Sjfb8856606 return -ENOMEM;
1410d30ea906Sjfb8856606 }
1411d30ea906Sjfb8856606
1412d30ea906Sjfb8856606 rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
1413d30ea906Sjfb8856606 sizeof(uint16_t) * nb_desc,
1414d30ea906Sjfb8856606 RTE_CACHE_LINE_SIZE);
1415d30ea906Sjfb8856606 if (!rxq->empty_rx_reqs) {
14164418919fSjohnjiang PMD_DRV_LOG(ERR, "failed to alloc mem for empty rx reqs\n");
1417d30ea906Sjfb8856606 rte_free(rxq->rx_buffer_info);
1418d30ea906Sjfb8856606 rxq->rx_buffer_info = NULL;
1419d30ea906Sjfb8856606 rte_free(rxq->rx_refill_buffer);
1420d30ea906Sjfb8856606 rxq->rx_refill_buffer = NULL;
1421d30ea906Sjfb8856606 return -ENOMEM;
1422d30ea906Sjfb8856606 }
1423d30ea906Sjfb8856606
1424d30ea906Sjfb8856606 for (i = 0; i < nb_desc; i++)
14251646932aSjfb8856606 rxq->empty_rx_reqs[i] = i;
1426d30ea906Sjfb8856606
1427a9643ea8Slogwang /* Store pointer to this queue in upper layer */
1428a9643ea8Slogwang rxq->configured = 1;
1429a9643ea8Slogwang dev->data->rx_queues[queue_idx] = rxq;
1430a9643ea8Slogwang
1431d30ea906Sjfb8856606 return 0;
1432a9643ea8Slogwang }
1433a9643ea8Slogwang
1434*2d9fd380Sjfb8856606 static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
1435*2d9fd380Sjfb8856606 struct rte_mbuf *mbuf, uint16_t id)
1436*2d9fd380Sjfb8856606 {
1437*2d9fd380Sjfb8856606 struct ena_com_buf ebuf;
1438*2d9fd380Sjfb8856606 int rc;
1439*2d9fd380Sjfb8856606
1440*2d9fd380Sjfb8856606 /* prepare physical address for DMA transaction */
1441*2d9fd380Sjfb8856606 ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
1442*2d9fd380Sjfb8856606 ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
1443*2d9fd380Sjfb8856606
1444*2d9fd380Sjfb8856606 /* pass resource to device */
1445*2d9fd380Sjfb8856606 rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id);
1446*2d9fd380Sjfb8856606 if (unlikely(rc != 0))
1447*2d9fd380Sjfb8856606 PMD_DRV_LOG(WARNING, "failed adding rx desc\n");
1448*2d9fd380Sjfb8856606
1449*2d9fd380Sjfb8856606 return rc;
1450*2d9fd380Sjfb8856606 }
1451*2d9fd380Sjfb8856606
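/*
 * Refills the RX ring with up to @count buffers taken from the mempool
 * in a single bulk call. Descriptors are pushed one by one, but the
 * doorbell is rung only once at the end to keep MMIO writes off the
 * per-packet path.
 */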
1452a9643ea8Slogwang static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
1453a9643ea8Slogwang {
1454a9643ea8Slogwang unsigned int i;
1455a9643ea8Slogwang int rc;
14562bfe3f2eSlogwang uint16_t next_to_use = rxq->next_to_use;
1457d30ea906Sjfb8856606 uint16_t in_use, req_id;
1458d30ea906Sjfb8856606 struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
1459a9643ea8Slogwang
1460a9643ea8Slogwang if (unlikely(!count))
1461a9643ea8Slogwang return 0;
1462a9643ea8Slogwang
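	/* Sanity check: the refill must never make the ring hold more
	 * than ring_size - 1 buffers (one slot always stays unused, see
	 * ena_queue_start).
	 */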
1463*2d9fd380Sjfb8856606 in_use = rxq->ring_size - 1 -
1464*2d9fd380Sjfb8856606 ena_com_free_q_entries(rxq->ena_com_io_sq);
1465*2d9fd380Sjfb8856606 ena_assert_msg(((in_use + count) < rxq->ring_size),
1466*2d9fd380Sjfb8856606 "bad ring state\n");
1467a9643ea8Slogwang
1468a9643ea8Slogwang /* get resources for incoming packets */
1469d30ea906Sjfb8856606 rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count);
1470a9643ea8Slogwang if (unlikely(rc < 0)) {
1471a9643ea8Slogwang rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
14724418919fSjohnjiang ++rxq->rx_stats.mbuf_alloc_fail;
1473a9643ea8Slogwang 		PMD_RX_LOG(DEBUG, "there are not enough free buffers");
1474a9643ea8Slogwang return 0;
1475a9643ea8Slogwang }
1476a9643ea8Slogwang
1477a9643ea8Slogwang for (i = 0; i < count; i++) {
1478d30ea906Sjfb8856606 struct rte_mbuf *mbuf = mbufs[i];
1479*2d9fd380Sjfb8856606 struct ena_rx_buffer *rx_info;
1480a9643ea8Slogwang
1481d30ea906Sjfb8856606 if (likely((i + 4) < count))
1482d30ea906Sjfb8856606 rte_prefetch0(mbufs[i + 4]);
1483d30ea906Sjfb8856606
1484*2d9fd380Sjfb8856606 req_id = rxq->empty_rx_reqs[next_to_use];
1485d30ea906Sjfb8856606 rc = validate_rx_req_id(rxq, req_id);
1486*2d9fd380Sjfb8856606 if (unlikely(rc))
1487d30ea906Sjfb8856606 break;
1488d30ea906Sjfb8856606
1489*2d9fd380Sjfb8856606 rx_info = &rxq->rx_buffer_info[req_id];
1490*2d9fd380Sjfb8856606
1491*2d9fd380Sjfb8856606 rc = ena_add_single_rx_desc(rxq->ena_com_io_sq, mbuf, req_id);
1492*2d9fd380Sjfb8856606 if (unlikely(rc != 0))
1493a9643ea8Slogwang break;
1494*2d9fd380Sjfb8856606
1495*2d9fd380Sjfb8856606 rx_info->mbuf = mbuf;
1496*2d9fd380Sjfb8856606 next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, rxq->size_mask);
1497a9643ea8Slogwang }
1498a9643ea8Slogwang
1499d30ea906Sjfb8856606 if (unlikely(i < count)) {
15004418919fSjohnjiang PMD_DRV_LOG(WARNING, "refilled rx qid %d with only %d "
1501d30ea906Sjfb8856606 "buffers (from %d)\n", rxq->id, i, count);
1502d30ea906Sjfb8856606 rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]),
1503d30ea906Sjfb8856606 count - i);
15044418919fSjohnjiang ++rxq->rx_stats.refill_partial;
1505d30ea906Sjfb8856606 }
1506d30ea906Sjfb8856606
1507a9643ea8Slogwang 	/* When we submitted free resources to device... */
1508d30ea906Sjfb8856606 if (likely(i > 0)) {
1509*2d9fd380Sjfb8856606 /* ...let HW know that it can fill buffers with data. */
1510a9643ea8Slogwang ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
1511a9643ea8Slogwang
1512a9643ea8Slogwang rxq->next_to_use = next_to_use;
1513a9643ea8Slogwang }
1514a9643ea8Slogwang
1515a9643ea8Slogwang return i;
1516a9643ea8Slogwang }
1517a9643ea8Slogwang
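/*
 * One-time device bring-up: init the MMIO read-less mechanism, reset
 * the device, validate the FW version, start the admin queue in polling
 * mode, fetch the device features, and finally enable the AENQ groups
 * that both the driver and the device support.
 */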
1518a9643ea8Slogwang static int ena_device_init(struct ena_com_dev *ena_dev,
1519d30ea906Sjfb8856606 struct ena_com_dev_get_features_ctx *get_feat_ctx,
1520d30ea906Sjfb8856606 bool *wd_state)
1521a9643ea8Slogwang {
1522d30ea906Sjfb8856606 uint32_t aenq_groups;
1523a9643ea8Slogwang int rc;
1524a9643ea8Slogwang bool readless_supported;
1525a9643ea8Slogwang
1526a9643ea8Slogwang /* Initialize mmio registers */
1527a9643ea8Slogwang rc = ena_com_mmio_reg_read_request_init(ena_dev);
1528a9643ea8Slogwang if (rc) {
15294418919fSjohnjiang PMD_DRV_LOG(ERR, "failed to init mmio read less\n");
1530a9643ea8Slogwang return rc;
1531a9643ea8Slogwang }
1532a9643ea8Slogwang
1533a9643ea8Slogwang 	/* The PCIe configuration space revision id indicates whether mmio
1534a9643ea8Slogwang 	 * reg read is disabled.
1535a9643ea8Slogwang 	 */
1536a9643ea8Slogwang readless_supported =
1537a9643ea8Slogwang !(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id
1538a9643ea8Slogwang & ENA_MMIO_DISABLE_REG_READ);
1539a9643ea8Slogwang ena_com_set_mmio_read_mode(ena_dev, readless_supported);
1540a9643ea8Slogwang
1541a9643ea8Slogwang /* reset device */
1542d30ea906Sjfb8856606 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
1543a9643ea8Slogwang if (rc) {
15444418919fSjohnjiang PMD_DRV_LOG(ERR, "cannot reset device\n");
1545a9643ea8Slogwang goto err_mmio_read_less;
1546a9643ea8Slogwang }
1547a9643ea8Slogwang
1548a9643ea8Slogwang /* check FW version */
1549a9643ea8Slogwang rc = ena_com_validate_version(ena_dev);
1550a9643ea8Slogwang if (rc) {
15514418919fSjohnjiang PMD_DRV_LOG(ERR, "device version is too low\n");
1552a9643ea8Slogwang goto err_mmio_read_less;
1553a9643ea8Slogwang }
1554a9643ea8Slogwang
1555a9643ea8Slogwang ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);
1556a9643ea8Slogwang
1557a9643ea8Slogwang /* ENA device administration layer init */
15584418919fSjohnjiang rc = ena_com_admin_init(ena_dev, &aenq_handlers);
1559a9643ea8Slogwang if (rc) {
15604418919fSjohnjiang PMD_DRV_LOG(ERR,
1561a9643ea8Slogwang "cannot initialize ena admin queue with device\n");
1562a9643ea8Slogwang goto err_mmio_read_less;
1563a9643ea8Slogwang }
1564a9643ea8Slogwang
1565a9643ea8Slogwang 	/* To enable the MSI-X interrupts the driver needs to know the
1566a9643ea8Slogwang 	 * number of queues, so it uses polling mode to retrieve this
1567a9643ea8Slogwang 	 * information.
1568a9643ea8Slogwang */
1569a9643ea8Slogwang ena_com_set_admin_polling_mode(ena_dev, true);
1570a9643ea8Slogwang
15712bfe3f2eSlogwang ena_config_host_info(ena_dev);
15722bfe3f2eSlogwang
1573a9643ea8Slogwang /* Get Device Attributes and features */
1574a9643ea8Slogwang rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
1575a9643ea8Slogwang if (rc) {
15764418919fSjohnjiang PMD_DRV_LOG(ERR,
1577a9643ea8Slogwang "cannot get attribute for ena device rc= %d\n", rc);
1578a9643ea8Slogwang goto err_admin_init;
1579a9643ea8Slogwang }
1580a9643ea8Slogwang
1581d30ea906Sjfb8856606 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
1582d30ea906Sjfb8856606 BIT(ENA_ADMIN_NOTIFICATION) |
1583d30ea906Sjfb8856606 BIT(ENA_ADMIN_KEEP_ALIVE) |
1584d30ea906Sjfb8856606 BIT(ENA_ADMIN_FATAL_ERROR) |
1585d30ea906Sjfb8856606 BIT(ENA_ADMIN_WARNING);
1586d30ea906Sjfb8856606
1587d30ea906Sjfb8856606 aenq_groups &= get_feat_ctx->aenq.supported_groups;
1588d30ea906Sjfb8856606 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
1589d30ea906Sjfb8856606 if (rc) {
15904418919fSjohnjiang PMD_DRV_LOG(ERR, "Cannot configure aenq groups rc: %d\n", rc);
1591d30ea906Sjfb8856606 goto err_admin_init;
1592d30ea906Sjfb8856606 }
1593d30ea906Sjfb8856606
1594d30ea906Sjfb8856606 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
1595d30ea906Sjfb8856606
1596a9643ea8Slogwang return 0;
1597a9643ea8Slogwang
1598a9643ea8Slogwang err_admin_init:
1599a9643ea8Slogwang ena_com_admin_destroy(ena_dev);
1600a9643ea8Slogwang
1601a9643ea8Slogwang err_mmio_read_less:
1602a9643ea8Slogwang ena_com_mmio_reg_read_request_destroy(ena_dev);
1603a9643ea8Slogwang
1604a9643ea8Slogwang return rc;
1605a9643ea8Slogwang }
1606a9643ea8Slogwang
1607d30ea906Sjfb8856606 static void ena_interrupt_handler_rte(void *cb_arg)
1608d30ea906Sjfb8856606 {
16094b05018fSfengbojiang struct ena_adapter *adapter = cb_arg;
1610d30ea906Sjfb8856606 struct ena_com_dev *ena_dev = &adapter->ena_dev;
1611d30ea906Sjfb8856606
1612d30ea906Sjfb8856606 ena_com_admin_q_comp_intr_handler(ena_dev);
1613d30ea906Sjfb8856606 if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
1614d30ea906Sjfb8856606 ena_com_aenq_intr_handler(ena_dev, adapter);
1615d30ea906Sjfb8856606 }
1616d30ea906Sjfb8856606
1617d30ea906Sjfb8856606 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
1618d30ea906Sjfb8856606 {
1619d30ea906Sjfb8856606 if (!adapter->wd_state)
1620d30ea906Sjfb8856606 return;
1621d30ea906Sjfb8856606
1622d30ea906Sjfb8856606 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
1623d30ea906Sjfb8856606 return;
1624d30ea906Sjfb8856606
1625d30ea906Sjfb8856606 if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
1626d30ea906Sjfb8856606 adapter->keep_alive_timeout)) {
16274418919fSjohnjiang PMD_DRV_LOG(ERR, "Keep alive timeout\n");
1628d30ea906Sjfb8856606 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
1629d30ea906Sjfb8856606 adapter->trigger_reset = true;
16304418919fSjohnjiang ++adapter->dev_stats.wd_expired;
1631d30ea906Sjfb8856606 }
1632d30ea906Sjfb8856606 }
1633d30ea906Sjfb8856606
1634d30ea906Sjfb8856606 /* Check if admin queue is enabled */
1635d30ea906Sjfb8856606 static void check_for_admin_com_state(struct ena_adapter *adapter)
1636d30ea906Sjfb8856606 {
1637d30ea906Sjfb8856606 if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
16384418919fSjohnjiang PMD_DRV_LOG(ERR, "ENA admin queue is not in running state!\n");
1639d30ea906Sjfb8856606 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
1640d30ea906Sjfb8856606 adapter->trigger_reset = true;
1641d30ea906Sjfb8856606 }
1642d30ea906Sjfb8856606 }
1643d30ea906Sjfb8856606
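/*
 * Periodic watchdog, armed in the start path. It only detects problems
 * (missed keep-alive, dead admin queue) and asks the application to
 * recover by emitting RTE_ETH_EVENT_INTR_RESET; the actual reset is
 * driven from outside the PMD.
 */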
1644d30ea906Sjfb8856606 static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
1645d30ea906Sjfb8856606 void *arg)
1646d30ea906Sjfb8856606 {
16474b05018fSfengbojiang struct ena_adapter *adapter = arg;
1648d30ea906Sjfb8856606 struct rte_eth_dev *dev = adapter->rte_dev;
1649d30ea906Sjfb8856606
1650d30ea906Sjfb8856606 check_for_missing_keep_alive(adapter);
1651d30ea906Sjfb8856606 check_for_admin_com_state(adapter);
1652d30ea906Sjfb8856606
1653d30ea906Sjfb8856606 if (unlikely(adapter->trigger_reset)) {
16544418919fSjohnjiang PMD_DRV_LOG(ERR, "Trigger reset is on\n");
1655*2d9fd380Sjfb8856606 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
1656d30ea906Sjfb8856606 NULL);
1657d30ea906Sjfb8856606 }
1658d30ea906Sjfb8856606 }
1659d30ea906Sjfb8856606
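/*
 * Picks the LLQ entry size: 256B entries are used only when the caller
 * asked for large LLQ headers and the device advertises support for
 * them; otherwise the default 128B entries are kept.
 */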
16604418919fSjohnjiang static inline void
1661*2d9fd380Sjfb8856606 set_default_llq_configurations(struct ena_llq_configurations *llq_config,
1662*2d9fd380Sjfb8856606 struct ena_admin_feature_llq_desc *llq,
1663*2d9fd380Sjfb8856606 bool use_large_llq_hdr)
16644418919fSjohnjiang {
16654418919fSjohnjiang llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
16664418919fSjohnjiang llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
16674418919fSjohnjiang llq_config->llq_num_decs_before_header =
16684418919fSjohnjiang ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
1669*2d9fd380Sjfb8856606
1670*2d9fd380Sjfb8856606 if (use_large_llq_hdr &&
1671*2d9fd380Sjfb8856606 (llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B)) {
1672*2d9fd380Sjfb8856606 llq_config->llq_ring_entry_size =
1673*2d9fd380Sjfb8856606 ENA_ADMIN_LIST_ENTRY_SIZE_256B;
1674*2d9fd380Sjfb8856606 llq_config->llq_ring_entry_size_value = 256;
1675*2d9fd380Sjfb8856606 } else {
1676*2d9fd380Sjfb8856606 llq_config->llq_ring_entry_size =
1677*2d9fd380Sjfb8856606 ENA_ADMIN_LIST_ENTRY_SIZE_128B;
16784418919fSjohnjiang llq_config->llq_ring_entry_size_value = 128;
16794418919fSjohnjiang }
1680*2d9fd380Sjfb8856606 }
16814418919fSjohnjiang
16824418919fSjohnjiang static int
16834418919fSjohnjiang ena_set_queues_placement_policy(struct ena_adapter *adapter,
16844418919fSjohnjiang struct ena_com_dev *ena_dev,
16854418919fSjohnjiang struct ena_admin_feature_llq_desc *llq,
16864418919fSjohnjiang struct ena_llq_configurations *llq_default_configurations)
16874418919fSjohnjiang {
16884418919fSjohnjiang int rc;
16894418919fSjohnjiang u32 llq_feature_mask;
16904418919fSjohnjiang
16914418919fSjohnjiang llq_feature_mask = 1 << ENA_ADMIN_LLQ;
16924418919fSjohnjiang if (!(ena_dev->supported_features & llq_feature_mask)) {
16934418919fSjohnjiang PMD_DRV_LOG(INFO,
16944418919fSjohnjiang "LLQ is not supported. Fallback to host mode policy.\n");
16954418919fSjohnjiang ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
16964418919fSjohnjiang return 0;
16974418919fSjohnjiang }
16984418919fSjohnjiang
16994418919fSjohnjiang rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
17004418919fSjohnjiang if (unlikely(rc)) {
17014418919fSjohnjiang PMD_INIT_LOG(WARNING, "Failed to config dev mode. "
17024418919fSjohnjiang "Fallback to host mode policy.");
17034418919fSjohnjiang ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
17044418919fSjohnjiang return 0;
17054418919fSjohnjiang }
17064418919fSjohnjiang
17074418919fSjohnjiang /* Nothing to config, exit */
17084418919fSjohnjiang if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
17094418919fSjohnjiang return 0;
17104418919fSjohnjiang
17114418919fSjohnjiang if (!adapter->dev_mem_base) {
17124418919fSjohnjiang PMD_DRV_LOG(ERR, "Unable to access LLQ bar resource. "
17134418919fSjohnjiang "Fallback to host mode policy.\n.");
17144418919fSjohnjiang ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
17154418919fSjohnjiang return 0;
17164418919fSjohnjiang }
17174418919fSjohnjiang
17184418919fSjohnjiang ena_dev->mem_bar = adapter->dev_mem_base;
17194418919fSjohnjiang
17204418919fSjohnjiang return 0;
17214418919fSjohnjiang }
17224418919fSjohnjiang
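/*
 * The usable IO queue count is the minimum of the RX and TX SQ/CQ
 * limits reported by the device (in LLQ mode the LLQ count replaces the
 * TX SQ limit), further capped by ENA_MAX_NUM_IO_QUEUES.
 */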
1723*2d9fd380Sjfb8856606 static uint32_t ena_calc_max_io_queue_num(struct ena_com_dev *ena_dev,
1724d30ea906Sjfb8856606 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1725d30ea906Sjfb8856606 {
1726*2d9fd380Sjfb8856606 uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
1727d30ea906Sjfb8856606
17284418919fSjohnjiang /* Regular queues capabilities */
17294418919fSjohnjiang if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
17304418919fSjohnjiang struct ena_admin_queue_ext_feature_fields *max_queue_ext =
17314418919fSjohnjiang &get_feat_ctx->max_queue_ext.max_queue_ext;
17324418919fSjohnjiang io_rx_num = RTE_MIN(max_queue_ext->max_rx_sq_num,
17334418919fSjohnjiang max_queue_ext->max_rx_cq_num);
17344418919fSjohnjiang io_tx_sq_num = max_queue_ext->max_tx_sq_num;
17354418919fSjohnjiang io_tx_cq_num = max_queue_ext->max_tx_cq_num;
17364418919fSjohnjiang } else {
17374418919fSjohnjiang struct ena_admin_queue_feature_desc *max_queues =
17384418919fSjohnjiang &get_feat_ctx->max_queues;
17394418919fSjohnjiang io_tx_sq_num = max_queues->max_sq_num;
17404418919fSjohnjiang io_tx_cq_num = max_queues->max_cq_num;
17414418919fSjohnjiang io_rx_num = RTE_MIN(io_tx_sq_num, io_tx_cq_num);
17424418919fSjohnjiang }
1743d30ea906Sjfb8856606
17444418919fSjohnjiang /* In case of LLQ use the llq number in the get feature cmd */
17454418919fSjohnjiang if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
17464418919fSjohnjiang io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
17474418919fSjohnjiang
1748*2d9fd380Sjfb8856606 max_num_io_queues = RTE_MIN(ENA_MAX_NUM_IO_QUEUES, io_rx_num);
1749*2d9fd380Sjfb8856606 max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_sq_num);
1750*2d9fd380Sjfb8856606 max_num_io_queues = RTE_MIN(max_num_io_queues, io_tx_cq_num);
1751d30ea906Sjfb8856606
1752*2d9fd380Sjfb8856606 if (unlikely(max_num_io_queues == 0)) {
17534418919fSjohnjiang PMD_DRV_LOG(ERR, "Number of IO queues should not be 0\n");
1754d30ea906Sjfb8856606 return -EFAULT;
1755d30ea906Sjfb8856606 }
1756d30ea906Sjfb8856606
1757*2d9fd380Sjfb8856606 return max_num_io_queues;
1758d30ea906Sjfb8856606 }
1759d30ea906Sjfb8856606
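/*
 * PMD init entry point (primary process only): maps the BARs, parses
 * devargs, initializes the device and placement policy, sizes the
 * rings, registers the admin interrupt handler and switches the admin
 * queue from polling to interrupt mode.
 */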
1760a9643ea8Slogwang static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
1761a9643ea8Slogwang {
17624418919fSjohnjiang struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
1763a9643ea8Slogwang struct rte_pci_device *pci_dev;
1764d30ea906Sjfb8856606 struct rte_intr_handle *intr_handle;
17654b05018fSfengbojiang struct ena_adapter *adapter = eth_dev->data->dev_private;
1766a9643ea8Slogwang struct ena_com_dev *ena_dev = &adapter->ena_dev;
1767a9643ea8Slogwang struct ena_com_dev_get_features_ctx get_feat_ctx;
17684418919fSjohnjiang struct ena_llq_configurations llq_config;
17694418919fSjohnjiang const char *queue_type_str;
1770*2d9fd380Sjfb8856606 uint32_t max_num_io_queues;
17714418919fSjohnjiang int rc;
1772a9643ea8Slogwang static int adapters_found;
1773*2d9fd380Sjfb8856606 bool disable_meta_caching;
17740c6bd470Sfengbojiang bool wd_state = false;
1775a9643ea8Slogwang
1776a9643ea8Slogwang eth_dev->dev_ops = &ena_dev_ops;
1777a9643ea8Slogwang eth_dev->rx_pkt_burst = ð_ena_recv_pkts;
1778a9643ea8Slogwang eth_dev->tx_pkt_burst = ð_ena_xmit_pkts;
17792bfe3f2eSlogwang eth_dev->tx_pkt_prepare = ð_ena_prep_pkts;
1780a9643ea8Slogwang
1781a9643ea8Slogwang if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1782a9643ea8Slogwang return 0;
1783a9643ea8Slogwang
1784*2d9fd380Sjfb8856606 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1785*2d9fd380Sjfb8856606
17861646932aSjfb8856606 memset(adapter, 0, sizeof(struct ena_adapter));
17871646932aSjfb8856606 ena_dev = &adapter->ena_dev;
17881646932aSjfb8856606
17891646932aSjfb8856606 adapter->rte_eth_dev_data = eth_dev->data;
17901646932aSjfb8856606 adapter->rte_dev = eth_dev;
17911646932aSjfb8856606
17922bfe3f2eSlogwang pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1793a9643ea8Slogwang adapter->pdev = pci_dev;
1794a9643ea8Slogwang
17952bfe3f2eSlogwang PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
1796a9643ea8Slogwang pci_dev->addr.domain,
1797a9643ea8Slogwang pci_dev->addr.bus,
1798a9643ea8Slogwang pci_dev->addr.devid,
1799a9643ea8Slogwang pci_dev->addr.function);
1800a9643ea8Slogwang
1801d30ea906Sjfb8856606 intr_handle = &pci_dev->intr_handle;
1802d30ea906Sjfb8856606
1803a9643ea8Slogwang adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
1804a9643ea8Slogwang adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
1805a9643ea8Slogwang
1806d30ea906Sjfb8856606 if (!adapter->regs) {
18072bfe3f2eSlogwang PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
1808a9643ea8Slogwang ENA_REGS_BAR);
1809d30ea906Sjfb8856606 return -ENXIO;
1810d30ea906Sjfb8856606 }
1811a9643ea8Slogwang
1812a9643ea8Slogwang ena_dev->reg_bar = adapter->regs;
1813a9643ea8Slogwang ena_dev->dmadev = adapter->pdev;
1814a9643ea8Slogwang
1815a9643ea8Slogwang adapter->id_number = adapters_found;
1816a9643ea8Slogwang
1817a9643ea8Slogwang snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d",
1818a9643ea8Slogwang adapter->id_number);
1819a9643ea8Slogwang
1820*2d9fd380Sjfb8856606 rc = ena_parse_devargs(adapter, pci_dev->device.devargs);
1821*2d9fd380Sjfb8856606 if (rc != 0) {
1822*2d9fd380Sjfb8856606 PMD_INIT_LOG(CRIT, "Failed to parse devargs\n");
1823*2d9fd380Sjfb8856606 goto err;
1824*2d9fd380Sjfb8856606 }
1825*2d9fd380Sjfb8856606
1826a9643ea8Slogwang /* device specific initialization routine */
1827d30ea906Sjfb8856606 rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
1828a9643ea8Slogwang if (rc) {
18292bfe3f2eSlogwang PMD_INIT_LOG(CRIT, "Failed to init ENA device");
1830d30ea906Sjfb8856606 goto err;
1831a9643ea8Slogwang }
1832d30ea906Sjfb8856606 adapter->wd_state = wd_state;
1833a9643ea8Slogwang
1834*2d9fd380Sjfb8856606 set_default_llq_configurations(&llq_config, &get_feat_ctx.llq,
1835*2d9fd380Sjfb8856606 adapter->use_large_llq_hdr);
18364418919fSjohnjiang rc = ena_set_queues_placement_policy(adapter, ena_dev,
18374418919fSjohnjiang &get_feat_ctx.llq, &llq_config);
18384418919fSjohnjiang if (unlikely(rc)) {
18394418919fSjohnjiang PMD_INIT_LOG(CRIT, "Failed to set placement policy");
18404418919fSjohnjiang return rc;
18414418919fSjohnjiang }
18424418919fSjohnjiang
18434418919fSjohnjiang if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
18444418919fSjohnjiang queue_type_str = "Regular";
18454418919fSjohnjiang else
18464418919fSjohnjiang queue_type_str = "Low latency";
18474418919fSjohnjiang PMD_DRV_LOG(INFO, "Placement policy: %s\n", queue_type_str);
18484418919fSjohnjiang
18494418919fSjohnjiang calc_queue_ctx.ena_dev = ena_dev;
18504418919fSjohnjiang calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
18515af785ecSfengbojiang(姜凤波)
1852*2d9fd380Sjfb8856606 max_num_io_queues = ena_calc_max_io_queue_num(ena_dev, &get_feat_ctx);
1853*2d9fd380Sjfb8856606 rc = ena_calc_io_queue_size(&calc_queue_ctx,
1854*2d9fd380Sjfb8856606 adapter->use_large_llq_hdr);
1855*2d9fd380Sjfb8856606 if (unlikely((rc != 0) || (max_num_io_queues == 0))) {
1856d30ea906Sjfb8856606 rc = -EFAULT;
1857d30ea906Sjfb8856606 goto err_device_destroy;
1858d30ea906Sjfb8856606 }
1859a9643ea8Slogwang
1860*2d9fd380Sjfb8856606 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
1861*2d9fd380Sjfb8856606 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
18624418919fSjohnjiang adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
18634418919fSjohnjiang adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
1864*2d9fd380Sjfb8856606 adapter->max_num_io_queues = max_num_io_queues;
1865*2d9fd380Sjfb8856606
1866*2d9fd380Sjfb8856606 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1867*2d9fd380Sjfb8856606 disable_meta_caching =
1868*2d9fd380Sjfb8856606 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
1869*2d9fd380Sjfb8856606 BIT(ENA_ADMIN_DISABLE_META_CACHING));
1870*2d9fd380Sjfb8856606 } else {
1871*2d9fd380Sjfb8856606 disable_meta_caching = false;
1872*2d9fd380Sjfb8856606 }
1873d30ea906Sjfb8856606
1874a9643ea8Slogwang /* prepare ring structures */
1875*2d9fd380Sjfb8856606 ena_init_rings(adapter, disable_meta_caching);
1876a9643ea8Slogwang
1877a9643ea8Slogwang ena_config_debug_area(adapter);
1878a9643ea8Slogwang
1879a9643ea8Slogwang /* Set max MTU for this device */
1880a9643ea8Slogwang adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
1881a9643ea8Slogwang
18824418919fSjohnjiang /* set device support for offloads */
18834418919fSjohnjiang adapter->offloads.tso4_supported = (get_feat_ctx.offload.tx &
18844418919fSjohnjiang ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0;
18854418919fSjohnjiang adapter->offloads.tx_csum_supported = (get_feat_ctx.offload.tx &
18864418919fSjohnjiang ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) != 0;
18874418919fSjohnjiang adapter->offloads.rx_csum_supported =
18884418919fSjohnjiang (get_feat_ctx.offload.rx_supported &
18894418919fSjohnjiang ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) != 0;
18902bfe3f2eSlogwang
1891a9643ea8Slogwang /* Copy MAC address and point DPDK to it */
18924418919fSjohnjiang eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr;
18934418919fSjohnjiang rte_ether_addr_copy((struct rte_ether_addr *)
18944418919fSjohnjiang get_feat_ctx.dev_attr.mac_addr,
18954418919fSjohnjiang (struct rte_ether_addr *)adapter->mac_addr);
1896a9643ea8Slogwang
1897a9643ea8Slogwang adapter->drv_stats = rte_zmalloc("adapter stats",
1898a9643ea8Slogwang sizeof(*adapter->drv_stats),
1899a9643ea8Slogwang RTE_CACHE_LINE_SIZE);
1900a9643ea8Slogwang if (!adapter->drv_stats) {
19014418919fSjohnjiang PMD_DRV_LOG(ERR, "failed to alloc mem for adapter stats\n");
1902d30ea906Sjfb8856606 rc = -ENOMEM;
1903d30ea906Sjfb8856606 goto err_delete_debug_area;
1904a9643ea8Slogwang }
1905a9643ea8Slogwang
1906*2d9fd380Sjfb8856606 rte_spinlock_init(&adapter->admin_lock);
1907*2d9fd380Sjfb8856606
1908d30ea906Sjfb8856606 rte_intr_callback_register(intr_handle,
1909d30ea906Sjfb8856606 ena_interrupt_handler_rte,
1910d30ea906Sjfb8856606 adapter);
1911d30ea906Sjfb8856606 rte_intr_enable(intr_handle);
1912d30ea906Sjfb8856606 ena_com_set_admin_polling_mode(ena_dev, false);
1913d30ea906Sjfb8856606 ena_com_admin_aenq_enable(ena_dev);
1914d30ea906Sjfb8856606
1915d30ea906Sjfb8856606 if (adapters_found == 0)
1916d30ea906Sjfb8856606 rte_timer_subsystem_init();
1917d30ea906Sjfb8856606 rte_timer_init(&adapter->timer_wd);
1918d30ea906Sjfb8856606
1919a9643ea8Slogwang adapters_found++;
1920a9643ea8Slogwang adapter->state = ENA_ADAPTER_STATE_INIT;
1921a9643ea8Slogwang
1922a9643ea8Slogwang return 0;
1923d30ea906Sjfb8856606
1924d30ea906Sjfb8856606 err_delete_debug_area:
1925d30ea906Sjfb8856606 ena_com_delete_debug_area(ena_dev);
1926d30ea906Sjfb8856606
1927d30ea906Sjfb8856606 err_device_destroy:
1928d30ea906Sjfb8856606 ena_com_delete_host_info(ena_dev);
1929d30ea906Sjfb8856606 ena_com_admin_destroy(ena_dev);
1930d30ea906Sjfb8856606
1931d30ea906Sjfb8856606 err:
1932d30ea906Sjfb8856606 return rc;
1933d30ea906Sjfb8856606 }
1934d30ea906Sjfb8856606
19351646932aSjfb8856606 static void ena_destroy_device(struct rte_eth_dev *eth_dev)
1936d30ea906Sjfb8856606 {
19374b05018fSfengbojiang struct ena_adapter *adapter = eth_dev->data->dev_private;
19381646932aSjfb8856606 struct ena_com_dev *ena_dev = &adapter->ena_dev;
1939d30ea906Sjfb8856606
19401646932aSjfb8856606 if (adapter->state == ENA_ADAPTER_STATE_FREE)
19411646932aSjfb8856606 return;
19421646932aSjfb8856606
19431646932aSjfb8856606 ena_com_set_admin_running_state(ena_dev, false);
1944d30ea906Sjfb8856606
1945d30ea906Sjfb8856606 if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
1946d30ea906Sjfb8856606 ena_close(eth_dev);
1947d30ea906Sjfb8856606
19481646932aSjfb8856606 ena_com_delete_debug_area(ena_dev);
19491646932aSjfb8856606 ena_com_delete_host_info(ena_dev);
19501646932aSjfb8856606
19511646932aSjfb8856606 ena_com_abort_admin_commands(ena_dev);
19521646932aSjfb8856606 ena_com_wait_for_abort_completion(ena_dev);
19531646932aSjfb8856606 ena_com_admin_destroy(ena_dev);
19541646932aSjfb8856606 ena_com_mmio_reg_read_request_destroy(ena_dev);
19551646932aSjfb8856606
19561646932aSjfb8856606 adapter->state = ENA_ADAPTER_STATE_FREE;
19571646932aSjfb8856606 }
19581646932aSjfb8856606
19591646932aSjfb8856606 static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
19601646932aSjfb8856606 {
19611646932aSjfb8856606 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
19621646932aSjfb8856606 return 0;
19631646932aSjfb8856606
19641646932aSjfb8856606 ena_destroy_device(eth_dev);
19651646932aSjfb8856606
1966d30ea906Sjfb8856606 return 0;
1967a9643ea8Slogwang }
1968a9643ea8Slogwang
1969a9643ea8Slogwang static int ena_dev_configure(struct rte_eth_dev *dev)
1970a9643ea8Slogwang {
19714b05018fSfengbojiang struct ena_adapter *adapter = dev->data->dev_private;
1972a9643ea8Slogwang
19735af785ecSfengbojiang(姜凤波) adapter->state = ENA_ADAPTER_STATE_CONFIG;
19745af785ecSfengbojiang(姜凤波)
1975d30ea906Sjfb8856606 adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
1976d30ea906Sjfb8856606 adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
1977a9643ea8Slogwang return 0;
1978a9643ea8Slogwang }
1979a9643ea8Slogwang
1980*2d9fd380Sjfb8856606 static void ena_init_rings(struct ena_adapter *adapter,
1981*2d9fd380Sjfb8856606 bool disable_meta_caching)
1982a9643ea8Slogwang {
1983*2d9fd380Sjfb8856606 size_t i;
1984a9643ea8Slogwang
1985*2d9fd380Sjfb8856606 for (i = 0; i < adapter->max_num_io_queues; i++) {
1986a9643ea8Slogwang struct ena_ring *ring = &adapter->tx_ring[i];
1987a9643ea8Slogwang
1988a9643ea8Slogwang ring->configured = 0;
1989a9643ea8Slogwang ring->type = ENA_RING_TYPE_TX;
1990a9643ea8Slogwang ring->adapter = adapter;
1991a9643ea8Slogwang ring->id = i;
1992a9643ea8Slogwang ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
1993a9643ea8Slogwang ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
1994d30ea906Sjfb8856606 ring->sgl_size = adapter->max_tx_sgl_size;
1995*2d9fd380Sjfb8856606 ring->disable_meta_caching = disable_meta_caching;
1996a9643ea8Slogwang }
1997a9643ea8Slogwang
1998*2d9fd380Sjfb8856606 for (i = 0; i < adapter->max_num_io_queues; i++) {
1999a9643ea8Slogwang struct ena_ring *ring = &adapter->rx_ring[i];
2000a9643ea8Slogwang
2001a9643ea8Slogwang ring->configured = 0;
2002a9643ea8Slogwang ring->type = ENA_RING_TYPE_RX;
2003a9643ea8Slogwang ring->adapter = adapter;
2004a9643ea8Slogwang ring->id = i;
20054418919fSjohnjiang ring->sgl_size = adapter->max_rx_sgl_size;
2006a9643ea8Slogwang }
2007a9643ea8Slogwang }
2008a9643ea8Slogwang
20094418919fSjohnjiang static int ena_infos_get(struct rte_eth_dev *dev,
2010a9643ea8Slogwang struct rte_eth_dev_info *dev_info)
2011a9643ea8Slogwang {
2012a9643ea8Slogwang struct ena_adapter *adapter;
2013a9643ea8Slogwang struct ena_com_dev *ena_dev;
2014d30ea906Sjfb8856606 uint64_t rx_feat = 0, tx_feat = 0;
2015a9643ea8Slogwang
20164418919fSjohnjiang ena_assert_msg(dev->data != NULL, "Uninitialized device\n");
20174418919fSjohnjiang ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device\n");
20184b05018fSfengbojiang adapter = dev->data->dev_private;
2019a9643ea8Slogwang
2020a9643ea8Slogwang ena_dev = &adapter->ena_dev;
20214418919fSjohnjiang ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
2022a9643ea8Slogwang
2023a9643ea8Slogwang dev_info->speed_capa =
2024a9643ea8Slogwang ETH_LINK_SPEED_1G |
2025a9643ea8Slogwang ETH_LINK_SPEED_2_5G |
2026a9643ea8Slogwang ETH_LINK_SPEED_5G |
2027a9643ea8Slogwang ETH_LINK_SPEED_10G |
2028a9643ea8Slogwang ETH_LINK_SPEED_25G |
2029a9643ea8Slogwang ETH_LINK_SPEED_40G |
2030a9643ea8Slogwang ETH_LINK_SPEED_50G |
2031a9643ea8Slogwang ETH_LINK_SPEED_100G;
2032a9643ea8Slogwang
2033a9643ea8Slogwang /* Set Tx & Rx features available for device */
20344418919fSjohnjiang if (adapter->offloads.tso4_supported)
2035a9643ea8Slogwang tx_feat |= DEV_TX_OFFLOAD_TCP_TSO;
2036a9643ea8Slogwang
20374418919fSjohnjiang if (adapter->offloads.tx_csum_supported)
2038a9643ea8Slogwang tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM |
2039a9643ea8Slogwang DEV_TX_OFFLOAD_UDP_CKSUM |
2040a9643ea8Slogwang DEV_TX_OFFLOAD_TCP_CKSUM;
2041a9643ea8Slogwang
20424418919fSjohnjiang if (adapter->offloads.rx_csum_supported)
2043a9643ea8Slogwang rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
2044a9643ea8Slogwang DEV_RX_OFFLOAD_UDP_CKSUM |
2045a9643ea8Slogwang DEV_RX_OFFLOAD_TCP_CKSUM;
2046a9643ea8Slogwang
2047d30ea906Sjfb8856606 rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
2048d30ea906Sjfb8856606
2049a9643ea8Slogwang /* Inform framework about available features */
2050a9643ea8Slogwang dev_info->rx_offload_capa = rx_feat;
2051d30ea906Sjfb8856606 dev_info->rx_queue_offload_capa = rx_feat;
2052a9643ea8Slogwang dev_info->tx_offload_capa = tx_feat;
2053d30ea906Sjfb8856606 dev_info->tx_queue_offload_capa = tx_feat;
2054a9643ea8Slogwang
20551646932aSjfb8856606 dev_info->flow_type_rss_offloads = ETH_RSS_IP | ETH_RSS_TCP |
20561646932aSjfb8856606 ETH_RSS_UDP;
20571646932aSjfb8856606
2058a9643ea8Slogwang dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
2059a9643ea8Slogwang dev_info->max_rx_pktlen = adapter->max_mtu;
2060a9643ea8Slogwang dev_info->max_mac_addrs = 1;
2061a9643ea8Slogwang
2062*2d9fd380Sjfb8856606 dev_info->max_rx_queues = adapter->max_num_io_queues;
2063*2d9fd380Sjfb8856606 dev_info->max_tx_queues = adapter->max_num_io_queues;
2064a9643ea8Slogwang dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE;
2065d30ea906Sjfb8856606
2066d30ea906Sjfb8856606 adapter->tx_supported_offloads = tx_feat;
2067d30ea906Sjfb8856606 adapter->rx_supported_offloads = rx_feat;
2068d30ea906Sjfb8856606
2069*2d9fd380Sjfb8856606 dev_info->rx_desc_lim.nb_max = adapter->max_rx_ring_size;
2070d30ea906Sjfb8856606 dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
20714418919fSjohnjiang dev_info->rx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
20724418919fSjohnjiang adapter->max_rx_sgl_size);
20734418919fSjohnjiang dev_info->rx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
20744418919fSjohnjiang adapter->max_rx_sgl_size);
2075d30ea906Sjfb8856606
2076*2d9fd380Sjfb8856606 dev_info->tx_desc_lim.nb_max = adapter->max_tx_ring_size;
2077d30ea906Sjfb8856606 dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
2078d30ea906Sjfb8856606 dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
20794418919fSjohnjiang adapter->max_tx_sgl_size);
2080d30ea906Sjfb8856606 dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
20814418919fSjohnjiang adapter->max_tx_sgl_size);
20824418919fSjohnjiang
20834418919fSjohnjiang return 0;
2084a9643ea8Slogwang }
2085a9643ea8Slogwang
2086*2d9fd380Sjfb8856606 static inline void ena_init_rx_mbuf(struct rte_mbuf *mbuf, uint16_t len)
2087*2d9fd380Sjfb8856606 {
2088*2d9fd380Sjfb8856606 mbuf->data_len = len;
2089*2d9fd380Sjfb8856606 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
2090*2d9fd380Sjfb8856606 mbuf->refcnt = 1;
2091*2d9fd380Sjfb8856606 mbuf->next = NULL;
2092*2d9fd380Sjfb8856606 }
2093*2d9fd380Sjfb8856606
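/*
 * Assembles a single mbuf chain out of up to @descs RX descriptors.
 * Returns NULL on an invalid req_id; the caller is then expected to
 * recycle the affected descriptors (see eth_ena_recv_pkts).
 */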
2094*2d9fd380Sjfb8856606 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
2095*2d9fd380Sjfb8856606 struct ena_com_rx_buf_info *ena_bufs,
2096*2d9fd380Sjfb8856606 uint32_t descs,
2097*2d9fd380Sjfb8856606 uint16_t *next_to_clean,
2098*2d9fd380Sjfb8856606 uint8_t offset)
2099*2d9fd380Sjfb8856606 {
2100*2d9fd380Sjfb8856606 struct rte_mbuf *mbuf;
2101*2d9fd380Sjfb8856606 struct rte_mbuf *mbuf_head;
2102*2d9fd380Sjfb8856606 struct ena_rx_buffer *rx_info;
2103*2d9fd380Sjfb8856606 int rc;
2104*2d9fd380Sjfb8856606 uint16_t ntc, len, req_id, buf = 0;
2105*2d9fd380Sjfb8856606
2106*2d9fd380Sjfb8856606 if (unlikely(descs == 0))
2107*2d9fd380Sjfb8856606 return NULL;
2108*2d9fd380Sjfb8856606
2109*2d9fd380Sjfb8856606 ntc = *next_to_clean;
2110*2d9fd380Sjfb8856606
2111*2d9fd380Sjfb8856606 len = ena_bufs[buf].len;
2112*2d9fd380Sjfb8856606 req_id = ena_bufs[buf].req_id;
2113*2d9fd380Sjfb8856606 if (unlikely(validate_rx_req_id(rx_ring, req_id)))
2114*2d9fd380Sjfb8856606 return NULL;
2115*2d9fd380Sjfb8856606
2116*2d9fd380Sjfb8856606 rx_info = &rx_ring->rx_buffer_info[req_id];
2117*2d9fd380Sjfb8856606
2118*2d9fd380Sjfb8856606 mbuf = rx_info->mbuf;
2119*2d9fd380Sjfb8856606 RTE_ASSERT(mbuf != NULL);
2120*2d9fd380Sjfb8856606
2121*2d9fd380Sjfb8856606 ena_init_rx_mbuf(mbuf, len);
2122*2d9fd380Sjfb8856606
2123*2d9fd380Sjfb8856606 /* Fill the mbuf head with the data specific for 1st segment. */
2124*2d9fd380Sjfb8856606 mbuf_head = mbuf;
2125*2d9fd380Sjfb8856606 mbuf_head->nb_segs = descs;
2126*2d9fd380Sjfb8856606 mbuf_head->port = rx_ring->port_id;
2127*2d9fd380Sjfb8856606 mbuf_head->pkt_len = len;
2128*2d9fd380Sjfb8856606 mbuf_head->data_off += offset;
2129*2d9fd380Sjfb8856606
2130*2d9fd380Sjfb8856606 rx_info->mbuf = NULL;
2131*2d9fd380Sjfb8856606 rx_ring->empty_rx_reqs[ntc] = req_id;
2132*2d9fd380Sjfb8856606 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
2133*2d9fd380Sjfb8856606
2134*2d9fd380Sjfb8856606 while (--descs) {
2135*2d9fd380Sjfb8856606 ++buf;
2136*2d9fd380Sjfb8856606 len = ena_bufs[buf].len;
2137*2d9fd380Sjfb8856606 req_id = ena_bufs[buf].req_id;
2138*2d9fd380Sjfb8856606 if (unlikely(validate_rx_req_id(rx_ring, req_id))) {
2139*2d9fd380Sjfb8856606 rte_mbuf_raw_free(mbuf_head);
2140*2d9fd380Sjfb8856606 return NULL;
2141*2d9fd380Sjfb8856606 }
2142*2d9fd380Sjfb8856606
2143*2d9fd380Sjfb8856606 rx_info = &rx_ring->rx_buffer_info[req_id];
2144*2d9fd380Sjfb8856606 RTE_ASSERT(rx_info->mbuf != NULL);
2145*2d9fd380Sjfb8856606
2146*2d9fd380Sjfb8856606 if (unlikely(len == 0)) {
2147*2d9fd380Sjfb8856606 /*
2148*2d9fd380Sjfb8856606 * Some devices can pass descriptor with the length 0.
2149*2d9fd380Sjfb8856606 * To avoid confusion, the PMD is simply putting the
2150*2d9fd380Sjfb8856606 * descriptor back, as it was never used. We'll avoid
2151*2d9fd380Sjfb8856606 * mbuf allocation that way.
2152*2d9fd380Sjfb8856606 */
2153*2d9fd380Sjfb8856606 rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq,
2154*2d9fd380Sjfb8856606 rx_info->mbuf, req_id);
2155*2d9fd380Sjfb8856606 if (unlikely(rc != 0)) {
2156*2d9fd380Sjfb8856606 /* Free the mbuf in case of an error. */
2157*2d9fd380Sjfb8856606 rte_mbuf_raw_free(rx_info->mbuf);
2158*2d9fd380Sjfb8856606 } else {
2159*2d9fd380Sjfb8856606 /*
2160*2d9fd380Sjfb8856606 * If there was no error, just exit the loop as
2161*2d9fd380Sjfb8856606 * 0 length descriptor is always the last one.
2162*2d9fd380Sjfb8856606 */
2163*2d9fd380Sjfb8856606 break;
2164*2d9fd380Sjfb8856606 }
2165*2d9fd380Sjfb8856606 } else {
2166*2d9fd380Sjfb8856606 /* Create an mbuf chain. */
2167*2d9fd380Sjfb8856606 mbuf->next = rx_info->mbuf;
2168*2d9fd380Sjfb8856606 mbuf = mbuf->next;
2169*2d9fd380Sjfb8856606
2170*2d9fd380Sjfb8856606 ena_init_rx_mbuf(mbuf, len);
2171*2d9fd380Sjfb8856606 mbuf_head->pkt_len += len;
2172*2d9fd380Sjfb8856606 }
2173*2d9fd380Sjfb8856606
2174*2d9fd380Sjfb8856606 /*
2175*2d9fd380Sjfb8856606 * Mark the descriptor as depleted and perform necessary
2176*2d9fd380Sjfb8856606 * cleanup.
2177*2d9fd380Sjfb8856606 * This code will execute in two cases:
2178*2d9fd380Sjfb8856606 * 1. Descriptor len was greater than 0 - normal situation.
2179*2d9fd380Sjfb8856606 * 2. Descriptor len was 0 and we failed to add the descriptor
2180*2d9fd380Sjfb8856606 * to the device. In that situation, we should try to add
2181*2d9fd380Sjfb8856606 * the mbuf again in the populate routine and mark the
2182*2d9fd380Sjfb8856606 * descriptor as used up by the device.
2183*2d9fd380Sjfb8856606 */
2184*2d9fd380Sjfb8856606 rx_info->mbuf = NULL;
2185*2d9fd380Sjfb8856606 rx_ring->empty_rx_reqs[ntc] = req_id;
2186*2d9fd380Sjfb8856606 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
2187*2d9fd380Sjfb8856606 }
2188*2d9fd380Sjfb8856606
2189*2d9fd380Sjfb8856606 *next_to_clean = ntc;
2190*2d9fd380Sjfb8856606
2191*2d9fd380Sjfb8856606 return mbuf_head;
2192*2d9fd380Sjfb8856606 }
2193*2d9fd380Sjfb8856606
2194a9643ea8Slogwang static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
2195a9643ea8Slogwang uint16_t nb_pkts)
2196a9643ea8Slogwang {
2197a9643ea8Slogwang struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
2198*2d9fd380Sjfb8856606 unsigned int free_queue_entries;
2199*2d9fd380Sjfb8856606 unsigned int refill_threshold;
2200a9643ea8Slogwang uint16_t next_to_clean = rx_ring->next_to_clean;
2201*2d9fd380Sjfb8856606 uint16_t descs_in_use;
2202*2d9fd380Sjfb8856606 struct rte_mbuf *mbuf;
2203*2d9fd380Sjfb8856606 uint16_t completed;
2204a9643ea8Slogwang struct ena_com_rx_ctx ena_rx_ctx;
2205*2d9fd380Sjfb8856606 int i, rc = 0;
2206a9643ea8Slogwang
2207a9643ea8Slogwang /* Check adapter state */
2208a9643ea8Slogwang if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
22094418919fSjohnjiang PMD_DRV_LOG(ALERT,
2210a9643ea8Slogwang "Trying to receive pkts while device is NOT running\n");
2211a9643ea8Slogwang return 0;
2212a9643ea8Slogwang }
2213a9643ea8Slogwang
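	/* No more packets can complete than there are descriptors
	 * currently owned by the device, so clamp the burst to that.
	 */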
2214*2d9fd380Sjfb8856606 descs_in_use = rx_ring->ring_size -
2215*2d9fd380Sjfb8856606 ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
2216*2d9fd380Sjfb8856606 nb_pkts = RTE_MIN(descs_in_use, nb_pkts);
2217a9643ea8Slogwang
2218a9643ea8Slogwang for (completed = 0; completed < nb_pkts; completed++) {
22194418919fSjohnjiang ena_rx_ctx.max_bufs = rx_ring->sgl_size;
2220a9643ea8Slogwang ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
2221a9643ea8Slogwang ena_rx_ctx.descs = 0;
2222*2d9fd380Sjfb8856606 ena_rx_ctx.pkt_offset = 0;
2223a9643ea8Slogwang /* receive packet context */
2224a9643ea8Slogwang rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
2225a9643ea8Slogwang rx_ring->ena_com_io_sq,
2226a9643ea8Slogwang &ena_rx_ctx);
2227a9643ea8Slogwang if (unlikely(rc)) {
22284418919fSjohnjiang PMD_DRV_LOG(ERR, "ena_com_rx_pkt error %d\n", rc);
22291646932aSjfb8856606 rx_ring->adapter->reset_reason =
22301646932aSjfb8856606 ENA_REGS_RESET_TOO_MANY_RX_DESCS;
2231d30ea906Sjfb8856606 rx_ring->adapter->trigger_reset = true;
22324418919fSjohnjiang ++rx_ring->rx_stats.bad_desc_num;
2233a9643ea8Slogwang return 0;
2234a9643ea8Slogwang }
2235a9643ea8Slogwang
2236*2d9fd380Sjfb8856606 mbuf = ena_rx_mbuf(rx_ring,
2237*2d9fd380Sjfb8856606 ena_rx_ctx.ena_bufs,
2238*2d9fd380Sjfb8856606 ena_rx_ctx.descs,
2239*2d9fd380Sjfb8856606 &next_to_clean,
2240*2d9fd380Sjfb8856606 ena_rx_ctx.pkt_offset);
2241*2d9fd380Sjfb8856606 if (unlikely(mbuf == NULL)) {
2242*2d9fd380Sjfb8856606 for (i = 0; i < ena_rx_ctx.descs; ++i) {
2243*2d9fd380Sjfb8856606 rx_ring->empty_rx_reqs[next_to_clean] =
2244*2d9fd380Sjfb8856606 rx_ring->ena_bufs[i].req_id;
2245*2d9fd380Sjfb8856606 next_to_clean = ENA_IDX_NEXT_MASKED(
2246*2d9fd380Sjfb8856606 next_to_clean, rx_ring->size_mask);
2247*2d9fd380Sjfb8856606 }
2248d30ea906Sjfb8856606 break;
22491646932aSjfb8856606 }
2250d30ea906Sjfb8856606
2251a9643ea8Slogwang /* fill mbuf attributes if any */
2252*2d9fd380Sjfb8856606 ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx);
22534b05018fSfengbojiang
2254*2d9fd380Sjfb8856606 if (unlikely(mbuf->ol_flags &
22554418919fSjohnjiang (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) {
22564b05018fSfengbojiang rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
22574418919fSjohnjiang ++rx_ring->rx_stats.bad_csum;
22584418919fSjohnjiang }
22594b05018fSfengbojiang
2260*2d9fd380Sjfb8856606 mbuf->hash.rss = ena_rx_ctx.hash;
2261a9643ea8Slogwang
2262*2d9fd380Sjfb8856606 rx_pkts[completed] = mbuf;
2263*2d9fd380Sjfb8856606 rx_ring->rx_stats.bytes += mbuf->pkt_len;
2264a9643ea8Slogwang }
2265a9643ea8Slogwang
2266*2d9fd380Sjfb8856606 rx_ring->rx_stats.cnt += completed;
22672bfe3f2eSlogwang rx_ring->next_to_clean = next_to_clean;
2268a9643ea8Slogwang
2269*2d9fd380Sjfb8856606 free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
2270*2d9fd380Sjfb8856606 refill_threshold =
2271*2d9fd380Sjfb8856606 RTE_MIN(rx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER,
2272*2d9fd380Sjfb8856606 (unsigned int)ENA_REFILL_THRESH_PACKET);
2273*2d9fd380Sjfb8856606
22742bfe3f2eSlogwang 	/* Refill in bursts to save doorbells and memory barriers (constant interval) */
2275*2d9fd380Sjfb8856606 if (free_queue_entries > refill_threshold) {
22761646932aSjfb8856606 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
2277*2d9fd380Sjfb8856606 ena_populate_rx_queue(rx_ring, free_queue_entries);
22781646932aSjfb8856606 }
2279a9643ea8Slogwang
2280*2d9fd380Sjfb8856606 return completed;
2281a9643ea8Slogwang }
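/* Illustrative usage sketch (not part of the driver): the Rx burst handler
 * above is installed as the ethdev rx_pkt_burst callback, so an application
 * reaches it through rte_eth_rx_burst(). The port id, queue id, burst size
 * and the process_packet() helper below are assumptions for the example.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb, j;
 *
 *	nb = rte_eth_rx_burst(port_id, 0, pkts, RTE_DIM(pkts));
 *	for (j = 0; j < nb; j++) {
 *		process_packet(pkts[j]);	// hypothetical helper
 *		rte_pktmbuf_free(pkts[j]);
 *	}
 */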
2282a9643ea8Slogwang
22832bfe3f2eSlogwang static uint16_t
22842bfe3f2eSlogwang eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
22852bfe3f2eSlogwang uint16_t nb_pkts)
22862bfe3f2eSlogwang {
22872bfe3f2eSlogwang int32_t ret;
22882bfe3f2eSlogwang uint32_t i;
22892bfe3f2eSlogwang struct rte_mbuf *m;
22902bfe3f2eSlogwang struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
22914418919fSjohnjiang struct rte_ipv4_hdr *ip_hdr;
22922bfe3f2eSlogwang uint64_t ol_flags;
22932bfe3f2eSlogwang uint16_t frag_field;
22942bfe3f2eSlogwang
22952bfe3f2eSlogwang for (i = 0; i != nb_pkts; i++) {
22962bfe3f2eSlogwang m = tx_pkts[i];
22972bfe3f2eSlogwang ol_flags = m->ol_flags;
22982bfe3f2eSlogwang
22992bfe3f2eSlogwang if (!(ol_flags & PKT_TX_IPV4))
23002bfe3f2eSlogwang continue;
23012bfe3f2eSlogwang
23022bfe3f2eSlogwang 		/* If the L2 header length was not specified, assume it is
23032bfe3f2eSlogwang 		 * the length of the Ethernet header.
23042bfe3f2eSlogwang 		 */
23052bfe3f2eSlogwang if (unlikely(m->l2_len == 0))
23064418919fSjohnjiang m->l2_len = sizeof(struct rte_ether_hdr);
23072bfe3f2eSlogwang
23084418919fSjohnjiang ip_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
23092bfe3f2eSlogwang m->l2_len);
23102bfe3f2eSlogwang frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
23112bfe3f2eSlogwang
23124418919fSjohnjiang if ((frag_field & RTE_IPV4_HDR_DF_FLAG) != 0) {
23132bfe3f2eSlogwang m->packet_type |= RTE_PTYPE_L4_NONFRAG;
23142bfe3f2eSlogwang
23152bfe3f2eSlogwang 			/* If the IPv4 header has the DF flag enabled and TSO support is
23162bfe3f2eSlogwang 			 * disabled, the partial checksum should not be calculated.
23172bfe3f2eSlogwang */
23184418919fSjohnjiang if (!tx_ring->adapter->offloads.tso4_supported)
23192bfe3f2eSlogwang continue;
23202bfe3f2eSlogwang }
23212bfe3f2eSlogwang
23222bfe3f2eSlogwang if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
23232bfe3f2eSlogwang (ol_flags & PKT_TX_L4_MASK) ==
23242bfe3f2eSlogwang PKT_TX_SCTP_CKSUM) {
23251646932aSjfb8856606 rte_errno = ENOTSUP;
23262bfe3f2eSlogwang return i;
23272bfe3f2eSlogwang }
23282bfe3f2eSlogwang
23292bfe3f2eSlogwang #ifdef RTE_LIBRTE_ETHDEV_DEBUG
23302bfe3f2eSlogwang ret = rte_validate_tx_offload(m);
23312bfe3f2eSlogwang if (ret != 0) {
23321646932aSjfb8856606 rte_errno = -ret;
23332bfe3f2eSlogwang return i;
23342bfe3f2eSlogwang }
23352bfe3f2eSlogwang #endif
23362bfe3f2eSlogwang
23372bfe3f2eSlogwang 		/* If we are supposed to perform TSO and the DF flag is not set (DF=0),
23382bfe3f2eSlogwang 		 * the hardware must be provided with a partial checksum; otherwise,
23392bfe3f2eSlogwang 		 * it will take care of the necessary calculations.
23402bfe3f2eSlogwang */
23412bfe3f2eSlogwang
23422bfe3f2eSlogwang ret = rte_net_intel_cksum_flags_prepare(m,
23432bfe3f2eSlogwang ol_flags & ~PKT_TX_TCP_SEG);
23442bfe3f2eSlogwang if (ret != 0) {
23451646932aSjfb8856606 rte_errno = -ret;
23462bfe3f2eSlogwang return i;
23472bfe3f2eSlogwang }
23482bfe3f2eSlogwang }
23492bfe3f2eSlogwang
23502bfe3f2eSlogwang return i;
23512bfe3f2eSlogwang }
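/* Illustrative usage sketch (not part of the driver): eth_ena_prep_pkts() is
 * reached through the generic rte_eth_tx_prepare() API, which applications
 * call before rte_eth_tx_burst() so that checksum contexts are fixed up in
 * software where needed. port_id, the queue id and burst size below are
 * assumptions, and error handling is kept minimal.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_pkts = 32;	// assume the array was filled by the app
 *	uint16_t nb_prep, nb_sent;
 *
 *	nb_prep = rte_eth_tx_prepare(port_id, 0, pkts, nb_pkts);
 *	if (nb_prep != nb_pkts)
 *		printf("tx prepare stopped at %u: %s\n",
 *		       nb_prep, rte_strerror(rte_errno));
 *	nb_sent = rte_eth_tx_burst(port_id, 0, pkts, nb_prep);
 */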
23522bfe3f2eSlogwang
2353d30ea906Sjfb8856606 static void ena_update_hints(struct ena_adapter *adapter,
2354d30ea906Sjfb8856606 struct ena_admin_ena_hw_hints *hints)
2355d30ea906Sjfb8856606 {
2356d30ea906Sjfb8856606 if (hints->admin_completion_tx_timeout)
2357d30ea906Sjfb8856606 adapter->ena_dev.admin_queue.completion_timeout =
2358d30ea906Sjfb8856606 hints->admin_completion_tx_timeout * 1000;
2359d30ea906Sjfb8856606
2360d30ea906Sjfb8856606 if (hints->mmio_read_timeout)
2361d30ea906Sjfb8856606 /* convert to usec */
2362d30ea906Sjfb8856606 adapter->ena_dev.mmio_read.reg_read_to =
2363d30ea906Sjfb8856606 hints->mmio_read_timeout * 1000;
2364d30ea906Sjfb8856606
2365d30ea906Sjfb8856606 if (hints->driver_watchdog_timeout) {
2366d30ea906Sjfb8856606 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2367d30ea906Sjfb8856606 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2368d30ea906Sjfb8856606 else
2369d30ea906Sjfb8856606 			/* Convert msecs to ticks */
2370d30ea906Sjfb8856606 adapter->keep_alive_timeout =
2371d30ea906Sjfb8856606 (hints->driver_watchdog_timeout *
2372d30ea906Sjfb8856606 rte_get_timer_hz()) / 1000;
2373d30ea906Sjfb8856606 }
2374d30ea906Sjfb8856606 }
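/* Worked example for the msec-to-tick conversion above (numbers are
 * illustrative): with a 3000 ms driver_watchdog_timeout hint and
 * rte_get_timer_hz() returning 2700000000 (a 2.7 GHz timer), the result is
 * keep_alive_timeout = (3000 * 2700000000) / 1000 = 8100000000 ticks.
 */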
2375d30ea906Sjfb8856606
2376d30ea906Sjfb8856606 static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
2377d30ea906Sjfb8856606 struct rte_mbuf *mbuf)
2378d30ea906Sjfb8856606 {
23794418919fSjohnjiang struct ena_com_dev *ena_dev;
23804418919fSjohnjiang int num_segments, header_len, rc;
2381d30ea906Sjfb8856606
23824418919fSjohnjiang ena_dev = &tx_ring->adapter->ena_dev;
2383d30ea906Sjfb8856606 num_segments = mbuf->nb_segs;
23844418919fSjohnjiang header_len = mbuf->data_len;
2385d30ea906Sjfb8856606
2386d30ea906Sjfb8856606 if (likely(num_segments < tx_ring->sgl_size))
2387d30ea906Sjfb8856606 return 0;
2388d30ea906Sjfb8856606
23894418919fSjohnjiang if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
23904418919fSjohnjiang (num_segments == tx_ring->sgl_size) &&
23914418919fSjohnjiang (header_len < tx_ring->tx_max_header_size))
23924418919fSjohnjiang return 0;
23934418919fSjohnjiang
23944418919fSjohnjiang ++tx_ring->tx_stats.linearize;
2395d30ea906Sjfb8856606 rc = rte_pktmbuf_linearize(mbuf);
23964418919fSjohnjiang if (unlikely(rc)) {
23974418919fSjohnjiang PMD_DRV_LOG(WARNING, "Mbuf linearize failed\n");
23984418919fSjohnjiang rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
23994418919fSjohnjiang ++tx_ring->tx_stats.linearize_failed;
24004418919fSjohnjiang return rc;
24014418919fSjohnjiang }
2402d30ea906Sjfb8856606
2403d30ea906Sjfb8856606 return rc;
2404d30ea906Sjfb8856606 }
2405d30ea906Sjfb8856606
2406*2d9fd380Sjfb8856606 static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
2407*2d9fd380Sjfb8856606 struct ena_tx_buffer *tx_info,
2408*2d9fd380Sjfb8856606 struct rte_mbuf *mbuf,
2409*2d9fd380Sjfb8856606 void **push_header,
2410*2d9fd380Sjfb8856606 uint16_t *header_len)
2411a9643ea8Slogwang {
2412*2d9fd380Sjfb8856606 struct ena_com_buf *ena_buf;
2413*2d9fd380Sjfb8856606 uint16_t delta, seg_len, push_len;
24144418919fSjohnjiang
24154418919fSjohnjiang delta = 0;
24164418919fSjohnjiang seg_len = mbuf->data_len;
24174418919fSjohnjiang
2418*2d9fd380Sjfb8856606 tx_info->mbuf = mbuf;
2419*2d9fd380Sjfb8856606 ena_buf = tx_info->bufs;
2420*2d9fd380Sjfb8856606
2421*2d9fd380Sjfb8856606 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2422*2d9fd380Sjfb8856606 /*
2423*2d9fd380Sjfb8856606 		 * The Tx header might be (and in most cases will be) smaller
2424*2d9fd380Sjfb8856606 		 * than tx_max_header_size. It is not an issue to send more data
2425*2d9fd380Sjfb8856606 		 * to the device than is actually needed when the mbuf size is
2426*2d9fd380Sjfb8856606 		 * greater than tx_max_header_size.
2427*2d9fd380Sjfb8856606 */
2428*2d9fd380Sjfb8856606 push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size);
2429*2d9fd380Sjfb8856606 *header_len = push_len;
24304418919fSjohnjiang
24314418919fSjohnjiang if (likely(push_len <= seg_len)) {
2432*2d9fd380Sjfb8856606 			/* If the push header fits in a single segment, then
2433*2d9fd380Sjfb8856606 * just point it to the 1st mbuf data.
24344418919fSjohnjiang */
2435*2d9fd380Sjfb8856606 *push_header = rte_pktmbuf_mtod(mbuf, uint8_t *);
24364418919fSjohnjiang } else {
2437*2d9fd380Sjfb8856606 			/* If the push header spans several segments, copy
2438*2d9fd380Sjfb8856606 * it to the intermediate buffer.
24394418919fSjohnjiang */
24404418919fSjohnjiang rte_pktmbuf_read(mbuf, 0, push_len,
24414418919fSjohnjiang tx_ring->push_buf_intermediate_buf);
2442*2d9fd380Sjfb8856606 *push_header = tx_ring->push_buf_intermediate_buf;
24434418919fSjohnjiang delta = push_len - seg_len;
24444418919fSjohnjiang }
2445*2d9fd380Sjfb8856606 } else {
2446*2d9fd380Sjfb8856606 *push_header = NULL;
2447*2d9fd380Sjfb8856606 *header_len = 0;
2448*2d9fd380Sjfb8856606 push_len = 0;
2449*2d9fd380Sjfb8856606 }
2450a9643ea8Slogwang
2451*2d9fd380Sjfb8856606 	/* Process the first segment, taking the pushed header into account */
24524418919fSjohnjiang if (seg_len > push_len) {
2453*2d9fd380Sjfb8856606 ena_buf->paddr = mbuf->buf_iova +
2454a9643ea8Slogwang mbuf->data_off +
24554418919fSjohnjiang push_len;
2456*2d9fd380Sjfb8856606 ena_buf->len = seg_len - push_len;
2457*2d9fd380Sjfb8856606 ena_buf++;
2458a9643ea8Slogwang tx_info->num_of_bufs++;
2459a9643ea8Slogwang }
2460a9643ea8Slogwang
2461a9643ea8Slogwang while ((mbuf = mbuf->next) != NULL) {
24624418919fSjohnjiang seg_len = mbuf->data_len;
24634418919fSjohnjiang
24644418919fSjohnjiang 		/* Skip mbufs whose entire data was pushed as the header */
24654418919fSjohnjiang if (unlikely(delta > seg_len)) {
24664418919fSjohnjiang delta -= seg_len;
24674418919fSjohnjiang continue;
24684418919fSjohnjiang }
24694418919fSjohnjiang
2470*2d9fd380Sjfb8856606 ena_buf->paddr = mbuf->buf_iova + mbuf->data_off + delta;
2471*2d9fd380Sjfb8856606 ena_buf->len = seg_len - delta;
2472*2d9fd380Sjfb8856606 ena_buf++;
2473a9643ea8Slogwang tx_info->num_of_bufs++;
24744418919fSjohnjiang
24754418919fSjohnjiang delta = 0;
2476a9643ea8Slogwang }
2477*2d9fd380Sjfb8856606 }
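/* Worked example for the LLQ push-header mapping above (all numbers are
 * illustrative): assume tx_max_header_size == 96, pkt_len == 200 and a first
 * segment of seg_len == 64 bytes. Then push_len = min(200, 96) = 96 > 64, so
 * the 96 header bytes are gathered with rte_pktmbuf_read() into
 * push_buf_intermediate_buf and delta = 96 - 64 = 32; the first 32 bytes of
 * the second segment are then skipped when the remaining ena_bufs are filled.
 */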
2478a9643ea8Slogwang
2479*2d9fd380Sjfb8856606 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
2480*2d9fd380Sjfb8856606 {
2481*2d9fd380Sjfb8856606 struct ena_tx_buffer *tx_info;
2482*2d9fd380Sjfb8856606 struct ena_com_tx_ctx ena_tx_ctx = { { 0 } };
2483*2d9fd380Sjfb8856606 uint16_t next_to_use;
2484*2d9fd380Sjfb8856606 uint16_t header_len;
2485*2d9fd380Sjfb8856606 uint16_t req_id;
2486*2d9fd380Sjfb8856606 void *push_header;
2487*2d9fd380Sjfb8856606 int nb_hw_desc;
2488*2d9fd380Sjfb8856606 int rc;
2489*2d9fd380Sjfb8856606
2490*2d9fd380Sjfb8856606 rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
2491*2d9fd380Sjfb8856606 if (unlikely(rc))
2492*2d9fd380Sjfb8856606 return rc;
2493*2d9fd380Sjfb8856606
2494*2d9fd380Sjfb8856606 next_to_use = tx_ring->next_to_use;
2495*2d9fd380Sjfb8856606
2496*2d9fd380Sjfb8856606 req_id = tx_ring->empty_tx_reqs[next_to_use];
2497*2d9fd380Sjfb8856606 tx_info = &tx_ring->tx_buffer_info[req_id];
2498*2d9fd380Sjfb8856606 tx_info->num_of_bufs = 0;
2499*2d9fd380Sjfb8856606
2500*2d9fd380Sjfb8856606 ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len);
2501*2d9fd380Sjfb8856606
2502*2d9fd380Sjfb8856606 ena_tx_ctx.ena_bufs = tx_info->bufs;
2503*2d9fd380Sjfb8856606 ena_tx_ctx.push_header = push_header;
2504a9643ea8Slogwang ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2505*2d9fd380Sjfb8856606 ena_tx_ctx.req_id = req_id;
2506*2d9fd380Sjfb8856606 ena_tx_ctx.header_len = header_len;
2507a9643ea8Slogwang
2508*2d9fd380Sjfb8856606 	/* Set Tx offload flags, if applicable */
2509*2d9fd380Sjfb8856606 ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads,
2510*2d9fd380Sjfb8856606 tx_ring->disable_meta_caching);
2511*2d9fd380Sjfb8856606
2512*2d9fd380Sjfb8856606 if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
2513*2d9fd380Sjfb8856606 &ena_tx_ctx))) {
2514*2d9fd380Sjfb8856606 PMD_DRV_LOG(DEBUG,
2515*2d9fd380Sjfb8856606 "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
25164418919fSjohnjiang tx_ring->id);
25174418919fSjohnjiang ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
25184418919fSjohnjiang }
25194418919fSjohnjiang
25204418919fSjohnjiang 	/* Prepare the packet's descriptors for the DMA engine */
2521*2d9fd380Sjfb8856606 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
2522*2d9fd380Sjfb8856606 &nb_hw_desc);
25234418919fSjohnjiang if (unlikely(rc)) {
25244418919fSjohnjiang ++tx_ring->tx_stats.prepare_ctx_err;
2525*2d9fd380Sjfb8856606 return rc;
25264418919fSjohnjiang }
2527*2d9fd380Sjfb8856606
2528a9643ea8Slogwang tx_info->tx_descs = nb_hw_desc;
2529a9643ea8Slogwang
25304418919fSjohnjiang tx_ring->tx_stats.cnt++;
2531*2d9fd380Sjfb8856606 tx_ring->tx_stats.bytes += mbuf->pkt_len;
2532a9643ea8Slogwang
2533*2d9fd380Sjfb8856606 tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use,
2534*2d9fd380Sjfb8856606 tx_ring->size_mask);
2535*2d9fd380Sjfb8856606
2536*2d9fd380Sjfb8856606 return 0;
2537a9643ea8Slogwang }
2538a9643ea8Slogwang
2539*2d9fd380Sjfb8856606 static void ena_tx_cleanup(struct ena_ring *tx_ring)
2540*2d9fd380Sjfb8856606 {
2541*2d9fd380Sjfb8856606 unsigned int cleanup_budget;
2542*2d9fd380Sjfb8856606 unsigned int total_tx_descs = 0;
2543*2d9fd380Sjfb8856606 uint16_t next_to_clean = tx_ring->next_to_clean;
2544*2d9fd380Sjfb8856606
2545*2d9fd380Sjfb8856606 cleanup_budget = RTE_MIN(tx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER,
2546*2d9fd380Sjfb8856606 (unsigned int)ENA_REFILL_THRESH_PACKET);
2547*2d9fd380Sjfb8856606
2548*2d9fd380Sjfb8856606 while (likely(total_tx_descs < cleanup_budget)) {
2549*2d9fd380Sjfb8856606 struct rte_mbuf *mbuf;
2550*2d9fd380Sjfb8856606 struct ena_tx_buffer *tx_info;
2551*2d9fd380Sjfb8856606 uint16_t req_id;
2552*2d9fd380Sjfb8856606
2553*2d9fd380Sjfb8856606 if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0)
2554*2d9fd380Sjfb8856606 break;
2555*2d9fd380Sjfb8856606
2556*2d9fd380Sjfb8856606 if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0))
2557d30ea906Sjfb8856606 break;
2558d30ea906Sjfb8856606
2559a9643ea8Slogwang /* Get Tx info & store how many descs were processed */
2560a9643ea8Slogwang tx_info = &tx_ring->tx_buffer_info[req_id];
2561a9643ea8Slogwang
2562a9643ea8Slogwang mbuf = tx_info->mbuf;
2563a9643ea8Slogwang rte_pktmbuf_free(mbuf);
2564*2d9fd380Sjfb8856606
25652bfe3f2eSlogwang tx_info->mbuf = NULL;
2566*2d9fd380Sjfb8856606 tx_ring->empty_tx_reqs[next_to_clean] = req_id;
2567*2d9fd380Sjfb8856606
2568*2d9fd380Sjfb8856606 total_tx_descs += tx_info->tx_descs;
2569a9643ea8Slogwang
2570a9643ea8Slogwang /* Put back descriptor to the ring for reuse */
2571*2d9fd380Sjfb8856606 next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean,
2572*2d9fd380Sjfb8856606 tx_ring->size_mask);
2573a9643ea8Slogwang }
2574a9643ea8Slogwang
2575*2d9fd380Sjfb8856606 if (likely(total_tx_descs > 0)) {
2576a9643ea8Slogwang /* acknowledge completion of sent packets */
25772bfe3f2eSlogwang tx_ring->next_to_clean = next_to_clean;
25781646932aSjfb8856606 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
25791646932aSjfb8856606 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
2580a9643ea8Slogwang }
2581*2d9fd380Sjfb8856606 }
2582a9643ea8Slogwang
2583*2d9fd380Sjfb8856606 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
2584*2d9fd380Sjfb8856606 uint16_t nb_pkts)
2585*2d9fd380Sjfb8856606 {
2586*2d9fd380Sjfb8856606 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
2587*2d9fd380Sjfb8856606 uint16_t sent_idx = 0;
2588*2d9fd380Sjfb8856606
2589*2d9fd380Sjfb8856606 /* Check adapter state */
2590*2d9fd380Sjfb8856606 if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
2591*2d9fd380Sjfb8856606 PMD_DRV_LOG(ALERT,
2592*2d9fd380Sjfb8856606 "Trying to xmit pkts while device is NOT running\n");
2593*2d9fd380Sjfb8856606 return 0;
2594*2d9fd380Sjfb8856606 }
2595*2d9fd380Sjfb8856606
2596*2d9fd380Sjfb8856606 nb_pkts = RTE_MIN(ena_com_free_q_entries(tx_ring->ena_com_io_sq),
2597*2d9fd380Sjfb8856606 nb_pkts);
2598*2d9fd380Sjfb8856606
2599*2d9fd380Sjfb8856606 for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
2600*2d9fd380Sjfb8856606 if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx]))
2601*2d9fd380Sjfb8856606 break;
2602*2d9fd380Sjfb8856606
2603*2d9fd380Sjfb8856606 rte_prefetch0(tx_pkts[ENA_IDX_ADD_MASKED(sent_idx, 4,
2604*2d9fd380Sjfb8856606 tx_ring->size_mask)]);
2605*2d9fd380Sjfb8856606 }
2606*2d9fd380Sjfb8856606
2607*2d9fd380Sjfb8856606 tx_ring->tx_stats.available_desc =
2608*2d9fd380Sjfb8856606 ena_com_free_q_entries(tx_ring->ena_com_io_sq);
2609*2d9fd380Sjfb8856606
2610*2d9fd380Sjfb8856606 /* If there are ready packets to be xmitted... */
2611*2d9fd380Sjfb8856606 if (sent_idx > 0) {
2612*2d9fd380Sjfb8856606 /* ...let HW do its best :-) */
2613*2d9fd380Sjfb8856606 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
2614*2d9fd380Sjfb8856606 tx_ring->tx_stats.doorbells++;
2615*2d9fd380Sjfb8856606 }
2616*2d9fd380Sjfb8856606
2617*2d9fd380Sjfb8856606 ena_tx_cleanup(tx_ring);
2618*2d9fd380Sjfb8856606
2619*2d9fd380Sjfb8856606 tx_ring->tx_stats.available_desc =
2620*2d9fd380Sjfb8856606 ena_com_free_q_entries(tx_ring->ena_com_io_sq);
26214418919fSjohnjiang tx_ring->tx_stats.tx_poll++;
26224418919fSjohnjiang
2623a9643ea8Slogwang return sent_idx;
2624a9643ea8Slogwang }
2625a9643ea8Slogwang
2626*2d9fd380Sjfb8856606 int ena_copy_eni_stats(struct ena_adapter *adapter)
2627*2d9fd380Sjfb8856606 {
2628*2d9fd380Sjfb8856606 struct ena_admin_eni_stats admin_eni_stats;
2629*2d9fd380Sjfb8856606 int rc;
2630*2d9fd380Sjfb8856606
2631*2d9fd380Sjfb8856606 rte_spinlock_lock(&adapter->admin_lock);
2632*2d9fd380Sjfb8856606 rc = ena_com_get_eni_stats(&adapter->ena_dev, &admin_eni_stats);
2633*2d9fd380Sjfb8856606 rte_spinlock_unlock(&adapter->admin_lock);
2634*2d9fd380Sjfb8856606 if (rc != 0) {
2635*2d9fd380Sjfb8856606 if (rc == ENA_COM_UNSUPPORTED) {
2636*2d9fd380Sjfb8856606 PMD_DRV_LOG(DEBUG,
2637*2d9fd380Sjfb8856606 "Retrieving ENI metrics is not supported.\n");
2638*2d9fd380Sjfb8856606 } else {
2639*2d9fd380Sjfb8856606 PMD_DRV_LOG(WARNING,
2640*2d9fd380Sjfb8856606 "Failed to get ENI metrics: %d\n", rc);
2641*2d9fd380Sjfb8856606 }
2642*2d9fd380Sjfb8856606 return rc;
2643*2d9fd380Sjfb8856606 }
2644*2d9fd380Sjfb8856606
2645*2d9fd380Sjfb8856606 rte_memcpy(&adapter->eni_stats, &admin_eni_stats,
2646*2d9fd380Sjfb8856606 sizeof(struct ena_stats_eni));
2647*2d9fd380Sjfb8856606
2648*2d9fd380Sjfb8856606 return 0;
2649*2d9fd380Sjfb8856606 }
2650*2d9fd380Sjfb8856606
26514418919fSjohnjiang /**
26524418919fSjohnjiang * DPDK callback to retrieve names of extended device statistics
26534418919fSjohnjiang *
26544418919fSjohnjiang * @param dev
26554418919fSjohnjiang * Pointer to Ethernet device structure.
26564418919fSjohnjiang * @param[out] xstats_names
26574418919fSjohnjiang * Buffer to insert names into.
26584418919fSjohnjiang * @param n
26594418919fSjohnjiang * Number of names.
26604418919fSjohnjiang *
26614418919fSjohnjiang * @return
26624418919fSjohnjiang * Number of xstats names.
26634418919fSjohnjiang */
26644418919fSjohnjiang static int ena_xstats_get_names(struct rte_eth_dev *dev,
26654418919fSjohnjiang struct rte_eth_xstat_name *xstats_names,
26664418919fSjohnjiang unsigned int n)
26674418919fSjohnjiang {
26684418919fSjohnjiang unsigned int xstats_count = ena_xstats_calc_num(dev);
26694418919fSjohnjiang unsigned int stat, i, count = 0;
26704418919fSjohnjiang
26714418919fSjohnjiang if (n < xstats_count || !xstats_names)
26724418919fSjohnjiang return xstats_count;
26734418919fSjohnjiang
26744418919fSjohnjiang for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++)
26754418919fSjohnjiang strcpy(xstats_names[count].name,
26764418919fSjohnjiang ena_stats_global_strings[stat].name);
26774418919fSjohnjiang
2678*2d9fd380Sjfb8856606 for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++)
2679*2d9fd380Sjfb8856606 strcpy(xstats_names[count].name,
2680*2d9fd380Sjfb8856606 ena_stats_eni_strings[stat].name);
2681*2d9fd380Sjfb8856606
26824418919fSjohnjiang for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++)
26834418919fSjohnjiang for (i = 0; i < dev->data->nb_rx_queues; i++, count++)
26844418919fSjohnjiang snprintf(xstats_names[count].name,
26854418919fSjohnjiang sizeof(xstats_names[count].name),
26864418919fSjohnjiang "rx_q%d_%s", i,
26874418919fSjohnjiang ena_stats_rx_strings[stat].name);
26884418919fSjohnjiang
26894418919fSjohnjiang for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++)
26904418919fSjohnjiang for (i = 0; i < dev->data->nb_tx_queues; i++, count++)
26914418919fSjohnjiang snprintf(xstats_names[count].name,
26924418919fSjohnjiang sizeof(xstats_names[count].name),
26934418919fSjohnjiang "tx_q%d_%s", i,
26944418919fSjohnjiang ena_stats_tx_strings[stat].name);
26954418919fSjohnjiang
26964418919fSjohnjiang return xstats_count;
26974418919fSjohnjiang }
26984418919fSjohnjiang
26994418919fSjohnjiang /**
27004418919fSjohnjiang * DPDK callback to get extended device statistics.
27014418919fSjohnjiang *
27024418919fSjohnjiang * @param dev
27034418919fSjohnjiang * Pointer to Ethernet device structure.
27044418919fSjohnjiang * @param[out] stats
27054418919fSjohnjiang * Stats table output buffer.
27064418919fSjohnjiang * @param n
27074418919fSjohnjiang * The size of the stats table.
27084418919fSjohnjiang *
27094418919fSjohnjiang * @return
27104418919fSjohnjiang * Number of xstats on success, negative on failure.
27114418919fSjohnjiang */
27124418919fSjohnjiang static int ena_xstats_get(struct rte_eth_dev *dev,
27134418919fSjohnjiang struct rte_eth_xstat *xstats,
27144418919fSjohnjiang unsigned int n)
27154418919fSjohnjiang {
27164418919fSjohnjiang struct ena_adapter *adapter = dev->data->dev_private;
27174418919fSjohnjiang unsigned int xstats_count = ena_xstats_calc_num(dev);
27184418919fSjohnjiang unsigned int stat, i, count = 0;
27194418919fSjohnjiang int stat_offset;
27204418919fSjohnjiang void *stats_begin;
27214418919fSjohnjiang
27224418919fSjohnjiang if (n < xstats_count)
27234418919fSjohnjiang return xstats_count;
27244418919fSjohnjiang
27254418919fSjohnjiang if (!xstats)
27264418919fSjohnjiang return 0;
27274418919fSjohnjiang
27284418919fSjohnjiang for (stat = 0; stat < ENA_STATS_ARRAY_GLOBAL; stat++, count++) {
27290c6bd470Sfengbojiang stat_offset = ena_stats_global_strings[stat].stat_offset;
27304418919fSjohnjiang stats_begin = &adapter->dev_stats;
27314418919fSjohnjiang
27324418919fSjohnjiang xstats[count].id = count;
27334418919fSjohnjiang xstats[count].value = *((uint64_t *)
27344418919fSjohnjiang ((char *)stats_begin + stat_offset));
27354418919fSjohnjiang }
27364418919fSjohnjiang
2737*2d9fd380Sjfb8856606 	/* Even if the function below fails, we should copy the previous (or
2738*2d9fd380Sjfb8856606 	 * initial) values to keep the structure of rte_eth_xstat consistent.
2739*2d9fd380Sjfb8856606 */
2740*2d9fd380Sjfb8856606 ena_copy_eni_stats(adapter);
2741*2d9fd380Sjfb8856606 for (stat = 0; stat < ENA_STATS_ARRAY_ENI; stat++, count++) {
2742*2d9fd380Sjfb8856606 stat_offset = ena_stats_eni_strings[stat].stat_offset;
2743*2d9fd380Sjfb8856606 stats_begin = &adapter->eni_stats;
2744*2d9fd380Sjfb8856606
2745*2d9fd380Sjfb8856606 xstats[count].id = count;
2746*2d9fd380Sjfb8856606 xstats[count].value = *((uint64_t *)
2747*2d9fd380Sjfb8856606 ((char *)stats_begin + stat_offset));
2748*2d9fd380Sjfb8856606 }
2749*2d9fd380Sjfb8856606
27504418919fSjohnjiang for (stat = 0; stat < ENA_STATS_ARRAY_RX; stat++) {
27514418919fSjohnjiang for (i = 0; i < dev->data->nb_rx_queues; i++, count++) {
27524418919fSjohnjiang stat_offset = ena_stats_rx_strings[stat].stat_offset;
27534418919fSjohnjiang stats_begin = &adapter->rx_ring[i].rx_stats;
27544418919fSjohnjiang
27554418919fSjohnjiang xstats[count].id = count;
27564418919fSjohnjiang xstats[count].value = *((uint64_t *)
27574418919fSjohnjiang ((char *)stats_begin + stat_offset));
27584418919fSjohnjiang }
27594418919fSjohnjiang }
27604418919fSjohnjiang
27614418919fSjohnjiang for (stat = 0; stat < ENA_STATS_ARRAY_TX; stat++) {
27624418919fSjohnjiang for (i = 0; i < dev->data->nb_tx_queues; i++, count++) {
27634418919fSjohnjiang stat_offset = ena_stats_tx_strings[stat].stat_offset;
27644418919fSjohnjiang 			stats_begin = &adapter->tx_ring[i].tx_stats;
27654418919fSjohnjiang
27664418919fSjohnjiang xstats[count].id = count;
27674418919fSjohnjiang xstats[count].value = *((uint64_t *)
27684418919fSjohnjiang ((char *)stats_begin + stat_offset));
27694418919fSjohnjiang }
27704418919fSjohnjiang }
27714418919fSjohnjiang
27724418919fSjohnjiang return count;
27734418919fSjohnjiang }
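/* Illustrative usage sketch (not part of the driver): applications consume
 * these callbacks through the two-call idiom of the ethdev xstats API:
 * query the count first, then fetch names and values. port_id is an
 * assumption, and error handling is omitted for brevity.
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	n = rte_eth_xstats_get(port_id, xs, n);
 *	for (int k = 0; k < n; k++)
 *		printf("%s: %" PRIu64 "\n", names[xs[k].id].name, xs[k].value);
 */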
27744418919fSjohnjiang
27754418919fSjohnjiang static int ena_xstats_get_by_id(struct rte_eth_dev *dev,
27764418919fSjohnjiang const uint64_t *ids,
27774418919fSjohnjiang uint64_t *values,
27784418919fSjohnjiang unsigned int n)
27794418919fSjohnjiang {
27804418919fSjohnjiang struct ena_adapter *adapter = dev->data->dev_private;
27814418919fSjohnjiang uint64_t id;
27824418919fSjohnjiang uint64_t rx_entries, tx_entries;
27834418919fSjohnjiang unsigned int i;
27844418919fSjohnjiang int qid;
27854418919fSjohnjiang int valid = 0;
2786*2d9fd380Sjfb8856606 bool was_eni_copied = false;
2787*2d9fd380Sjfb8856606
27884418919fSjohnjiang for (i = 0; i < n; ++i) {
27894418919fSjohnjiang id = ids[i];
27904418919fSjohnjiang /* Check if id belongs to global statistics */
27914418919fSjohnjiang if (id < ENA_STATS_ARRAY_GLOBAL) {
27924418919fSjohnjiang values[i] = *((uint64_t *)&adapter->dev_stats + id);
27934418919fSjohnjiang ++valid;
27944418919fSjohnjiang continue;
27954418919fSjohnjiang }
27964418919fSjohnjiang
2797*2d9fd380Sjfb8856606 /* Check if id belongs to ENI statistics */
27984418919fSjohnjiang id -= ENA_STATS_ARRAY_GLOBAL;
2799*2d9fd380Sjfb8856606 if (id < ENA_STATS_ARRAY_ENI) {
2800*2d9fd380Sjfb8856606 /* Avoid reading ENI stats multiple times in a single
2801*2d9fd380Sjfb8856606 * function call, as it requires communication with the
2802*2d9fd380Sjfb8856606 * admin queue.
2803*2d9fd380Sjfb8856606 */
2804*2d9fd380Sjfb8856606 if (!was_eni_copied) {
2805*2d9fd380Sjfb8856606 was_eni_copied = true;
2806*2d9fd380Sjfb8856606 ena_copy_eni_stats(adapter);
2807*2d9fd380Sjfb8856606 }
2808*2d9fd380Sjfb8856606 values[i] = *((uint64_t *)&adapter->eni_stats + id);
2809*2d9fd380Sjfb8856606 ++valid;
2810*2d9fd380Sjfb8856606 continue;
2811*2d9fd380Sjfb8856606 }
2812*2d9fd380Sjfb8856606
2813*2d9fd380Sjfb8856606 /* Check if id belongs to rx queue statistics */
2814*2d9fd380Sjfb8856606 id -= ENA_STATS_ARRAY_ENI;
28154418919fSjohnjiang rx_entries = ENA_STATS_ARRAY_RX * dev->data->nb_rx_queues;
28164418919fSjohnjiang if (id < rx_entries) {
28174418919fSjohnjiang qid = id % dev->data->nb_rx_queues;
28184418919fSjohnjiang id /= dev->data->nb_rx_queues;
28194418919fSjohnjiang values[i] = *((uint64_t *)
28204418919fSjohnjiang &adapter->rx_ring[qid].rx_stats + id);
28214418919fSjohnjiang ++valid;
28224418919fSjohnjiang continue;
28234418919fSjohnjiang }
28244418919fSjohnjiang 		/* Check if id belongs to tx queue statistics */
28254418919fSjohnjiang id -= rx_entries;
28264418919fSjohnjiang tx_entries = ENA_STATS_ARRAY_TX * dev->data->nb_tx_queues;
28274418919fSjohnjiang if (id < tx_entries) {
28284418919fSjohnjiang qid = id % dev->data->nb_tx_queues;
28294418919fSjohnjiang id /= dev->data->nb_tx_queues;
28304418919fSjohnjiang values[i] = *((uint64_t *)
28314418919fSjohnjiang &adapter->tx_ring[qid].tx_stats + id);
28324418919fSjohnjiang ++valid;
28334418919fSjohnjiang continue;
28344418919fSjohnjiang }
28354418919fSjohnjiang }
28364418919fSjohnjiang
28374418919fSjohnjiang return valid;
28384418919fSjohnjiang }
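/* Illustrative usage sketch (not part of the driver): a single counter can be
 * polled cheaply by resolving its id once and then reading by id. The xstat
 * name below is an assumption built from the "tx_q%d_%s" pattern used in
 * ena_xstats_get_names().
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "tx_q0_doorbells", &id) == 0)
 *		rte_eth_xstats_get_by_id(port_id, &id, &value, 1);
 */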
28394418919fSjohnjiang
2840*2d9fd380Sjfb8856606 static int ena_process_bool_devarg(const char *key,
2841*2d9fd380Sjfb8856606 const char *value,
2842*2d9fd380Sjfb8856606 void *opaque)
2843*2d9fd380Sjfb8856606 {
2844*2d9fd380Sjfb8856606 struct ena_adapter *adapter = opaque;
2845*2d9fd380Sjfb8856606 bool bool_value;
2846*2d9fd380Sjfb8856606
2847*2d9fd380Sjfb8856606 /* Parse the value. */
2848*2d9fd380Sjfb8856606 if (strcmp(value, "1") == 0) {
2849*2d9fd380Sjfb8856606 bool_value = true;
2850*2d9fd380Sjfb8856606 } else if (strcmp(value, "0") == 0) {
2851*2d9fd380Sjfb8856606 bool_value = false;
2852*2d9fd380Sjfb8856606 } else {
2853*2d9fd380Sjfb8856606 PMD_INIT_LOG(ERR,
2854*2d9fd380Sjfb8856606 "Invalid value: '%s' for key '%s'. Accepted: '0' or '1'\n",
2855*2d9fd380Sjfb8856606 value, key);
2856*2d9fd380Sjfb8856606 return -EINVAL;
2857*2d9fd380Sjfb8856606 }
2858*2d9fd380Sjfb8856606
2859*2d9fd380Sjfb8856606 /* Now, assign it to the proper adapter field. */
2860*2d9fd380Sjfb8856606 	if (strcmp(key, ENA_DEVARG_LARGE_LLQ_HDR) == 0)
2861*2d9fd380Sjfb8856606 adapter->use_large_llq_hdr = bool_value;
2862*2d9fd380Sjfb8856606
2863*2d9fd380Sjfb8856606 return 0;
2864*2d9fd380Sjfb8856606 }
2865*2d9fd380Sjfb8856606
2866*2d9fd380Sjfb8856606 static int ena_parse_devargs(struct ena_adapter *adapter,
2867*2d9fd380Sjfb8856606 struct rte_devargs *devargs)
2868*2d9fd380Sjfb8856606 {
2869*2d9fd380Sjfb8856606 static const char * const allowed_args[] = {
2870*2d9fd380Sjfb8856606 ENA_DEVARG_LARGE_LLQ_HDR,
2871*2d9fd380Sjfb8856606 };
2872*2d9fd380Sjfb8856606 struct rte_kvargs *kvlist;
2873*2d9fd380Sjfb8856606 int rc;
2874*2d9fd380Sjfb8856606
2875*2d9fd380Sjfb8856606 if (devargs == NULL)
2876*2d9fd380Sjfb8856606 return 0;
2877*2d9fd380Sjfb8856606
2878*2d9fd380Sjfb8856606 kvlist = rte_kvargs_parse(devargs->args, allowed_args);
2879*2d9fd380Sjfb8856606 if (kvlist == NULL) {
2880*2d9fd380Sjfb8856606 PMD_INIT_LOG(ERR, "Invalid device arguments: %s\n",
2881*2d9fd380Sjfb8856606 devargs->args);
2882*2d9fd380Sjfb8856606 return -EINVAL;
2883*2d9fd380Sjfb8856606 }
2884*2d9fd380Sjfb8856606
2885*2d9fd380Sjfb8856606 rc = rte_kvargs_process(kvlist, ENA_DEVARG_LARGE_LLQ_HDR,
2886*2d9fd380Sjfb8856606 ena_process_bool_devarg, adapter);
2887*2d9fd380Sjfb8856606
2888*2d9fd380Sjfb8856606 rte_kvargs_free(kvlist);
2889*2d9fd380Sjfb8856606
2890*2d9fd380Sjfb8856606 return rc;
2891*2d9fd380Sjfb8856606 }
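/* Illustrative usage sketch (not part of the driver): the devarg parsed above
 * is supplied on the EAL command line together with the device's PCI address
 * (the address and application are assumptions):
 *
 *	dpdk-testpmd -a 00:06.0,large_llq_hdr=1 -- -i
 */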
2892*2d9fd380Sjfb8856606
2893d30ea906Sjfb8856606 /*********************************************************************
2894d30ea906Sjfb8856606 * PMD configuration
2895d30ea906Sjfb8856606 *********************************************************************/
28962bfe3f2eSlogwang static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
28972bfe3f2eSlogwang struct rte_pci_device *pci_dev)
2898a9643ea8Slogwang {
28992bfe3f2eSlogwang return rte_eth_dev_pci_generic_probe(pci_dev,
29002bfe3f2eSlogwang sizeof(struct ena_adapter), eth_ena_dev_init);
29012bfe3f2eSlogwang }
29022bfe3f2eSlogwang
29032bfe3f2eSlogwang static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
29042bfe3f2eSlogwang {
2905d30ea906Sjfb8856606 return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
29062bfe3f2eSlogwang }
29072bfe3f2eSlogwang
29082bfe3f2eSlogwang static struct rte_pci_driver rte_ena_pmd = {
2909a9643ea8Slogwang .id_table = pci_id_ena_map,
2910d30ea906Sjfb8856606 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
2911d30ea906Sjfb8856606 RTE_PCI_DRV_WC_ACTIVATE,
29122bfe3f2eSlogwang .probe = eth_ena_pci_probe,
29132bfe3f2eSlogwang .remove = eth_ena_pci_remove,
2914a9643ea8Slogwang };
2915a9643ea8Slogwang
29162bfe3f2eSlogwang RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
29172bfe3f2eSlogwang RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
29182bfe3f2eSlogwang RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
2919*2d9fd380Sjfb8856606 RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1>");
2920*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(ena_logtype_init, pmd.net.ena.init, NOTICE);
2921*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(ena_logtype_driver, pmd.net.ena.driver, NOTICE);
29224418919fSjohnjiang #ifdef RTE_LIBRTE_ENA_DEBUG_RX
2923*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(ena_logtype_rx, pmd.net.ena.rx, NOTICE);
29244418919fSjohnjiang #endif
29254418919fSjohnjiang #ifdef RTE_LIBRTE_ENA_DEBUG_TX
2926*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(ena_logtype_tx, pmd.net.ena.tx, NOTICE);
29274418919fSjohnjiang #endif
29284418919fSjohnjiang #ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE
2929*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(ena_logtype_tx_free, pmd.net.ena.tx_free, NOTICE);
29304418919fSjohnjiang #endif
29314418919fSjohnjiang #ifdef RTE_LIBRTE_ENA_COM_DEBUG
2932*2d9fd380Sjfb8856606 RTE_LOG_REGISTER(ena_logtype_com, pmd.net.ena.com, NOTICE);
29334418919fSjohnjiang #endif
2934d30ea906Sjfb8856606
2935d30ea906Sjfb8856606 /******************************************************************************
2936d30ea906Sjfb8856606 ******************************** AENQ Handlers *******************************
2937d30ea906Sjfb8856606 *****************************************************************************/
2938d30ea906Sjfb8856606 static void ena_update_on_link_change(void *adapter_data,
2939d30ea906Sjfb8856606 struct ena_admin_aenq_entry *aenq_e)
2940d30ea906Sjfb8856606 {
2941d30ea906Sjfb8856606 struct rte_eth_dev *eth_dev;
2942d30ea906Sjfb8856606 struct ena_adapter *adapter;
2943d30ea906Sjfb8856606 struct ena_admin_aenq_link_change_desc *aenq_link_desc;
2944d30ea906Sjfb8856606 uint32_t status;
2945d30ea906Sjfb8856606
29464b05018fSfengbojiang adapter = adapter_data;
2947d30ea906Sjfb8856606 aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
2948d30ea906Sjfb8856606 eth_dev = adapter->rte_dev;
2949d30ea906Sjfb8856606
2950d30ea906Sjfb8856606 status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
2951d30ea906Sjfb8856606 adapter->link_status = status;
2952d30ea906Sjfb8856606
2953d30ea906Sjfb8856606 ena_link_update(eth_dev, 0);
2954*2d9fd380Sjfb8856606 rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
2955d30ea906Sjfb8856606 }
2956d30ea906Sjfb8856606
2957d30ea906Sjfb8856606 static void ena_notification(void *data,
2958d30ea906Sjfb8856606 struct ena_admin_aenq_entry *aenq_e)
2959d30ea906Sjfb8856606 {
29604b05018fSfengbojiang struct ena_adapter *adapter = data;
2961d30ea906Sjfb8856606 struct ena_admin_ena_hw_hints *hints;
2962d30ea906Sjfb8856606
2963d30ea906Sjfb8856606 if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
29644418919fSjohnjiang PMD_DRV_LOG(WARNING, "Invalid group(%x) expected %x\n",
2965d30ea906Sjfb8856606 aenq_e->aenq_common_desc.group,
2966d30ea906Sjfb8856606 ENA_ADMIN_NOTIFICATION);
2967d30ea906Sjfb8856606
2968d30ea906Sjfb8856606 switch (aenq_e->aenq_common_desc.syndrom) {
2969d30ea906Sjfb8856606 case ENA_ADMIN_UPDATE_HINTS:
2970d30ea906Sjfb8856606 hints = (struct ena_admin_ena_hw_hints *)
2971d30ea906Sjfb8856606 (&aenq_e->inline_data_w4);
2972d30ea906Sjfb8856606 ena_update_hints(adapter, hints);
2973d30ea906Sjfb8856606 break;
2974d30ea906Sjfb8856606 default:
29754418919fSjohnjiang PMD_DRV_LOG(ERR, "Invalid aenq notification link state %d\n",
2976d30ea906Sjfb8856606 aenq_e->aenq_common_desc.syndrom);
2977d30ea906Sjfb8856606 }
2978d30ea906Sjfb8856606 }
2979d30ea906Sjfb8856606
2980d30ea906Sjfb8856606 static void ena_keep_alive(void *adapter_data,
2981d30ea906Sjfb8856606 __rte_unused struct ena_admin_aenq_entry *aenq_e)
2982d30ea906Sjfb8856606 {
29834b05018fSfengbojiang struct ena_adapter *adapter = adapter_data;
29844418919fSjohnjiang struct ena_admin_aenq_keep_alive_desc *desc;
29854418919fSjohnjiang uint64_t rx_drops;
2986*2d9fd380Sjfb8856606 uint64_t tx_drops;
2987d30ea906Sjfb8856606
2988d30ea906Sjfb8856606 adapter->timestamp_wd = rte_get_timer_cycles();
29894418919fSjohnjiang
29904418919fSjohnjiang desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
29914418919fSjohnjiang rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
2992*2d9fd380Sjfb8856606 tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
2993*2d9fd380Sjfb8856606
2994*2d9fd380Sjfb8856606 adapter->drv_stats->rx_drops = rx_drops;
2995*2d9fd380Sjfb8856606 adapter->dev_stats.tx_drops = tx_drops;
2996d30ea906Sjfb8856606 }
2997d30ea906Sjfb8856606
2998d30ea906Sjfb8856606 /**
2999d30ea906Sjfb8856606  * This handler will be called for an unknown event group or unimplemented handlers
3000d30ea906Sjfb8856606 **/
3001d30ea906Sjfb8856606 static void unimplemented_aenq_handler(__rte_unused void *data,
3002d30ea906Sjfb8856606 __rte_unused struct ena_admin_aenq_entry *aenq_e)
3003d30ea906Sjfb8856606 {
30044418919fSjohnjiang PMD_DRV_LOG(ERR, "Unknown event was received or event with "
3005d30ea906Sjfb8856606 "unimplemented handler\n");
3006d30ea906Sjfb8856606 }
3007d30ea906Sjfb8856606
3008d30ea906Sjfb8856606 static struct ena_aenq_handlers aenq_handlers = {
3009d30ea906Sjfb8856606 .handlers = {
3010d30ea906Sjfb8856606 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3011d30ea906Sjfb8856606 [ENA_ADMIN_NOTIFICATION] = ena_notification,
3012d30ea906Sjfb8856606 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
3013d30ea906Sjfb8856606 },
3014d30ea906Sjfb8856606 .unimplemented_handler = unimplemented_aenq_handler
3015d30ea906Sjfb8856606 };