/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of copyright holder nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_eal_memconfig.h>
#include <rte_net.h>

#include "ena_ethdev.h"
#include "ena_logs.h"
#include "ena_platform.h"
#include "ena_com.h"
#include "ena_eth_com.h"

#include <ena_common_defs.h>
#include <ena_regs_defs.h>
#include <ena_admin_defs.h>
#include <ena_eth_io_defs.h>

#define DRV_MODULE_VER_MAJOR    1
#define DRV_MODULE_VER_MINOR    1
#define DRV_MODULE_VER_SUBMINOR 1

#define ENA_IO_TXQ_IDX(q)       (2 * (q))
#define ENA_IO_RXQ_IDX(q)       (2 * (q) + 1)
/* reverse version of ENA_IO_RXQ_IDX */
#define ENA_IO_RXQ_IDX_REV(q)   ((q - 1) / 2)

/* While processing submitted and completed descriptors (rx and tx path
 * respectively) in a loop it is desired to:
 *  - perform batch submissions while populating the submission queue
 *  - avoid blocking transmission of other packets during cleanup phase
 * Hence the utilization ratio of 1/8 of a queue size.
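 *
 * For example, with a 1024-entry Rx ring, ENA_RING_DESCS_RATIO(1024) below
 * evaluates to 128: the Rx path only refills the ring once more than 128
 * descriptors are free, and then posts them all with a single doorbell,
 * instead of ringing the doorbell for every received packet.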
70 */ 71 #define ENA_RING_DESCS_RATIO(ring_size) (ring_size / 8) 72 73 #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l) 74 #define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift)) 75 76 #define GET_L4_HDR_LEN(mbuf) \ 77 ((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, \ 78 mbuf->l3_len + mbuf->l2_len)->data_off) >> 4) 79 80 #define ENA_RX_RSS_TABLE_LOG_SIZE 7 81 #define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE) 82 #define ENA_HASH_KEY_SIZE 40 83 #define ENA_ETH_SS_STATS 0xFF 84 #define ETH_GSTRING_LEN 32 85 86 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 87 88 #define ENA_MAX_RING_DESC ENA_DEFAULT_RING_SIZE 89 #define ENA_MIN_RING_DESC 128 90 91 enum ethtool_stringset { 92 ETH_SS_TEST = 0, 93 ETH_SS_STATS, 94 }; 95 96 struct ena_stats { 97 char name[ETH_GSTRING_LEN]; 98 int stat_offset; 99 }; 100 101 #define ENA_STAT_ENA_COM_ENTRY(stat) { \ 102 .name = #stat, \ 103 .stat_offset = offsetof(struct ena_com_stats_admin, stat) \ 104 } 105 106 #define ENA_STAT_ENTRY(stat, stat_type) { \ 107 .name = #stat, \ 108 .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \ 109 } 110 111 #define ENA_STAT_RX_ENTRY(stat) \ 112 ENA_STAT_ENTRY(stat, rx) 113 114 #define ENA_STAT_TX_ENTRY(stat) \ 115 ENA_STAT_ENTRY(stat, tx) 116 117 #define ENA_STAT_GLOBAL_ENTRY(stat) \ 118 ENA_STAT_ENTRY(stat, dev) 119 120 /* 121 * Each rte_memzone should have unique name. 122 * To satisfy it, count number of allocation and add it to name. 123 */ 124 uint32_t ena_alloc_cnt; 125 126 static const struct ena_stats ena_stats_global_strings[] = { 127 ENA_STAT_GLOBAL_ENTRY(tx_timeout), 128 ENA_STAT_GLOBAL_ENTRY(io_suspend), 129 ENA_STAT_GLOBAL_ENTRY(io_resume), 130 ENA_STAT_GLOBAL_ENTRY(wd_expired), 131 ENA_STAT_GLOBAL_ENTRY(interface_up), 132 ENA_STAT_GLOBAL_ENTRY(interface_down), 133 ENA_STAT_GLOBAL_ENTRY(admin_q_pause), 134 }; 135 136 static const struct ena_stats ena_stats_tx_strings[] = { 137 ENA_STAT_TX_ENTRY(cnt), 138 ENA_STAT_TX_ENTRY(bytes), 139 ENA_STAT_TX_ENTRY(queue_stop), 140 ENA_STAT_TX_ENTRY(queue_wakeup), 141 ENA_STAT_TX_ENTRY(dma_mapping_err), 142 ENA_STAT_TX_ENTRY(linearize), 143 ENA_STAT_TX_ENTRY(linearize_failed), 144 ENA_STAT_TX_ENTRY(tx_poll), 145 ENA_STAT_TX_ENTRY(doorbells), 146 ENA_STAT_TX_ENTRY(prepare_ctx_err), 147 ENA_STAT_TX_ENTRY(missing_tx_comp), 148 ENA_STAT_TX_ENTRY(bad_req_id), 149 }; 150 151 static const struct ena_stats ena_stats_rx_strings[] = { 152 ENA_STAT_RX_ENTRY(cnt), 153 ENA_STAT_RX_ENTRY(bytes), 154 ENA_STAT_RX_ENTRY(refil_partial), 155 ENA_STAT_RX_ENTRY(bad_csum), 156 ENA_STAT_RX_ENTRY(page_alloc_fail), 157 ENA_STAT_RX_ENTRY(skb_alloc_fail), 158 ENA_STAT_RX_ENTRY(dma_mapping_err), 159 ENA_STAT_RX_ENTRY(bad_desc_num), 160 ENA_STAT_RX_ENTRY(small_copy_len_pkt), 161 }; 162 163 static const struct ena_stats ena_stats_ena_com_strings[] = { 164 ENA_STAT_ENA_COM_ENTRY(aborted_cmd), 165 ENA_STAT_ENA_COM_ENTRY(submitted_cmd), 166 ENA_STAT_ENA_COM_ENTRY(completed_cmd), 167 ENA_STAT_ENA_COM_ENTRY(out_of_space), 168 ENA_STAT_ENA_COM_ENTRY(no_completion), 169 }; 170 171 #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) 172 #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) 173 #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) 174 #define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings) 175 176 #define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\ 177 DEV_TX_OFFLOAD_UDP_CKSUM |\ 178 DEV_TX_OFFLOAD_IPV4_CKSUM |\ 179 DEV_TX_OFFLOAD_TCP_TSO) 180 #define MBUF_OFFLOADS (PKT_TX_L4_MASK |\ 181 PKT_TX_IP_CKSUM |\ 182 
PKT_TX_TCP_SEG) 183 184 /** Vendor ID used by Amazon devices */ 185 #define PCI_VENDOR_ID_AMAZON 0x1D0F 186 /** Amazon devices */ 187 #define PCI_DEVICE_ID_ENA_VF 0xEC20 188 #define PCI_DEVICE_ID_ENA_LLQ_VF 0xEC21 189 190 #define ENA_TX_OFFLOAD_MASK (\ 191 PKT_TX_L4_MASK | \ 192 PKT_TX_IP_CKSUM | \ 193 PKT_TX_TCP_SEG) 194 195 #define ENA_TX_OFFLOAD_NOTSUP_MASK \ 196 (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK) 197 198 int ena_logtype_init; 199 int ena_logtype_driver; 200 201 static const struct rte_pci_id pci_id_ena_map[] = { 202 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) }, 203 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) }, 204 { .device_id = 0 }, 205 }; 206 207 static struct ena_aenq_handlers aenq_handlers; 208 209 static int ena_device_init(struct ena_com_dev *ena_dev, 210 struct ena_com_dev_get_features_ctx *get_feat_ctx, 211 bool *wd_state); 212 static int ena_dev_configure(struct rte_eth_dev *dev); 213 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 214 uint16_t nb_pkts); 215 static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 216 uint16_t nb_pkts); 217 static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 218 uint16_t nb_desc, unsigned int socket_id, 219 const struct rte_eth_txconf *tx_conf); 220 static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, 221 uint16_t nb_desc, unsigned int socket_id, 222 const struct rte_eth_rxconf *rx_conf, 223 struct rte_mempool *mp); 224 static uint16_t eth_ena_recv_pkts(void *rx_queue, 225 struct rte_mbuf **rx_pkts, uint16_t nb_pkts); 226 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count); 227 static void ena_init_rings(struct ena_adapter *adapter); 228 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); 229 static int ena_start(struct rte_eth_dev *dev); 230 static void ena_stop(struct rte_eth_dev *dev); 231 static void ena_close(struct rte_eth_dev *dev); 232 static int ena_dev_reset(struct rte_eth_dev *dev); 233 static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); 234 static void ena_rx_queue_release_all(struct rte_eth_dev *dev); 235 static void ena_tx_queue_release_all(struct rte_eth_dev *dev); 236 static void ena_rx_queue_release(void *queue); 237 static void ena_tx_queue_release(void *queue); 238 static void ena_rx_queue_release_bufs(struct ena_ring *ring); 239 static void ena_tx_queue_release_bufs(struct ena_ring *ring); 240 static int ena_link_update(struct rte_eth_dev *dev, 241 int wait_to_complete); 242 static int ena_create_io_queue(struct ena_ring *ring); 243 static void ena_free_io_queues_all(struct ena_adapter *adapter); 244 static int ena_queue_restart(struct ena_ring *ring); 245 static int ena_queue_restart_all(struct rte_eth_dev *dev, 246 enum ena_ring_type ring_type); 247 static void ena_stats_restart(struct rte_eth_dev *dev); 248 static void ena_infos_get(struct rte_eth_dev *dev, 249 struct rte_eth_dev_info *dev_info); 250 static int ena_rss_reta_update(struct rte_eth_dev *dev, 251 struct rte_eth_rss_reta_entry64 *reta_conf, 252 uint16_t reta_size); 253 static int ena_rss_reta_query(struct rte_eth_dev *dev, 254 struct rte_eth_rss_reta_entry64 *reta_conf, 255 uint16_t reta_size); 256 static int ena_get_sset_count(struct rte_eth_dev *dev, int sset); 257 static void ena_interrupt_handler_rte(void *cb_arg); 258 static void ena_timer_wd_callback(struct rte_timer *timer, void *arg); 259 260 static const struct eth_dev_ops ena_dev_ops = { 261 
    .dev_configure        = ena_dev_configure,
    .dev_infos_get        = ena_infos_get,
    .rx_queue_setup       = ena_rx_queue_setup,
    .tx_queue_setup       = ena_tx_queue_setup,
    .dev_start            = ena_start,
    .dev_stop             = ena_stop,
    .link_update          = ena_link_update,
    .stats_get            = ena_stats_get,
    .mtu_set              = ena_mtu_set,
    .rx_queue_release     = ena_rx_queue_release,
    .tx_queue_release     = ena_tx_queue_release,
    .dev_close            = ena_close,
    .dev_reset            = ena_dev_reset,
    .reta_update          = ena_rss_reta_update,
    .reta_query           = ena_rss_reta_query,
};

#define NUMA_NO_NODE    SOCKET_ID_ANY

static inline int ena_cpu_to_node(int cpu)
{
    struct rte_config *config = rte_eal_get_configuration();
    struct rte_fbarray *arr = &config->mem_config->memzones;
    const struct rte_memzone *mz;

    if (unlikely(cpu >= RTE_MAX_MEMZONE))
        return NUMA_NO_NODE;

    mz = rte_fbarray_get(arr, cpu);

    return mz->socket_id;
}

static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
                                       struct ena_com_rx_ctx *ena_rx_ctx)
{
    uint64_t ol_flags = 0;
    uint32_t packet_type = 0;

    if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
        packet_type |= RTE_PTYPE_L4_TCP;
    else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
        packet_type |= RTE_PTYPE_L4_UDP;

    if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
        packet_type |= RTE_PTYPE_L3_IPV4;
    else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
        packet_type |= RTE_PTYPE_L3_IPV6;

    if (unlikely(ena_rx_ctx->l4_csum_err))
        ol_flags |= PKT_RX_L4_CKSUM_BAD;
    if (unlikely(ena_rx_ctx->l3_csum_err))
        ol_flags |= PKT_RX_IP_CKSUM_BAD;

    mbuf->ol_flags = ol_flags;
    mbuf->packet_type = packet_type;
}

static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
                                       struct ena_com_tx_ctx *ena_tx_ctx,
                                       uint64_t queue_offloads)
{
    struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

    if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
        (queue_offloads & QUEUE_OFFLOADS)) {
        /* check if TSO is required */
        if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
            (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
            ena_tx_ctx->tso_enable = true;

            ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
        }

        /* check if L3 checksum is needed */
        if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
            (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
            ena_tx_ctx->l3_csum_enable = true;

        if (mbuf->ol_flags & PKT_TX_IPV6) {
            ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
        } else {
            ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;

            /* set don't fragment (DF) flag */
            if (mbuf->packet_type &
                (RTE_PTYPE_L4_NONFRAG
                 | RTE_PTYPE_INNER_L4_NONFRAG))
                ena_tx_ctx->df = true;
        }

        /* check if L4 checksum is needed; compare against PKT_TX_L4_MASK
         * because the L4 checksum request is a 2-bit field, not a flag
         */
        if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
            (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
            ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
            ena_tx_ctx->l4_csum_enable = true;
        } else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
                    PKT_TX_UDP_CKSUM) &&
                   (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
            ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
            ena_tx_ctx->l4_csum_enable = true;
        } else {
            ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UNKNOWN;
            ena_tx_ctx->l4_csum_enable = false;
        }

        ena_meta->mss = mbuf->tso_segsz;
        ena_meta->l3_hdr_len = mbuf->l3_len;
        ena_meta->l3_hdr_offset = mbuf->l2_len;

        ena_tx_ctx->meta_valid = true;
    } else {
        ena_tx_ctx->meta_valid = false;
    }
}
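/* Illustrative sketch (not part of the driver): for ena_tx_mbuf_prepare()
 * above to turn on TSO and checksum offloads, the application is expected to
 * fill the mbuf Tx metadata and to have requested the matching
 * DEV_TX_OFFLOAD_* flags at queue setup. Assuming a plain (non-tunneled)
 * IPv4/TCP frame and an example MSS of 1448 bytes, that looks roughly like:
 *
 *     mbuf->l2_len = sizeof(struct ether_hdr);
 *     mbuf->l3_len = sizeof(struct ipv4_hdr);
 *     mbuf->l4_len = sizeof(struct tcp_hdr);
 *     mbuf->tso_segsz = 1448;
 *     mbuf->ol_flags = PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
 *                      PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG;
 *
 * With that metadata the code above sets tso_enable, l3_csum_enable and
 * l4_csum_enable and fills ena_meta, so the device segments the frame and
 * inserts the L3/L4 checksums.
 */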
375 376 static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id) 377 { 378 if (likely(req_id < rx_ring->ring_size)) 379 return 0; 380 381 RTE_LOG(ERR, PMD, "Invalid rx req_id: %hu\n", req_id); 382 383 rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; 384 rx_ring->adapter->trigger_reset = true; 385 386 return -EFAULT; 387 } 388 389 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) 390 { 391 struct ena_tx_buffer *tx_info = NULL; 392 393 if (likely(req_id < tx_ring->ring_size)) { 394 tx_info = &tx_ring->tx_buffer_info[req_id]; 395 if (likely(tx_info->mbuf)) 396 return 0; 397 } 398 399 if (tx_info) 400 RTE_LOG(ERR, PMD, "tx_info doesn't have valid mbuf\n"); 401 else 402 RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id); 403 404 /* Trigger device reset */ 405 tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; 406 tx_ring->adapter->trigger_reset = true; 407 return -EFAULT; 408 } 409 410 static void ena_config_host_info(struct ena_com_dev *ena_dev) 411 { 412 struct ena_admin_host_info *host_info; 413 int rc; 414 415 /* Allocate only the host info */ 416 rc = ena_com_allocate_host_info(ena_dev); 417 if (rc) { 418 RTE_LOG(ERR, PMD, "Cannot allocate host info\n"); 419 return; 420 } 421 422 host_info = ena_dev->host_attr.host_info; 423 424 host_info->os_type = ENA_ADMIN_OS_DPDK; 425 host_info->kernel_ver = RTE_VERSION; 426 snprintf((char *)host_info->kernel_ver_str, 427 sizeof(host_info->kernel_ver_str), 428 "%s", rte_version()); 429 host_info->os_dist = RTE_VERSION; 430 snprintf((char *)host_info->os_dist_str, 431 sizeof(host_info->os_dist_str), 432 "%s", rte_version()); 433 host_info->driver_version = 434 (DRV_MODULE_VER_MAJOR) | 435 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | 436 (DRV_MODULE_VER_SUBMINOR << 437 ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); 438 439 rc = ena_com_set_host_attributes(ena_dev); 440 if (rc) { 441 if (rc == -ENA_COM_UNSUPPORTED) 442 RTE_LOG(WARNING, PMD, "Cannot set host attributes\n"); 443 else 444 RTE_LOG(ERR, PMD, "Cannot set host attributes\n"); 445 446 goto err; 447 } 448 449 return; 450 451 err: 452 ena_com_delete_host_info(ena_dev); 453 } 454 455 static int 456 ena_get_sset_count(struct rte_eth_dev *dev, int sset) 457 { 458 if (sset != ETH_SS_STATS) 459 return -EOPNOTSUPP; 460 461 /* Workaround for clang: 462 * touch internal structures to prevent 463 * compiler error 464 */ 465 ENA_TOUCH(ena_stats_global_strings); 466 ENA_TOUCH(ena_stats_tx_strings); 467 ENA_TOUCH(ena_stats_rx_strings); 468 ENA_TOUCH(ena_stats_ena_com_strings); 469 470 return dev->data->nb_tx_queues * 471 (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) + 472 ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM; 473 } 474 475 static void ena_config_debug_area(struct ena_adapter *adapter) 476 { 477 u32 debug_area_size; 478 int rc, ss_count; 479 480 ss_count = ena_get_sset_count(adapter->rte_dev, ETH_SS_STATS); 481 if (ss_count <= 0) { 482 RTE_LOG(ERR, PMD, "SS count is negative\n"); 483 return; 484 } 485 486 /* allocate 32 bytes for each string and 64bit for the value */ 487 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; 488 489 rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size); 490 if (rc) { 491 RTE_LOG(ERR, PMD, "Cannot allocate debug area\n"); 492 return; 493 } 494 495 rc = ena_com_set_host_attributes(&adapter->ena_dev); 496 if (rc) { 497 if (rc == -ENA_COM_UNSUPPORTED) 498 RTE_LOG(WARNING, PMD, "Cannot set host attributes\n"); 499 else 500 RTE_LOG(ERR, PMD, "Cannot set host 
attributes\n"); 501 502 goto err; 503 } 504 505 return; 506 err: 507 ena_com_delete_debug_area(&adapter->ena_dev); 508 } 509 510 static void ena_close(struct rte_eth_dev *dev) 511 { 512 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); 513 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 514 struct ena_adapter *adapter = 515 (struct ena_adapter *)(dev->data->dev_private); 516 517 if (adapter->state == ENA_ADAPTER_STATE_RUNNING) 518 ena_stop(dev); 519 adapter->state = ENA_ADAPTER_STATE_CLOSED; 520 521 ena_rx_queue_release_all(dev); 522 ena_tx_queue_release_all(dev); 523 524 rte_free(adapter->drv_stats); 525 adapter->drv_stats = NULL; 526 527 rte_intr_disable(intr_handle); 528 rte_intr_callback_unregister(intr_handle, 529 ena_interrupt_handler_rte, 530 adapter); 531 532 /* 533 * MAC is not allocated dynamically. Setting NULL should prevent from 534 * release of the resource in the rte_eth_dev_release_port(). 535 */ 536 dev->data->mac_addrs = NULL; 537 } 538 539 static int 540 ena_dev_reset(struct rte_eth_dev *dev) 541 { 542 struct rte_mempool *mb_pool_rx[ENA_MAX_NUM_QUEUES]; 543 struct rte_eth_dev *eth_dev; 544 struct rte_pci_device *pci_dev; 545 struct rte_intr_handle *intr_handle; 546 struct ena_com_dev *ena_dev; 547 struct ena_com_dev_get_features_ctx get_feat_ctx; 548 struct ena_adapter *adapter; 549 int nb_queues; 550 int rc, i; 551 bool wd_state; 552 553 adapter = (struct ena_adapter *)(dev->data->dev_private); 554 ena_dev = &adapter->ena_dev; 555 eth_dev = adapter->rte_dev; 556 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 557 intr_handle = &pci_dev->intr_handle; 558 nb_queues = eth_dev->data->nb_rx_queues; 559 560 ena_com_set_admin_running_state(ena_dev, false); 561 562 rc = ena_com_dev_reset(ena_dev, adapter->reset_reason); 563 if (rc) 564 RTE_LOG(ERR, PMD, "Device reset failed\n"); 565 566 for (i = 0; i < nb_queues; i++) 567 mb_pool_rx[i] = adapter->rx_ring[i].mb_pool; 568 569 ena_rx_queue_release_all(eth_dev); 570 ena_tx_queue_release_all(eth_dev); 571 572 rte_intr_disable(intr_handle); 573 574 ena_com_abort_admin_commands(ena_dev); 575 ena_com_wait_for_abort_completion(ena_dev); 576 ena_com_admin_destroy(ena_dev); 577 ena_com_mmio_reg_read_request_destroy(ena_dev); 578 579 rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state); 580 if (rc) { 581 PMD_INIT_LOG(CRIT, "Cannot initialize device\n"); 582 return rc; 583 } 584 adapter->wd_state = wd_state; 585 586 rte_intr_enable(intr_handle); 587 ena_com_set_admin_polling_mode(ena_dev, false); 588 ena_com_admin_aenq_enable(ena_dev); 589 590 for (i = 0; i < nb_queues; ++i) 591 ena_rx_queue_setup(eth_dev, i, adapter->rx_ring_size, 0, NULL, 592 mb_pool_rx[i]); 593 594 for (i = 0; i < nb_queues; ++i) 595 ena_tx_queue_setup(eth_dev, i, adapter->tx_ring_size, 0, NULL); 596 597 adapter->trigger_reset = false; 598 599 return 0; 600 } 601 602 static int ena_rss_reta_update(struct rte_eth_dev *dev, 603 struct rte_eth_rss_reta_entry64 *reta_conf, 604 uint16_t reta_size) 605 { 606 struct ena_adapter *adapter = 607 (struct ena_adapter *)(dev->data->dev_private); 608 struct ena_com_dev *ena_dev = &adapter->ena_dev; 609 int rc, i; 610 u16 entry_value; 611 int conf_idx; 612 int idx; 613 614 if ((reta_size == 0) || (reta_conf == NULL)) 615 return -EINVAL; 616 617 if (reta_size > ENA_RX_RSS_TABLE_SIZE) { 618 RTE_LOG(WARNING, PMD, 619 "indirection table %d is bigger than supported (%d)\n", 620 reta_size, ENA_RX_RSS_TABLE_SIZE); 621 return -EINVAL; 622 } 623 624 for (i = 0 ; i < reta_size ; i++) { 625 /* each reta_conf is for 64 entries. 
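         * (RTE_RETA_GROUP_SIZE == 64); for example, RETA entry 70 lands in
         * reta_conf[70 / 64] == reta_conf[1] at index 70 % 64 == 6, which is
         * what conf_idx and idx below compute.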
626 * to support 128 we use 2 conf of 64 627 */ 628 conf_idx = i / RTE_RETA_GROUP_SIZE; 629 idx = i % RTE_RETA_GROUP_SIZE; 630 if (TEST_BIT(reta_conf[conf_idx].mask, idx)) { 631 entry_value = 632 ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]); 633 634 rc = ena_com_indirect_table_fill_entry(ena_dev, 635 i, 636 entry_value); 637 if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { 638 RTE_LOG(ERR, PMD, 639 "Cannot fill indirect table\n"); 640 return rc; 641 } 642 } 643 } 644 645 rc = ena_com_indirect_table_set(ena_dev); 646 if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { 647 RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n"); 648 return rc; 649 } 650 651 RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n", 652 __func__, reta_size, adapter->rte_dev->data->port_id); 653 654 return 0; 655 } 656 657 /* Query redirection table. */ 658 static int ena_rss_reta_query(struct rte_eth_dev *dev, 659 struct rte_eth_rss_reta_entry64 *reta_conf, 660 uint16_t reta_size) 661 { 662 struct ena_adapter *adapter = 663 (struct ena_adapter *)(dev->data->dev_private); 664 struct ena_com_dev *ena_dev = &adapter->ena_dev; 665 int rc; 666 int i; 667 u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0}; 668 int reta_conf_idx; 669 int reta_idx; 670 671 if (reta_size == 0 || reta_conf == NULL || 672 (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL))) 673 return -EINVAL; 674 675 rc = ena_com_indirect_table_get(ena_dev, indirect_table); 676 if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) { 677 RTE_LOG(ERR, PMD, "cannot get indirect table\n"); 678 return -ENOTSUP; 679 } 680 681 for (i = 0 ; i < reta_size ; i++) { 682 reta_conf_idx = i / RTE_RETA_GROUP_SIZE; 683 reta_idx = i % RTE_RETA_GROUP_SIZE; 684 if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx)) 685 reta_conf[reta_conf_idx].reta[reta_idx] = 686 ENA_IO_RXQ_IDX_REV(indirect_table[i]); 687 } 688 689 return 0; 690 } 691 692 static int ena_rss_init_default(struct ena_adapter *adapter) 693 { 694 struct ena_com_dev *ena_dev = &adapter->ena_dev; 695 uint16_t nb_rx_queues = adapter->rte_dev->data->nb_rx_queues; 696 int rc, i; 697 u32 val; 698 699 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); 700 if (unlikely(rc)) { 701 RTE_LOG(ERR, PMD, "Cannot init indirect table\n"); 702 goto err_rss_init; 703 } 704 705 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { 706 val = i % nb_rx_queues; 707 rc = ena_com_indirect_table_fill_entry(ena_dev, i, 708 ENA_IO_RXQ_IDX(val)); 709 if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 710 RTE_LOG(ERR, PMD, "Cannot fill indirect table\n"); 711 goto err_fill_indir; 712 } 713 } 714 715 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, 716 ENA_HASH_KEY_SIZE, 0xFFFFFFFF); 717 if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 718 RTE_LOG(INFO, PMD, "Cannot fill hash function\n"); 719 goto err_fill_indir; 720 } 721 722 rc = ena_com_set_default_hash_ctrl(ena_dev); 723 if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 724 RTE_LOG(INFO, PMD, "Cannot fill hash control\n"); 725 goto err_fill_indir; 726 } 727 728 rc = ena_com_indirect_table_set(ena_dev); 729 if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) { 730 RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n"); 731 goto err_fill_indir; 732 } 733 RTE_LOG(DEBUG, PMD, "RSS configured for port %d\n", 734 adapter->rte_dev->data->port_id); 735 736 return 0; 737 738 err_fill_indir: 739 ena_com_rss_destroy(ena_dev); 740 err_rss_init: 741 742 return rc; 743 } 744 745 static void ena_rx_queue_release_all(struct rte_eth_dev *dev) 746 { 747 struct ena_ring **queues 
= (struct ena_ring **)dev->data->rx_queues; 748 int nb_queues = dev->data->nb_rx_queues; 749 int i; 750 751 for (i = 0; i < nb_queues; i++) 752 ena_rx_queue_release(queues[i]); 753 } 754 755 static void ena_tx_queue_release_all(struct rte_eth_dev *dev) 756 { 757 struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues; 758 int nb_queues = dev->data->nb_tx_queues; 759 int i; 760 761 for (i = 0; i < nb_queues; i++) 762 ena_tx_queue_release(queues[i]); 763 } 764 765 static void ena_rx_queue_release(void *queue) 766 { 767 struct ena_ring *ring = (struct ena_ring *)queue; 768 769 ena_assert_msg(ring->configured, 770 "API violation - releasing not configured queue"); 771 ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING, 772 "API violation"); 773 774 /* Free ring resources */ 775 if (ring->rx_buffer_info) 776 rte_free(ring->rx_buffer_info); 777 ring->rx_buffer_info = NULL; 778 779 if (ring->rx_refill_buffer) 780 rte_free(ring->rx_refill_buffer); 781 ring->rx_refill_buffer = NULL; 782 783 if (ring->empty_rx_reqs) 784 rte_free(ring->empty_rx_reqs); 785 ring->empty_rx_reqs = NULL; 786 787 ring->configured = 0; 788 789 RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n", 790 ring->port_id, ring->id); 791 } 792 793 static void ena_tx_queue_release(void *queue) 794 { 795 struct ena_ring *ring = (struct ena_ring *)queue; 796 797 ena_assert_msg(ring->configured, 798 "API violation. Releasing not configured queue"); 799 ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING, 800 "API violation"); 801 802 /* Free all bufs */ 803 ena_tx_queue_release_bufs(ring); 804 805 /* Free ring resources */ 806 if (ring->tx_buffer_info) 807 rte_free(ring->tx_buffer_info); 808 809 if (ring->empty_tx_reqs) 810 rte_free(ring->empty_tx_reqs); 811 812 ring->empty_tx_reqs = NULL; 813 ring->tx_buffer_info = NULL; 814 815 ring->configured = 0; 816 817 RTE_LOG(NOTICE, PMD, "TX Queue %d:%d released\n", 818 ring->port_id, ring->id); 819 } 820 821 static void ena_rx_queue_release_bufs(struct ena_ring *ring) 822 { 823 unsigned int ring_mask = ring->ring_size - 1; 824 825 while (ring->next_to_clean != ring->next_to_use) { 826 struct rte_mbuf *m = 827 ring->rx_buffer_info[ring->next_to_clean & ring_mask]; 828 829 if (m) 830 rte_mbuf_raw_free(m); 831 832 ring->next_to_clean++; 833 } 834 } 835 836 static void ena_tx_queue_release_bufs(struct ena_ring *ring) 837 { 838 unsigned int i; 839 840 for (i = 0; i < ring->ring_size; ++i) { 841 struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i]; 842 843 if (tx_buf->mbuf) 844 rte_pktmbuf_free(tx_buf->mbuf); 845 846 ring->next_to_clean++; 847 } 848 } 849 850 static int ena_link_update(struct rte_eth_dev *dev, 851 __rte_unused int wait_to_complete) 852 { 853 struct rte_eth_link *link = &dev->data->dev_link; 854 struct ena_adapter *adapter; 855 856 adapter = (struct ena_adapter *)(dev->data->dev_private); 857 858 link->link_status = adapter->link_status ? 
ETH_LINK_UP : ETH_LINK_DOWN; 859 link->link_speed = ETH_SPEED_NUM_NONE; 860 link->link_duplex = ETH_LINK_FULL_DUPLEX; 861 862 return 0; 863 } 864 865 static int ena_queue_restart_all(struct rte_eth_dev *dev, 866 enum ena_ring_type ring_type) 867 { 868 struct ena_adapter *adapter = 869 (struct ena_adapter *)(dev->data->dev_private); 870 struct ena_ring *queues = NULL; 871 int nb_queues; 872 int i = 0; 873 int rc = 0; 874 875 if (ring_type == ENA_RING_TYPE_RX) { 876 queues = adapter->rx_ring; 877 nb_queues = dev->data->nb_rx_queues; 878 } else { 879 queues = adapter->tx_ring; 880 nb_queues = dev->data->nb_tx_queues; 881 } 882 for (i = 0; i < nb_queues; i++) { 883 if (queues[i].configured) { 884 if (ring_type == ENA_RING_TYPE_RX) { 885 ena_assert_msg( 886 dev->data->rx_queues[i] == &queues[i], 887 "Inconsistent state of rx queues\n"); 888 } else { 889 ena_assert_msg( 890 dev->data->tx_queues[i] == &queues[i], 891 "Inconsistent state of tx queues\n"); 892 } 893 894 rc = ena_queue_restart(&queues[i]); 895 896 if (rc) { 897 PMD_INIT_LOG(ERR, 898 "failed to restart queue %d type(%d)", 899 i, ring_type); 900 return rc; 901 } 902 } 903 } 904 905 return 0; 906 } 907 908 static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter) 909 { 910 uint32_t max_frame_len = adapter->max_mtu; 911 912 if (adapter->rte_eth_dev_data->dev_conf.rxmode.offloads & 913 DEV_RX_OFFLOAD_JUMBO_FRAME) 914 max_frame_len = 915 adapter->rte_eth_dev_data->dev_conf.rxmode.max_rx_pkt_len; 916 917 return max_frame_len; 918 } 919 920 static int ena_check_valid_conf(struct ena_adapter *adapter) 921 { 922 uint32_t max_frame_len = ena_get_mtu_conf(adapter); 923 924 if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) { 925 PMD_INIT_LOG(ERR, "Unsupported MTU of %d. " 926 "max mtu: %d, min mtu: %d\n", 927 max_frame_len, adapter->max_mtu, ENA_MIN_MTU); 928 return ENA_COM_UNSUPPORTED; 929 } 930 931 return 0; 932 } 933 934 static int 935 ena_calc_queue_size(struct ena_com_dev *ena_dev, 936 u16 *max_tx_sgl_size, 937 struct ena_com_dev_get_features_ctx *get_feat_ctx) 938 { 939 uint32_t queue_size = ENA_DEFAULT_RING_SIZE; 940 941 queue_size = RTE_MIN(queue_size, 942 get_feat_ctx->max_queues.max_cq_depth); 943 queue_size = RTE_MIN(queue_size, 944 get_feat_ctx->max_queues.max_sq_depth); 945 946 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 947 queue_size = RTE_MIN(queue_size, 948 get_feat_ctx->max_queues.max_llq_depth); 949 950 /* Round down to power of 2 */ 951 if (!rte_is_power_of_2(queue_size)) 952 queue_size = rte_align32pow2(queue_size >> 1); 953 954 if (unlikely(queue_size == 0)) { 955 PMD_INIT_LOG(ERR, "Invalid queue size"); 956 return -EFAULT; 957 } 958 959 *max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, 960 get_feat_ctx->max_queues.max_packet_tx_descs); 961 962 return queue_size; 963 } 964 965 static void ena_stats_restart(struct rte_eth_dev *dev) 966 { 967 struct ena_adapter *adapter = 968 (struct ena_adapter *)(dev->data->dev_private); 969 970 rte_atomic64_init(&adapter->drv_stats->ierrors); 971 rte_atomic64_init(&adapter->drv_stats->oerrors); 972 rte_atomic64_init(&adapter->drv_stats->rx_nombuf); 973 } 974 975 static int ena_stats_get(struct rte_eth_dev *dev, 976 struct rte_eth_stats *stats) 977 { 978 struct ena_admin_basic_stats ena_stats; 979 struct ena_adapter *adapter = 980 (struct ena_adapter *)(dev->data->dev_private); 981 struct ena_com_dev *ena_dev = &adapter->ena_dev; 982 int rc; 983 984 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 985 return -ENOTSUP; 986 987 memset(&ena_stats, 0, 
sizeof(ena_stats)); 988 rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats); 989 if (unlikely(rc)) { 990 RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA"); 991 return rc; 992 } 993 994 /* Set of basic statistics from ENA */ 995 stats->ipackets = __MERGE_64B_H_L(ena_stats.rx_pkts_high, 996 ena_stats.rx_pkts_low); 997 stats->opackets = __MERGE_64B_H_L(ena_stats.tx_pkts_high, 998 ena_stats.tx_pkts_low); 999 stats->ibytes = __MERGE_64B_H_L(ena_stats.rx_bytes_high, 1000 ena_stats.rx_bytes_low); 1001 stats->obytes = __MERGE_64B_H_L(ena_stats.tx_bytes_high, 1002 ena_stats.tx_bytes_low); 1003 stats->imissed = __MERGE_64B_H_L(ena_stats.rx_drops_high, 1004 ena_stats.rx_drops_low); 1005 1006 /* Driver related stats */ 1007 stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); 1008 stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); 1009 stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); 1010 return 0; 1011 } 1012 1013 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 1014 { 1015 struct ena_adapter *adapter; 1016 struct ena_com_dev *ena_dev; 1017 int rc = 0; 1018 1019 ena_assert_msg(dev->data != NULL, "Uninitialized device"); 1020 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device"); 1021 adapter = (struct ena_adapter *)(dev->data->dev_private); 1022 1023 ena_dev = &adapter->ena_dev; 1024 ena_assert_msg(ena_dev != NULL, "Uninitialized device"); 1025 1026 if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) { 1027 RTE_LOG(ERR, PMD, 1028 "Invalid MTU setting. new_mtu: %d " 1029 "max mtu: %d min mtu: %d\n", 1030 mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU); 1031 return -EINVAL; 1032 } 1033 1034 rc = ena_com_set_dev_mtu(ena_dev, mtu); 1035 if (rc) 1036 RTE_LOG(ERR, PMD, "Could not set MTU: %d\n", mtu); 1037 else 1038 RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu); 1039 1040 return rc; 1041 } 1042 1043 static int ena_start(struct rte_eth_dev *dev) 1044 { 1045 struct ena_adapter *adapter = 1046 (struct ena_adapter *)(dev->data->dev_private); 1047 uint64_t ticks; 1048 int rc = 0; 1049 1050 rc = ena_check_valid_conf(adapter); 1051 if (rc) 1052 return rc; 1053 1054 rc = ena_queue_restart_all(dev, ENA_RING_TYPE_RX); 1055 if (rc) 1056 return rc; 1057 1058 rc = ena_queue_restart_all(dev, ENA_RING_TYPE_TX); 1059 if (rc) 1060 return rc; 1061 1062 if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode & 1063 ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) { 1064 rc = ena_rss_init_default(adapter); 1065 if (rc) 1066 return rc; 1067 } 1068 1069 ena_stats_restart(dev); 1070 1071 adapter->timestamp_wd = rte_get_timer_cycles(); 1072 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; 1073 1074 ticks = rte_get_timer_hz(); 1075 rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(), 1076 ena_timer_wd_callback, adapter); 1077 1078 adapter->state = ENA_ADAPTER_STATE_RUNNING; 1079 1080 return 0; 1081 } 1082 1083 static void ena_stop(struct rte_eth_dev *dev) 1084 { 1085 struct ena_adapter *adapter = 1086 (struct ena_adapter *)(dev->data->dev_private); 1087 1088 rte_timer_stop_sync(&adapter->timer_wd); 1089 ena_free_io_queues_all(adapter); 1090 1091 adapter->state = ENA_ADAPTER_STATE_STOPPED; 1092 } 1093 1094 static int ena_create_io_queue(struct ena_ring *ring) 1095 { 1096 struct ena_adapter *adapter; 1097 struct ena_com_dev *ena_dev; 1098 struct ena_com_create_io_ctx ctx = 1099 /* policy set to _HOST just to satisfy icc compiler */ 1100 { ENA_ADMIN_PLACEMENT_POLICY_HOST, 1101 0, 0, 0, 0, 0 }; 1102 uint16_t 
ena_qid; 1103 unsigned int i; 1104 int rc; 1105 1106 adapter = ring->adapter; 1107 ena_dev = &adapter->ena_dev; 1108 1109 if (ring->type == ENA_RING_TYPE_TX) { 1110 ena_qid = ENA_IO_TXQ_IDX(ring->id); 1111 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; 1112 ctx.mem_queue_type = ena_dev->tx_mem_queue_type; 1113 ctx.queue_size = adapter->tx_ring_size; 1114 for (i = 0; i < ring->ring_size; i++) 1115 ring->empty_tx_reqs[i] = i; 1116 } else { 1117 ena_qid = ENA_IO_RXQ_IDX(ring->id); 1118 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; 1119 ctx.queue_size = adapter->rx_ring_size; 1120 for (i = 0; i < ring->ring_size; i++) 1121 ring->empty_rx_reqs[i] = i; 1122 } 1123 ctx.qid = ena_qid; 1124 ctx.msix_vector = -1; /* interrupts not used */ 1125 ctx.numa_node = ena_cpu_to_node(ring->id); 1126 1127 rc = ena_com_create_io_queue(ena_dev, &ctx); 1128 if (rc) { 1129 RTE_LOG(ERR, PMD, 1130 "failed to create io queue #%d (qid:%d) rc: %d\n", 1131 ring->id, ena_qid, rc); 1132 return rc; 1133 } 1134 1135 rc = ena_com_get_io_handlers(ena_dev, ena_qid, 1136 &ring->ena_com_io_sq, 1137 &ring->ena_com_io_cq); 1138 if (rc) { 1139 RTE_LOG(ERR, PMD, 1140 "Failed to get io queue handlers. queue num %d rc: %d\n", 1141 ring->id, rc); 1142 ena_com_destroy_io_queue(ena_dev, ena_qid); 1143 return rc; 1144 } 1145 1146 if (ring->type == ENA_RING_TYPE_TX) 1147 ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node); 1148 1149 return 0; 1150 } 1151 1152 static void ena_free_io_queues_all(struct ena_adapter *adapter) 1153 { 1154 struct rte_eth_dev *eth_dev = adapter->rte_dev; 1155 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1156 int i; 1157 uint16_t ena_qid; 1158 uint16_t nb_rxq = eth_dev->data->nb_rx_queues; 1159 uint16_t nb_txq = eth_dev->data->nb_tx_queues; 1160 1161 for (i = 0; i < nb_txq; ++i) { 1162 ena_qid = ENA_IO_TXQ_IDX(i); 1163 ena_com_destroy_io_queue(ena_dev, ena_qid); 1164 1165 ena_tx_queue_release_bufs(&adapter->tx_ring[i]); 1166 } 1167 1168 for (i = 0; i < nb_rxq; ++i) { 1169 ena_qid = ENA_IO_RXQ_IDX(i); 1170 ena_com_destroy_io_queue(ena_dev, ena_qid); 1171 1172 ena_rx_queue_release_bufs(&adapter->rx_ring[i]); 1173 } 1174 } 1175 1176 static int ena_queue_restart(struct ena_ring *ring) 1177 { 1178 int rc, bufs_num; 1179 1180 ena_assert_msg(ring->configured == 1, 1181 "Trying to restart unconfigured queue\n"); 1182 1183 rc = ena_create_io_queue(ring); 1184 if (rc) { 1185 PMD_INIT_LOG(ERR, "Failed to create IO queue!\n"); 1186 return rc; 1187 } 1188 1189 ring->next_to_clean = 0; 1190 ring->next_to_use = 0; 1191 1192 if (ring->type == ENA_RING_TYPE_TX) 1193 return 0; 1194 1195 bufs_num = ring->ring_size - 1; 1196 rc = ena_populate_rx_queue(ring, bufs_num); 1197 if (rc != bufs_num) { 1198 PMD_INIT_LOG(ERR, "Failed to populate rx ring !"); 1199 return ENA_COM_FAULT; 1200 } 1201 1202 return 0; 1203 } 1204 1205 static int ena_tx_queue_setup(struct rte_eth_dev *dev, 1206 uint16_t queue_idx, 1207 uint16_t nb_desc, 1208 __rte_unused unsigned int socket_id, 1209 const struct rte_eth_txconf *tx_conf) 1210 { 1211 struct ena_ring *txq = NULL; 1212 struct ena_adapter *adapter = 1213 (struct ena_adapter *)(dev->data->dev_private); 1214 unsigned int i; 1215 1216 txq = &adapter->tx_ring[queue_idx]; 1217 1218 if (txq->configured) { 1219 RTE_LOG(CRIT, PMD, 1220 "API violation. 
Queue %d is already configured\n", 1221 queue_idx); 1222 return ENA_COM_FAULT; 1223 } 1224 1225 if (!rte_is_power_of_2(nb_desc)) { 1226 RTE_LOG(ERR, PMD, 1227 "Unsupported size of TX queue: %d is not a power of 2.", 1228 nb_desc); 1229 return -EINVAL; 1230 } 1231 1232 if (nb_desc > adapter->tx_ring_size) { 1233 RTE_LOG(ERR, PMD, 1234 "Unsupported size of TX queue (max size: %d)\n", 1235 adapter->tx_ring_size); 1236 return -EINVAL; 1237 } 1238 1239 txq->port_id = dev->data->port_id; 1240 txq->next_to_clean = 0; 1241 txq->next_to_use = 0; 1242 txq->ring_size = nb_desc; 1243 1244 txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info", 1245 sizeof(struct ena_tx_buffer) * 1246 txq->ring_size, 1247 RTE_CACHE_LINE_SIZE); 1248 if (!txq->tx_buffer_info) { 1249 RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n"); 1250 return -ENOMEM; 1251 } 1252 1253 txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs", 1254 sizeof(u16) * txq->ring_size, 1255 RTE_CACHE_LINE_SIZE); 1256 if (!txq->empty_tx_reqs) { 1257 RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n"); 1258 rte_free(txq->tx_buffer_info); 1259 return -ENOMEM; 1260 } 1261 1262 for (i = 0; i < txq->ring_size; i++) 1263 txq->empty_tx_reqs[i] = i; 1264 1265 if (tx_conf != NULL) { 1266 txq->offloads = 1267 tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 1268 } 1269 1270 /* Store pointer to this queue in upper layer */ 1271 txq->configured = 1; 1272 dev->data->tx_queues[queue_idx] = txq; 1273 1274 return 0; 1275 } 1276 1277 static int ena_rx_queue_setup(struct rte_eth_dev *dev, 1278 uint16_t queue_idx, 1279 uint16_t nb_desc, 1280 __rte_unused unsigned int socket_id, 1281 __rte_unused const struct rte_eth_rxconf *rx_conf, 1282 struct rte_mempool *mp) 1283 { 1284 struct ena_adapter *adapter = 1285 (struct ena_adapter *)(dev->data->dev_private); 1286 struct ena_ring *rxq = NULL; 1287 int i; 1288 1289 rxq = &adapter->rx_ring[queue_idx]; 1290 if (rxq->configured) { 1291 RTE_LOG(CRIT, PMD, 1292 "API violation. 
Queue %d is already configured\n",
            queue_idx);
        return ENA_COM_FAULT;
    }

    if (!rte_is_power_of_2(nb_desc)) {
        RTE_LOG(ERR, PMD,
            "Unsupported size of RX queue: %d is not a power of 2.",
            nb_desc);
        return -EINVAL;
    }

    if (nb_desc > adapter->rx_ring_size) {
        RTE_LOG(ERR, PMD,
            "Unsupported size of RX queue (max size: %d)\n",
            adapter->rx_ring_size);
        return -EINVAL;
    }

    rxq->port_id = dev->data->port_id;
    rxq->next_to_clean = 0;
    rxq->next_to_use = 0;
    rxq->ring_size = nb_desc;
    rxq->mb_pool = mp;

    rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
                                      sizeof(struct rte_mbuf *) * nb_desc,
                                      RTE_CACHE_LINE_SIZE);
    if (!rxq->rx_buffer_info) {
        RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n");
        return -ENOMEM;
    }

    rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
                                        sizeof(struct rte_mbuf *) * nb_desc,
                                        RTE_CACHE_LINE_SIZE);

    if (!rxq->rx_refill_buffer) {
        RTE_LOG(ERR, PMD, "failed to alloc mem for rx refill buffer\n");
        rte_free(rxq->rx_buffer_info);
        rxq->rx_buffer_info = NULL;
        return -ENOMEM;
    }

    rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
                                     sizeof(uint16_t) * nb_desc,
                                     RTE_CACHE_LINE_SIZE);
    if (!rxq->empty_rx_reqs) {
        RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
        rte_free(rxq->rx_buffer_info);
        rxq->rx_buffer_info = NULL;
        rte_free(rxq->rx_refill_buffer);
        rxq->rx_refill_buffer = NULL;
        return -ENOMEM;
    }

    for (i = 0; i < nb_desc; i++)
        rxq->empty_rx_reqs[i] = i;

    /* Store pointer to this queue in upper layer */
    rxq->configured = 1;
    dev->data->rx_queues[queue_idx] = rxq;

    return 0;
}

static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
{
    unsigned int i;
    int rc;
    uint16_t ring_size = rxq->ring_size;
    uint16_t ring_mask = ring_size - 1;
    uint16_t next_to_use = rxq->next_to_use;
    uint16_t in_use, req_id;
    struct rte_mbuf **mbufs = rxq->rx_refill_buffer;

    if (unlikely(!count))
        return 0;

    in_use = rxq->next_to_use - rxq->next_to_clean;
    ena_assert_msg(((in_use + count) < ring_size), "bad ring state");

    /* get resources for incoming packets */
    rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count);
    if (unlikely(rc < 0)) {
        rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
        PMD_RX_LOG(DEBUG, "there are not enough free buffers");
        return 0;
    }

    for (i = 0; i < count; i++) {
        uint16_t next_to_use_masked = next_to_use & ring_mask;
        struct rte_mbuf *mbuf = mbufs[i];
        struct ena_com_buf ebuf;

        if (likely((i + 4) < count))
            rte_prefetch0(mbufs[i + 4]);

        req_id = rxq->empty_rx_reqs[next_to_use_masked];
        rc = validate_rx_req_id(rxq, req_id);
        if (unlikely(rc < 0))
            break;
        rxq->rx_buffer_info[req_id] = mbuf;

        /* prepare physical address for DMA transaction */
        ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
        ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
        /* pass resource to device */
        rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
                                        &ebuf, req_id);
        if (unlikely(rc)) {
            RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
            rxq->rx_buffer_info[req_id] = NULL;
            break;
        }
        next_to_use++;
    }

    if (unlikely(i < count)) {
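        /* Not every mbuf could be attached to an Rx descriptor (req id
         * validation or ena_com_add_single_rx_desc() failed above): warn
         * and return the unused mbufs to the mempool so they are not leaked.
         */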
        RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d "
            "buffers (from %d)\n", rxq->id, i, count);
        rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]),
                             count - i);
    }

    /* When we submitted free resources to the device... */
    if (likely(i > 0)) {
        /* ...let HW know that it can fill buffers with data.
         *
         * Add a memory barrier to make sure the descriptors were written
         * before issuing a doorbell.
         */
        rte_wmb();
        ena_com_write_sq_doorbell(rxq->ena_com_io_sq);

        rxq->next_to_use = next_to_use;
    }

    return i;
}

static int ena_device_init(struct ena_com_dev *ena_dev,
                           struct ena_com_dev_get_features_ctx *get_feat_ctx,
                           bool *wd_state)
{
    uint32_t aenq_groups;
    int rc;
    bool readless_supported;

    /* Initialize mmio registers */
    rc = ena_com_mmio_reg_read_request_init(ena_dev);
    if (rc) {
        RTE_LOG(ERR, PMD, "failed to init mmio read less\n");
        return rc;
    }

    /* The PCIe configuration space revision id indicates whether mmio
     * register read is disabled.
     */
    readless_supported =
        !(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id
          & ENA_MMIO_DISABLE_REG_READ);
    ena_com_set_mmio_read_mode(ena_dev, readless_supported);

    /* reset device */
    rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
    if (rc) {
        RTE_LOG(ERR, PMD, "cannot reset device\n");
        goto err_mmio_read_less;
    }

    /* check FW version */
    rc = ena_com_validate_version(ena_dev);
    if (rc) {
        RTE_LOG(ERR, PMD, "device version is too low\n");
        goto err_mmio_read_less;
    }

    ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);

    /* ENA device administration layer init */
    rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
    if (rc) {
        RTE_LOG(ERR, PMD,
            "cannot initialize ena admin queue with device\n");
        goto err_mmio_read_less;
    }

    /* To enable the MSI-X interrupts the driver needs to know the number
     * of queues. So the driver uses polling mode to retrieve this
     * information.
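     *
     * Once the device features have been retrieved, the slow path switches
     * the admin queue back to interrupt mode; see the
     * ena_com_set_admin_polling_mode(ena_dev, false) and
     * ena_com_admin_aenq_enable() calls in eth_ena_dev_init() and
     * ena_dev_reset().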
1483 */ 1484 ena_com_set_admin_polling_mode(ena_dev, true); 1485 1486 ena_config_host_info(ena_dev); 1487 1488 /* Get Device Attributes and features */ 1489 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); 1490 if (rc) { 1491 RTE_LOG(ERR, PMD, 1492 "cannot get attribute for ena device rc= %d\n", rc); 1493 goto err_admin_init; 1494 } 1495 1496 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | 1497 BIT(ENA_ADMIN_NOTIFICATION) | 1498 BIT(ENA_ADMIN_KEEP_ALIVE) | 1499 BIT(ENA_ADMIN_FATAL_ERROR) | 1500 BIT(ENA_ADMIN_WARNING); 1501 1502 aenq_groups &= get_feat_ctx->aenq.supported_groups; 1503 rc = ena_com_set_aenq_config(ena_dev, aenq_groups); 1504 if (rc) { 1505 RTE_LOG(ERR, PMD, "Cannot configure aenq groups rc: %d\n", rc); 1506 goto err_admin_init; 1507 } 1508 1509 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); 1510 1511 return 0; 1512 1513 err_admin_init: 1514 ena_com_admin_destroy(ena_dev); 1515 1516 err_mmio_read_less: 1517 ena_com_mmio_reg_read_request_destroy(ena_dev); 1518 1519 return rc; 1520 } 1521 1522 static void ena_interrupt_handler_rte(void *cb_arg) 1523 { 1524 struct ena_adapter *adapter = (struct ena_adapter *)cb_arg; 1525 struct ena_com_dev *ena_dev = &adapter->ena_dev; 1526 1527 ena_com_admin_q_comp_intr_handler(ena_dev); 1528 if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED)) 1529 ena_com_aenq_intr_handler(ena_dev, adapter); 1530 } 1531 1532 static void check_for_missing_keep_alive(struct ena_adapter *adapter) 1533 { 1534 if (!adapter->wd_state) 1535 return; 1536 1537 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) 1538 return; 1539 1540 if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >= 1541 adapter->keep_alive_timeout)) { 1542 RTE_LOG(ERR, PMD, "Keep alive timeout\n"); 1543 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; 1544 adapter->trigger_reset = true; 1545 } 1546 } 1547 1548 /* Check if admin queue is enabled */ 1549 static void check_for_admin_com_state(struct ena_adapter *adapter) 1550 { 1551 if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) { 1552 RTE_LOG(ERR, PMD, "ENA admin queue is not in running state!\n"); 1553 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; 1554 adapter->trigger_reset = true; 1555 } 1556 } 1557 1558 static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, 1559 void *arg) 1560 { 1561 struct ena_adapter *adapter = (struct ena_adapter *)arg; 1562 struct rte_eth_dev *dev = adapter->rte_dev; 1563 1564 check_for_missing_keep_alive(adapter); 1565 check_for_admin_com_state(adapter); 1566 1567 if (unlikely(adapter->trigger_reset)) { 1568 RTE_LOG(ERR, PMD, "Trigger reset is on\n"); 1569 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, 1570 NULL); 1571 } 1572 } 1573 1574 static int ena_calc_io_queue_num(__rte_unused struct ena_com_dev *ena_dev, 1575 struct ena_com_dev_get_features_ctx *get_feat_ctx) 1576 { 1577 int io_sq_num, io_cq_num, io_queue_num; 1578 1579 io_sq_num = get_feat_ctx->max_queues.max_sq_num; 1580 io_cq_num = get_feat_ctx->max_queues.max_cq_num; 1581 1582 io_queue_num = RTE_MIN(io_sq_num, io_cq_num); 1583 1584 if (unlikely(io_queue_num == 0)) { 1585 RTE_LOG(ERR, PMD, "Number of IO queues should not be 0\n"); 1586 return -EFAULT; 1587 } 1588 1589 return io_queue_num; 1590 } 1591 1592 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) 1593 { 1594 struct rte_pci_device *pci_dev; 1595 struct rte_intr_handle *intr_handle; 1596 struct ena_adapter *adapter = 1597 (struct ena_adapter *)(eth_dev->data->dev_private); 1598 struct ena_com_dev *ena_dev = 
&adapter->ena_dev; 1599 struct ena_com_dev_get_features_ctx get_feat_ctx; 1600 int queue_size, rc; 1601 u16 tx_sgl_size = 0; 1602 1603 static int adapters_found; 1604 bool wd_state; 1605 1606 memset(adapter, 0, sizeof(struct ena_adapter)); 1607 ena_dev = &adapter->ena_dev; 1608 1609 eth_dev->dev_ops = &ena_dev_ops; 1610 eth_dev->rx_pkt_burst = ð_ena_recv_pkts; 1611 eth_dev->tx_pkt_burst = ð_ena_xmit_pkts; 1612 eth_dev->tx_pkt_prepare = ð_ena_prep_pkts; 1613 adapter->rte_eth_dev_data = eth_dev->data; 1614 adapter->rte_dev = eth_dev; 1615 1616 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1617 return 0; 1618 1619 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1620 adapter->pdev = pci_dev; 1621 1622 PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d", 1623 pci_dev->addr.domain, 1624 pci_dev->addr.bus, 1625 pci_dev->addr.devid, 1626 pci_dev->addr.function); 1627 1628 intr_handle = &pci_dev->intr_handle; 1629 1630 adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr; 1631 adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr; 1632 1633 if (!adapter->regs) { 1634 PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)", 1635 ENA_REGS_BAR); 1636 return -ENXIO; 1637 } 1638 1639 ena_dev->reg_bar = adapter->regs; 1640 ena_dev->dmadev = adapter->pdev; 1641 1642 adapter->id_number = adapters_found; 1643 1644 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", 1645 adapter->id_number); 1646 1647 /* device specific initialization routine */ 1648 rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state); 1649 if (rc) { 1650 PMD_INIT_LOG(CRIT, "Failed to init ENA device"); 1651 goto err; 1652 } 1653 adapter->wd_state = wd_state; 1654 1655 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; 1656 adapter->num_queues = ena_calc_io_queue_num(ena_dev, 1657 &get_feat_ctx); 1658 1659 queue_size = ena_calc_queue_size(ena_dev, &tx_sgl_size, &get_feat_ctx); 1660 if (queue_size <= 0 || adapter->num_queues <= 0) { 1661 rc = -EFAULT; 1662 goto err_device_destroy; 1663 } 1664 1665 adapter->tx_ring_size = queue_size; 1666 adapter->rx_ring_size = queue_size; 1667 1668 adapter->max_tx_sgl_size = tx_sgl_size; 1669 1670 /* prepare ring structures */ 1671 ena_init_rings(adapter); 1672 1673 ena_config_debug_area(adapter); 1674 1675 /* Set max MTU for this device */ 1676 adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; 1677 1678 /* set device support for TSO */ 1679 adapter->tso4_supported = get_feat_ctx.offload.tx & 1680 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK; 1681 1682 /* Copy MAC address and point DPDK to it */ 1683 eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr; 1684 ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr, 1685 (struct ether_addr *)adapter->mac_addr); 1686 1687 /* 1688 * Pass the information to the rte_eth_dev_close() that it should also 1689 * release the private port resources. 
1690 */ 1691 eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 1692 1693 adapter->drv_stats = rte_zmalloc("adapter stats", 1694 sizeof(*adapter->drv_stats), 1695 RTE_CACHE_LINE_SIZE); 1696 if (!adapter->drv_stats) { 1697 RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n"); 1698 rc = -ENOMEM; 1699 goto err_delete_debug_area; 1700 } 1701 1702 rte_intr_callback_register(intr_handle, 1703 ena_interrupt_handler_rte, 1704 adapter); 1705 rte_intr_enable(intr_handle); 1706 ena_com_set_admin_polling_mode(ena_dev, false); 1707 ena_com_admin_aenq_enable(ena_dev); 1708 1709 if (adapters_found == 0) 1710 rte_timer_subsystem_init(); 1711 rte_timer_init(&adapter->timer_wd); 1712 1713 adapters_found++; 1714 adapter->state = ENA_ADAPTER_STATE_INIT; 1715 1716 return 0; 1717 1718 err_delete_debug_area: 1719 ena_com_delete_debug_area(ena_dev); 1720 1721 err_device_destroy: 1722 ena_com_delete_host_info(ena_dev); 1723 ena_com_admin_destroy(ena_dev); 1724 1725 err: 1726 return rc; 1727 } 1728 1729 static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) 1730 { 1731 struct ena_adapter *adapter = 1732 (struct ena_adapter *)(eth_dev->data->dev_private); 1733 1734 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1735 return 0; 1736 1737 if (adapter->state != ENA_ADAPTER_STATE_CLOSED) 1738 ena_close(eth_dev); 1739 1740 eth_dev->dev_ops = NULL; 1741 eth_dev->rx_pkt_burst = NULL; 1742 eth_dev->tx_pkt_burst = NULL; 1743 eth_dev->tx_pkt_prepare = NULL; 1744 1745 adapter->state = ENA_ADAPTER_STATE_FREE; 1746 1747 return 0; 1748 } 1749 1750 static int ena_dev_configure(struct rte_eth_dev *dev) 1751 { 1752 struct ena_adapter *adapter = 1753 (struct ena_adapter *)(dev->data->dev_private); 1754 1755 adapter->state = ENA_ADAPTER_STATE_CONFIG; 1756 1757 adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads; 1758 adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads; 1759 return 0; 1760 } 1761 1762 static void ena_init_rings(struct ena_adapter *adapter) 1763 { 1764 int i; 1765 1766 for (i = 0; i < adapter->num_queues; i++) { 1767 struct ena_ring *ring = &adapter->tx_ring[i]; 1768 1769 ring->configured = 0; 1770 ring->type = ENA_RING_TYPE_TX; 1771 ring->adapter = adapter; 1772 ring->id = i; 1773 ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type; 1774 ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size; 1775 ring->sgl_size = adapter->max_tx_sgl_size; 1776 } 1777 1778 for (i = 0; i < adapter->num_queues; i++) { 1779 struct ena_ring *ring = &adapter->rx_ring[i]; 1780 1781 ring->configured = 0; 1782 ring->type = ENA_RING_TYPE_RX; 1783 ring->adapter = adapter; 1784 ring->id = i; 1785 } 1786 } 1787 1788 static void ena_infos_get(struct rte_eth_dev *dev, 1789 struct rte_eth_dev_info *dev_info) 1790 { 1791 struct ena_adapter *adapter; 1792 struct ena_com_dev *ena_dev; 1793 struct ena_com_dev_get_features_ctx feat; 1794 uint64_t rx_feat = 0, tx_feat = 0; 1795 int rc = 0; 1796 1797 ena_assert_msg(dev->data != NULL, "Uninitialized device"); 1798 ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device"); 1799 adapter = (struct ena_adapter *)(dev->data->dev_private); 1800 1801 ena_dev = &adapter->ena_dev; 1802 ena_assert_msg(ena_dev != NULL, "Uninitialized device"); 1803 1804 dev_info->speed_capa = 1805 ETH_LINK_SPEED_1G | 1806 ETH_LINK_SPEED_2_5G | 1807 ETH_LINK_SPEED_5G | 1808 ETH_LINK_SPEED_10G | 1809 ETH_LINK_SPEED_25G | 1810 ETH_LINK_SPEED_40G | 1811 ETH_LINK_SPEED_50G | 1812 ETH_LINK_SPEED_100G; 1813 1814 /* Get supported features from HW */ 1815 rc = 
ena_com_get_dev_attr_feat(ena_dev, &feat); 1816 if (unlikely(rc)) { 1817 RTE_LOG(ERR, PMD, 1818 "Cannot get attribute for ena device rc= %d\n", rc); 1819 return; 1820 } 1821 1822 /* Set Tx & Rx features available for device */ 1823 if (feat.offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) 1824 tx_feat |= DEV_TX_OFFLOAD_TCP_TSO; 1825 1826 if (feat.offload.tx & 1827 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) 1828 tx_feat |= DEV_TX_OFFLOAD_IPV4_CKSUM | 1829 DEV_TX_OFFLOAD_UDP_CKSUM | 1830 DEV_TX_OFFLOAD_TCP_CKSUM; 1831 1832 if (feat.offload.rx_supported & 1833 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) 1834 rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM | 1835 DEV_RX_OFFLOAD_UDP_CKSUM | 1836 DEV_RX_OFFLOAD_TCP_CKSUM; 1837 1838 rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME; 1839 1840 /* Inform framework about available features */ 1841 dev_info->rx_offload_capa = rx_feat; 1842 dev_info->rx_queue_offload_capa = rx_feat; 1843 dev_info->tx_offload_capa = tx_feat; 1844 dev_info->tx_queue_offload_capa = tx_feat; 1845 1846 dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN; 1847 dev_info->max_rx_pktlen = adapter->max_mtu; 1848 dev_info->max_mac_addrs = 1; 1849 1850 dev_info->max_rx_queues = adapter->num_queues; 1851 dev_info->max_tx_queues = adapter->num_queues; 1852 dev_info->reta_size = ENA_RX_RSS_TABLE_SIZE; 1853 1854 adapter->tx_supported_offloads = tx_feat; 1855 adapter->rx_supported_offloads = rx_feat; 1856 1857 dev_info->rx_desc_lim.nb_max = ENA_MAX_RING_DESC; 1858 dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC; 1859 1860 dev_info->tx_desc_lim.nb_max = ENA_MAX_RING_DESC; 1861 dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC; 1862 dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 1863 feat.max_queues.max_packet_tx_descs); 1864 dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS, 1865 feat.max_queues.max_packet_tx_descs); 1866 } 1867 1868 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 1869 uint16_t nb_pkts) 1870 { 1871 struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); 1872 unsigned int ring_size = rx_ring->ring_size; 1873 unsigned int ring_mask = ring_size - 1; 1874 uint16_t next_to_clean = rx_ring->next_to_clean; 1875 uint16_t desc_in_use = 0; 1876 uint16_t req_id; 1877 unsigned int recv_idx = 0; 1878 struct rte_mbuf *mbuf = NULL; 1879 struct rte_mbuf *mbuf_head = NULL; 1880 struct rte_mbuf *mbuf_prev = NULL; 1881 struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info; 1882 unsigned int completed; 1883 1884 struct ena_com_rx_ctx ena_rx_ctx; 1885 int rc = 0; 1886 1887 /* Check adapter state */ 1888 if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { 1889 RTE_LOG(ALERT, PMD, 1890 "Trying to receive pkts while device is NOT running\n"); 1891 return 0; 1892 } 1893 1894 desc_in_use = rx_ring->next_to_use - next_to_clean; 1895 if (unlikely(nb_pkts > desc_in_use)) 1896 nb_pkts = desc_in_use; 1897 1898 for (completed = 0; completed < nb_pkts; completed++) { 1899 int segments = 0; 1900 1901 ena_rx_ctx.max_bufs = rx_ring->ring_size; 1902 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; 1903 ena_rx_ctx.descs = 0; 1904 /* receive packet context */ 1905 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, 1906 rx_ring->ena_com_io_sq, 1907 &ena_rx_ctx); 1908 if (unlikely(rc)) { 1909 RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc); 1910 rx_ring->adapter->trigger_reset = true; 1911 return 0; 1912 } 1913 1914 if (unlikely(ena_rx_ctx.descs == 0)) 1915 break; 1916 1917 while (segments < ena_rx_ctx.descs) { 1918 req_id = 
static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue);
	unsigned int ring_size = rx_ring->ring_size;
	unsigned int ring_mask = ring_size - 1;
	uint16_t next_to_clean = rx_ring->next_to_clean;
	uint16_t desc_in_use = 0;
	uint16_t req_id;
	unsigned int recv_idx = 0;
	struct rte_mbuf *mbuf = NULL;
	struct rte_mbuf *mbuf_head = NULL;
	struct rte_mbuf *mbuf_prev = NULL;
	struct rte_mbuf **rx_buff_info = rx_ring->rx_buffer_info;
	unsigned int completed;

	struct ena_com_rx_ctx ena_rx_ctx;
	int rc = 0;

	/* Check adapter state */
	if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
		RTE_LOG(ALERT, PMD,
			"Trying to receive pkts while device is NOT running\n");
		return 0;
	}

	desc_in_use = rx_ring->next_to_use - next_to_clean;
	if (unlikely(nb_pkts > desc_in_use))
		nb_pkts = desc_in_use;

	for (completed = 0; completed < nb_pkts; completed++) {
		int segments = 0;

		ena_rx_ctx.max_bufs = rx_ring->ring_size;
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.descs = 0;
		/* receive packet context */
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc)) {
			RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc);
			rx_ring->adapter->trigger_reset = true;
			return 0;
		}

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		while (segments < ena_rx_ctx.descs) {
			req_id = ena_rx_ctx.ena_bufs[segments].req_id;
			rc = validate_rx_req_id(rx_ring, req_id);
			if (unlikely(rc))
				break;

			mbuf = rx_buff_info[req_id];
			mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
			mbuf->refcnt = 1;
			mbuf->next = NULL;
			if (unlikely(segments == 0)) {
				mbuf->nb_segs = ena_rx_ctx.descs;
				mbuf->port = rx_ring->port_id;
				mbuf->pkt_len = 0;
				mbuf_head = mbuf;
			} else {
				/* for multi-segment pkts create mbuf chain */
				mbuf_prev->next = mbuf;
			}
			mbuf_head->pkt_len += mbuf->data_len;

			mbuf_prev = mbuf;
			rx_ring->empty_rx_reqs[next_to_clean & ring_mask] =
				req_id;
			segments++;
			next_to_clean++;
		}

		/* fill mbuf attributes if any */
		ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);
		mbuf_head->hash.rss = ena_rx_ctx.hash;

		/* pass the head mbuf to the DPDK application */
		rx_pkts[recv_idx] = mbuf_head;
		recv_idx++;
	}

	rx_ring->next_to_clean = next_to_clean;

	desc_in_use = desc_in_use - completed + 1;
	/* Burst refill to save doorbells, memory barriers, const interval */
	if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
		ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);

	return recv_idx;
}
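/*
 * Tx prepare handler (tx_pkt_prepare). For IPv4 packets it inspects the DF
 * bit and the requested offload flags, rejects offloads the device cannot
 * handle (setting rte_errno and returning the index of the offending mbuf),
 * and computes the pseudo-header checksums expected by the hardware via
 * rte_net_intel_cksum_flags_prepare().
 */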
static uint16_t
eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	int32_t ret;
	uint32_t i;
	struct rte_mbuf *m;
	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
	struct ipv4_hdr *ip_hdr;
	uint64_t ol_flags;
	uint16_t frag_field;

	for (i = 0; i != nb_pkts; i++) {
		m = tx_pkts[i];
		ol_flags = m->ol_flags;

		if (!(ol_flags & PKT_TX_IPV4))
			continue;

		/* If the L2 header length was not specified, assume it is
		 * the length of the Ethernet header.
		 */
		if (unlikely(m->l2_len == 0))
			m->l2_len = sizeof(struct ether_hdr);

		ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
						 m->l2_len);
		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);

		if ((frag_field & IPV4_HDR_DF_FLAG) != 0) {
			m->packet_type |= RTE_PTYPE_L4_NONFRAG;

			/* If the IPv4 header has the DF flag enabled and TSO
			 * support is disabled, the partial checksum should
			 * not be calculated.
			 */
			if (!tx_ring->adapter->tso4_supported)
				continue;
		}

		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
				(ol_flags & PKT_TX_L4_MASK) ==
				PKT_TX_SCTP_CKSUM) {
			rte_errno = ENOTSUP;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif

		/* In case we are supposed to TSO and have DF not set (DF=0),
		 * the hardware must be provided with the partial checksum;
		 * otherwise it will take care of the necessary calculations.
		 */

		ret = rte_net_intel_cksum_flags_prepare(m,
			ol_flags & ~PKT_TX_TCP_SEG);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}

static void ena_update_hints(struct ena_adapter *adapter,
			     struct ena_admin_ena_hw_hints *hints)
{
	if (hints->admin_completion_tx_timeout)
		adapter->ena_dev.admin_queue.completion_timeout =
			hints->admin_completion_tx_timeout * 1000;

	if (hints->mmio_read_timeout)
		/* convert to usec */
		adapter->ena_dev.mmio_read.reg_read_to =
			hints->mmio_read_timeout * 1000;

	if (hints->driver_watchdog_timeout) {
		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
		else
			/* Convert msecs to ticks */
			adapter->keep_alive_timeout =
				(hints->driver_watchdog_timeout *
				rte_get_timer_hz()) / 1000;
	}
}

static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
					struct rte_mbuf *mbuf)
{
	int num_segments, rc;

	num_segments = mbuf->nb_segs;

	if (likely(num_segments < tx_ring->sgl_size))
		return 0;

	rc = rte_pktmbuf_linearize(mbuf);
	if (unlikely(rc))
		RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n");

	return rc;
}
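/*
 * Tx burst handler. Each mbuf is linearized if it has more segments than the
 * device scatter-gather list allows, translated into an ena_com Tx context
 * (with the packet header pushed inline when the LLQ placement policy is
 * used) and posted to the submission queue. The doorbell is rung once per
 * burst; completed requests are then reaped from the completion queue,
 * freeing mbuf chains and recycling req_ids. Cleanup is capped at 1/8 of the
 * ring per call to keep the burst latency bounded.
 */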
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts)
{
	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
	uint16_t next_to_use = tx_ring->next_to_use;
	uint16_t next_to_clean = tx_ring->next_to_clean;
	struct rte_mbuf *mbuf;
	unsigned int ring_size = tx_ring->ring_size;
	unsigned int ring_mask = ring_size - 1;
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_tx_buffer *tx_info;
	struct ena_com_buf *ebuf;
	uint16_t rc, req_id, total_tx_descs = 0;
	uint16_t sent_idx = 0, empty_tx_reqs;
	int nb_hw_desc;

	/* Check adapter state */
	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
		RTE_LOG(ALERT, PMD,
			"Trying to xmit pkts while device is NOT running\n");
		return 0;
	}

	empty_tx_reqs = ring_size - (next_to_use - next_to_clean);
	if (nb_pkts > empty_tx_reqs)
		nb_pkts = empty_tx_reqs;

	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
		mbuf = tx_pkts[sent_idx];

		rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
		if (unlikely(rc))
			break;

		req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
		tx_info = &tx_ring->tx_buffer_info[req_id];
		tx_info->mbuf = mbuf;
		tx_info->num_of_bufs = 0;
		ebuf = tx_info->bufs;

		/* Prepare TX context */
		memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
		memset(&ena_tx_ctx.ena_meta, 0x0,
		       sizeof(struct ena_com_tx_meta));
		ena_tx_ctx.ena_bufs = ebuf;
		ena_tx_ctx.req_id = req_id;
		if (tx_ring->tx_mem_queue_type ==
				ENA_ADMIN_PLACEMENT_POLICY_DEV) {
			/* prepare the push buffer with
			 * virtual address of the data
			 */
			ena_tx_ctx.header_len =
				RTE_MIN(mbuf->data_len,
					tx_ring->tx_max_header_size);
			ena_tx_ctx.push_header =
				(void *)((char *)mbuf->buf_addr +
					 mbuf->data_off);
		} /* there's no else as we take advantage of memset zeroing */

		/* Set TX offloads flags, if applicable */
		ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads);

		if (unlikely(mbuf->ol_flags &
			     (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))
			rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);

		rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]);

		/* Process first segment taking into
		 * consideration pushed header
		 */
		if (mbuf->data_len > ena_tx_ctx.header_len) {
			ebuf->paddr = mbuf->buf_iova +
				      mbuf->data_off +
				      ena_tx_ctx.header_len;
			ebuf->len = mbuf->data_len - ena_tx_ctx.header_len;
			ebuf++;
			tx_info->num_of_bufs++;
		}

		while ((mbuf = mbuf->next) != NULL) {
			ebuf->paddr = mbuf->buf_iova + mbuf->data_off;
			ebuf->len = mbuf->data_len;
			ebuf++;
			tx_info->num_of_bufs++;
		}

		ena_tx_ctx.num_bufs = tx_info->num_of_bufs;

		/* Write data to device */
		rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq,
					&ena_tx_ctx, &nb_hw_desc);
		if (unlikely(rc))
			break;

		tx_info->tx_descs = nb_hw_desc;

		next_to_use++;
	}

	/* If there are ready packets to be xmitted... */
	if (sent_idx > 0) {
		/* ...let HW do its best :-) */
		rte_wmb();
		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);

		tx_ring->next_to_use = next_to_use;
	}

	/* Clear completed packets */
	while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		/* Get Tx info & store how many descs were processed */
		tx_info = &tx_ring->tx_buffer_info[req_id];
		total_tx_descs += tx_info->tx_descs;

		/* Free whole mbuf chain */
		mbuf = tx_info->mbuf;
		rte_pktmbuf_free(mbuf);
		tx_info->mbuf = NULL;

		/* Put back descriptor to the ring for reuse */
		tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
		next_to_clean++;

		/* If too many descs to clean, leave it for another run */
		if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
			break;
	}

	if (total_tx_descs > 0) {
		/* acknowledge completion of sent packets */
		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
		tx_ring->next_to_clean = next_to_clean;
	}

	return sent_idx;
}

/*********************************************************************
 *  PMD configuration
 *********************************************************************/
static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct ena_adapter), eth_ena_dev_init);
}

static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
}

static struct rte_pci_driver rte_ena_pmd = {
	.id_table = pci_id_ena_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_WC_ACTIVATE,
	.probe = eth_ena_pci_probe,
	.remove = eth_ena_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(ena_init_log)
{
	ena_logtype_init = rte_log_register("pmd.net.ena.init");
	if (ena_logtype_init >= 0)
		rte_log_set_level(ena_logtype_init, RTE_LOG_NOTICE);
	ena_logtype_driver = rte_log_register("pmd.net.ena.driver");
	if (ena_logtype_driver >= 0)
		rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE);
}
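/*
 * The log types registered above default to NOTICE but can be made more
 * verbose at run time. As a rough sketch (the exact EAL option syntax and
 * accepted level names may vary between DPDK releases, and <ena BDF> is a
 * placeholder for the device's PCI address), the driver logs could be
 * enabled with an EAL argument such as:
 *
 *     ./app -w <ena BDF> --log-level=pmd.net.ena.driver:debug ...
 */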
/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct rte_eth_dev *eth_dev;
	struct ena_adapter *adapter;
	struct ena_admin_aenq_link_change_desc *aenq_link_desc;
	uint32_t status;

	adapter = (struct ena_adapter *)adapter_data;
	aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
	eth_dev = adapter->rte_dev;

	status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
	adapter->link_status = status;

	ena_link_update(eth_dev, 0);
	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static void ena_notification(void *data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;
	struct ena_admin_ena_hw_hints *hints;

	if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
		RTE_LOG(WARNING, PMD, "Invalid group(%x) expected %x\n",
			aenq_e->aenq_common_desc.group,
			ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrom) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		RTE_LOG(ERR, PMD, "Invalid AENQ notification syndrome %d\n",
			aenq_e->aenq_common_desc.syndrom);
	}
}

static void ena_keep_alive(void *adapter_data,
			   __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;

	adapter->timestamp_wd = rte_get_timer_cycles();
}

/**
 * This handler will be called for an unknown event group or for events with
 * unimplemented handlers.
 **/
static void unimplemented_aenq_handler(__rte_unused void *data,
				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
{
	RTE_LOG(ERR, PMD, "Unknown event was received or event with "
			  "unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
	},
	.unimplemented_handler = unimplemented_aenq_handler
};
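/*
 * Note: the aenq_handlers table above is registered with ena_com during
 * admin queue setup earlier in this file, so when an asynchronous event
 * interrupt fires, ena_com walks the AENQ and dispatches each entry to the
 * handler matching its group; groups without a dedicated callback fall back
 * to unimplemented_aenq_handler.
 */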