Lines Matching refs:tx_ring
168 static void ena_tx_map_mbuf(struct ena_ring *tx_ring,
173 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf);
174 static void ena_tx_cleanup(struct ena_ring *tx_ring);
397 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) in validate_tx_req_id() argument
401 if (likely(req_id < tx_ring->ring_size)) { in validate_tx_req_id()
402 tx_info = &tx_ring->tx_buffer_info[req_id]; in validate_tx_req_id()
413 ++tx_ring->tx_stats.bad_req_id; in validate_tx_req_id()
414 tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; in validate_tx_req_id()
415 tx_ring->adapter->trigger_reset = true; in validate_tx_req_id()
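
The fragment at source lines 397-415 is the Tx completion sanity check: a req_id returned by the device must fall inside the ring before tx_buffer_info[] is dereferenced, and a bad id bumps tx_stats.bad_req_id and schedules a device reset with reason ENA_REGS_RESET_INV_TX_REQ_ID. A minimal, self-contained sketch of that pattern follows; the struct layout and the in-flight-mbuf check are simplified stand-ins, not the driver's real definitions:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct sketch_tx_info { void *mbuf; };               /* stand-in for the per-request buffer info */
struct sketch_adapter { int reset_reason; bool trigger_reset; };
struct sketch_ring {
	uint16_t ring_size;
	struct sketch_tx_info *tx_buffer_info;
	uint64_t bad_req_id;                          /* stand-in for tx_stats.bad_req_id */
	struct sketch_adapter *adapter;
};

#define SKETCH_RESET_INV_TX_REQ_ID 1                  /* stand-in for ENA_REGS_RESET_INV_TX_REQ_ID */

static int
sketch_validate_tx_req_id(struct sketch_ring *tx_ring, uint16_t req_id)
{
	/* In range and pointing at an in-flight buffer: the completion is sane.
	 * (Only the range check and the tx_buffer_info lookup are visible in
	 * the listing; the mbuf test is an assumption about what the lookup
	 * is for.) */
	if (req_id < tx_ring->ring_size &&
	    tx_ring->tx_buffer_info[req_id].mbuf != NULL)
		return 0;

	/* Anything else is a bogus completion: count it and ask the control
	 * path for a device reset (source lines 413-415). */
	++tx_ring->bad_req_id;
	tx_ring->adapter->reset_reason = SKETCH_RESET_INV_TX_REQ_ID;
	tx_ring->adapter->trigger_reset = true;
	return -1;
}
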
815 queues = adapter->tx_ring; in ena_queue_start_all()
1021 struct ena_stats_tx *tx_stats = &adapter->tx_ring[i].tx_stats; in ena_stats_get()
1210 queues = adapter->tx_ring; in ena_queue_stop_all()
1263 txq = &adapter->tx_ring[queue_idx]; in ena_tx_queue_setup()
1986 struct ena_ring *ring = &adapter->tx_ring[i]; in ena_init_rings()
2290 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); in eth_ena_prep_pkts() local
2318 if (!tx_ring->adapter->offloads.tso4_supported) in eth_ena_prep_pkts()
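
Source lines 2290 and 2318 are from the Tx prepare callback: it recovers the ring from the opaque tx_queue pointer and rejects segmentation-offload requests when the adapter did not report tso4_supported. A hedged sketch of that per-mbuf check, with the support flag passed in as a plain bool and the pre-21.11 PKT_TX_TCP_SEG flag name assumed:

#include <stdbool.h>
#include <errno.h>
#include <rte_mbuf.h>
#include <rte_errno.h>

static uint16_t
sketch_prep_pkts(struct rte_mbuf **tx_pkts, uint16_t nb_pkts, bool tso4_supported)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		/* A packet asking for TSO on a port that never advertised it
		 * cannot be fixed up here; stop and report it. */
		if ((tx_pkts[i]->ol_flags & PKT_TX_TCP_SEG) && !tso4_supported) {
			rte_errno = ENOTSUP;
			return i;	/* packets before this one are fine to send */
		}
	}

	return nb_pkts;
}
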
2376 static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring, in ena_check_and_linearize_mbuf() argument
2382 ena_dev = &tx_ring->adapter->ena_dev; in ena_check_and_linearize_mbuf()
2386 if (likely(num_segments < tx_ring->sgl_size)) in ena_check_and_linearize_mbuf()
2390 (num_segments == tx_ring->sgl_size) && in ena_check_and_linearize_mbuf()
2391 (header_len < tx_ring->tx_max_header_size)) in ena_check_and_linearize_mbuf()
2394 ++tx_ring->tx_stats.linearize; in ena_check_and_linearize_mbuf()
2398 rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors); in ena_check_and_linearize_mbuf()
2399 ++tx_ring->tx_stats.linearize_failed; in ena_check_and_linearize_mbuf()
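
The ena_check_and_linearize_mbuf() fragment above decides when a chained mbuf must be collapsed before it can be posted: a chain shorter than sgl_size passes as-is, what appears to be the LLQ case tolerates exactly sgl_size segments when the header fits under tx_max_header_size, and everything else is linearized, with tx_stats.linearize / linearize_failed (and drv_stats->ierrors) tracking the outcome. A simplified sketch with those ring fields pulled out as parameters and plain counters in place of the driver's stats; taking header_len from the first segment is an assumption, since the listing does not show how it is derived:

#include <stdbool.h>
#include <stdint.h>
#include <rte_mbuf.h>

static int
sketch_check_and_linearize(struct rte_mbuf *mbuf, uint16_t sgl_size,
			   uint16_t tx_max_header_size, bool llq_placement,
			   uint64_t *linearize_cnt, uint64_t *linearize_failed_cnt)
{
	int num_segments = mbuf->nb_segs;
	int header_len = mbuf->data_len;	/* assumed: first-segment length */

	/* Fits in the SQ scatter-gather list as-is: nothing to do. */
	if (num_segments < sgl_size)
		return 0;

	/* LLQ corner case: exactly sgl_size segments is still fine when the
	 * header is pushed separately and fits the push buffer. */
	if (llq_placement && num_segments == sgl_size &&
	    header_len < tx_max_header_size)
		return 0;

	/* Otherwise collapse the chain into one contiguous segment. */
	(*linearize_cnt)++;
	if (rte_pktmbuf_linearize(mbuf) != 0) {
		(*linearize_failed_cnt)++;
		return -1;
	}
	return 0;
}
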
2406 static void ena_tx_map_mbuf(struct ena_ring *tx_ring, in ena_tx_map_mbuf() argument
2421 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { in ena_tx_map_mbuf()
2428 push_len = RTE_MIN(mbuf->pkt_len, tx_ring->tx_max_header_size); in ena_tx_map_mbuf()
2441 tx_ring->push_buf_intermediate_buf); in ena_tx_map_mbuf()
2442 *push_header = tx_ring->push_buf_intermediate_buf; in ena_tx_map_mbuf()
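
Source lines 2421-2442 are the device-placement (LLQ) branch of ena_tx_map_mbuf(): up to tx_max_header_size bytes of the packet are pushed to the device inline, and the copy into push_buf_intermediate_buf on lines 2441-2442 presumably handles the case where that header does not sit contiguously in the first segment. A sketch of just that branch, assuming the bounce buffer is handed in by the caller:

#include <stdint.h>
#include <rte_common.h>
#include <rte_mbuf.h>

static void
sketch_map_push_header(struct rte_mbuf *mbuf, uint16_t tx_max_header_size,
		       uint8_t *intermediate_buf, void **push_header,
		       uint16_t *header_len)
{
	uint16_t push_len = RTE_MIN(mbuf->pkt_len, (uint32_t)tx_max_header_size);

	*header_len = push_len;
	if (push_len <= mbuf->data_len) {
		/* Header lives entirely in the first segment: push it in place. */
		*push_header = rte_pktmbuf_mtod(mbuf, uint8_t *);
	} else {
		/* Header straddles segments: gather it into a bounce buffer
		 * first, then push that (what the intermediate buffer on
		 * source line 2441 appears to be for). */
		rte_pktmbuf_read(mbuf, 0, push_len, intermediate_buf);
		*push_header = intermediate_buf;
	}
}
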
2479 static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) in ena_xmit_mbuf() argument
2490 rc = ena_check_and_linearize_mbuf(tx_ring, mbuf); in ena_xmit_mbuf()
2494 next_to_use = tx_ring->next_to_use; in ena_xmit_mbuf()
2496 req_id = tx_ring->empty_tx_reqs[next_to_use]; in ena_xmit_mbuf()
2497 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_xmit_mbuf()
2500 ena_tx_map_mbuf(tx_ring, tx_info, mbuf, &push_header, &header_len); in ena_xmit_mbuf()
2509 ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads, in ena_xmit_mbuf()
2510 tx_ring->disable_meta_caching); in ena_xmit_mbuf()
2512 if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, in ena_xmit_mbuf()
2516 tx_ring->id); in ena_xmit_mbuf()
2517 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); in ena_xmit_mbuf()
2521 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, in ena_xmit_mbuf()
2524 ++tx_ring->tx_stats.prepare_ctx_err; in ena_xmit_mbuf()
2530 tx_ring->tx_stats.cnt++; in ena_xmit_mbuf()
2531 tx_ring->tx_stats.bytes += mbuf->pkt_len; in ena_xmit_mbuf()
2533 tx_ring->next_to_use = ENA_IDX_NEXT_MASKED(next_to_use, in ena_xmit_mbuf()
2534 tx_ring->size_mask); in ena_xmit_mbuf()
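
ena_xmit_mbuf() (source lines 2479-2534) is the per-packet path: linearize if needed, pull a free request id out of empty_tx_reqs[] at next_to_use, remember the mbuf in tx_buffer_info[req_id], build the Tx context with the ring's offloads and disable_meta_caching, ring the doorbell early if ena_com_is_doorbell_needed() says so, post via ena_com_prepare_tx() (counting prepare_ctx_err on failure), update cnt/bytes, and advance next_to_use under the ring mask. The sketch below covers only the request-id and index bookkeeping; the struct is a simplified stand-in and the ena_com calls are omitted:

#include <stdint.h>
#include <rte_mbuf.h>

struct sketch_txq {
	uint16_t next_to_use;
	uint16_t size_mask;               /* ring_size - 1, ring size a power of two */
	uint16_t *empty_tx_reqs;          /* pool of currently unused request ids */
	struct rte_mbuf **tx_buffer_info; /* req_id -> in-flight mbuf */
};

/* What ENA_IDX_NEXT_MASKED on source lines 2533-2534 appears to do. */
static uint16_t
sketch_idx_next_masked(uint16_t idx, uint16_t mask)
{
	return (idx + 1) & mask;
}

/* Reserve a request id for 'mbuf' and advance the producer index. */
static uint16_t
sketch_reserve_req_id(struct sketch_txq *txq, struct rte_mbuf *mbuf)
{
	uint16_t next_to_use = txq->next_to_use;
	uint16_t req_id = txq->empty_tx_reqs[next_to_use];

	txq->tx_buffer_info[req_id] = mbuf;
	txq->next_to_use = sketch_idx_next_masked(next_to_use, txq->size_mask);
	return req_id;
}
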
2539 static void ena_tx_cleanup(struct ena_ring *tx_ring) in ena_tx_cleanup() argument
2543 uint16_t next_to_clean = tx_ring->next_to_clean; in ena_tx_cleanup()
2545 cleanup_budget = RTE_MIN(tx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER, in ena_tx_cleanup()
2553 if (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) != 0) in ena_tx_cleanup()
2556 if (unlikely(validate_tx_req_id(tx_ring, req_id) != 0)) in ena_tx_cleanup()
2560 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_tx_cleanup()
2566 tx_ring->empty_tx_reqs[next_to_clean] = req_id; in ena_tx_cleanup()
2572 tx_ring->size_mask); in ena_tx_cleanup()
2577 tx_ring->next_to_clean = next_to_clean; in ena_tx_cleanup()
2578 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); in ena_tx_cleanup()
2579 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); in ena_tx_cleanup()
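
ena_tx_cleanup() (source lines 2539-2579) is the consumer side: completed request ids are read from the CQ with ena_com_tx_comp_req_id_get(), validated, their mbufs released, and the ids returned to empty_tx_reqs[] at next_to_clean before the batch is acknowledged with ena_com_comp_ack() and the CQ head update. The sketch keeps only the recycling loop; the ids are assumed to have already been fetched and validated, and the stand-in struct mirrors the one in the previous sketch:

#include <stdint.h>
#include <rte_mbuf.h>

struct sketch_txq_cons {                  /* consumer-side view of the same stand-in ring */
	uint16_t next_to_clean;
	uint16_t size_mask;
	uint16_t *empty_tx_reqs;
	struct rte_mbuf **tx_buffer_info;
};

static uint16_t
sketch_tx_cleanup(struct sketch_txq_cons *txq, const uint16_t *completed_ids,
		  uint16_t n)
{
	uint16_t i, next_to_clean = txq->next_to_clean;

	for (i = 0; i < n; i++) {
		uint16_t req_id = completed_ids[i];

		/* Release the mbuf pinned for this request... */
		rte_pktmbuf_free(txq->tx_buffer_info[req_id]);
		txq->tx_buffer_info[req_id] = NULL;

		/* ...and hand the id back to the free pool (source line 2566). */
		txq->empty_tx_reqs[next_to_clean] = req_id;
		next_to_clean = (next_to_clean + 1) & txq->size_mask;
	}

	txq->next_to_clean = next_to_clean;
	return n;	/* descriptors to acknowledge, cf. ena_com_comp_ack() */
}
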
2586 struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); in eth_ena_xmit_pkts() local
2590 if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { in eth_ena_xmit_pkts()
2596 nb_pkts = RTE_MIN(ena_com_free_q_entries(tx_ring->ena_com_io_sq), in eth_ena_xmit_pkts()
2600 if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx])) in eth_ena_xmit_pkts()
2604 tx_ring->size_mask)]); in eth_ena_xmit_pkts()
2607 tx_ring->tx_stats.available_desc = in eth_ena_xmit_pkts()
2608 ena_com_free_q_entries(tx_ring->ena_com_io_sq); in eth_ena_xmit_pkts()
2613 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); in eth_ena_xmit_pkts()
2614 tx_ring->tx_stats.doorbells++; in eth_ena_xmit_pkts()
2617 ena_tx_cleanup(tx_ring); in eth_ena_xmit_pkts()
2619 tx_ring->tx_stats.available_desc = in eth_ena_xmit_pkts()
2620 ena_com_free_q_entries(tx_ring->ena_com_io_sq); in eth_ena_xmit_pkts()
2621 tx_ring->tx_stats.tx_poll++; in eth_ena_xmit_pkts()
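
eth_ena_xmit_pkts() (source lines 2586-2621) strings the pieces together: check the adapter is running, clamp the burst to the free SQ entries, queue packets one by one (prefetching what looks like the next mbuf via the masked index), ring the doorbell once for the whole burst, refresh available_desc and the doorbells/tx_poll counters, then run the cleanup pass. A minimal sketch of that burst shape; free_sq_entries/queue_one/ring_doorbell are hypothetical callbacks standing in for ena_com_free_q_entries(), ena_xmit_mbuf() and ena_com_write_sq_doorbell():

#include <stdint.h>
#include <rte_common.h>
#include <rte_mbuf.h>

static uint16_t
sketch_xmit_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts,
		  uint16_t (*free_sq_entries)(void *txq),
		  int (*queue_one)(void *txq, struct rte_mbuf *mbuf),
		  void (*ring_doorbell)(void *txq))
{
	uint16_t sent_idx;

	/* Never try to queue more than the SQ currently has room for
	 * (source line 2596). */
	nb_pkts = RTE_MIN(free_sq_entries(txq), nb_pkts);

	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
		if (queue_one(txq, tx_pkts[sent_idx]) != 0)
			break;
	}

	/* One doorbell covers the whole burst (source lines 2613-2614). */
	if (sent_idx > 0)
		ring_doorbell(txq);

	return sent_idx;
}
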
2764 stats_begin = &adapter->tx_ring[i].rx_stats; in ena_xstats_get()
2831 &adapter->tx_ring[qid].tx_stats + id); in ena_xstats_get_by_id()
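
Source lines 2764 and 2831 are the xstats paths: per-queue counters are read by treating a ring's stats struct as a flat array of uint64_t and offsetting into it, which is why line 2831 adds the stat id to the casted struct address. A sketch of that indexing trick; the member names come from the counters visible in this listing, but their order here is illustrative, not the driver's actual ena_stats_tx layout:

#include <stdint.h>

struct sketch_tx_stats {	/* counters named after those in the listing; order is assumed */
	uint64_t cnt;
	uint64_t bytes;
	uint64_t prepare_ctx_err;
	uint64_t linearize;
	uint64_t linearize_failed;
	uint64_t tx_poll;
	uint64_t doorbells;
	uint64_t bad_req_id;
	uint64_t available_desc;
};

static uint64_t
sketch_tx_stat_by_id(const struct sketch_tx_stats *stats, uint64_t id)
{
	/* Works only while every member is a uint64_t with no padding,
	 * so member index == counter id. */
	return *((const uint64_t *)stats + id);
}
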