Lines matching refs: rx_ring
(cross-reference hits from the DPDK ENA poll-mode driver, ena_ethdev.c; each entry gives the source line number, the matching code, and the enclosing function, with "argument" or "local" marking hits that are a parameter or a local variable)

187 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
383 static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id) in validate_rx_req_id() argument
385 if (likely(req_id < rx_ring->ring_size)) in validate_rx_req_id()
390 rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; in validate_rx_req_id()
391 rx_ring->adapter->trigger_reset = true; in validate_rx_req_id()
392 ++rx_ring->rx_stats.bad_req_id; in validate_rx_req_id()
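
The matches at 383-392 cover the whole bounds check, so the helper can be pieced back together almost verbatim. A minimal sketch, assuming the driver's internal headers are in scope; the -EFAULT return value and the omitted log call are assumptions, while the reset bookkeeping is exactly what the listing shows:

    /* Reconstructed from lines 383-392 above. */
    static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
    {
        if (likely(req_id < rx_ring->ring_size))
            return 0;

        /* Out-of-range req_id: flag the adapter for reset and count it. */
        rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
        rx_ring->adapter->trigger_reset = true;
        ++rx_ring->rx_stats.bad_req_id;

        return -EFAULT;  /* assumed; the listing does not show the return */
    }

Note that the helper only sets trigger_reset rather than resetting the device itself, presumably so the actual reset runs outside the Rx hot path.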
812 queues = adapter->rx_ring; in ena_queue_start_all()
1010 struct ena_stats_rx *rx_stats = &adapter->rx_ring[i].rx_stats; in ena_stats_get()
1207 queues = adapter->rx_ring; in ena_queue_stop_all()
1351 rxq = &adapter->rx_ring[queue_idx]; in ena_rx_queue_setup()
1999 struct ena_ring *ring = &adapter->rx_ring[i]; in ena_init_rings()
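
The control-path hits (812, 1010, 1207, 1351, 1999) all reach the queues through the same adapter-owned rx_ring array rather than through per-queue pointers. A minimal sketch of that access pattern; the function name and the nb_queues bound are hypothetical, and only the rx_ring/rx_stats field accesses come from the listing:

    /* Illustrative only: shows how start/stop/setup/init all index
     * adapter->rx_ring, as at lines 812, 1207, 1351 and 1999. */
    static void ena_visit_rx_rings(struct ena_adapter *adapter, int nb_queues)
    {
        struct ena_ring *queues = adapter->rx_ring;
        int i;

        for (i = 0; i < nb_queues; i++) {
            struct ena_ring *ring = &queues[i];
            /* Per-queue work goes here, e.g. aggregating ring->rx_stats
             * the way ena_stats_get() does at line 1010. */
            (void)ring;
        }
    }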
2094 static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring, in ena_rx_mbuf() argument
2113 if (unlikely(validate_rx_req_id(rx_ring, req_id))) in ena_rx_mbuf()
2116 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_mbuf()
2126 mbuf_head->port = rx_ring->port_id; in ena_rx_mbuf()
2131 rx_ring->empty_rx_reqs[ntc] = req_id; in ena_rx_mbuf()
2132 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); in ena_rx_mbuf()
2138 if (unlikely(validate_rx_req_id(rx_ring, req_id))) { in ena_rx_mbuf()
2143 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_mbuf()
2153 rc = ena_add_single_rx_desc(rx_ring->ena_com_io_sq, in ena_rx_mbuf()
2185 rx_ring->empty_rx_reqs[ntc] = req_id; in ena_rx_mbuf()
2186 ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask); in ena_rx_mbuf()
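
Lines 2094-2186 outline all of ena_rx_mbuf(): validate the head descriptor's req_id, promote its buffer to the mbuf head, then walk the remaining descriptors, chaining each segment and recycling every consumed req_id into empty_rx_reqs via the masked ring index. A sketch under those assumptions; the parameters past rx_ring, the mbuf field inside the rx_buffer_info entry, and the mid-chain error handling are inferred rather than quoted, and length/checksum bookkeeping is omitted:

    static struct rte_mbuf *ena_rx_mbuf(struct ena_ring *rx_ring,
                                        struct ena_com_rx_buf_info *ena_bufs,
                                        uint32_t descs,
                                        uint16_t *next_to_clean)
    {
        struct rte_mbuf *mbuf_head, *mbuf_prev;
        struct ena_rx_buffer *rx_info;        /* struct name assumed */
        uint16_t ntc = *next_to_clean;
        uint16_t req_id = ena_bufs[0].req_id;
        uint32_t buf;

        if (unlikely(validate_rx_req_id(rx_ring, req_id)))
            return NULL;

        rx_info = &rx_ring->rx_buffer_info[req_id];      /* line 2116 */
        mbuf_head = rx_info->mbuf;                       /* field name assumed */
        mbuf_head->port = rx_ring->port_id;              /* line 2126 */

        /* Recycle the head descriptor's slot (lines 2131-2132). */
        rx_ring->empty_rx_reqs[ntc] = req_id;
        ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);

        mbuf_prev = mbuf_head;
        for (buf = 1; buf < descs; buf++) {
            req_id = ena_bufs[buf].req_id;
            if (unlikely(validate_rx_req_id(rx_ring, req_id))) {
                rte_pktmbuf_free(mbuf_head);             /* error path assumed */
                return NULL;
            }

            rx_info = &rx_ring->rx_buffer_info[req_id];  /* line 2143 */
            mbuf_prev->next = rx_info->mbuf;             /* chain the segment */
            mbuf_head->nb_segs++;   /* assumes head arrives with nb_segs == 1 */
            mbuf_prev = rx_info->mbuf;

            /* Recycle this descriptor's slot too (lines 2185-2186). */
            rx_ring->empty_rx_reqs[ntc] = req_id;
            ntc = ENA_IDX_NEXT_MASKED(ntc, rx_ring->size_mask);
        }

        *next_to_clean = ntc;
        return mbuf_head;
    }

The match at line 2153 (ena_add_single_rx_desc) suggests the real function also pushes zero-length segments straight back onto the submission queue instead of chaining them; that branch is left out of the sketch.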
2197 struct ena_ring *rx_ring = (struct ena_ring *)(rx_queue); in eth_ena_recv_pkts() local
2200 uint16_t next_to_clean = rx_ring->next_to_clean; in eth_ena_recv_pkts()
2208 if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) { in eth_ena_recv_pkts()
2214 descs_in_use = rx_ring->ring_size - in eth_ena_recv_pkts()
2215 ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1; in eth_ena_recv_pkts()
2219 ena_rx_ctx.max_bufs = rx_ring->sgl_size; in eth_ena_recv_pkts()
2220 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; in eth_ena_recv_pkts()
2224 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, in eth_ena_recv_pkts()
2225 rx_ring->ena_com_io_sq, in eth_ena_recv_pkts()
2229 rx_ring->adapter->reset_reason = in eth_ena_recv_pkts()
2231 rx_ring->adapter->trigger_reset = true; in eth_ena_recv_pkts()
2232 ++rx_ring->rx_stats.bad_desc_num; in eth_ena_recv_pkts()
2236 mbuf = ena_rx_mbuf(rx_ring, in eth_ena_recv_pkts()
2243 rx_ring->empty_rx_reqs[next_to_clean] = in eth_ena_recv_pkts()
2244 rx_ring->ena_bufs[i].req_id; in eth_ena_recv_pkts()
2246 next_to_clean, rx_ring->size_mask); in eth_ena_recv_pkts()
2256 rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors); in eth_ena_recv_pkts()
2257 ++rx_ring->rx_stats.bad_csum; in eth_ena_recv_pkts()
2263 rx_ring->rx_stats.bytes += mbuf->pkt_len; in eth_ena_recv_pkts()
2266 rx_ring->rx_stats.cnt += completed; in eth_ena_recv_pkts()
2267 rx_ring->next_to_clean = next_to_clean; in eth_ena_recv_pkts()
2269 free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq); in eth_ena_recv_pkts()
2271 RTE_MIN(rx_ring->ring_size / ENA_REFILL_THRESH_DIVIDER, in eth_ena_recv_pkts()
2276 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); in eth_ena_recv_pkts()
2277 ena_populate_rx_queue(rx_ring, free_queue_entries); in eth_ena_recv_pkts()
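
The burst-receive matches (2197-2277) give the full shape of eth_ena_recv_pkts(): bail out unless the adapter is running, clamp the burst to the descriptors actually in use, pop completed packets with ena_com_rx_pkt(), hand each descriptor group to ena_rx_mbuf(), and refill in bulk once enough submission-queue entries have freed up. A condensed sketch; ENA_REGS_RESET_TOO_MANY_RX_DESCS, ENA_REFILL_THRESH_PACKET and the checksum handling are assumptions, the rest follows the listed lines:

    static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                                      uint16_t nb_pkts)
    {
        struct ena_ring *rx_ring = rx_queue;
        uint16_t next_to_clean = rx_ring->next_to_clean;   /* line 2200 */
        uint16_t descs_in_use, free_queue_entries;
        uint16_t completed;
        struct rte_mbuf *mbuf;

        if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING))
            return 0;

        /* Never ask for more packets than there are filled descriptors
         * (lines 2214-2215). */
        descs_in_use = rx_ring->ring_size -
            ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
        nb_pkts = RTE_MIN(descs_in_use, nb_pkts);

        for (completed = 0; completed < nb_pkts; completed++) {
            struct ena_com_rx_ctx ena_rx_ctx;
            uint32_t i;
            int rc;

            ena_rx_ctx.max_bufs = rx_ring->sgl_size;       /* line 2219 */
            ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;       /* line 2220 */
            ena_rx_ctx.descs = 0;

            rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
                                rx_ring->ena_com_io_sq, &ena_rx_ctx);
            if (unlikely(rc)) {
                rx_ring->adapter->reset_reason =
                    ENA_REGS_RESET_TOO_MANY_RX_DESCS;      /* constant assumed */
                rx_ring->adapter->trigger_reset = true;
                ++rx_ring->rx_stats.bad_desc_num;
                return 0;
            }
            if (ena_rx_ctx.descs == 0)
                break;                        /* completion queue drained */

            mbuf = ena_rx_mbuf(rx_ring, ena_rx_ctx.ena_bufs,
                               ena_rx_ctx.descs, &next_to_clean);
            if (unlikely(mbuf == NULL)) {
                /* Return the req_ids so the slots can be refilled
                 * (lines 2243-2246). */
                for (i = 0; i < ena_rx_ctx.descs; i++) {
                    rx_ring->empty_rx_reqs[next_to_clean] =
                        rx_ring->ena_bufs[i].req_id;
                    next_to_clean = ENA_IDX_NEXT_MASKED(next_to_clean,
                                                        rx_ring->size_mask);
                }
                break;
            }

            /* Checksum-error accounting (lines 2256-2257) omitted here:
             * a bad csum bumps drv_stats->ierrors and rx_stats.bad_csum. */
            rx_ring->rx_stats.bytes += mbuf->pkt_len;      /* line 2263 */
            rx_pkts[completed] = mbuf;
        }

        rx_ring->rx_stats.cnt += completed;                /* line 2266 */
        rx_ring->next_to_clean = next_to_clean;

        /* Bulk refill once enough SQ entries are free (lines 2269-2277). */
        free_queue_entries = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
        if (free_queue_entries > RTE_MIN(rx_ring->ring_size /
                                         ENA_REFILL_THRESH_DIVIDER,
                                         ENA_REFILL_THRESH_PACKET)) {
            ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
            ena_populate_rx_queue(rx_ring, free_queue_entries);
        }

        return completed;
    }

Refilling only past a threshold amortizes descriptor writes and the doorbell over many packets instead of re-arming one slot per received packet.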
2753 stats_begin = &adapter->rx_ring[i].rx_stats; in ena_xstats_get()
2820 &adapter->rx_ring[qid].rx_stats + id); in ena_xstats_get_by_id()
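
The xstats matches (2753, 2820) imply that struct ena_stats_rx is laid out as consecutive uint64_t counters, so a single counter can be fetched by casting the struct to a uint64_t array and indexing by the stat id, which is the expression shape visible at line 2820. A sketch; the function name and parameters are illustrative:

    /* Fetch one per-queue Rx counter by its index into ena_stats_rx.
     * Relies on the struct containing nothing but uint64_t counters. */
    static uint64_t ena_rx_xstat_sketch(struct ena_adapter *adapter,
                                        unsigned int qid, unsigned int id)
    {
        struct ena_stats_rx *rx_stats = &adapter->rx_ring[qid].rx_stats;

        return *((const uint64_t *)rx_stats + id);
    }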