
Searched refs:rx (Results 1 – 25 of 146) sorted by relevance


/dpdk/drivers/net/cnxk/
meson.build 37 'rx/cn9k/rx_0_15.c',
38 'rx/cn9k/rx_16_31.c',
39 'rx/cn9k/rx_32_47.c',
40 'rx/cn9k/rx_48_63.c',
41 'rx/cn9k/rx_64_79.c',
42 'rx/cn9k/rx_80_95.c',
43 'rx/cn9k/rx_96_111.c',
44 'rx/cn9k/rx_112_127.c',
53 'rx/cn9k/rx_0_15_vec.c',
116 'rx/cn10k/rx_0_15.c',
[all …]
cn9k_rx.h 127 sg = *(const uint64_t *)(rx + 1); in nix_cqe_xtract_mseg()
142 eol = ((const rte_iova_t *)(rx + 1) + in nix_cqe_xtract_mseg()
230 m_len = rx->cn9k.pkt_lenm1 + 1; in nix_rx_sec_mbuf_err_update()
266 const union nix_rx_parse_u *rx = in nix_rx_sec_mbuf_update() local
270 uint8_t lcptr = rx->lcptr; in nix_rx_sec_mbuf_update()
330 const union nix_rx_parse_u *rx = in cn9k_nix_cqe_to_mbuf() local
332 uint16_t len = rx->cn9k.pkt_lenm1 + 1; in cn9k_nix_cqe_to_mbuf()
377 if (rx->cn9k.vtag0_gone) { in cn9k_nix_cqe_to_mbuf()
379 mbuf->vlan_tci = rx->cn9k.vtag0_tci; in cn9k_nix_cqe_to_mbuf()
381 if (rx->cn9k.vtag1_gone) { in cn9k_nix_cqe_to_mbuf()
[all …]
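
The cn9k_rx.h hits above all follow one pattern: read fields out of the NIX completion-queue parse area and copy them into the rte_mbuf (packet length, stripped VLAN TCI). Below is a minimal sketch of that pattern; the field names (pkt_lenm1, vtag0_gone, vtag0_tci) come from the snippets, but the struct layouts here are stand-ins, not the driver's real definitions.

    /* Sketch only: mirrors the CQE-to-mbuf copy pattern seen above.
     * The parse-union and mbuf layouts are assumptions, not the driver's. */
    #include <stdint.h>

    struct parse_sketch {
            uint16_t pkt_lenm1;   /* packet length, stored minus one */
            uint16_t vtag0_tci;   /* TCI of the stripped outer VLAN */
            uint8_t  vtag0_gone;  /* set when hardware stripped the outer VLAN */
    };

    struct mbuf_sketch {
            uint16_t pkt_len;
            uint16_t vlan_tci;
            int      vlan_stripped;
    };

    static void cqe_to_mbuf_sketch(const struct parse_sketch *rx,
                                   struct mbuf_sketch *m)
    {
            m->pkt_len = rx->pkt_lenm1 + 1;      /* undo the minus-one encoding */
            if (rx->vtag0_gone) {                /* VLAN stripped by hardware */
                    m->vlan_stripped = 1;
                    m->vlan_tci = rx->vtag0_tci;
            }
    }
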
/dpdk/drivers/common/cnxk/
roc_nix_debug.c 765 rx->express); in roc_nix_cqe_dump()
767 rx->errlev, rx->errcode); in roc_nix_cqe_dump()
769 rx->latype, rx->lbtype, rx->lctype); in roc_nix_cqe_dump()
771 rx->ldtype, rx->letype, rx->lftype); in roc_nix_cqe_dump()
776 rx->l2m, rx->l2b, rx->l3m, rx->l3b); in roc_nix_cqe_dump()
786 rx->laflags, rx->lbflags, rx->lcflags); in roc_nix_cqe_dump()
788 rx->ldflags, rx->leflags, rx->lfflags); in roc_nix_cqe_dump()
792 rx->eoh_ptr, rx->wqe_aura, rx->pb_aura); in roc_nix_cqe_dump()
796 rx->lbptr, rx->lcptr); in roc_nix_cqe_dump()
798 rx->leptr, rx->lfptr); in roc_nix_cqe_dump()
[all …]
roc_nix_vlan.c 129 vtag_cfg->rx.capture_vtag = 1; /* Always capture */ in roc_nix_vlan_strip_vtag_ena_dis()
130 vtag_cfg->rx.vtag_type = 0; /* Use index 0 */ in roc_nix_vlan_strip_vtag_ena_dis()
133 vtag_cfg->rx.strip_vtag = 1; in roc_nix_vlan_strip_vtag_ena_dis()
135 vtag_cfg->rx.strip_vtag = 0; in roc_nix_vlan_strip_vtag_ena_dis()
/dpdk/examples/distributor/
main.c 49 } rx __rte_cache_aligned;
257 app_stats.rx.rx_pkts += nb_rx; in lcore_rx()
291 app_stats.rx.enqueued_pkts += sent; in lcore_rx()
494 (app_stats.rx.rx_pkts - in print_stats()
497 (app_stats.rx.returned_pkts - in print_stats()
500 (app_stats.rx.enqueued_pkts - in print_stats()
503 (app_stats.rx.enqdrop_pkts - in print_stats()
540 prev_app_stats.rx.rx_pkts = app_stats.rx.rx_pkts; in print_stats()
541 prev_app_stats.rx.returned_pkts = app_stats.rx.returned_pkts; in print_stats()
542 prev_app_stats.rx.enqueued_pkts = app_stats.rx.enqueued_pkts; in print_stats()
[all …]
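
The distributor example keeps a cache-aligned rx statistics block and, in print_stats(), derives per-interval rates by subtracting the previously saved counters before taking a new snapshot. A self-contained sketch of that delta pattern follows; the field names echo the snippets (rx_pkts, enqueued_pkts), while the printing itself is illustrative.

    /* Sketch: per-interval rate from cumulative counters, as in print_stats(). */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rx_stats_sketch {
            uint64_t rx_pkts;
            uint64_t enqueued_pkts;
    } __attribute__((aligned(64)));          /* stand-in for __rte_cache_aligned */

    static struct rx_stats_sketch app_stats, prev_app_stats;

    static void print_stats_sketch(void)
    {
            printf("rx/s: %" PRIu64 "  enq/s: %" PRIu64 "\n",
                   app_stats.rx_pkts - prev_app_stats.rx_pkts,
                   app_stats.enqueued_pkts - prev_app_stats.enqueued_pkts);
            prev_app_stats = app_stats;      /* remember snapshot for next interval */
    }
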
/dpdk/examples/ip_pipeline/
link.c 115 (params->rx.n_queues == 0) || in link_create()
116 (params->rx.queue_size == 0) || in link_create()
135 mempool = mempool_find(params->rx.mempool_name); in link_create()
139 rss = params->rx.rss; in link_create()
172 params->rx.n_queues, in link_create()
186 for (i = 0; i < params->rx.n_queues; i++) { in link_create()
190 params->rx.queue_size, in link_create()
243 link->n_rxq = params->rx.n_queues; in link_create()
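link_create() here (and its twin in examples/pipeline below) validates the rx parameters first, rejecting a zero queue count or queue size, and then configures one Rx queue per params->rx.n_queues. A structural sketch of that validate-then-configure shape, with hypothetical stand-in types and setup call:

    /* Sketch of link_create()'s Rx handling; types and the setup call
     * are stand-ins, not the example's real API. */
    #include <stddef.h>
    #include <stdint.h>

    struct link_rx_params_sketch {
            uint32_t n_queues;
            uint32_t queue_size;
    };

    static int
    setup_rx_queue_sketch(uint32_t queue, uint32_t size)
    {
            (void)queue;
            (void)size;
            return 0;                        /* pretend the queue was configured */
    }

    static int
    link_create_sketch(const struct link_rx_params_sketch *rx)
    {
            /* Reject obviously invalid Rx configuration up front. */
            if (rx == NULL || rx->n_queues == 0 || rx->queue_size == 0)
                    return -1;

            /* One Rx queue per configured queue, all with the same size. */
            for (uint32_t i = 0; i < rx->n_queues; i++)
                    if (setup_rx_queue_sketch(i, rx->queue_size) < 0)
                            return -1;
            return 0;
    }
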
/dpdk/lib/eventdev/
eventdev_trace_points.c 45 lib.eventdev.rx.adapter.create)
48 lib.eventdev.rx.adapter.free)
51 lib.eventdev.rx.adapter.queue.add)
54 lib.eventdev.rx.adapter.queue.del)
57 lib.eventdev.rx.adapter.start)
60 lib.eventdev.rx.adapter.stop)
/dpdk/doc/guides/tools/
pdump.rst 41 (rx-dev=<iface or pcap file> |
83 ``rx-dev``:
91 * To receive ingress packets only, ``rx-dev`` should be passed.
95 * To receive ingress and egress packets separately ``rx-dev`` and ``tx-dev``
98 * To receive ingress and egress packets together, ``rx-dev`` and ``tx-dev``
120 $ sudo ./<build_dir>/app/dpdk-pdump -l 3 -- --pdump 'port=0,queue=*,rx-dev=/tmp/rx.pcap'
121 …p -l 3,4,5 -- --multi --pdump 'port=0,queue=*,rx-dev=/tmp/rx-1.pcap' --pdump 'port=1,queue=*,rx-de…
/dpdk/examples/multi_process/client_server_mp/mp_server/
main.c 133 (unsigned)ports->id[i], ports->rx_stats.rx[i], in do_stats_display()
140 const unsigned long long rx = clients[i].stats.rx; in do_stats_display() local
144 i, rx, rx_drop, client_tx[i], client_tx_drop[i]); in do_stats_display()
189 clients[i].stats.rx = clients[i].stats.rx_drop = 0; in clear_stats()
213 cl->stats.rx += cl_rx_buf[client].count; in flush_rx_queue()
265 ports->rx_stats.rx[port_num] += rx_count; in do_packet_forwarding()
/dpdk/examples/server_node_efd/server/
main.c 142 (unsigned int)info->id[i], info->rx_stats.rx[i], in do_stats_display()
154 const unsigned long long rx = nodes[i].stats.rx; in do_stats_display() local
162 i, rx, rx_drop, node_tx[i], node_tx_drop[i], in do_stats_display()
210 nodes[i].stats.rx = nodes[i].stats.rx_drop = 0; in clear_stats()
235 cl->stats.rx += cl_rx_buf[node].count; in flush_rx_queue()
316 info->rx_stats.rx[port_num] += rx_count; in do_packet_forwarding()
/dpdk/drivers/net/af_xdp/
compat.h 24 struct xsk_ring_cons *rx, in create_shared_socket() argument
30 return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem, rx, in create_shared_socket()
39 struct xsk_ring_cons *rx __rte_unused, in create_shared_socket()
rte_eth_af_xdp.c 111 struct xsk_ring_cons rx; member
282 struct xsk_ring_cons *rx = &rxq->rx; in af_xdp_rx_zc() local
315 rx->cached_cons -= nb_pkts; in af_xdp_rx_zc()
325 desc = xsk_ring_cons__rx_desc(rx, idx_rx++); in af_xdp_rx_zc()
344 xsk_ring_cons__release(rx, nb_pkts); in af_xdp_rx_zc()
358 struct xsk_ring_cons *rx = &rxq->rx; in af_xdp_rx_cp() local
383 rx->cached_cons -= nb_pkts; in af_xdp_rx_cp()
393 desc = xsk_ring_cons__rx_desc(rx, idx_rx++); in af_xdp_rx_cp()
406 xsk_ring_cons__release(rx, nb_pkts); in af_xdp_rx_cp()
762 unsigned int *prod = rxq->rx.producer; in eth_get_monitor_addr()
[all …]
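
Both af_xdp receive paths above walk an xsk_ring_cons the same way: take a batch of filled descriptors, read each with xsk_ring_cons__rx_desc(), then hand the slots back with xsk_ring_cons__release(). A minimal sketch of that consume loop, assuming the xsk helpers shipped with libbpf (or libxdp) and omitting umem frame recycling and error handling:

    /* Sketch of the AF_XDP consume loop seen in af_xdp_rx_zc()/af_xdp_rx_cp(). */
    #include <bpf/xsk.h>          /* or <xdp/xsk.h> when using libxdp */
    #include <stdint.h>

    static unsigned int rx_sketch(struct xsk_ring_cons *rx, unsigned int budget)
    {
            uint32_t idx_rx = 0;
            /* Ask for up to 'budget' filled descriptors; returns how many exist. */
            unsigned int nb = xsk_ring_cons__peek(rx, budget, &idx_rx);

            for (unsigned int i = 0; i < nb; i++) {
                    const struct xdp_desc *desc =
                            xsk_ring_cons__rx_desc(rx, idx_rx++);
                    (void)desc;   /* desc->addr / desc->len describe the frame */
            }

            /* Return the consumed slots to the kernel. */
            xsk_ring_cons__release(rx, nb);
            return nb;
    }
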
/dpdk/drivers/net/hns3/
hns3_rxtx_vec_neon.h 110 l234_info = rxdp[i].rx.l234_info; in hns3_desc_parse_field()
111 ol_info = rxdp[i].rx.ol_info; in hns3_desc_parse_field()
112 bd_base_info = rxdp[i].rx.bd_base_info; in hns3_desc_parse_field()
178 bd_vld = vset_lane_u16(rxdp[0].rx.bdtype_vld_udp0, bd_vld, 0); in hns3_recv_burst_vec()
179 bd_vld = vset_lane_u16(rxdp[1].rx.bdtype_vld_udp0, bd_vld, 1); in hns3_recv_burst_vec()
180 bd_vld = vset_lane_u16(rxdp[2].rx.bdtype_vld_udp0, bd_vld, 2); in hns3_recv_burst_vec()
181 bd_vld = vset_lane_u16(rxdp[3].rx.bdtype_vld_udp0, bd_vld, 3); in hns3_recv_burst_vec()
hns3_rxtx_vec.c 82 rxdp[0].rx.bd_base_info = 0; in hns3_rxq_rearm_mbuf()
86 rxdp[1].rx.bd_base_info = 0; in hns3_rxq_rearm_mbuf()
90 rxdp[2].rx.bd_base_info = 0; in hns3_rxq_rearm_mbuf()
94 rxdp[3].rx.bd_base_info = 0; in hns3_rxq_rearm_mbuf()
123 if (unlikely(!(rxdp->rx.bd_base_info & in hns3_recv_pkts_vec()
/dpdk/examples/pipeline/
obj.c 197 (params->rx.n_queues == 0) || in link_create()
198 (params->rx.queue_size == 0) || in link_create()
217 mempool = mempool_find(obj, params->rx.mempool_name); in link_create()
221 rss = params->rx.rss; in link_create()
254 params->rx.n_queues, in link_create()
268 for (i = 0; i < params->rx.n_queues; i++) { in link_create()
272 params->rx.queue_size, in link_create()
326 link->n_rxq = params->rx.n_queues; in link_create()
/dpdk/drivers/net/fm10k/base/
fm10k_mbx.c 217 u32 *tail = mbx->rx.buffer + fm10k_fifo_tail_offset(&mbx->rx, 0); in fm10k_mbx_pushed_tail_len()
301 struct fm10k_mbx_fifo *fifo = &mbx->rx; in fm10k_mbx_validate_msg_size()
432 struct fm10k_mbx_fifo *fifo = &mbx->rx; in fm10k_mbx_read_copy()
483 struct fm10k_mbx_fifo *fifo = &mbx->rx; in fm10k_mbx_push_tail()
1078 mbx->max_size = mbx->rx.size - 1; in fm10k_mbx_reset_work()
1100 mbx->rx.tail = 0; in fm10k_mbx_reset_work()
1101 mbx->rx.head = 0; in fm10k_mbx_reset_work()
1183 if (size > mbx->rx.size) { in fm10k_mbx_process_connect()
1184 mbx->max_size = mbx->rx.size - 1; in fm10k_mbx_process_connect()
1464 if (!mbx->rx.buffer) in fm10k_mbx_connect()
[all …]
/dpdk/drivers/net/enic/
enic_rxtx_vec_avx2.c 43 struct rte_mbuf **rx, **rxmb; in enic_noscatter_vec_recv_pkts() local
74 rx = rx_pkts; in enic_noscatter_vec_recv_pkts()
86 *rx++ = rx_one(cqd, *rxmb++, enic); in enic_noscatter_vec_recv_pkts()
423 _mm256_storeu_si256((void *)rx, in enic_noscatter_vec_recv_pkts()
425 _mm256_storeu_si256((void *)(rx + 4), in enic_noscatter_vec_recv_pkts()
751 rx += 8; in enic_noscatter_vec_recv_pkts()
766 *rx++ = rx_one(cqd, *rxmb++, enic); in enic_noscatter_vec_recv_pkts()
806 return rx - rx_pkts; in enic_noscatter_vec_recv_pkts()
/dpdk/doc/guides/sample_app_ug/
vhost.rst 134 **--rx-retry 0|1**
135 The rx-retry option enables/disables enqueue retries when the guests Rx queue
140 **--rx-retry-num num**
141 The rx-retry-num option specifies the number of retries on an Rx burst, it
142 takes effect only when rx retry is enabled. The default value is 4.
144 **--rx-retry-delay msec**
145 The rx-retry-delay option specifies the timeout (in micro seconds) between
146 retries on an RX burst, it takes effect only when rx retry is enabled. The
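
Taken together, the three vhost options describe a bounded retry loop around the enqueue call: if the guest's Rx queue is full, wait rx-retry-delay microseconds and try again, up to rx-retry-num times. A hedged sketch of that loop using rte_vhost_enqueue_burst() and rte_delay_us(); the function and variable names here are illustrative, not the sample application's:

    /* Sketch of the rx-retry behaviour described in vhost.rst above. */
    #include <rte_vhost.h>
    #include <rte_cycles.h>
    #include <rte_mbuf.h>

    static uint16_t
    enqueue_with_retry(int vid, uint16_t queue_id, struct rte_mbuf **pkts,
                       uint16_t count, uint32_t retry_num, uint32_t retry_delay_us)
    {
            uint16_t sent = rte_vhost_enqueue_burst(vid, queue_id, pkts, count);

            /* Guest Rx queue full: back off, then retry a bounded number of times. */
            for (uint32_t retry = 0; sent < count && retry < retry_num; retry++) {
                    rte_delay_us(retry_delay_us);
                    sent += rte_vhost_enqueue_burst(vid, queue_id,
                                                    pkts + sent, count - sent);
            }
            return sent;
    }
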
/dpdk/drivers/net/kni/
rte_eth_kni.c 44 struct pmd_queue_stats rx; member
88 kni_q->rx.pkts += nb_pkts; in eth_kni_rx()
307 stats->q_ipackets[i] = q->rx.pkts; in eth_kni_stats_get()
308 stats->q_ibytes[i] = q->rx.bytes; in eth_kni_stats_get()
340 q->rx.pkts = 0; in eth_kni_stats_reset()
341 q->rx.bytes = 0; in eth_kni_stats_reset()
/dpdk/drivers/net/pcap/
pcap_ethdev.c 606 struct pcap_rx_queue *rx; in eth_dev_start() local
611 rx = &internals->rx_queue[0]; in eth_dev_start()
641 rx = &internals->rx_queue[i]; in eth_dev_start()
646 if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) { in eth_dev_start()
647 if (open_single_rx_pcap(rx->name, &pp->rx_pcap[i]) < 0) in eth_dev_start()
650 if (open_single_iface(rx->name, &pp->rx_pcap[i]) < 0) in eth_dev_start()
1033 struct pmd_devargs *rx = extra_args; in open_rx_pcap() local
1039 if (add_queue(rx, pcap_filename, key, pcap, NULL) < 0) { in open_rx_pcap()
1295 struct pcap_rx_queue *rx = &(*internals)->rx_queue[i]; in eth_from_pcaps_common() local
1299 strlcpy(rx->name, queue->name, sizeof(rx->name)); in eth_from_pcaps_common()
[all …]
/dpdk/drivers/event/sw/
sw_evdev_xstats.c 11 rx, enumerator
56 case rx: return sw->stats.rx_pkts; in get_dev_stat()
76 case rx: return p->stats.rx_pkts; in get_port_stat()
111 case rx: return qid->stats.rx_pkts; in get_qid_stat()
187 static const enum xstats_type dev_types[] = { rx, tx, dropped, in sw_xstats_init() enumerator
199 static const enum xstats_type port_types[] = { rx, tx, dropped, in sw_xstats_init() enumerator
218 static const enum xstats_type qid_types[] = { rx, tx, dropped, in sw_xstats_init() enumerator
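The sw event device exposes its counters through one xstats_type enumeration; each get_*_stat() helper switches on the same enum value (rx, tx, dropped) and returns the matching counter for the device, port, or queue. A small sketch of that enum-keyed dispatch; the stats struct below is a stand-in, not the driver's:

    /* Sketch of the dispatch style used in sw_evdev_xstats.c. */
    #include <stdint.h>

    enum xstats_type_sketch { rx, tx, dropped };

    struct stats_sketch {
            uint64_t rx_pkts;
            uint64_t tx_pkts;
            uint64_t rx_dropped;
    };

    static uint64_t
    get_stat_sketch(const struct stats_sketch *s, enum xstats_type_sketch type)
    {
            switch (type) {
            case rx:      return s->rx_pkts;
            case tx:      return s->tx_pkts;
            case dropped: return s->rx_dropped;
            default:      return UINT64_MAX;   /* unknown statistic */
            }
    }
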
/dpdk/examples/packet_ordering/
main.c 63 } rx __rte_cache_aligned;
360 app_stats.rx.rx_pkts); in print_stats()
362 app_stats.rx.enqueue_pkts); in print_stats()
454 app_stats.rx.rx_pkts += nb_rx_pkts; in rx_thread()
463 app_stats.rx.enqueue_pkts += ret; in rx_thread()
465 app_stats.rx.enqueue_failed_pkts += in rx_thread()
/dpdk/drivers/net/ipn3ke/
ipn3ke_ethdev.c 214 ipn3ke_mtu_cal(uint32_t tx, uint32_t rx) in ipn3ke_mtu_cal() argument
217 tmp = RTE_MIN(tx, rx); in ipn3ke_mtu_cal()
229 uint32_t rx; in ipn3ke_mtu_set() local
242 &rx, in ipn3ke_mtu_set()
247 tmp = ipn3ke_mtu_cal(tx, rx); in ipn3ke_mtu_set()
/dpdk/examples/ipsec-secgw/
ipsec-secgw.h 96 uint64_t rx; member
149 core_statistics[lcore_id].rx += n; in core_stats_update_rx()
/dpdk/drivers/net/vhost/
rte_eth_vhost.h 22 bool rx; member
