| /f-stack/dpdk/lib/librte_mbuf/ |
| rte_mbuf.h |
|    588  struct rte_mbuf *m;  in rte_mbuf_raw_alloc()
|    894  struct rte_mbuf *m;  in rte_pktmbuf_alloc()
|   1108  rte_mbuf_dynfield_copy(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)  in rte_mbuf_dynfield_copy()
|   1115  __rte_pktmbuf_copy_hdr(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)  in __rte_pktmbuf_copy_hdr()
|   1147  static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)  in rte_pktmbuf_attach()
|   1205  struct rte_mbuf *md;  in __rte_pktmbuf_free_direct()
|   1399  struct rte_mbuf *m_next;  in rte_pktmbuf_free()
|   1443  struct rte_mbuf *
|   1468  struct rte_mbuf *
|   1529  static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)  in rte_pktmbuf_lastseg()
|   [all …]
|
| rte_mbuf.c |
|     83  struct rte_mbuf *m = _m;  in rte_pktmbuf_init()
|    123  struct rte_mbuf *m = opaque;  in rte_pktmbuf_free_pinned_extmem()
|    171  struct rte_mbuf *m = _m;  in __rte_pktmbuf_init_extmem()
|    330  elt_size = sizeof(struct rte_mbuf) +  in rte_pktmbuf_pool_create_extbuf()
|    517  struct rte_mbuf *
|    520  struct rte_mbuf *mc, *mi, **prev;  in rte_pktmbuf_clone()
|    560  struct rte_mbuf *m;  in __rte_pktmbuf_linearize()
|    561  struct rte_mbuf *m_next;  in __rte_pktmbuf_linearize()
|    593  struct rte_mbuf *
|    597  const struct rte_mbuf *seg = m;  in rte_pktmbuf_copy()
|   [all …]
|
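The librte_mbuf hits above cover the core allocation, free, attach, clone and copy paths. As orientation, here is a minimal, self-contained sketch of that lifecycle using only public APIs named in the listing; the pool name, sizes and socket id are illustrative assumptions, and rte_eal_init() is presumed to have run already.

#include <errno.h>
#include <string.h>

#include <rte_errno.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Minimal mbuf lifecycle sketch; pool parameters are illustrative only. */
static int
mbuf_roundtrip(void)
{
    struct rte_mempool *mp;
    struct rte_mbuf *m, *clone;
    char *payload;

    /* 8191 mbufs with the default data room, on any NUMA socket. */
    mp = rte_pktmbuf_pool_create("demo_pool", 8191, 256, 0,
                                 RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
    if (mp == NULL)
        return -rte_errno;

    m = rte_pktmbuf_alloc(mp);              /* cf. the rte_pktmbuf_alloc() hit */
    if (m == NULL)
        return -ENOMEM;

    payload = rte_pktmbuf_append(m, 64);    /* grow the data area by 64 bytes */
    if (payload != NULL)
        memset(payload, 0, 64);

    clone = rte_pktmbuf_clone(m, mp);       /* zero-copy clone, cf. rte_pktmbuf_clone() */

    rte_pktmbuf_free(clone);                /* NULL-safe; frees every segment */
    rte_pktmbuf_free(m);
    return 0;
}

rte_pktmbuf_free() walks the whole segment chain and also handles indirect mbufs created by rte_pktmbuf_clone()/rte_pktmbuf_attach(), which is why the clone and its parent can be freed independently.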
| /f-stack/dpdk/drivers/net/mlx5/ |
| mlx5_rxtx_vec.h |
|    34  S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
|    35  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
|    36  S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
|    37  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
|    38  S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, hash) ==
|    42  S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, ol_flags) ==
|    43  offsetof(struct rte_mbuf, rearm_data) + 8);
|    44  S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, rearm_data) ==
|    45  RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
|    48  S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
|   [all …]
|
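These compile-time asserts pin the relative layout of the rx_descriptor_fields1 and rearm_data blocks so the vectorized Rx path can fill pkt_len/data_len/hash and ol_flags with a few wide stores instead of per-field writes. A sketch of the same kind of check written with the generic RTE_BUILD_BUG_ON/RTE_ALIGN macros rather than mlx5's S_ASSERT_RTE_MBUF wrapper (the function name is ours, not the driver's):

#include <stddef.h>

#include <rte_common.h>   /* RTE_BUILD_BUG_ON, RTE_ALIGN */
#include <rte_mbuf.h>

/* Compile-time layout checks in the style of the vector Rx paths: the
 * driver writes rearm_data and rx_descriptor_fields1 as whole vector
 * lanes, so the individual fields must stay at these fixed offsets. */
static inline void
check_mbuf_layout(void)
{
    RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
                     offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
    RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
                     offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
    RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
                     offsetof(struct rte_mbuf, rearm_data) + 8);
    RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
                     RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
}

If a future rte_mbuf layout change moved any of these fields, the build would fail here rather than letting the driver silently write garbage into the mbufs at run time.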
| /f-stack/dpdk/drivers/net/virtio/ |
| virtio_ethdev.h |
|    80  uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
|    82  uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
|    85  uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
|    89  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
|    92  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
|    94  uint16_t virtio_xmit_pkts_prepare(void *tx_queue, struct rte_mbuf **tx_pkts,
|    97  uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
|    99  uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
|   102  uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,
|   105  uint16_t virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
|   [all …]
|
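All of these prototypes follow the standard eth_rx_burst_t/eth_tx_burst_t shape; applications do not call them directly, the ethdev layer dispatches to whichever variant the driver selected at configuration time. A minimal polling sketch through the generic burst API (port id, queue id and burst size are assumptions of the sketch):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SZ 32   /* arbitrary burst size for the sketch */

/* Receive one burst on queue 0 and echo it back out on the same port.
 * rte_eth_rx_burst() ends up in the PMD-specific handler, e.g. one of
 * the virtio_recv_pkts*() variants listed above. */
static void
echo_one_burst(uint16_t port_id)
{
    struct rte_mbuf *pkts[BURST_SZ];
    uint16_t nb_rx, nb_tx;

    nb_rx = rte_eth_rx_burst(port_id, 0, pkts, BURST_SZ);
    nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);

    /* Whatever the Tx queue did not accept must be freed by the caller. */
    while (nb_tx < nb_rx)
        rte_pktmbuf_free(pkts[nb_tx++]);
}

Which concrete handler runs here (split vs. packed ring, mergeable buffers, in-order, vectorized) is fixed once per device, based on the negotiated virtio features and build-time vector support.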
| /f-stack/dpdk/drivers/net/thunderx/ |
| nicvf_rxtx.h |
|    25  fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)  in fill_sq_desc_gather()
|    50  fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)  in fill_sq_desc_gather()
|    60  nicvf_mbuff_init_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,  in nicvf_mbuff_init_update()
|    89  uint16_t nicvf_recv_pkts_no_offload(void *rxq, struct rte_mbuf **rx_pkts,
|    91  uint16_t nicvf_recv_pkts_cksum(void *rxq, struct rte_mbuf **rx_pkts,
|    96  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
|    99  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
|   101  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
|   103  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
|   105  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
|   [all …]
|
| /f-stack/dpdk/lib/librte_ipsec/ |
| sa.h |
|   144  struct rte_mbuf *mb[], uint16_t num);
|   148  struct rte_mbuf *mb[], uint16_t num);
|   152  struct rte_mbuf *mb[], uint16_t num);
|   156  struct rte_mbuf *mb[], uint16_t num);
|   160  struct rte_mbuf *mb[], uint16_t num);
|   178  struct rte_mbuf *mb[], uint16_t num);
|   182  struct rte_mbuf *mb[], uint16_t num);
|   186  struct rte_mbuf *mb[], uint16_t num);
|   190  struct rte_mbuf *mb[], uint16_t num);
|   194  struct rte_mbuf *mb[], uint16_t num);
|   [all …]
|
| esp_inb.c |
|   146  static struct rte_mbuf *
|   150  struct rte_mbuf *ms;  in move_icv()
|   224  struct rte_mbuf *ml;  in inb_prepare()
|   354  process_step1(struct rte_mbuf *mb, uint32_t tlen, struct rte_mbuf **ml,  in process_step1()
|   399  trs_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,  in trs_process_check()
|   422  tun_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,  in tun_process_check()
|   438  tun_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,  in tun_process_step2()
|   462  trs_process_step2(struct rte_mbuf *mb, struct rte_mbuf *ml, uint32_t hlen,  in trs_process_step2()
|   522  struct rte_mbuf *ml[num];  in tun_process()
|   582  struct rte_mbuf *ml[num];  in trs_process()
|   [all …]
|
| rte_ipsec.h |
|    38  struct rte_mbuf *mb[],
|    42  struct rte_mbuf *mb[],
|    46  struct rte_mbuf *mb[],
|   119  struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)  in rte_ipsec_pkt_crypto_prepare()
|   126  struct rte_mbuf *mb[], uint16_t num)  in rte_ipsec_pkt_cpu_prepare()
|   155  rte_ipsec_pkt_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],  in rte_ipsec_pkt_process()
|
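sa.h declares the per-SA prepare/process worker functions and rte_ipsec.h wraps them behind the session API. A condensed sketch of the lookaside-crypto flow those entry points serve; the session, crypto device ids and op mempool are assumed to be set up elsewhere, and a real application would poll the dequeue and regroup mbufs per completed op (e.g. with rte_ipsec_pkt_crypto_group()) instead of processing in one shot:

#include <rte_cryptodev.h>
#include <rte_ipsec.h>
#include <rte_mbuf.h>

/* Simplified lookaside path: prepare -> enqueue -> dequeue -> process.
 * dev_id/qp_id, cop_pool and 'ss' are assumptions of this sketch. */
static uint16_t
ipsec_lookaside_burst(const struct rte_ipsec_session *ss,
                      struct rte_mempool *cop_pool,
                      uint8_t dev_id, uint16_t qp_id,
                      struct rte_mbuf *mb[], uint16_t num)
{
    struct rte_crypto_op *cop[num];
    uint16_t n;

    if (rte_crypto_op_bulk_alloc(cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                                 cop, num) != num)
        return 0;

    /* Fill the crypto ops from the packets (ESP trailer, ICV, IV, ...). */
    n = rte_ipsec_pkt_crypto_prepare(ss, mb, cop, num);

    n = rte_cryptodev_enqueue_burst(dev_id, qp_id, cop, n);
    n = rte_cryptodev_dequeue_burst(dev_id, qp_id, cop, n);

    /* Validate and strip ESP on the packets whose ops completed.
     * Crypto-op cleanup (rte_crypto_op_free) is omitted for brevity. */
    return rte_ipsec_pkt_process(ss, mb, n);
}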
| /f-stack/dpdk/lib/librte_distributor/ |
| rte_distributor_single.h |
|    23  struct rte_mbuf;
|    78  struct rte_mbuf **mbufs, unsigned int num_mbufs);
|    96  struct rte_mbuf **mbufs, unsigned int max_mbufs);
|   150  struct rte_mbuf *
|   152  unsigned int worker_id, struct rte_mbuf *oldpkt);
|   168  unsigned int worker_id, struct rte_mbuf *mbuf);
|   192  unsigned int worker_id, struct rte_mbuf *oldpkt);
|   210  struct rte_mbuf *
|
| rte_distributor.h |
|    28  struct rte_mbuf;
|    89  struct rte_mbuf **mbufs, unsigned int num_mbufs);
|   107  struct rte_mbuf **mbufs, unsigned int max_mbufs);
|   167  unsigned int worker_id, struct rte_mbuf **pkts,
|   168  struct rte_mbuf **oldpkt, unsigned int retcount);
|   186  unsigned int worker_id, struct rte_mbuf **oldpkt, int num);
|   212  unsigned int worker_id, struct rte_mbuf **oldpkt,
|   236  unsigned int worker_id, struct rte_mbuf **mbufs);
|
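rte_distributor.h is the burst variant (a worker exchanges several packets per call), while rte_distributor_single.h keeps the legacy one-packet-at-a-time API; both only forward-declare struct rte_mbuf. A skeleton of the burst-mode worker loop, where the quit flag and the byte counter stand in for real work and WORKER_BURST is our own define assumed to match the distributor's per-call maximum:

#include <stdint.h>

#include <rte_distributor.h>
#include <rte_mbuf.h>

#define WORKER_BURST 8   /* assumption: the burst API hands out at most 8 packets */

/* Skeleton worker: return the previous burst, fetch the next, "process" it. */
static uint64_t
worker_loop(struct rte_distributor *d, unsigned int worker_id,
            volatile const int *quit)
{
    struct rte_mbuf *pkts[WORKER_BURST];
    uint64_t bytes = 0;
    unsigned int nb = 0;

    while (!*quit) {
        /* Hands back the previous 'nb' packets and blocks for new ones. */
        nb = rte_distributor_get_pkt(d, worker_id, pkts, pkts, nb);
        for (unsigned int i = 0; i < nb; i++)
            bytes += rte_pktmbuf_pkt_len(pkts[i]);   /* placeholder work */
    }
    /* Give back whatever is still held before the worker exits. */
    rte_distributor_return_pkt(d, worker_id, pkts, nb);
    return bytes;
}

On the distribution side, the lcore that owns the distributor keeps calling rte_distributor_process() with new mbufs and rte_distributor_returned_pkts() to collect what the workers have finished.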
| /f-stack/dpdk/drivers/net/iavf/ |
| iavf_rxtx.h |
|   142  struct rte_mbuf *mb,
|   201  struct rte_mbuf *mbuf;
|   207  struct rte_mbuf *mbuf;
|   417  struct rte_mbuf **rx_pkts,
|   420  struct rte_mbuf **rx_pkts,
|   423  struct rte_mbuf **rx_pkts,
|   444  struct rte_mbuf **rx_pkts,
|   454  struct rte_mbuf **rx_pkts,
|   457  struct rte_mbuf **rx_pkts,
|   460  struct rte_mbuf **rx_pkts,
|   [all …]
|
| iavf_rxtx_vec_sse.c |
|    27  struct rte_mbuf *mb0, *mb1;  in iavf_rxq_rearm()
|    59  offsetof(struct rte_mbuf, buf_addr) + 8);  in iavf_rxq_rearm()
|    96  struct rte_mbuf **rx_pkts)  in desc_to_olflags_v()
|   183  offsetof(struct rte_mbuf, rearm_data) + 8);  in desc_to_olflags_v()
|   213  struct rte_mbuf **rx_pkts)  in flex_desc_to_olflags_v()
|   335  offsetof(struct rte_mbuf, rearm_data) + 8);  in flex_desc_to_olflags_v()
|   394  struct rte_mbuf **sw_ring;  in _recv_raw_pkts_vec()
|   639  struct rte_mbuf **rx_pkts,  in _recv_raw_pkts_vec_flex_rxd()
|   643  struct rte_mbuf **sw_ring;  in _recv_raw_pkts_vec_flex_rxd()
|  1023  struct rte_mbuf **rx_pkts,  in iavf_recv_scattered_burst_vec_flex_rxd()
|   [all …]
|
| /f-stack/dpdk/drivers/net/ice/ |
| ice_rxtx.h |
|    46  struct rte_mbuf *mb,
|    50  struct rte_mbuf *mbuf;
|    63  struct rte_mbuf *pkt_last_seg; /**< last segment of current packet */
|    67  struct rte_mbuf fake_mbuf; /**< dummy mbuf */
|    68  struct rte_mbuf *rx_stage[ICE_RX_MAX_BURST * 2];
|    94  struct rte_mbuf *mbuf;
|   100  struct rte_mbuf *mbuf;
|   214  uint16_t ice_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
|   216  uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
|   253  struct rte_mbuf **rx_pkts,
|   [all …]
|
| ice_rxtx_vec_sse.c |
|    39  struct rte_mbuf *mb0, *mb1;  in ice_rxq_rearm()
|    72  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=  in ice_rxq_rearm()
|    73  offsetof(struct rte_mbuf, buf_addr) + 8);  in ice_rxq_rearm()
|   105  struct rte_mbuf **rx_pkts)  in ice_rx_desc_to_olflags_v()
|   226  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=  in ice_rx_desc_to_olflags_v()
|   227  offsetof(struct rte_mbuf, rearm_data) + 8);  in ice_rx_desc_to_olflags_v()
|   306  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=  in _ice_recv_raw_pkts_vec()
|   308  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=  in _ice_recv_raw_pkts_vec()
|   346  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=  in _ice_recv_raw_pkts_vec()
|   348  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=  in _ice_recv_raw_pkts_vec()
|   [all …]
|
| /f-stack/dpdk/lib/librte_ip_frag/ |
| rte_ip_frag.h |
|    28  struct rte_mbuf;
|    42  struct rte_mbuf *mb; /**< fragment mbuf */
|    82  struct rte_mbuf *row[IP_FRAG_DEATH_ROW_MBUF_LEN];
|   170  rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
|   171  struct rte_mbuf **pkts_out,
|   198  struct rte_mbuf *rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
|   200  struct rte_mbuf *mb, uint64_t tms, struct rte_ipv6_hdr *ip_hdr,
|   247  int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
|   248  struct rte_mbuf **pkts_out,
|   272  struct rte_mbuf * rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
|   [all …]
|
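The header pairs fragment and reassemble entry points for both IPv4 and IPv6, plus the death-row structure used to defer freeing of reassembled fragments. A short sketch of the IPv4 fragmentation side, assuming the direct and indirect mempools are created elsewhere and 1500 bytes is the target MTU:

#include <rte_ip_frag.h>
#include <rte_mbuf.h>

/* Split one IPv4 packet into MTU-sized fragments. The direct pool supplies
 * the new headers, the indirect pool the zero-copy references to the
 * original payload; both are assumed to exist already. */
static int
fragment_to_mtu(struct rte_mbuf *pkt,
                struct rte_mempool *direct_pool,
                struct rte_mempool *indirect_pool,
                struct rte_mbuf *frags[], uint16_t max_frags)
{
    int32_t nb = rte_ipv4_fragment_packet(pkt, frags, max_frags,
                                          1500, direct_pool, indirect_pool);
    if (nb < 0)
        return nb;              /* e.g. -EINVAL or -ENOMEM */

    rte_pktmbuf_free(pkt);      /* the input packet is not consumed */
    return nb;                  /* number of fragments written to frags[] */
}

Reassembly goes the other way: rte_ipv4_frag_reassemble_packet() buffers fragments in the rte_ip_frag_tbl and returns the rebuilt packet once the last piece arrives, parking obsolete fragments on the death row for deferred freeing.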
| /f-stack/dpdk/drivers/net/ixgbe/ |
| ixgbe_rxtx.h |
|    71  struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
|    75  struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
|    82  struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
|    91  struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
|   105  struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
|   106  struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
|   136  struct rte_mbuf fake_mbuf;
|   138  struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST*2];
|   282  uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
|   285  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
|   [all …]
|
| ixgbe_rxtx_vec_sse.c |
|    26  struct rte_mbuf *mb0, *mb1;  in ixgbe_rxq_rearm()
|    61  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=  in ixgbe_rxq_rearm()
|    62  offsetof(struct rte_mbuf, buf_addr) + 8);  in ixgbe_rxq_rearm()
|   135  struct rte_mbuf **rx_pkts)  in desc_to_olflags_v()
|   233  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=  in desc_to_olflags_v()
|   234  offsetof(struct rte_mbuf, rearm_data) + 8);  in desc_to_olflags_v()
|   262  struct rte_mbuf **rx_pkts)  in desc_to_ptype_v()
|   337  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=  in _recv_raw_pkts_vec()
|   389  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=  in _recv_raw_pkts_vec()
|   395  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=  in _recv_raw_pkts_vec()
|   [all …]
|
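The rxq_rearm() hits here and in the iavf/ice/i40e/fm10k files follow one pattern: refill the software ring with fresh mbufs and program each hardware descriptor with the mbuf's DMA address, counting on buf_iova sitting 8 bytes after buf_addr so both load as a single 16-byte lane. A scalar sketch of that refill step; demo_rx_desc and the function name are illustrative stand-ins, not the driver's real types:

#include <errno.h>
#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Stand-in for the PMD-specific Rx descriptor layout. */
struct demo_rx_desc {
    uint64_t pkt_addr;   /* DMA address of the mbuf data area */
    uint64_t hdr_addr;
};

/* Scalar equivalent of the vectorized rearm loops: bulk-allocate mbufs into
 * the software ring and point each descriptor at the mbuf's default data
 * offset. */
static int
rxq_rearm_scalar(struct rte_mempool *mp, struct rte_mbuf **sw_ring,
                 struct demo_rx_desc *ring, uint16_t start, uint16_t n)
{
    int rc = rte_mempool_get_bulk(mp, (void **)&sw_ring[start], n);

    if (rc != 0)
        return rc;   /* a PMD would bump its alloc-failed counter here */

    for (uint16_t i = 0; i < n; i++) {
        struct rte_mbuf *mb = sw_ring[start + i];

        ring[start + i].pkt_addr = rte_mbuf_data_iova_default(mb);
        ring[start + i].hdr_addr = 0;
    }
    return 0;
}

The vector versions do the same work several descriptors at a time with 16-byte loads and stores, which is exactly what the RTE_BUILD_BUG_ON layout checks above protect.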
| /f-stack/dpdk/drivers/net/fm10k/ |
| fm10k.h |
|   155  struct rte_mbuf **sw_ring;
|   162  struct rte_mbuf fake_mbuf;
|   197  struct rte_mbuf **sw_ring;
|   262  fm10k_pktmbuf_reset(struct rte_mbuf *mb, uint16_t in_port)  in fm10k_pktmbuf_reset()
|   296  fm10k_addr_alignment_valid(struct rte_mbuf *mb)  in fm10k_addr_alignment_valid()
|   320  uint16_t fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
|   324  struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
|   339  uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
|   342  uint16_t fm10k_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
|   348  uint16_t fm10k_recv_pkts_vec(void *, struct rte_mbuf **, uint16_t);
|   [all …]
|
| fm10k_rxtx_vec.c |
|   262  struct rte_mbuf *mb0, *mb1;  in fm10k_rxq_rearm()
|   308  offsetof(struct rte_mbuf, buf_addr) + 8);  in fm10k_rxq_rearm()
|   382  struct rte_mbuf **mbufp;  in fm10k_recv_raw_pkts_vec()
|   604  struct rte_mbuf **rx_bufs,  in fm10k_reassemble_packets()
|   608  struct rte_mbuf *start = rxq->pkt_first_seg;  in fm10k_reassemble_packets()
|   609  struct rte_mbuf *end = rxq->pkt_last_seg;  in fm10k_reassemble_packets()
|   742  struct rte_mbuf *pkt, uint64_t flags)  in vtx1()
|   763  struct rte_mbuf **txep;  in fm10k_tx_free_bufs()
|   817  tx_backlog_entry(struct rte_mbuf **txep,  in tx_backlog_entry()
|   832  struct rte_mbuf **txep;  in fm10k_xmit_fixed_burst_vec()
|   [all …]
|
| /f-stack/dpdk/drivers/net/qede/ |
| qede_rxtx.h |
|   165  struct rte_mbuf *mbuf;
|   172  struct rte_mbuf *tpa_head; /* Pointer to first TPA segment */
|   173  struct rte_mbuf *tpa_tail; /* Pointer to last TPA segment */
|   209  struct rte_mbuf *mbuf;
|   274  uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
|   276  uint16_t qede_xmit_pkts_cmt(void *p_txq, struct rte_mbuf **tx_pkts,
|   281  uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
|   284  uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
|   286  uint16_t qede_recv_pkts_cmt(void *p_rxq, struct rte_mbuf **rx_pkts,
|   289  qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts,
|   [all …]
|
| /f-stack/dpdk/drivers/net/octeontx2/ |
| otx2_tx.c |
|   190  struct rte_mbuf *mbuf;  in nix_xmit_pkts_vector()
|   196  offsetof(struct rte_mbuf, buf_iova));  in nix_xmit_pkts_vector()
|   206  offsetof(struct rte_mbuf, buf_iova));  in nix_xmit_pkts_vector()
|   215  offsetof(struct rte_mbuf, buf_iova));  in nix_xmit_pkts_vector()
|   224  offsetof(struct rte_mbuf, buf_iova));  in nix_xmit_pkts_vector()
|   238  struct rte_mbuf *mbuf;  in nix_xmit_pkts_vector()
|   243  offsetof(struct rte_mbuf, buf_iova));  in nix_xmit_pkts_vector()
|   266  offsetof(struct rte_mbuf, pool) -  in nix_xmit_pkts_vector()
|   269  offsetof(struct rte_mbuf, pool) -  in nix_xmit_pkts_vector()
|   272  offsetof(struct rte_mbuf, pool) -  in nix_xmit_pkts_vector()
|   [all …]
|
| /f-stack/dpdk/drivers/net/dpaa/ |
| dpaa_rxtx.c |
|   337  struct rte_mbuf *
|   400  struct rte_mbuf *mbuf;  in dpaa_eth_fd_to_mbuf()
|   440  struct rte_mbuf *mbuf;  in dpaa_free_mbuf()
|   462  temp = (struct rte_mbuf *)  in dpaa_free_mbuf()
|   505  struct rte_mbuf *mbuf;  in dpaa_rx_cb_no_prefetch()
|   561  struct rte_mbuf *mbuf;  in dpaa_rx_cb()
|   614  struct rte_mbuf **bufs,  in dpaa_eth_queue_portal_rx()
|   639  struct rte_mbuf *mbuf;  in dpaa_rx_cb_parallel()
|   667  struct rte_mbuf *mbuf;  in dpaa_rx_cb_atomic()
|   695  struct rte_mbuf *mbuf;  in dpaa_eth_err_queue()
|   [all …]
|
| /f-stack/dpdk/drivers/net/i40e/ |
| i40e_rxtx_vec_sse.c |
|    28  struct rte_mbuf *mb0, *mb1;  in i40e_rxq_rearm()
|    61  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=  in i40e_rxq_rearm()
|    62  offsetof(struct rte_mbuf, buf_addr) + 8);  in i40e_rxq_rearm()
|   216  __m128i descs[4], struct rte_mbuf **rx_pkts)  in desc_to_olflags_v()
|   317  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=  in desc_to_olflags_v()
|   318  offsetof(struct rte_mbuf, rearm_data) + 8);  in desc_to_olflags_v()
|   376  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=  in _recv_raw_pkts_vec()
|   378  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=  in _recv_raw_pkts_vec()
|   426  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=  in _recv_raw_pkts_vec()
|   432  RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=  in _recv_raw_pkts_vec()
|   [all …]
|
| /f-stack/dpdk/lib/librte_gso/ |
| gso_common.c |
|    14  hdr_segment_init(struct rte_mbuf *hdr_segment, struct rte_mbuf *pkt,  in hdr_segment_init()
|    33  free_gso_segment(struct rte_mbuf **pkts, uint16_t nb_pkts)  in free_gso_segment()
|    42  gso_do_segment(struct rte_mbuf *pkt,  in gso_do_segment()
|    47  struct rte_mbuf **pkts_out,  in gso_do_segment()
|    50  struct rte_mbuf *pkt_in;  in gso_do_segment()
|    51  struct rte_mbuf *hdr_segment, *pyld_segment, *prev_segment;  in gso_do_segment()
|
| /f-stack/dpdk/drivers/net/bnxt/ |
| bnxt_rxr.h |
|    46  struct rte_mbuf *mbuf;
|    61  struct rte_mbuf **rx_buf_ring; /* sw ring */
|    62  struct rte_mbuf **ag_buf_ring; /* sw ring */
|    78  uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
|    87  uint16_t bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
|    94  struct rte_mbuf *mbuf);
|   100  bnxt_cfa_code_dynfield(struct rte_mbuf *mbuf)  in bnxt_cfa_code_dynfield()
|