
Searched refs:mbufs (Results 1 – 25 of 116) sorted by relevance


/f-stack/dpdk/examples/qos_sched/
app_thread.c
117 struct rte_mbuf **mbufs; in app_send_burst() local
120 mbufs = (struct rte_mbuf **)qconf->m_table; in app_send_burst()
128 mbufs = (struct rte_mbuf **)&mbufs[ret]; in app_send_burst()
141 qconf->m_table[len] = mbufs[i]; in app_send_packets()
157 struct rte_mbuf *mbufs[burst_conf.qos_dequeue]; in app_tx_thread() local
167 app_send_packets(conf, mbufs, burst_conf.qos_dequeue); in app_tx_thread()
195 struct rte_mbuf *mbufs[burst_conf.ring_burst]; in app_worker_thread() local
213 nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs, in app_worker_thread()
217 (void **)mbufs, nb_pkt, NULL) == 0) in app_worker_thread()
230 struct rte_mbuf *mbufs[burst_conf.ring_burst]; in app_mixed_thread() local
[all …]
/f-stack/dpdk/doc/guides/prog_guide/
reorder_lib.rst
9 The Reorder Library provides a mechanism for reordering mbufs based on their
15 The reorder library is essentially a buffer that reorders mbufs.
17 mbufs from it.
27 and late mbufs depending on the sequence number of the inserted mbuf:
35 mbufs.
45 late mbufs are returned to the user with an error.
49 To that end, mbufs in the Order buffer are moved into the Ready buffer.
51 late mbufs.
67 When draining mbufs, the reorder buffer would return mbufs in the Ready
85 mbufs into the reorder buffer and finally transmit drained mbufs.
[all …]
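
A minimal sketch of that insert/drain cycle, assuming the reorder buffer was created with rte_reorder_create() and that each mbuf already carries a sequence number assigned at RX (mbuf->seqn in older DPDK releases, the rte_reorder_seqn() dynamic field in newer ones); BURST_SIZE is illustrative:

    #include <rte_mbuf.h>
    #include <rte_reorder.h>

    #define BURST_SIZE 32

    static void
    reorder_burst(struct rte_reorder_buffer *b,
                  struct rte_mbuf **in_bufs, uint16_t nb_in)
    {
        struct rte_mbuf *out_bufs[BURST_SIZE];
        unsigned int nb_out;
        uint16_t i;

        /* Late mbufs (sequence number already behind the moving
         * window) make rte_reorder_insert() fail; drop them. */
        for (i = 0; i < nb_in; i++)
            if (rte_reorder_insert(b, in_bufs[i]) != 0)
                rte_pktmbuf_free(in_bufs[i]);

        /* Drain returns up to BURST_SIZE mbufs in sequence order. */
        nb_out = rte_reorder_drain(b, out_bufs, BURST_SIZE);
        /* ... hand out_bufs[0..nb_out) to rte_eth_tx_burst() ... */
        (void)nb_out;
    }
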
ip_fragment_reassembly_lib.rst
16 For each fragment two new mbufs are created:
26 Finally 'direct' and 'indirect' mbufs for each fragment are linked together via mbuf's next field t…
28 …to explicitly specify which mempools should be used to allocate 'direct' and 'indirect' mbufs from.
30 For more information about direct and indirect mbufs, refer to :ref:`direct_indirect_buffer`.
66 At any given time up to (2 \* bucket_entries \* RTE_LIBRTE_IP_FRAG_MAX \* <maximum number of mbufs
87 …b) Delete a timed-out entry, free mbufs associated with it and store a new entry with specif…
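
A hedged sketch of the fragmentation call itself, assuming the input mbuf starts at its IPv4 header (L2 header already stripped, as in DPDK's ip_fragmentation example) and that both mempools exist; MAX_FRAGS and MTU_SIZE are illustrative:

    #include <rte_ip_frag.h>
    #include <rte_mbuf.h>

    #define MAX_FRAGS 8
    #define MTU_SIZE  1500

    static void
    fragment_one(struct rte_mbuf *pkt,
                 struct rte_mempool *direct_pool,
                 struct rte_mempool *indirect_pool)
    {
        struct rte_mbuf *frags[MAX_FRAGS];
        int32_t nb_frags;

        nb_frags = rte_ipv4_fragment_packet(pkt, frags, MAX_FRAGS,
                MTU_SIZE, direct_pool, indirect_pool);
        if (nb_frags < 0) {              /* e.g. -EINVAL, -ENOMEM */
            rte_pktmbuf_free(pkt);
            return;
        }
        /* The indirect mbufs reference the original payload, so this
         * free only drops the caller's own reference. */
        rte_pktmbuf_free(pkt);
        /* ... prepend L2 headers and transmit frags[0..nb_frags) ... */
    }
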
/f-stack/dpdk/app/test/
test_table_tables.c
151 PREPARE_PACKET(mbufs[i], 0xadadadad); in test_table_stub()
153 PREPARE_PACKET(mbufs[i], 0xadadadab); in test_table_stub()
163 rte_pktmbuf_free(mbufs[i]); in test_table_stub()
261 PREPARE_PACKET(mbufs[i], 10); in test_table_array()
263 PREPARE_PACKET(mbufs[i], 20); in test_table_array()
277 rte_pktmbuf_free(mbufs[i]); in test_table_array()
443 rte_pktmbuf_free(mbufs[i]); in test_table_lpm()
621 rte_pktmbuf_free(mbufs[i]); in test_table_lpm_ipv6()
724 rte_pktmbuf_free(mbufs[i]); in test_table_hash_lru_generic()
834 rte_pktmbuf_free(mbufs[i]); in test_table_hash_ext_generic()
[all …]
test_mbuf.c
743 struct rte_mbuf *mbufs[NB_MBUF]; in test_pktmbuf_pool_bulk() local
820 rte_pktmbuf_free_bulk(mbufs, NB_MBUF); in test_pktmbuf_pool_bulk()
837 ret = rte_pktmbuf_chain(mbufs[0], mbufs[i]); in test_pktmbuf_pool_bulk()
842 mbufs[i] = NULL; in test_pktmbuf_pool_bulk()
845 rte_pktmbuf_free_bulk(mbufs, 1); in test_pktmbuf_pool_bulk()
864 mbufs[i / CHAIN_LEN] = m; in test_pktmbuf_pool_bulk()
874 rte_pktmbuf_free_bulk(mbufs, 1); in test_pktmbuf_pool_bulk()
1873 mbufs[loop] != NULL; loop++) in test_pktmbuf_alloc_bulk()
1874 rte_pktmbuf_free(mbufs[loop]); in test_pktmbuf_alloc_bulk()
1907 mbufs[loop] != NULL; loop++) in test_neg_pktmbuf_alloc_bulk()
[all …]
/f-stack/dpdk/drivers/event/opdl/
opdl_test.c
196 struct rte_mbuf *mbufs[3]; in ordered_basic() local
251 if (!mbufs[i]) { in ordered_basic()
258 ev.mbuf = mbufs[i]; in ordered_basic()
334 struct rte_mbuf *mbufs[3]; in atomic_basic() local
390 if (!mbufs[i]) { in atomic_basic()
398 ev.mbuf = mbufs[i]; in atomic_basic()
562 struct rte_mbuf *mbufs[3]; in single_link_w_stats() local
563 RTE_SET_USED(mbufs); in single_link_w_stats()
620 if (!mbufs[i]) { in single_link_w_stats()
627 ev.mbuf = mbufs[i]; in single_link_w_stats()
[all …]
/f-stack/dpdk/lib/librte_distributor/
rte_distributor.h
89 struct rte_mbuf **mbufs, unsigned int num_mbufs);
107 struct rte_mbuf **mbufs, unsigned int max_mbufs);
236 unsigned int worker_id, struct rte_mbuf **mbufs);
rte_distributor_single.c
114 d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK] in store_return()
208 struct rte_mbuf **mbufs, unsigned num_mbufs) in rte_distributor_process_single() argument
228 next_mb = mbufs[next_idx++]; in rte_distributor_process_single()
324 struct rte_mbuf **mbufs, unsigned max_mbufs) in rte_distributor_returned_pkts_single() argument
333 mbufs[i] = returns->mbufs[idx]; in rte_distributor_returned_pkts_single()
376 memset(d->returns.mbufs, 0, sizeof(d->returns.mbufs)); in rte_distributor_clear_returns_single()
rte_distributor_single.h
78 struct rte_mbuf **mbufs, unsigned int num_mbufs);
96 struct rte_mbuf **mbufs, unsigned int max_mbufs);
rte_distributor.c
227 d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK] in store_return()
444 struct rte_mbuf **mbufs, unsigned int num_mbufs) in rte_distributor_process() argument
457 mbufs, num_mbufs); in rte_distributor_process()
490 if (mbufs[next_idx + i]) { in rte_distributor_process()
492 flows[i] = mbufs[next_idx + i]->hash.usr | 1; in rte_distributor_process()
523 next_mb = mbufs[next_idx++]; in rte_distributor_process()
614 struct rte_mbuf **mbufs, unsigned int max_mbufs) in rte_distributor_returned_pkts() argument
624 mbufs, max_mbufs); in rte_distributor_returned_pkts()
631 mbufs[i] = returns->mbufs[idx]; in rte_distributor_returned_pkts()
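
A minimal sketch of the distributing lcore that drives these two calls, assuming the distributor was created with rte_distributor_create(..., RTE_DIST_ALG_BURST) and that each mbuf's hash.usr flow tag is set (the process loop above reads it); BURST is illustrative, and the worker side (rte_distributor_get_pkt()/rte_distributor_return_pkt()) is elided:

    #include <rte_distributor.h>
    #include <rte_mbuf.h>

    #define BURST 32

    static void
    distribute_burst(struct rte_distributor *d,
                     struct rte_mbuf **rx_bufs, unsigned int nb_rx)
    {
        struct rte_mbuf *done[BURST];
        int nb_done;

        /* Hand the burst to the workers; mbufs sharing a hash.usr
         * tag stay on one worker and keep their order. */
        rte_distributor_process(d, rx_bufs, nb_rx);

        /* Collect mbufs the workers have finished and returned. */
        nb_done = rte_distributor_returned_pkts(d, done, BURST);
        /* ... transmit done[0..nb_done) ... */
        (void)nb_done;
    }
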
/f-stack/dpdk/examples/packet_ordering/
main.c
534 struct rte_mbuf *mbufs[MAX_PKTS_BURST]; in send_thread() local
546 (void *)mbufs, MAX_PKTS_BURST, NULL); in send_thread()
555 ret = rte_reorder_insert(args->buffer, mbufs[i]); in send_thread()
562 outp = mbufs[i]->port; in send_thread()
564 rte_pktmbuf_free(mbufs[i]); in send_thread()
568 rte_pktmbuf_free(mbufs[i]); in send_thread()
576 rte_pktmbuf_free(mbufs[i]); in send_thread()
618 struct rte_mbuf *mbufs[MAX_PKTS_BURST]; in tx_thread() local
631 (void *)mbufs, MAX_PKTS_BURST, NULL); in tx_thread()
639 outp = mbufs[i]->port; in tx_thread()
[all …]
/f-stack/dpdk/drivers/net/nfb/
nfb_rx.h
156 struct rte_mbuf *mbufs[nb_pkts]; in nfb_eth_ndp_rx() local
166 i = rte_pktmbuf_alloc_bulk(ndp->mb_pool, mbufs, nb_pkts); in nfb_eth_ndp_rx()
174 rte_pktmbuf_free(mbufs[i]); in nfb_eth_ndp_rx()
186 mbuf = mbufs[i]; in nfb_eth_ndp_rx()
/f-stack/dpdk/drivers/net/ark/
ark_ethdev_rx.c
31 struct rte_mbuf **mbufs);
457 struct rte_mbuf **mbufs = &queue->reserve_q[seed_m]; in eth_ark_rx_seed_mbufs() local
458 int status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, nb); in eth_ark_rx_seed_mbufs()
462 status = eth_ark_rx_seed_recovery(queue, &nb, mbufs); in eth_ark_rx_seed_mbufs()
489 (*mbufs++)->buf_iova; in eth_ark_rx_seed_mbufs()
494 (*mbufs++)->buf_iova; in eth_ark_rx_seed_mbufs()
499 (*mbufs++)->buf_iova; in eth_ark_rx_seed_mbufs()
504 (*mbufs++)->buf_iova; in eth_ark_rx_seed_mbufs()
517 struct rte_mbuf **mbufs) in eth_ark_rx_seed_recovery() argument
526 status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, *pnb); in eth_ark_rx_seed_recovery()
/f-stack/dpdk/examples/bbdev_app/
main.c
303 rte_pktmbuf_free(mbufs[i]); in pktmbuf_free_bulk()
311 struct rte_mbuf *rx_pkt = *mbuf_input(mbufs[i]); in pktmbuf_input_free_bulk()
313 rte_pktmbuf_free(mbufs[i]); in pktmbuf_input_free_bulk()
373 add_awgn(struct rte_mbuf **mbufs, uint16_t num_pkts) in add_awgn() argument
375 RTE_SET_USED(mbufs); in add_awgn()
397 uint16_t pkt_data_len = rte_pktmbuf_data_len(mbufs[i]) - in transform_enc_out_dec_in()
402 char *data = rte_pktmbuf_append(mbufs[i], in transform_enc_out_dec_in()
415 mbufs[i], uint8_t *, in transform_enc_out_dec_in()
431 rte_memcpy(rte_pktmbuf_mtod_offset(mbufs[i], uint8_t *, in transform_enc_out_dec_in()
437 verify_data(struct rte_mbuf **mbufs, uint16_t num_pkts) in verify_data() argument
[all …]
/f-stack/dpdk/lib/librte_mbuf/
rte_mbuf.h
915 struct rte_mbuf **mbufs, unsigned count) in rte_pktmbuf_alloc_bulk() argument
920 rc = rte_mempool_get_bulk(pool, (void **)mbufs, count); in rte_pktmbuf_alloc_bulk()
932 __rte_mbuf_raw_sanity_check(mbufs[idx]); in rte_pktmbuf_alloc_bulk()
933 rte_pktmbuf_reset(mbufs[idx]); in rte_pktmbuf_alloc_bulk()
937 __rte_mbuf_raw_sanity_check(mbufs[idx]); in rte_pktmbuf_alloc_bulk()
938 rte_pktmbuf_reset(mbufs[idx]); in rte_pktmbuf_alloc_bulk()
942 __rte_mbuf_raw_sanity_check(mbufs[idx]); in rte_pktmbuf_alloc_bulk()
943 rte_pktmbuf_reset(mbufs[idx]); in rte_pktmbuf_alloc_bulk()
947 __rte_mbuf_raw_sanity_check(mbufs[idx]); in rte_pktmbuf_alloc_bulk()
948 rte_pktmbuf_reset(mbufs[idx]); in rte_pktmbuf_alloc_bulk()
[all …]
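
A brief usage sketch of this bulk pair (the unrolled sanity-check/reset loop above is internal to the header), assuming mp is an existing pktmbuf mempool; BULK is illustrative:

    #include <rte_mbuf.h>

    #define BULK 32

    static int
    bulk_roundtrip(struct rte_mempool *mp)
    {
        struct rte_mbuf *mbufs[BULK];

        /* All-or-nothing: on failure no mbufs were taken. */
        if (rte_pktmbuf_alloc_bulk(mp, mbufs, BULK) != 0)
            return -1;

        /* ... fill and use the mbufs ... */

        rte_pktmbuf_free_bulk(mbufs, BULK); /* NULL entries are skipped */
        return 0;
    }
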
/f-stack/dpdk/lib/librte_kni/
rte_kni.h
169 unsigned rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs,
188 unsigned rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs,
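
A hedged sketch pairing these two declarations with the ethdev burst API, assuming the KNI device and port are already configured; BURST and queue 0 are illustrative, and a real loop would also service rte_kni_handle_request() periodically:

    #include <rte_ethdev.h>
    #include <rte_kni.h>

    #define BURST 32

    static void
    kni_forward_once(struct rte_kni *kni, uint16_t port_id)
    {
        struct rte_mbuf *bufs[BURST];
        unsigned int nb, tx;

        /* NIC -> kernel: drop whatever the kernel side refuses. */
        nb = rte_eth_rx_burst(port_id, 0, bufs, BURST);
        tx = rte_kni_tx_burst(kni, bufs, nb);
        while (tx < nb)
            rte_pktmbuf_free(bufs[tx++]);

        /* kernel -> NIC: same policy for untransmitted mbufs. */
        nb = rte_kni_rx_burst(kni, bufs, BURST);
        tx = rte_eth_tx_burst(port_id, 0, bufs, nb);
        while (tx < nb)
            rte_pktmbuf_free(bufs[tx++]);
    }
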
/f-stack/dpdk/examples/flow_filtering/
main.c
58 struct rte_mbuf *mbufs[32]; in main_loop() local
69 i, mbufs, 32); in main_loop()
72 struct rte_mbuf *m = mbufs[j]; in main_loop()
/f-stack/dpdk/drivers/net/mvneta/
mvneta_rxtx.c
77 struct rte_mbuf *mbufs[MRVL_NETA_BUF_RELEASE_BURST_SIZE_MAX]; in mvneta_buffs_refill() local
82 ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, nb_desc); in mvneta_buffs_refill()
89 MVNETA_SET_COOKIE_HIGH_ADDR(mbufs[0]); in mvneta_buffs_refill()
92 if (unlikely(!MVNETA_CHECK_COOKIE_HIGH_ADDR(mbufs[i]))) { in mvneta_buffs_refill()
95 (uint64_t)mbufs[i] >> 32, in mvneta_buffs_refill()
100 entries[i].addr = rte_mbuf_data_iova_default(mbufs[i]); in mvneta_buffs_refill()
101 entries[i].cookie = (neta_cookie_t)(uint64_t)mbufs[i]; in mvneta_buffs_refill()
107 rte_pktmbuf_free(mbufs[i]); in mvneta_buffs_refill()
/f-stack/dpdk/doc/guides/tools/
pdump.rst
45 [total-num-mbufs=<number of mbufs>]'
110 ``total-num-mbufs``:
111 Total number of mbufs in the mempool. This is used internally for mempool creation. This is an optional pa…
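
For reference, a hedged example invocation passing that option (binary path, port, capture file, and mbuf count are illustrative and vary by DPDK version):

    sudo ./build/app/dpdk-pdump -- \
        --pdump 'port=0,queue=*,rx-dev=/tmp/rx.pcap,total-num-mbufs=8192'
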
/f-stack/dpdk/doc/guides/mempool/
stack.rst
9 large per-lcore caches, the mbufs will likely stay in the per-lcore caches and
12 packet-processing workloads (which allocate and free mbufs on different lcores)
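
A hedged sketch of opting a pktmbuf pool into this handler with rte_pktmbuf_pool_create_by_ops(); the pool name, sizes, and the zero per-lcore cache are illustrative choices for the cross-lcore pattern described above:

    #include <rte_mbuf.h>

    static struct rte_mempool *
    make_stack_pool(int socket_id)
    {
        return rte_pktmbuf_pool_create_by_ops("mbuf_pool_stack",
                8192,                      /* mbufs in the pool */
                0,                         /* per-lcore cache size */
                0,                         /* private area size */
                RTE_MBUF_DEFAULT_BUF_SIZE, /* data room per mbuf */
                socket_id,
                "stack");                  /* mempool ops name */
    }
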
/f-stack/dpdk/lib/librte_reorder/
rte_reorder.h
167 rte_reorder_drain(struct rte_reorder_buffer *b, struct rte_mbuf **mbufs,
rte_reorder.c
384 rte_reorder_drain(struct rte_reorder_buffer *b, struct rte_mbuf **mbufs, in rte_reorder_drain() argument
394 mbufs[drain_cnt++] = ready_buf->entries[ready_buf->tail]; in rte_reorder_drain()
404 mbufs[drain_cnt++] = order_buf->entries[order_buf->head]; in rte_reorder_drain()
/f-stack/dpdk/devtools/
test-null.sh
32 --no-mlockall --total-num-mbufs=2048 $testpmd_options -ia
/f-stack/dpdk/lib/librte_node/
ethdev_rx.c
68 eth_pkt_parse_cb(uint16_t port, uint16_t queue, struct rte_mbuf **mbufs, in eth_pkt_parse_cb() argument
81 pkts = mbufs; in eth_pkt_parse_cb()
/f-stack/dpdk/doc/guides/nics/
octeontx.rst
78 --total-num-mbufs=16384 -i
168 When running testpmd on OCTEON TX, the application can limit the number of mbufs
169 by using the option ``--total-num-mbufs=131072``.
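
A hedged example of such an invocation (core mask and binary name are illustrative and vary by setup and DPDK version):

    ./build/app/dpdk-testpmd -c 0x300 -- --total-num-mbufs=131072 -i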
