Searched refs:buffers (Results 1 – 25 of 86) sorted by relevance

/dpdk/drivers/common/qat/
qat_common.c
42 list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset; in qat_sgl_fill_array()
43 list->buffers[nr].resrvd = 0; in qat_sgl_fill_array()
44 list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset); in qat_sgl_fill_array()
50 buf_len += list->buffers[nr].len; in qat_sgl_fill_array()
53 list->buffers[nr].len -= buf_len - data_len; in qat_sgl_fill_array()
75 nr, list->buffers[nr].len, in qat_sgl_fill_array()
76 list->buffers[nr].addr); in qat_sgl_fill_array()
79 list->buffers[nr].len); in qat_sgl_fill_array()
qat_common.h
61 struct qat_flat_buf buffers[0]; member
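
The buffers[0] member above is the C flexible-array idiom: the scatter-gather list header is followed in memory by a variable number of flat-buffer entries. Below is a minimal, self-contained sketch of populating such a list from an mbuf chain, modeled on the qat_sgl_fill_array() lines excerpted above; the struct layout and the max_segs/data_len parameters are illustrative assumptions, not the exact QAT definitions.

    #include <stdint.h>
    #include <rte_mbuf.h>

    /* Illustrative flat-buffer entry: one contiguous data segment. */
    struct flat_buf {
        uint32_t len;    /* segment length in bytes */
        uint32_t resrvd; /* must be zero */
        uint64_t addr;   /* IOVA of the segment */
    };

    struct sgl {
        uint64_t resrvd;
        uint32_t num_bufs;
        uint32_t num_mapped_bufs;
        struct flat_buf buffers[]; /* flexible array member, as above */
    };

    /* Walk an mbuf chain and fill the array, mirroring the excerpt:
     * the caller's offset applies only to the first segment, and the
     * last segment is trimmed so the list covers exactly data_len. */
    static int
    sgl_fill(struct sgl *list, struct rte_mbuf *buf, uint32_t offset,
             uint32_t max_segs, uint32_t data_len)
    {
        uint32_t nr, buf_len = 0;

        for (nr = 0; buf != NULL && nr < max_segs; buf = buf->next, nr++) {
            list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
            list->buffers[nr].resrvd = 0;
            list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
            offset = 0;
            buf_len += list->buffers[nr].len;
            if (buf_len >= data_len) {
                list->buffers[nr].len -= buf_len - data_len; /* trim tail */
                nr++;
                break;
            }
        }
        list->num_bufs = nr;
        return buf_len < data_len ? -1 : 0; /* -1: chain too short */
    }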
/dpdk/doc/guides/compressdevs/
overview.rst
17 to let input buffers pass-through it, copying the input to the output,
22 which means PMD supports different scatter-gather styled input and output buffers
27 which means PMD supports input from scatter-gathered styled buffers, outputting linear buffers
32 which means PMD supports input from linear buffer, outputting scatter-gathered styled buffers.
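The capability lines above correspond to compressdev feature flags. As a hedged sketch (dev_id is a placeholder), an application can probe them before deciding whether to pass chained mbufs on the input side, the output side, or both:

    #include <rte_compressdev.h>

    /* Returns non-zero if the PMD accepts scatter-gather buffers for
     * both input and output, per the capability described above. */
    static int
    supports_sgl_both_ways(uint8_t dev_id)
    {
        struct rte_compressdev_info info;

        rte_compressdev_info_get(dev_id, &info);
        return (info.feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) != 0;
    }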
/dpdk/doc/guides/prog_guide/
mbuf_lib.rst
9 The mbuf library provides the ability to allocate and free buffers (mbufs)
10 that may be used by the DPDK application to store message buffers.
13 A rte_mbuf struct generally carries network packet buffers, but it can actually
26 #. Use separate memory buffers for the metadata structure and for the packet data.
36 Message buffers that are used to carry network packets can handle buffer chaining
37 where multiple buffers are required to hold the complete packet.
42 Message buffers may be used to carry control information, packets, events,
242 since indirect buffers provide the means to reuse the same packet data across multiple buffers.
250 There are a few things to remember when dealing with indirect buffers.
261 Since indirect buffers are not supposed to actually hold any data,
[all …]
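
Since the excerpt covers both allocation and indirect buffers, here is a minimal sketch of the pattern (pool name and sizes are illustrative): a direct mbuf is allocated from a mempool, then rte_pktmbuf_clone() creates an indirect mbuf that references the same packet data, which is what "reuse the same packet data across multiple buffers" means above.

    #include <rte_lcore.h>
    #include <rte_mbuf.h>

    static void
    clone_example(void)
    {
        /* Pool parameters are illustrative; RTE_MBUF_DEFAULT_BUF_SIZE
         * leaves room for packet data plus headroom. */
        struct rte_mempool *pool = rte_pktmbuf_pool_create("mbuf_pool",
            8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
        struct rte_mbuf *m, *clone;

        if (pool == NULL)
            return;
        m = rte_pktmbuf_alloc(pool);
        if (m == NULL)
            return;
        /* The clone is an indirect mbuf: it holds no data of its own;
         * it bumps the refcount on m's data, so the payload is shared. */
        clone = rte_pktmbuf_clone(m, pool);
        rte_pktmbuf_free(clone); /* decrements the shared refcount */
        rte_pktmbuf_free(m);     /* data freed once refcount hits zero */
    }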
overview.rst
121 The mbuf library provides the facility to create and destroy buffers
122 that may be used by the DPDK application to store message buffers.
123 The message buffers are created at startup time and stored in a mempool, using the DPDK mempool lib…
126 packet buffers which are used to carry network packets.
/dpdk/doc/guides/cryptodevs/
overview.rst
22 which means PMD supports different scatter-gather styled input and output buffers
27 which means PMD supports input from scatter-gathered styled buffers,
28 outputting linear buffers (i.e. single segment).
33 scatter-gathered styled buffers.
38 with linear input and output buffers.
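The same probing pattern applies on the crypto side; a sketch checking the out-of-place SGL-in/linear-out combination the lines above describe (dev_id is a placeholder):

    #include <rte_cryptodev.h>

    static int
    supports_oop_sgl_in_lb_out(uint8_t dev_id)
    {
        struct rte_cryptodev_info info;

        rte_cryptodev_info_get(dev_id, &info);
        /* Set when the PMD accepts a segmented input and writes a
         * single linear (i.e. single-segment) output buffer. */
        return (info.feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT) != 0;
    }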
/dpdk/doc/guides/nics/
vmxnet3.rst
32 The packet buffers and features to be supported are made available to hypervisor via VMXNET3 PCI co…
33 During RX/TX, the packet buffers are exchanged by their GPAs,
34 and the hypervisor loads the buffers with packets in the RX case and sends packets to vSwitch in th…
38 The driver pre-allocates the packet buffers and loads the command ring descriptors in advance.
39 The hypervisor fills those packet buffers on packet arrival and write completion ring descriptors,
41 After reception, the DPDK application frees the descriptors and loads new packet buffers for the co…
48 The rings are read by the PMD in the next transmit routine call and the buffers and descriptors are…
74 multiple segment buffers are not supported.
75 Only cmd_ring_0 is used for packet buffers, one for each descriptor.
memif.rst
101 Regions contain rings and buffers. Rings and buffers can also be separated into multiple
102 regions. For no-zero-copy, rings and buffers are stored inside single memory
125 Descriptors are assigned packet buffers in order of rings creation. If we have one ring
126 in each direction and ring size is 1024, then first 1024 buffers will belong to C2S ring and
127 last 1024 will belong to S2C ring. In case of zero-copy, buffers are dequeued and
151 …AG_NEXT|Is chained buffer. When set, the packet is divided into multiple buffers. May not be conti…
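
The ring-to-buffer assignment in the excerpt is simple index arithmetic; a minimal sketch, assuming the excerpt's one-ring-per-direction layout and ring size of 1024:

    #include <stdbool.h>

    #define MEMIF_RING_SIZE 1024u /* example value from the excerpt */

    /* Buffers are handed out in ring-creation order: indices
     * [0, 1024) back the C2S ring, [1024, 2048) the S2C ring. */
    static bool
    buffer_is_c2s(unsigned int buf_index)
    {
        return buf_index < MEMIF_RING_SIZE;
    }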
netvsc.rst
19 * It supports merge-able buffers per packet when receiving packets and scattered buffer per packet
141 A non-zero value tells netvsc to attach external buffers to mbuf on
142 receiving packets, thus avoid copying memory. Use of external buffers
/dpdk/drivers/raw/skeleton/
skeleton_rawdev_test.c
373 struct rte_rawdev_buf buffers[1]; in test_rawdev_enqdeq() local
376 buffers[0].buf_addr = malloc(strlen(TEST_DEV_NAME) + 3); in test_rawdev_enqdeq()
377 if (!buffers[0].buf_addr) in test_rawdev_enqdeq()
379 snprintf(buffers[0].buf_addr, strlen(TEST_DEV_NAME) + 2, "%s%d", in test_rawdev_enqdeq()
383 (struct rte_rawdev_buf **)&buffers, in test_rawdev_enqdeq()
402 free(buffers[0].buf_addr); in test_rawdev_enqdeq()
skeleton_rawdev.c
411 struct rte_rawdev_buf **buffers, in skeleton_rawdev_enqueue_bufs() argument
427 queue_buf[q_id].bufs[i] = buffers[i]->buf_addr; in skeleton_rawdev_enqueue_bufs()
433 struct rte_rawdev_buf **buffers, in skeleton_rawdev_dequeue_bufs() argument
449 buffers[i]->buf_addr = queue_buf[q_id].bufs[i]; in skeleton_rawdev_dequeue_bufs()
/dpdk/drivers/raw/dpaa2_cmdif/
dpaa2_cmdif.c
52 struct rte_rawdev_buf **buffers, in dpaa2_cmdif_enqueue_bufs() argument
93 DPAA2_SET_FD_ADDR(&fd, DPAA2_VADDR_TO_IOVA(buffers[0]->buf_addr)); in dpaa2_cmdif_enqueue_bufs()
116 struct rte_rawdev_buf **buffers, in dpaa2_cmdif_dequeue_bufs() argument
178 buffers[0]->buf_addr = (void *)DPAA2_IOVA_TO_VADDR( in dpaa2_cmdif_dequeue_bufs()
/dpdk/drivers/raw/cnxk_bphy/
cnxk_bphy_cgx.c
154 struct rte_rawdev_buf **buffers, unsigned int count, in cnxk_bphy_cgx_enqueue_bufs() argument
167 ret = cnxk_bphy_cgx_process_buf(cgx, queue, buffers[0]); in cnxk_bphy_cgx_enqueue_bufs()
176 struct rte_rawdev_buf **buffers, unsigned int count, in cnxk_bphy_cgx_dequeue_bufs() argument
191 buffers[0]->buf_addr = qp->rsp; in cnxk_bphy_cgx_dequeue_bufs()
cnxk_bphy.c
168 struct rte_rawdev_buf **buffers, unsigned int count, in cnxk_bphy_irq_enqueue_bufs() argument
172 struct cnxk_bphy_irq_msg *msg = buffers[0]->buf_addr; in cnxk_bphy_irq_enqueue_bufs()
248 struct rte_rawdev_buf **buffers, unsigned int count, in cnxk_bphy_irq_dequeue_bufs() argument
263 buffers[0]->buf_addr = qp->rsp; in cnxk_bphy_irq_dequeue_bufs()
/dpdk/drivers/compress/qat/
qat_comp_pmd.c
304 sgl->buffers[lb].addr = in qat_comp_setup_inter_buffers()
307 sgl->buffers[lb].len = buff_size; in qat_comp_setup_inter_buffers()
308 sgl->buffers[lb].resrvd = 0; in qat_comp_setup_inter_buffers()
312 lb, sgl->buffers[lb].addr, sgl->buffers[lb].len); in qat_comp_setup_inter_buffers()
397 ram_banks_desc->buffers[0].len = QAT_INFLATE_CONTEXT_SIZE; in qat_comp_stream_init()
398 ram_banks_desc->buffers[0].addr = memzone->iova in qat_comp_stream_init()
/dpdk/lib/rawdev/
rte_rawdev.h
411 struct rte_rawdev_buf **buffers,
442 struct rte_rawdev_buf **buffers,
rte_rawdev_pmd.h
303 struct rte_rawdev_buf **buffers,
328 struct rte_rawdev_buf **buffers,
rte_rawdev.c
209 struct rte_rawdev_buf **buffers, in rte_rawdev_enqueue_buffers() argument
219 return (*dev->dev_ops->enqueue_bufs)(dev, buffers, count, context); in rte_rawdev_enqueue_buffers()
224 struct rte_rawdev_buf **buffers, in rte_rawdev_dequeue_buffers() argument
234 return (*dev->dev_ops->dequeue_bufs)(dev, buffers, count, context); in rte_rawdev_dequeue_buffers()
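
The rte_rawdev.c excerpt shows the library forwarding to the driver's enqueue_bufs/dequeue_bufs ops. From the application side, a minimal round trip looks like the sketch below; dev_id and the queue context are placeholders, and what the opaque context means is driver-defined (the skeleton driver uses it to select a queue).

    #include <rte_rawdev.h>

    static int
    rawdev_roundtrip(uint16_t dev_id, rte_rawdev_obj_t queue_ctx,
                     void *payload)
    {
        struct rte_rawdev_buf buf = { .buf_addr = payload };
        struct rte_rawdev_buf *bufs[1] = { &buf };
        int ret;

        ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, queue_ctx);
        if (ret < 0)
            return ret;
        /* On dequeue the driver fills buf_addr, as the driver excerpts
         * above show. */
        return rte_rawdev_dequeue_buffers(dev_id, bufs, 1, queue_ctx);
    }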
/dpdk/doc/guides/sample_app_ug/
ipv4_multicast.rst
14 The application demonstrates the use of zero-copy buffers for packet forwarding.
19 * The IPv4 Multicast sample application makes use of indirect buffers.
97 Two of the pools are for indirect buffers used for packet duplication purposes.
98 Memory pools for indirect buffers are initialized differently from the memory pool for direct buffe…
106 The reason for this is because indirect buffers are not supposed to hold any packet data and
226 …unction that performs the packet duplication (either with or without actually cloning the buffers):
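
The "initialized differently" point above comes down to the data room size: indirect buffers carry no packet data, so their pool can be created with a zero data room. A minimal sketch, with a hypothetical pool name and illustrative counts rather than the sample app's exact values:

    #include <rte_lcore.h>
    #include <rte_mbuf.h>

    static struct rte_mempool *
    create_clone_pool(void)
    {
        /* Indirect (clone) mbufs reference another mbuf's data, so no
         * data room is reserved; only the mbuf header is allocated. */
        return rte_pktmbuf_pool_create("clone_pool", 4096, 32,
            0 /* priv_size */,
            0 /* data_room_size: indirect mbufs hold no data */,
            rte_socket_id());
    }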
/dpdk/drivers/net/memif/
rte_eth_memif.c
265 rte_mbuf_refcnt_update(mq->buffers[mq->last_tail & mask], -1); in memif_free_stored_mbufs()
266 rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]); in memif_free_stored_mbufs()
486 mbuf_head = mq->buffers[s0]; in eth_memif_rx_zc()
506 mbuf = mq->buffers[s0]; in eth_memif_rx_zc()
540 rte_prefetch0(mq->buffers[head & mask]); in eth_memif_rx_zc()
543 mbuf = mq->buffers[s0]; in eth_memif_rx_zc()
709 mq->buffers[slot & mask] = mbuf; in memif_tx_one_zc()
1110 mq->buffers = NULL; in memif_init_queues()
1114 if (mq->buffers == NULL) in memif_init_queues()
1134 mq->buffers = NULL; in memif_init_queues()
[all …]
/dpdk/drivers/bus/fslmc/qbman/
qbman_portal.c
178 const uint64_t *buffers, unsigned int num_buffers);
182 const uint64_t *buffers, unsigned int num_buffers);
186 const uint64_t *buffers, unsigned int num_buffers);
2507 const uint64_t *buffers, in qbman_swp_release_direct() argument
2525 u64_to_le32_copy(&p[2], buffers, num_buffers); in qbman_swp_release_direct()
2540 const uint64_t *buffers, in qbman_swp_release_cinh_direct() argument
2571 const uint64_t *buffers, in qbman_swp_release_mem_back() argument
2589 u64_to_le32_copy(&p[2], buffers, num_buffers); in qbman_swp_release_mem_back()
2604 const uint64_t *buffers, in qbman_swp_release() argument
2635 uint64_t *buffers, unsigned int num_buffers) in qbman_swp_acquire_direct() argument
[all …]
/dpdk/doc/guides/rawdevs/
ntb.rst
137 buffers and writes used_ring and tx_tail to tell the peer which buffers
141 buffers.
/dpdk/drivers/raw/cnxk_gpio/
cnxk_gpio.c
576 cnxk_gpio_enqueue_bufs(struct rte_rawdev *dev, struct rte_rawdev_buf **buffers, in cnxk_gpio_enqueue_bufs() argument
591 ret = cnxk_gpio_process_buf(gpio, buffers[0]); in cnxk_gpio_enqueue_bufs()
599 cnxk_gpio_dequeue_bufs(struct rte_rawdev *dev, struct rte_rawdev_buf **buffers, in cnxk_gpio_dequeue_bufs() argument
614 buffers[0]->buf_addr = gpio->rsp; in cnxk_gpio_dequeue_bufs()
/dpdk/drivers/crypto/qat/dev/
qat_crypto_pmd_gens.h
168 list->buffers[i].len = src_vec[i].len; in qat_sym_build_req_set_data()
169 list->buffers[i].resrvd = 0; in qat_sym_build_req_set_data()
170 list->buffers[i].addr = src_vec[i].iova; in qat_sym_build_req_set_data()
187 list->buffers[i].len = dst_vec[i].len; in qat_sym_build_req_set_data()
188 list->buffers[i].resrvd = 0; in qat_sym_build_req_set_data()
189 list->buffers[i].addr = dst_vec[i].iova; in qat_sym_build_req_set_data()
/dpdk/doc/guides/eventdevs/
cnxk.rst
91 SSO GGRPs i.e. queue uses DRAM & SRAM buffers to hold in-flight
92 events. By default the buffers are assigned to the SSO GGRPs to
94 buffers to GGRPs based on a preconfigured threshold.
