/dpdk/drivers/common/qat/

qat_common.c
    42  list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;  in qat_sgl_fill_array()
    43  list->buffers[nr].resrvd = 0;  in qat_sgl_fill_array()
    44  list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);  in qat_sgl_fill_array()
    50  buf_len += list->buffers[nr].len;  in qat_sgl_fill_array()
    53  list->buffers[nr].len -= buf_len - data_len;  in qat_sgl_fill_array()
    75  nr, list->buffers[nr].len,  in qat_sgl_fill_array()
    76  list->buffers[nr].addr);  in qat_sgl_fill_array()
    79  list->buffers[nr].len);  in qat_sgl_fill_array()

qat_common.h
    61  struct qat_flat_buf buffers[0];  member
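The two entries above show the QAT driver's flat-buffer SGL: an array of {len, resrvd, addr} records ending in a zero-length buffers[] flexible array member, filled segment by segment in qat_sgl_fill_array(). A minimal sketch of that fill loop, assuming the field layout suggested by the snippets, a placeholder MAX_BUFS bound, and an offset that falls inside the first mbuf segment:

    #include <rte_mbuf.h>

    #define MAX_BUFS 16 /* placeholder; not the driver's real bound */

    struct qat_flat_buf {           /* layout assumed from the snippets */
        uint32_t len;
        uint32_t resrvd;
        uint64_t addr;
    };

    struct qat_sgl {
        uint32_t num_bufs;
        struct qat_flat_buf buffers[MAX_BUFS]; /* flexible array in the real header */
    };

    static int
    sgl_fill_from_mbuf(struct qat_sgl *list, struct rte_mbuf *buf,
                       uint32_t offset, uint32_t data_len)
    {
        uint32_t nr = 0, buf_len = 0;

        for (; buf != NULL && buf_len < data_len; buf = buf->next) {
            if (nr == MAX_BUFS)
                return -1; /* chain longer than the SGL table */
            list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
            list->buffers[nr].resrvd = 0;
            list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
            buf_len += list->buffers[nr].len;
            offset = 0; /* the offset applies only to the first segment */
            nr++;
        }
        /* trim the last entry so the list covers exactly data_len bytes */
        if (nr > 0 && buf_len > data_len)
            list->buffers[nr - 1].len -= buf_len - data_len;
        list->num_bufs = nr;
        return 0;
    }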
|
/dpdk/doc/guides/compressdevs/

overview.rst
    17  to let input buffers pass-through it, copying the input to the output,
    22  which means PMD supports different scatter-gather styled input and output buffers
    27  which means PMD supports input from scatter-gathered styled buffers, outputting linear buffers
    32  which means PMD supports input from linear buffer, outputting scatter-gathered styled buffers.
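The overview table these lines come from documents per-PMD scatter-gather support. Before submitting segmented mbufs to a compress PMD, an application can test the corresponding capability flags; a sketch, assuming dev_id names an already-probed device and DEFLATE is the algorithm of interest:

    #include <rte_compressdev.h>

    static int
    supports_sgl_in_sgl_out(uint8_t dev_id)
    {
        struct rte_compressdev_info info;
        const struct rte_compressdev_capabilities *cap;

        rte_compressdev_info_get(dev_id, &info);
        /* the capability array ends with an ALGO_UNSPECIFIED entry */
        for (cap = info.capabilities;
             cap->algo != RTE_COMP_ALGO_UNSPECIFIED; cap++) {
            if (cap->algo == RTE_COMP_ALGO_DEFLATE)
                return !!(cap->comp_feature_flags &
                          RTE_COMP_FF_OOP_SGL_IN_SGL_OUT);
        }
        return 0;
    }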
|
/dpdk/doc/guides/prog_guide/

mbuf_lib.rst
    9   The mbuf library provides the ability to allocate and free buffers (mbufs)
    10  that may be used by the DPDK application to store message buffers.
    13  A rte_mbuf struct generally carries network packet buffers, but it can actually
    26  #. Use separate memory buffers for the metadata structure and for the packet data.
    36  Message buffers that are used to carry network packets can handle buffer chaining
    37  where multiple buffers are required to hold the complete packet.
    42  Message buffers may be used to carry control information, packets, events,
    242 since indirect buffers provide the means to reuse the same packet data across multiple buffers.
    250 There are a few things to remember when dealing with indirect buffers.
    261 Since indirect buffers are not supposed to actually hold any data,
    [all …]
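The guide's points about allocation, freeing and buffer chaining boil down to a few calls. A minimal sketch, assuming "pool" was created elsewhere:

    #include <rte_mbuf.h>

    /* Build a two-segment packet: allocate two mbufs from the pool and
     * chain them; freeing the head later releases the whole chain. */
    static struct rte_mbuf *
    build_two_segment_packet(struct rte_mempool *pool)
    {
        struct rte_mbuf *head = rte_pktmbuf_alloc(pool);
        struct rte_mbuf *tail = rte_pktmbuf_alloc(pool);

        if (head == NULL || tail == NULL) {
            rte_pktmbuf_free(head); /* freeing NULL is a no-op */
            rte_pktmbuf_free(tail);
            return NULL;
        }
        if (rte_pktmbuf_chain(head, tail) != 0) { /* too many segments */
            rte_pktmbuf_free(head);
            rte_pktmbuf_free(tail);
            return NULL;
        }
        return head;
    }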
|
overview.rst
    121 The mbuf library provides the facility to create and destroy buffers
    122 that may be used by the DPDK application to store message buffers.
    123 The message buffers are created at startup time and stored in a mempool, using the DPDK mempool lib…
    126 packet buffers which are used to carry network packets.
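The startup-time pool the overview mentions is typically created once, before any buffers circulate. A sketch with illustrative sizes (the guide does not prescribe them):

    #include <rte_mbuf.h>
    #include <rte_lcore.h>

    static struct rte_mempool *
    create_pktmbuf_pool(void)
    {
        return rte_pktmbuf_pool_create("MBUF_POOL",
                8192,   /* mbufs in the pool */
                256,    /* per-lcore cache */
                0,      /* application private area per mbuf */
                RTE_MBUF_DEFAULT_BUF_SIZE, /* data room per mbuf */
                rte_socket_id());
    }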
|
/dpdk/doc/guides/cryptodevs/

overview.rst
    22  which means PMD supports different scatter-gather styled input and output buffers
    27  which means PMD supports input from scatter-gathered styled buffers,
    28  outputting linear buffers (i.e. single segment).
    33  scatter-gathered styled buffers.
    38  with linear input and output buffers.
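For cryptodevs the equivalent capabilities are exposed as device feature flags. A sketch of checking whether a PMD accepts a segmented source and writes a linear destination, per the table these lines describe:

    #include <rte_cryptodev.h>

    static int
    can_do_sgl_in_lb_out(uint8_t dev_id)
    {
        struct rte_cryptodev_info info;

        rte_cryptodev_info_get(dev_id, &info);
        return !!(info.feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT);
    }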
|
/dpdk/doc/guides/nics/

vmxnet3.rst
    32  The packet buffers and features to be supported are made available to hypervisor via VMXNET3 PCI co…
    33  During RX/TX, the packet buffers are exchanged by their GPAs,
    34  and the hypervisor loads the buffers with packets in the RX case and sends packets to vSwitch in th…
    38  The driver pre-allocates the packet buffers and loads the command ring descriptors in advance.
    39  The hypervisor fills those packet buffers on packet arrival and write completion ring descriptors,
    41  After reception, the DPDK application frees the descriptors and loads new packet buffers for the co…
    48  The rings are read by the PMD in the next transmit routine call and the buffers and descriptors are…
    74  multiple segment buffers are not supported.
    75  Only cmd_ring_0 is used for packet buffers, one for each descriptor.
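From the application's side, the receive flow the vmxnet3 guide describes reduces to the usual burst loop: the PMD hands out filled buffers, the application processes and frees them, and the driver replenishes the ring from the pool. A generic sketch (port and queue ids are illustrative):

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    #define BURST_SIZE 32

    static void
    rx_once(uint16_t port_id, uint16_t queue_id)
    {
        struct rte_mbuf *pkts[BURST_SIZE];
        uint16_t i, nb;

        nb = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
        for (i = 0; i < nb; i++) {
            /* ... process pkts[i] ... */
            rte_pktmbuf_free(pkts[i]); /* buffer returns to its pool */
        }
    }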
|
memif.rst
    101 Regions contain rings and buffers. Rings and buffers can also be separated into multiple
    102 regions. For no-zero-copy, rings and buffers are stored inside single memory
    125 Descriptors are assigned packet buffers in order of rings creation. If we have one ring
    126 in each direction and ring size is 1024, then first 1024 buffers will belong to C2S ring and
    127 last 1024 will belong to S2C ring. In case of zero-copy, buffers are dequeued and
    151 …AG_NEXT|Is chained buffer. When set, the packet is divided into multiple buffers. May not be conti…
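The assignment rule quoted from memif.rst (first 1024 buffers to the C2S ring, the next 1024 to S2C, for 1024-entry rings) is plain index arithmetic. A sketch, where buffers_base and buffer_size describe the shared-memory region and are assumptions of this example:

    #include <stdint.h>

    #define RING_SIZE 1024

    static uint64_t
    buffer_offset(int is_s2c, uint32_t desc_index,
                  uint64_t buffers_base, uint32_t buffer_size)
    {
        /* C2S descriptors map to buffers 0..1023, S2C to 1024..2047 */
        uint32_t buf_index = (is_s2c ? RING_SIZE : 0) + desc_index;

        return buffers_base + (uint64_t)buf_index * buffer_size;
    }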
|
netvsc.rst
    19  * It supports merge-able buffers per packet when receiving packets and scattered buffer per packet
    141 A non-zero value tells netvsc to attach external buffers to mbuf on
    142 receiving packets, thus avoid copying memory. Use of external buffers
|
/dpdk/drivers/raw/skeleton/

skeleton_rawdev_test.c
    373 struct rte_rawdev_buf buffers[1];  in test_rawdev_enqdeq() local
    376 buffers[0].buf_addr = malloc(strlen(TEST_DEV_NAME) + 3);  in test_rawdev_enqdeq()
    377 if (!buffers[0].buf_addr)  in test_rawdev_enqdeq()
    379 snprintf(buffers[0].buf_addr, strlen(TEST_DEV_NAME) + 2, "%s%d",  in test_rawdev_enqdeq()
    383 (struct rte_rawdev_buf **)&buffers,  in test_rawdev_enqdeq()
    402 free(buffers[0].buf_addr);  in test_rawdev_enqdeq()
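The test above exercises the public rawdev enqueue/dequeue API. A cleaned-up sketch of the same round trip; the meaning of the opaque context argument is driver-specific (the skeleton test passes a queue id, assumed here):

    #include <rte_rawdev.h>

    static int
    rawdev_echo(uint16_t dev_id, void *queue_ctx)
    {
        char payload[] = "hello";
        struct rte_rawdev_buf buf = { .buf_addr = payload };
        struct rte_rawdev_buf *bufs[1] = { &buf };
        int ret;

        ret = rte_rawdev_enqueue_buffers(dev_id, bufs, 1, queue_ctx);
        if (ret < 0)
            return ret;
        return rte_rawdev_dequeue_buffers(dev_id, bufs, 1, queue_ctx);
    }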
|
skeleton_rawdev.c
    411 struct rte_rawdev_buf **buffers,  in skeleton_rawdev_enqueue_bufs() argument
    427 queue_buf[q_id].bufs[i] = buffers[i]->buf_addr;  in skeleton_rawdev_enqueue_bufs()
    433 struct rte_rawdev_buf **buffers,  in skeleton_rawdev_dequeue_bufs() argument
    449 buffers[i]->buf_addr = queue_buf[q_id].bufs[i];  in skeleton_rawdev_dequeue_bufs()
|
/dpdk/drivers/raw/dpaa2_cmdif/

dpaa2_cmdif.c
    52  struct rte_rawdev_buf **buffers,  in dpaa2_cmdif_enqueue_bufs() argument
    93  DPAA2_SET_FD_ADDR(&fd, DPAA2_VADDR_TO_IOVA(buffers[0]->buf_addr));  in dpaa2_cmdif_enqueue_bufs()
    116 struct rte_rawdev_buf **buffers,  in dpaa2_cmdif_dequeue_bufs() argument
    178 buffers[0]->buf_addr = (void *)DPAA2_IOVA_TO_VADDR(  in dpaa2_cmdif_dequeue_bufs()
|
/dpdk/drivers/raw/cnxk_bphy/

cnxk_bphy_cgx.c
    154 struct rte_rawdev_buf **buffers, unsigned int count,  in cnxk_bphy_cgx_enqueue_bufs() argument
    167 ret = cnxk_bphy_cgx_process_buf(cgx, queue, buffers[0]);  in cnxk_bphy_cgx_enqueue_bufs()
    176 struct rte_rawdev_buf **buffers, unsigned int count,  in cnxk_bphy_cgx_dequeue_bufs() argument
    191 buffers[0]->buf_addr = qp->rsp;  in cnxk_bphy_cgx_dequeue_bufs()
|
cnxk_bphy.c
    168 struct rte_rawdev_buf **buffers, unsigned int count,  in cnxk_bphy_irq_enqueue_bufs() argument
    172 struct cnxk_bphy_irq_msg *msg = buffers[0]->buf_addr;  in cnxk_bphy_irq_enqueue_bufs()
    248 struct rte_rawdev_buf **buffers, unsigned int count,  in cnxk_bphy_irq_dequeue_bufs() argument
    263 buffers[0]->buf_addr = qp->rsp;  in cnxk_bphy_irq_dequeue_bufs()
|
/dpdk/drivers/compress/qat/

qat_comp_pmd.c
    304 sgl->buffers[lb].addr =  in qat_comp_setup_inter_buffers()
    307 sgl->buffers[lb].len = buff_size;  in qat_comp_setup_inter_buffers()
    308 sgl->buffers[lb].resrvd = 0;  in qat_comp_setup_inter_buffers()
    312 lb, sgl->buffers[lb].addr, sgl->buffers[lb].len);  in qat_comp_setup_inter_buffers()
    397 ram_banks_desc->buffers[0].len = QAT_INFLATE_CONTEXT_SIZE;  in qat_comp_stream_init()
    398 ram_banks_desc->buffers[0].addr = memzone->iova  in qat_comp_stream_init()
|
/dpdk/lib/rawdev/

rte_rawdev.h
    411 struct rte_rawdev_buf **buffers,
    442 struct rte_rawdev_buf **buffers,
|
rte_rawdev_pmd.h
    303 struct rte_rawdev_buf **buffers,
    328 struct rte_rawdev_buf **buffers,
|
rte_rawdev.c
    209 struct rte_rawdev_buf **buffers,  in rte_rawdev_enqueue_buffers() argument
    219 return (*dev->dev_ops->enqueue_bufs)(dev, buffers, count, context);  in rte_rawdev_enqueue_buffers()
    224 struct rte_rawdev_buf **buffers,  in rte_rawdev_dequeue_buffers() argument
    234 return (*dev->dev_ops->dequeue_bufs)(dev, buffers, count, context);  in rte_rawdev_dequeue_buffers()
|
/dpdk/doc/guides/sample_app_ug/

ipv4_multicast.rst
    14  The application demonstrates the use of zero-copy buffers for packet forwarding.
    19  * The IPv4 Multicast sample application makes use of indirect buffers.
    97  Two of the pools are for indirect buffers used for packet duplication purposes.
    98  Memory pools for indirect buffers are initialized differently from the memory pool for direct buffe…
    106 The reason for this is because indirect buffers are not supposed to hold any packet data and
    226 …unction that performs the packet duplication (either with or without actually cloning the buffers):
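The zero-copy duplication the sample app describes rests on rte_pktmbuf_clone(): the clone is an indirect mbuf that references the original packet's data rather than copying it. A sketch, assuming clone_pool is one of the indirect-buffer pools mentioned above:

    #include <rte_mbuf.h>

    static struct rte_mbuf *
    duplicate_for_port(struct rte_mbuf *pkt, struct rte_mempool *clone_pool)
    {
        /* bumps the refcount on pkt's data; no payload copy happens */
        return rte_pktmbuf_clone(pkt, clone_pool);
    }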
|
/dpdk/drivers/net/memif/

rte_eth_memif.c
    265 rte_mbuf_refcnt_update(mq->buffers[mq->last_tail & mask], -1);  in memif_free_stored_mbufs()
    266 rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]);  in memif_free_stored_mbufs()
    486 mbuf_head = mq->buffers[s0];  in eth_memif_rx_zc()
    506 mbuf = mq->buffers[s0];  in eth_memif_rx_zc()
    540 rte_prefetch0(mq->buffers[head & mask]);  in eth_memif_rx_zc()
    543 mbuf = mq->buffers[s0];  in eth_memif_rx_zc()
    709 mq->buffers[slot & mask] = mbuf;  in memif_tx_one_zc()
    1110 mq->buffers = NULL;  in memif_init_queues()
    1114 if (mq->buffers == NULL)  in memif_init_queues()
    1134 mq->buffers = NULL;  in memif_init_queues()
    [all …]
|
/dpdk/drivers/bus/fslmc/qbman/

qbman_portal.c
    178 const uint64_t *buffers, unsigned int num_buffers);
    182 const uint64_t *buffers, unsigned int num_buffers);
    186 const uint64_t *buffers, unsigned int num_buffers);
    2507 const uint64_t *buffers,  in qbman_swp_release_direct() argument
    2525 u64_to_le32_copy(&p[2], buffers, num_buffers);  in qbman_swp_release_direct()
    2540 const uint64_t *buffers,  in qbman_swp_release_cinh_direct() argument
    2571 const uint64_t *buffers,  in qbman_swp_release_mem_back() argument
    2589 u64_to_le32_copy(&p[2], buffers, num_buffers);  in qbman_swp_release_mem_back()
    2604 const uint64_t *buffers,  in qbman_swp_release() argument
    2635 uint64_t *buffers, unsigned int num_buffers)  in qbman_swp_acquire_direct() argument
    [all …]
|
/dpdk/doc/guides/rawdevs/

ntb.rst
    137 buffers and writes used_ring and tx_tail to tell the peer which buffers
    141 buffers.
|
/dpdk/drivers/raw/cnxk_gpio/

cnxk_gpio.c
    576 cnxk_gpio_enqueue_bufs(struct rte_rawdev *dev, struct rte_rawdev_buf **buffers,  in cnxk_gpio_enqueue_bufs() argument
    591 ret = cnxk_gpio_process_buf(gpio, buffers[0]);  in cnxk_gpio_enqueue_bufs()
    599 cnxk_gpio_dequeue_bufs(struct rte_rawdev *dev, struct rte_rawdev_buf **buffers,  in cnxk_gpio_dequeue_bufs() argument
    614 buffers[0]->buf_addr = gpio->rsp;  in cnxk_gpio_dequeue_bufs()
|
/dpdk/drivers/crypto/qat/dev/

qat_crypto_pmd_gens.h
    168 list->buffers[i].len = src_vec[i].len;  in qat_sym_build_req_set_data()
    169 list->buffers[i].resrvd = 0;  in qat_sym_build_req_set_data()
    170 list->buffers[i].addr = src_vec[i].iova;  in qat_sym_build_req_set_data()
    187 list->buffers[i].len = dst_vec[i].len;  in qat_sym_build_req_set_data()
    188 list->buffers[i].resrvd = 0;  in qat_sym_build_req_set_data()
    189 list->buffers[i].addr = dst_vec[i].iova;  in qat_sym_build_req_set_data()
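The loop above copies a caller-supplied rte_crypto_vec array into the QAT SGL. On the application side of the raw datapath, such a vector is built from an mbuf chain; a sketch, with max_vecs as an arbitrary bound of this example:

    #include <rte_crypto_sym.h>
    #include <rte_mbuf.h>

    static int
    mbuf_to_crypto_vec(struct rte_mbuf *m, struct rte_crypto_vec *vec,
                       uint32_t max_vecs)
    {
        uint32_t i = 0;

        for (; m != NULL; m = m->next) {
            if (i == max_vecs)
                return -1;
            vec[i].base = rte_pktmbuf_mtod(m, void *);
            vec[i].iova = rte_pktmbuf_iova(m);
            vec[i].len = rte_pktmbuf_data_len(m);
            i++;
        }
        return (int)i; /* number of segments filled */
    }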
|
/dpdk/doc/guides/eventdevs/

cnxk.rst
    91  SSO GGRPs i.e. queue uses DRAM & SRAM buffers to hold in-flight
    92  events. By default the buffers are assigned to the SSO GGRPs to
    94  buffers to GGRPs based on a preconfigured threshold.
|