/dpdk/lib/mempool/

mempool_trace_points.c
    10: lib.mempool.ops.deq.bulk)
    16: lib.mempool.ops.enq.bulk)
    22: lib.mempool.put.bulk)
    28: lib.mempool.get.bulk)
|
/dpdk/drivers/net/sfc/

sfc_ef10_tx.c
    150: struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE]; (in sfc_ef10_tx_reap(), local)
    166: if ((nb == RTE_DIM(bulk)) || (in sfc_ef10_tx_reap())
    168: rte_mempool_put_bulk(bulk[0]->pool, (in sfc_ef10_tx_reap())
    169: (void *)bulk, nb); (in sfc_ef10_tx_reap())
    173: bulk[nb++] = m; (in sfc_ef10_tx_reap())
    177: rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb); (in sfc_ef10_tx_reap())
    764: if (nb == RTE_DIM(bulk)) { (in sfc_ef10_simple_tx_reap())
    765: rte_mempool_put_bulk(bulk[0]->pool, (in sfc_ef10_simple_tx_reap())
    766: (void *)bulk, nb); (in sfc_ef10_simple_tx_reap())
    770: bulk[nb++] = txd->mbuf; (in sfc_ef10_simple_tx_reap())
    [all …]

sfc_ef100_tx.c
    277: struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE]; (in sfc_ef100_tx_reap_num_descs(), local)
    294: if (nb == RTE_DIM(bulk) || (in sfc_ef100_tx_reap_num_descs())
    295: (nb != 0 && m->pool != bulk[0]->pool)) { (in sfc_ef100_tx_reap_num_descs())
    296: rte_mempool_put_bulk(bulk[0]->pool, (in sfc_ef100_tx_reap_num_descs())
    297: (void *)bulk, nb); (in sfc_ef100_tx_reap_num_descs())
    301: bulk[nb++] = m; (in sfc_ef100_tx_reap_num_descs())
    305: rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb); (in sfc_ef100_tx_reap_num_descs())
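The sfc hits above all follow one reusable idiom: completed mbufs are batched into a small local array and each batch is freed with a single rte_mempool_put_bulk() call, flushing early when the next mbuf belongs to a different pool. A minimal sketch of that idiom follows; the names reap_completed() and REAP_BULK_SIZE are illustrative (not driver code), and it assumes each mbuf is already safe to return raw to its pool, as the drivers ensure before this point.

    #include <rte_common.h>
    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    #define REAP_BULK_SIZE 32 /* hypothetical; sfc uses SFC_TX_REAP_BULK_SIZE */

    static void
    reap_completed(struct rte_mbuf **completed, unsigned int n_completed)
    {
        struct rte_mbuf *bulk[REAP_BULK_SIZE];
        unsigned int nb = 0;
        unsigned int i;

        for (i = 0; i < n_completed; i++) {
            struct rte_mbuf *m = completed[i];

            /* Flush when the batch is full, or when the next mbuf comes
             * from a different pool: one put_bulk() call can only return
             * objects to a single mempool. */
            if (nb == RTE_DIM(bulk) ||
                (nb != 0 && m->pool != bulk[0]->pool)) {
                rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
                nb = 0;
            }
            bulk[nb++] = m;
        }
        if (nb != 0)
            rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
    }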
|
/dpdk/doc/guides/prog_guide/

ring_lib.rst
    38: * Adapted to bulk enqueue/dequeue operations.
    40: Also, a bulk dequeue of many objects does not cost more than a dequeue of a single object.
    113: …variable points to the next element of the table, or several elements after in the case of bulk enqueue.
    166: …variable points to the next element of the table, or several elements after in the case of bulk dequeue.
    220: or several elements after in the case of bulk enqueue.
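A minimal sketch of the bulk ring operations ring_lib.rst describes, assuming the post-17.05 prototypes (which the release_17_05.rst hits further down record): bulk calls are all-or-nothing, return the number of objects moved, and report ring occupancy through an extra out parameter. The BURST size and move_batch() helper are illustrative.

    #include <rte_ring.h>

    #define BURST 32 /* illustrative batch size */

    int move_batch(struct rte_ring *r, void *objs[BURST])
    {
        void *got[BURST];
        unsigned int free_space, available;

        /* Enqueues all BURST objects or none; returns 0 when there is
         * not enough room, and reports remaining room via free_space. */
        if (rte_ring_enqueue_bulk(r, objs, BURST, &free_space) == 0)
            return -1;

        /* Dequeues all BURST objects or none; `available` reports how
         * many entries remain in the ring afterwards. */
        if (rte_ring_dequeue_bulk(r, got, BURST, &available) == 0)
            return -1;

        return 0;
    }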
|
writing_efficient_code.rst
    43: lockless access to objects, NUMA awareness, bulk get/put and per-lcore cache.
    102: The ring supports bulk and burst access,
    105: Performance is greatly improved when using bulk access operations.
    125: The DPDK Poll Mode Driver (PMD) is also able to work in bulk/burst mode,
|
member_lib.rst
    329: The ``rte_member_lookup_bulk()`` function is used to look up a bulk of keys/elements in the
    332: which is a pointer to a bulk of keys that are to be looked up,
    355: The ``rte_member_lookup_multi_bulk()`` function looks up a bulk of keys/elements in the
    362: a pointer to a bulk of keys that are to be looked up, ``num_keys`` is the number
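A sketch of the bulk membership lookup those lines describe. The argument order follows the guide text (setsum, keys, num_keys, output set ids), but the exact types here are assumptions to verify against rte_member.h; N_KEYS and lookup_keys() are illustrative.

    #include <stdint.h>
    #include <rte_member.h>

    #define N_KEYS 64 /* illustrative batch size */

    void lookup_keys(const struct rte_member_setsum *setsum, const void **keys)
    {
        member_set_t set_ids[N_KEYS]; /* one result slot per key */

        /* One call classifies the whole batch; set_ids[i] receives the
         * matched set for keys[i], or RTE_MEMBER_NO_MATCH on a miss. */
        rte_member_lookup_bulk(setsum, keys, N_KEYS, set_ids);
    }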
|
toeplitz_hash_lib.rst
    47: ``rte_thash_gfni_bulk()``: a bulk implementation of ``rte_thash_gfni()``.
    61: * Number of tuples in a bulk.
|
packet_distrib_lib.rst
    62: If worker lcores buffer up packets internally for transmission in bulk afterwards,
|
efd_lib.rst
    213: target values for a given individual flow key or a bulk of keys.
    260: function calls per key, it is always recommended to use a bulk lookup
    262: lookup function. ``rte_efd_lookup_bulk()`` is the bulk lookup function,
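A sketch of the bulk EFD lookup those lines recommend. The prototype here is my recollection of rte_efd.h (table, socket id, key count, key list, output values) and should be confirmed against the header; N_FLOWS and lookup_flows() are illustrative.

    #include <rte_efd.h>
    #include <rte_lcore.h>

    #define N_FLOWS 32 /* illustrative batch size */

    void lookup_flows(const struct rte_efd_table *table, const void **key_list)
    {
        efd_value_t values[N_FLOWS]; /* one target value per flow key */

        /* One bulk call amortizes per-key call overhead, as the guide
         * recommends over repeated rte_efd_lookup() calls. */
        rte_efd_lookup_bulk(table, rte_socket_id(), N_FLOWS, key_list, values);
    }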
|
fib_lib.rst
    45: * ``rte_fib_lookup_bulk()``: Provides a bulk Longest Prefix Match (LPM) lookup function
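A sketch of that bulk LPM lookup, with the prototype as I recall it from rte_fib.h (IPv4 addresses in, 64-bit next hops out); treat the details as assumptions and check the header. BURST and resolve_next_hops() are illustrative.

    #include <stdint.h>
    #include <rte_fib.h>

    #define BURST 32 /* illustrative batch size */

    void resolve_next_hops(struct rte_fib *fib, uint32_t ips[BURST])
    {
        uint64_t next_hops[BURST];

        /* One call performs the LPM lookup for all BURST addresses. */
        rte_fib_lookup_bulk(fib, ips, next_hops, BURST);
    }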
|
overview.rst
    101: It has some advantages over lockless queues: easier to implement, adapted to bulk operations, and faster.
|
mempool_lib.rst
    80: the memory pool allocator can maintain a per-core cache and do bulk requests to the memory pool's ring …
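A minimal sketch of the bulk get/put path that line describes; mp is assumed to have been created with a non-zero cache_size so the per-lcore cache absorbs most of these calls, and BATCH and recycle_objects() are illustrative.

    #include <rte_mempool.h>

    #define BATCH 32 /* illustrative batch size */

    int recycle_objects(struct rte_mempool *mp)
    {
        void *objs[BATCH];

        /* All-or-nothing: returns 0 on success, a negative errno if the
         * cache plus the shared ring cannot supply BATCH objects. */
        if (rte_mempool_get_bulk(mp, objs, BATCH) < 0)
            return -1;

        /* ... use the objects ... */

        /* The whole batch goes back through the per-lcore cache, which
         * flushes to the shared ring in bulk when it overfills. */
        rte_mempool_put_bulk(mp, objs, BATCH);
        return 0;
    }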
|
link_bonding_poll_mode_drv_lib.rst
    49: the last. Packets are bulk dequeued from devices then serviced in a
|
/dpdk/doc/guides/nics/

fm10k.rst
    32: on the number of instructions when bulk processing packets.
    38: Some constraints apply as pre-conditions for specific optimizations on bulk
|
ixgbe.rst
    14: …e to hold multiple packet buffers so as to reduce the instruction count when processing a bulk of packets.
    21: Some constraints apply as pre-conditions for specific optimizations on bulk packet transfers.
    32: * To enable vPMD to work for RX, bulk allocation for Rx must be allowed.
|
bnxt.rst
    868: * TX: transmit completions are processed in bulk.
    869: * RX: bulk allocation of mbufs is used when allocating rxq buffers.
|
enic.rst
    357: AVX2 SIMD instructions. It is meant for bulk, throughput-oriented workloads
|
/dpdk/drivers/net/softnic/

rte_eth_softnic_thread.c
    639: int bulk; (member)
    1448: uint32_t bulk = (in softnic_pipeline_table_rule_add_bulk(), local)
    1488: if (bulk) { (in softnic_pipeline_table_rule_add_bulk())
    1545: req->table_rule_add_bulk.bulk = (in softnic_pipeline_table_rule_add_bulk())
    2656: uint32_t bulk = req->table_rule_add_bulk.bulk; (in pipeline_msg_handle_table_rule_add_bulk(), local)
    2702: if (bulk) { (in pipeline_msg_handle_table_rule_add_bulk())
|
/dpdk/examples/ip_pipeline/

thread.c
    608: int bulk; (member)
    1630: req->table_rule_add_bulk.bulk = table->params.match_type == TABLE_ACL; (in pipeline_table_rule_add_bulk())
    2803: uint32_t bulk = req->table_rule_add_bulk.bulk; (in pipeline_msg_handle_table_rule_add_bulk(), local)
    2812: .bulk_supported = bulk, (in pipeline_msg_handle_table_rule_add_bulk())
|
/dpdk/doc/guides/rel_notes/

release_17_05.rst
    325: * Added an extra parameter to the burst/bulk enqueue functions to
    328: * Added an extra parameter to the burst/bulk dequeue functions to return
    330: * Changed the return value of the enqueue and dequeue bulk functions to
|
release_16_04.rst
    17: * **Enabled bulk allocation of mbufs.**
    20: to bulk allocate mbufs.
|
release_2_1.rst
    464: Enabled vector ixgbe and i40e bulk alloc for BSD as it is already done for
|
/dpdk/doc/guides/sample_app_ug/

ip_pipeline.rst
    505: Add rules in bulk to the table of a specific pipeline instance ::
    507: pipeline <pipeline_name> table <table_id> rule add bulk <file_name> <n_rules>
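Going by the syntax line above, a hypothetical invocation (pipeline name, table id, rules file, and rule count are all made up here) would read:

    pipeline PIPELINE0 table 0 rule add bulk ./table_rules.txt 128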
|
l3_forward.rst
    408: it does not have separate functions for single and bulk lookups.
|
/dpdk/doc/guides/contributing/

unit_test.rst
    131: These are the bulk of the unit tests to validate functional blocks.
|