/dpdk/lib/acl/

  acl_run.h
    50   struct completion *last_cmplt;
    51   struct completion *cmplt_array;
    58   struct completion {  (struct)
    73   struct completion *cmplt;  (argument)
    85   static inline struct completion *
    86   alloc_completion(struct completion *p, uint32_t size, uint32_t tries,  (in alloc_completion())
    175  acl_set_flow(struct acl_flow_data *flows, struct completion *cmplt,  (in acl_set_flow())

  acl_run_neon.h
    166  struct completion cmplt[8];  (in search_neon_8())
    228  struct completion cmplt[4];  (in search_neon_4())

  acl_run_sse.h
    200  struct completion cmplt[MAX_SEARCHES_SSE8];  (in search_sse_8())
    289  struct completion cmplt[MAX_SEARCHES_SSE4];  (in search_sse_4())

  acl_run_altivec.h
    193  struct completion cmplt[MAX_SEARCHES_ALTIVEC8];  (in search_altivec_8())
    264  struct completion cmplt[MAX_SEARCHES_ALTIVEC4];  (in search_altivec_4())

  acl_run_scalar.c
    118  struct completion cmplt[MAX_SEARCHES_SCALAR];  (in rte_acl_classify_scalar())

  acl_run_avx2.h
    166  struct completion cmplt[MAX_SEARCHES_AVX16];  (in search_avx2x16())

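Note: the acl_run.h matches above outline how each classify routine keeps a small on-stack array of completion slots, one per concurrent search lane, and grabs a free slot for every flow it starts. A minimal sketch of that allocation idea follows; the field names and the "count == 0 means free" convention are assumptions for illustration, not the library's exact layout.

/* Sketch only: per-lane completion slots for a multi-flow search. */
#include <stdint.h>
#include <stddef.h>

struct completion_slot {
	uint32_t count;            /* 0 == slot is free (assumed convention) */
	uint32_t *results;         /* where matches get written (hypothetical) */
};

/* Scan the on-stack array for a free slot and mark it busy. */
static struct completion_slot *
alloc_slot(struct completion_slot *p, uint32_t size, uint32_t tries,
	   uint32_t *results)
{
	uint32_t n;

	for (n = 0; n != size; n++) {
		if (p[n].count == 0) {
			p[n].count = tries;
			p[n].results = results;
			return p + n;
		}
	}
	return NULL;               /* every lane is already in use */
}
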
/dpdk/doc/guides/vdpadevs/

  mlx5.rst
    83   The completion queue moderation mode:
    87   - 1, Latency is counted from the first packet completion report.
    89   - 2, Latency is counted from the last packet completion.
    93   - 1 - 4095, The maximum time in microseconds that packet completion report

/dpdk/drivers/crypto/nitrox/

  nitrox_sym_reqmgr.c
    120  uint64_t completion;  (member)
    371  sr->resp.completion = PENDING_SIG;  (in create_cipher_outbuf())
    372  sr->out.sglist[cnt].len = sizeof(sr->resp.completion);  (in create_cipher_outbuf())
    374  resp.completion);  (in create_cipher_outbuf())
    375  sr->out.sglist[cnt].virt = &sr->resp.completion;  (in create_cipher_outbuf())
    597  sr->resp.completion = PENDING_SIG;  (in create_aead_outbuf())
    598  sr->out.sglist[cnt].len = sizeof(sr->resp.completion);  (in create_aead_outbuf())
    600  resp.completion);  (in create_aead_outbuf())
    601  sr->out.sglist[cnt].virt = &sr->resp.completion;  (in create_aead_outbuf())
    796  cc = *(volatile uint64_t *)(&sr->resp.completion);  (in nitrox_check_se_req())

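Note: the nitrox matches show the driver seeding a 64-bit completion word with PENDING_SIG, appending that word to the output SG list, and later polling it through a volatile read in nitrox_check_se_req(). A minimal sketch of that poll-on-completion-word pattern follows; the sentinel value and the helper names are hypothetical, not the driver's.

/* Sketch only: completion word seeded by software, overwritten by hardware. */
#include <stdint.h>
#include <stdbool.h>

#define PENDING_SIG 0xFFFFFFFFFFFFFFFFULL   /* assumed sentinel value */

struct sym_resp {
	uint64_t completion;    /* written by the engine when the op is done */
};

static void
mark_pending(struct sym_resp *resp)
{
	resp->completion = PENDING_SIG;
}

static bool
op_done(const struct sym_resp *resp)
{
	/* Volatile read: the device updates this word behind our back. */
	uint64_t cc = *(const volatile uint64_t *)&resp->completion;

	return cc != PENDING_SIG;
}
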
/dpdk/drivers/net/hinic/base/

  hinic_pmd_cmdq.h
    126  struct hinic_cmdq_completion completion;  (member)
    134  struct hinic_cmdq_completion completion;  (member)

  hinic_pmd_cmdq.c
    343  cmdq_set_completion(&wqe_lcmd->completion, buf_out);  (in cmdq_set_lcmd_wqe())
    348  wqe_lcmd->completion.direct_resp = 0;  (in cmdq_set_lcmd_wqe())
    352  wqe_lcmd->completion.direct_resp = 0;  (in cmdq_set_lcmd_wqe())
    832  *out_param = cpu_to_be64(wqe_lcmd->completion.direct_resp);  (in cmdq_sync_cmd_direct_resp())

/dpdk/drivers/raw/ioat/

  rte_idxd_rawdev_fns.h
    51   rte_iova_t completion;  (member)
    187  idxd->desc_ring[write_idx].completion = __desc_idx_to_iova(idxd, write_idx & mask);  (in __idxd_write_desc())
    285  .completion = __desc_idx_to_iova(idxd, comp_idx),  (in __idxd_perform_ops())

/dpdk/drivers/dma/idxd/

  idxd_hw_defs.h
    32   rte_iova_t completion;  (member)

  idxd_common.c
    47   desc.completion = comp_addr;  (in __submit())
    58   .completion = comp_addr,  (in __submit())

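Note: both idxd code paths above fill a completion field in the work descriptor with the IOVA of a per-slot completion record before submission, so the device knows where to report its status. The sketch below illustrates that idea with an invented descriptor layout; it is not the hardware definition from idxd_hw_defs.h.

/* Sketch only: descriptor carrying the address of its completion record. */
#include <stdint.h>

typedef uint64_t iova_t;            /* stand-in for rte_iova_t */

struct hw_desc {
	uint64_t opcode_flags;
	iova_t   src;
	iova_t   dst;
	uint32_t size;
	iova_t   completion;        /* where the device writes its status */
};

struct comp_record {
	uint8_t  status;            /* non-zero once the device completes */
	uint8_t  pad[31];
};

static void
prepare_desc(struct hw_desc *desc, iova_t src, iova_t dst, uint32_t size,
	     iova_t comp_addr)
{
	desc->src = src;
	desc->dst = dst;
	desc->size = size;
	desc->completion = comp_addr;   /* same idea as desc.completion = comp_addr above */
}
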
/dpdk/doc/guides/nics/

  ena.rst
    55   * Create I/O completion queue
    57   * Destroy I/O completion queue
    69   SQ correspondingly). Each SQ has a completion queue (CQ) associated
    189  application runs to completion, the ENA can be detached from attached module if

  vmxnet3.rst
    39   The hypervisor fills those packet buffers on packet arrival and write completion ring descriptors,
    47   …visor takes packets and passes them to the vSwitch, It writes into the completion descriptors ring.

/dpdk/drivers/net/cxgbe/

  cxgbe_filter.h
    168  struct t4_completion completion; /* completion rendezvous */  (member)

  cxgbe_flow.c
    1125  t4_init_completion(&ctx.completion);  (in __cxgbe_flow_create())
    1137  &ctx.completion);  (in __cxgbe_flow_create())
    1215  t4_init_completion(&ctx.completion);  (in __cxgbe_flow_destroy())
    1226  &ctx.completion);  (in __cxgbe_flow_destroy())

  cxgbe_filter.c
    985   t4_complete(&ctx->completion);  (in cxgbe_del_filter())
    1236  t4_complete(&ctx->completion);  (in cxgbe_hash_filter_rpl())
    1300  t4_complete(&ctx->completion);  (in cxgbe_filter_rpl())
    1463  t4_complete(&ctx->completion);  (in cxgbe_hash_del_filter_rpl())

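Note: the cxgbe matches show a completion used as a rendezvous: the flow create/destroy path initialises the object (t4_init_completion) and waits on it, while the firmware-reply handlers call t4_complete() to release the waiter. A generic stand-in for that rendezvous pattern is sketched below; it is not the t4_completion implementation.

/* Sketch only: one-shot rendezvous between a requester and a reply handler. */
#include <stdatomic.h>
#include <sched.h>

struct rendezvous {
	atomic_int done;
};

static void
rendezvous_init(struct rendezvous *c)
{
	atomic_store(&c->done, 0);
}

static void
rendezvous_complete(struct rendezvous *c)    /* reply-handler side */
{
	atomic_store(&c->done, 1);
}

static void
rendezvous_wait(struct rendezvous *c)        /* request side */
{
	while (atomic_load(&c->done) == 0)
		sched_yield();               /* real code would bound this wait */
}
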
/dpdk/doc/guides/mempool/

  stack.rst
    8    ``rte_stack`` DPDK library. For run-to-completion workloads with sufficiently

/dpdk/doc/guides/prog_guide/

  overview.rst
    27   The DPDK implements a run to completion model for packet processing,
    33   In addition to the run-to-completion model,

  poll_mode_drv.rst
    22   The DPDK environment for packet processing applications allows for two models, run-to-completion an…
    24   * In the *run-to-completion* model, a specific port's RX descriptor ring is polled for packets t…
    31   In a synchronous run-to-completion model,
    80   This allows a run-to-completion processing stack to statically fix or
    117  The run-to-completion model also performs better if packet or data manipulation is in local memory …

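Note: the overview.rst and poll_mode_drv.rst matches describe the run-to-completion model, where a core polls an RX queue, processes the packets itself and transmits them before polling again. A minimal sketch of such a loop is shown below, assuming the port, queue and mempool have already been configured elsewhere.

/* Sketch only: single-core run-to-completion forwarding loop. */
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

static void
lcore_main_loop(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *bufs[BURST_SIZE];

	for (;;) {
		/* RX, processing and TX all happen on this core. */
		uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id,
						  bufs, BURST_SIZE);
		if (nb_rx == 0)
			continue;

		/* ... per-packet processing would go here ... */

		uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id,
						  bufs, nb_rx);

		/* Drop whatever the TX ring could not take. */
		for (uint16_t i = nb_tx; i < nb_rx; i++)
			rte_pktmbuf_free(bufs[i]);
	}
}
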
/dpdk/doc/guides/sample_app_ug/

  vhost_crypto.rst
    54   guest works in polling mode, thus will NOT notify the guest completion of

  cmd_line.rst
    25   …e sample application supports some of the features of the GNU readline library such as, completion,

/dpdk/doc/guides/rawdevs/

  ioat.rst
    188  If it is not needed, the tracking by the driver of user-provided completion
    217  Once copies have been completed, the completion will be reported back when
    272  application a set of completion handles passed in when the relevant copies

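Note: the rawdev ioat guide excerpts refer to user-provided completion handles: the driver records one opaque handle per enqueued copy and hands those handles back once the corresponding copies have finished. The sketch below mirrors that bookkeeping with a generic ring; it is illustrative only and does not use the rawdev API.

/* Sketch only: tracking one opaque user handle per in-flight copy. */
#include <stdint.h>

#define RING_SZ 64   /* assumed power-of-two ring size */

struct hdl_ring {
	uintptr_t hdl[RING_SZ];   /* user handle per in-flight copy */
	uint16_t head;            /* next free slot */
	uint16_t tail;            /* oldest un-completed copy */
};

/* Remember the caller's handle when a copy is enqueued. */
static void
track_handle(struct hdl_ring *r, uintptr_t user_hdl)
{
	r->hdl[r->head++ & (RING_SZ - 1)] = user_hdl;
}

/* Return up to max handles for copies the hardware has finished. */
static uint16_t
gather_completed(struct hdl_ring *r, uint16_t nb_done,
		 uintptr_t *out, uint16_t max)
{
	uint16_t n = 0;

	while (n < nb_done && n < max)
		out[n++] = r->hdl[r->tail++ & (RING_SZ - 1)];
	return n;
}
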
/dpdk/doc/guides/dmadevs/

  ioat.rst
    93   documentation for details on operation enqueue, submission and completion API usage.

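Note: the dmadev ioat guide excerpt points at the generic enqueue, submit and completion calls. A small sketch of that flow with the dmadev API is below; the device id, vchan and IOVAs are assumed to be set up elsewhere, and error handling is reduced to a bare minimum.

/* Sketch only: enqueue one copy, submit it, poll for its completion. */
#include <stdbool.h>
#include <rte_common.h>
#include <rte_dmadev.h>

static int
copy_and_wait(int16_t dev_id, uint16_t vchan,
	      rte_iova_t src, rte_iova_t dst, uint32_t len)
{
	uint16_t last_idx;
	bool has_error = false;

	/* Enqueue one copy operation ... */
	if (rte_dma_copy(dev_id, vchan, src, dst, len, 0) < 0)
		return -1;

	/* ... push it to the hardware ... */
	rte_dma_submit(dev_id, vchan);

	/* ... and poll until the completion is reported back. */
	while (rte_dma_completed(dev_id, vchan, 1, &last_idx, &has_error) == 0)
		;

	return has_error ? -1 : 0;
}
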