
Searched refs:wqe (Results 1 – 22 of 22) sorted by relevance

/f-stack/dpdk/drivers/event/octeontx/
ssovf_worker.h
65 uint64_t bytes_left = wqe->s.w1.len - wqe->s.w5.size; in ssovf_octeontx_wqe_xtract_mseg()
67 nb_segs = wqe->s.w0.bufs; in ssovf_octeontx_wqe_xtract_mseg()
103 ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty]; in ssovf_octeontx_wqe_to_pkt()
106 mbuf->pkt_len = wqe->s.w1.len; in ssovf_octeontx_wqe_to_pkt()
110 wqe->w[2]); in ssovf_octeontx_wqe_to_pkt()
113 mbuf->nb_segs = wqe->s.w0.bufs; in ssovf_octeontx_wqe_to_pkt()
114 mbuf->data_len = wqe->s.w5.size; in ssovf_octeontx_wqe_to_pkt()
115 ssovf_octeontx_wqe_xtract_mseg(wqe, mbuf); in ssovf_octeontx_wqe_to_pkt()
122 if (likely(wqe->s.w2.vv)) { in ssovf_octeontx_wqe_to_pkt()
126 mbuf->data_off + wqe->s.w4.vlptr + 2))); in ssovf_octeontx_wqe_to_pkt()
[all …]
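
Note: the ssovf_worker.h matches above convert an OCTEON TX work queue entry (WQE) into an rte_mbuf: w1.len carries the total packet length, w5.size the first segment's length, and w0.bufs the segment count, with ssovf_octeontx_wqe_xtract_mseg() walking the remaining bytes. Below is a minimal self-contained sketch of that field mapping; the toy_wqe/toy_mbuf layouts are simplified stand-ins, not the driver's real bitfield structs.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's WQE and mbuf types (the real
 * octeontx WQE packs these into bitfield words w0/w1/w5). */
struct toy_wqe {
    uint64_t total_len;      /* corresponds to wqe->s.w1.len  */
    uint64_t first_seg_size; /* corresponds to wqe->s.w5.size */
    uint64_t nb_bufs;        /* corresponds to wqe->s.w0.bufs */
};

struct toy_mbuf {
    uint32_t pkt_len;  /* total packet length across all segments */
    uint16_t data_len; /* length of this segment only */
    uint16_t nb_segs;  /* number of segments in the chain */
};

/* Mirror of the field mapping in ssovf_octeontx_wqe_to_pkt() above. */
static void toy_wqe_to_pkt(const struct toy_wqe *wqe, struct toy_mbuf *mbuf)
{
    mbuf->pkt_len = (uint32_t)wqe->total_len;       /* w1.len  */
    mbuf->data_len = (uint16_t)wqe->first_seg_size; /* w5.size */
    mbuf->nb_segs = (uint16_t)wqe->nb_bufs;         /* w0.bufs */
}

int main(void)
{
    struct toy_wqe wqe = { .total_len = 3000, .first_seg_size = 2048,
                           .nb_bufs = 2 };
    struct toy_mbuf mbuf;

    toy_wqe_to_pkt(&wqe, &mbuf);
    /* As in ssovf_octeontx_wqe_xtract_mseg(): bytes still to be walked
     * in the remaining segments. */
    uint64_t bytes_left = wqe.total_len - wqe.first_seg_size;
    printf("pkt_len=%u data_len=%u nb_segs=%u bytes_left=%lu\n",
           mbuf.pkt_len, mbuf.data_len, mbuf.nb_segs,
           (unsigned long)bytes_left);
    return 0;
}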
timvf_worker.h
167 if (entry->wqe != tim->ev.u64) { in timvf_rem_entry()
180 entry->w0 = entry->wqe = 0; in timvf_rem_entry()
timvf_evdev.h
146 uint64_t wqe; member
timvf_worker.c
36 entry->wqe = tim->ev.u64; in timvf_format_event()
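
Note: timvf stores the event payload in the bucket entry's wqe field when arming (timvf_format_event()), and timvf_rem_entry() only clears the entry after checking it still holds the caller's event; the otx2 timer driver further down (tim_format_event()/tim_rm_entry()) uses the same pattern. A hedged sketch with hypothetical entry/event types:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the driver's bucket entry and timer event. */
struct toy_tim_entry {
    uint64_t w0;  /* control word */
    uint64_t wqe; /* event payload, as in timvf_evdev.h / otx2_tim_evdev.h */
};

struct toy_tim_evtim {
    uint64_t ev_u64; /* corresponds to tim->ev.u64 */
};

/* Arm: stash the event in the entry, as timvf_format_event() does. */
static void toy_format_event(const struct toy_tim_evtim *tim,
                             struct toy_tim_entry *entry)
{
    entry->wqe = tim->ev_u64;
}

/* Cancel: only clear the entry if it still holds our event, mirroring
 * the check in timvf_rem_entry() / tim_rm_entry(). */
static bool toy_rem_entry(const struct toy_tim_evtim *tim,
                          struct toy_tim_entry *entry)
{
    if (entry->wqe != tim->ev_u64)
        return false; /* entry was reused or already fired */
    entry->w0 = entry->wqe = 0;
    return true;
}

int main(void)
{
    struct toy_tim_evtim tim = { .ev_u64 = 0xabcdULL };
    struct toy_tim_entry entry = { 0, 0 };

    toy_format_event(&tim, &entry);
    return toy_rem_entry(&tim, &entry) ? 0 : 1;
}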
/f-stack/dpdk/drivers/net/hinic/base/
hinic_pmd_cmdq.c
121 #define WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe)) argument
292 wqe_lcmd = &wqe->wqe_lcmd; in cmdq_prepare_wqe_ctrl()
310 WQE_HEADER(wqe)->header_info = in cmdq_prepare_wqe_ctrl()
396 struct hinic_cmdq_wqe *wqe) in clear_wqe_complete_bit() argument
408 wqe_lcmd = &wqe->wqe_lcmd; in clear_wqe_complete_bit()
411 inline_wqe = &wqe->inline_wqe; in clear_wqe_complete_bit()
715 struct hinic_cmdq_wqe *wqe; in hinic_cmdq_poll_msg() local
727 if (wqe == NULL) { in hinic_cmdq_poll_msg()
741 wqe_lcmd = &wqe->wqe_lcmd; in hinic_cmdq_poll_msg()
797 memset(&wqe, 0, sizeof(wqe)); in cmdq_sync_cmd_direct_resp()
[all …]
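
Note: in the hinic command queue a single WQE slot can hold either a long-command or an inline format, which is why clear_wqe_complete_bit() reaches for both wqe->wqe_lcmd and wqe->inline_wqe, and why WQE_HEADER() is just a cast of the slot pointer to the header struct. A sketch of that cast-plus-union shape, with invented field names where the excerpt shows none:

#include <stdint.h>
#include <string.h>

/* Hypothetical header at the start of every cmdq WQE slot; the real
 * struct hinic_cmdq_header has more fields. */
struct toy_cmdq_header {
    uint32_t header_info; /* encodes format/completion bits */
};

struct toy_cmdq_lcmd   { struct toy_cmdq_header hdr; uint64_t buf_addr; };
struct toy_cmdq_inline { struct toy_cmdq_header hdr; uint8_t data[24]; };

/* One slot, two possible formats, as in struct hinic_cmdq_wqe. */
struct toy_cmdq_wqe {
    union {
        struct toy_cmdq_lcmd   wqe_lcmd;
        struct toy_cmdq_inline inline_wqe;
    };
};

/* Mirror of the driver's WQE_HEADER() macro: the header is simply the
 * first bytes of the slot, whichever format is in use. */
#define TOY_WQE_HEADER(wqe) ((struct toy_cmdq_header *)(wqe))

int main(void)
{
    struct toy_cmdq_wqe wqe;

    memset(&wqe, 0, sizeof(wqe)); /* as in cmdq_sync_cmd_direct_resp() */
    TOY_WQE_HEADER(&wqe)->header_info = 0x1u;
    return 0;
}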
hinic_pmd_wq.h
61 #define WQE_SHADOW_PAGE(wq, wqe) \ argument
62 (u16)(((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
65 #define WQE_IN_RANGE(wqe, start, end) \ argument
66 (((unsigned long)(wqe) >= (unsigned long)(start)) && \
67 ((unsigned long)(wqe) < (unsigned long)(end)))
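
Note: WQE_IN_RANGE() above is a plain half-open pointer-range check (start inclusive, end exclusive). A short usage sketch against a hypothetical ring buffer; WQE_SHADOW_PAGE() is left out because the excerpt truncates it:

#include <stdint.h>
#include <stdio.h>

/* Same check as the driver's WQE_IN_RANGE(): start inclusive, end
 * exclusive. */
#define WQE_IN_RANGE(wqe, start, end) \
    (((unsigned long)(wqe) >= (unsigned long)(start)) && \
     ((unsigned long)(wqe) < (unsigned long)(end)))

int main(void)
{
    /* Hypothetical 4 KiB work-queue ring. */
    static uint8_t ring[4096];
    uint8_t *wqe = ring + 128;

    printf("in ring:  %d\n", WQE_IN_RANGE(wqe, ring, ring + sizeof(ring)));
    printf("past end: %d\n",
           WQE_IN_RANGE(ring + sizeof(ring), ring, ring + sizeof(ring)));
    return 0;
}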
/f-stack/dpdk/drivers/net/mlx5/
mlx5_flow_age.c
212 volatile struct mlx5_aso_wqe *restrict wqe; in mlx5_aso_init_sq() local
218 for (i = 0, wqe = &sq->wqes[0]; i < size; ++i, ++wqe) { in mlx5_aso_init_sq()
220 (sizeof(*wqe) >> 4)); in mlx5_aso_init_sq()
226 wqe->aso_cseg.operand_masks = rte_cpu_to_be_32 in mlx5_aso_init_sq()
232 wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX); in mlx5_aso_init_sq()
379 volatile struct mlx5_aso_wqe *wqe; in mlx5_aso_sq_enqueue_burst() local
392 wqe = &sq->wqes[sq->head & mask]; in mlx5_aso_sq_enqueue_burst()
399 wqe->general_cseg.misc = in mlx5_aso_sq_enqueue_burst()
404 wqe->general_cseg.opcode = rte_cpu_to_be_32 in mlx5_aso_sq_enqueue_burst()
444 DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1], in mlx5_aso_dump_err_objs()
[all …]
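
Note: mlx5_aso_init_sq() pre-fills every WQE in the ring once at setup (the opcode length in 16-byte units via sizeof(*wqe) >> 4, the all-ones data mask), so mlx5_aso_sq_enqueue_burst() only patches per-request fields at the slot selected by sq->head & mask. A sketch of that init-once/patch-on-hot-path split, with a toy WQE type standing in for mlx5_aso_wqe:

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical 64-byte WQE; the real mlx5_aso_wqe has distinct control
 * and ASO segments. */
struct toy_aso_wqe {
    uint32_t opcode_sz;     /* WQE size in 16-byte units */
    uint32_t operand_masks; /* per-request, patched on the hot path */
    uint64_t data_mask;
    uint8_t  pad[48];
};

struct toy_sq {
    struct toy_aso_wqe *wqes;
    uint16_t size; /* number of WQEs, power of two */
    uint16_t head;
};

/* Pre-fill the constant WQE fields once, as mlx5_aso_init_sq() does. */
static void toy_aso_init_sq(struct toy_sq *sq)
{
    struct toy_aso_wqe *wqe;
    int i;

    for (i = 0, wqe = &sq->wqes[0]; i < sq->size; ++i, ++wqe) {
        wqe->opcode_sz = sizeof(*wqe) >> 4; /* as sizeof(*wqe) >> 4 above */
        wqe->data_mask = UINT64_MAX;        /* as RTE_BE64(UINT64_MAX) */
    }
}

int main(void)
{
    struct toy_sq sq = { .size = 16, .head = 0 };

    sq.wqes = calloc(sq.size, sizeof(*sq.wqes));
    if (sq.wqes == NULL)
        return 1;
    toy_aso_init_sq(&sq);
    /* Hot path: pick the slot with power-of-two masking, as in
     * mlx5_aso_sq_enqueue_burst(): wqe = &sq->wqes[sq->head & mask]. */
    struct toy_aso_wqe *wqe = &sq.wqes[sq.head++ & (sq.size - 1)];
    wqe->operand_masks = 0x12345678u;
    free(sq.wqes);
    return 0;
}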
mlx5_rxtx.c
1413 rte_prefetch0(wqe); in mlx5_rx_burst()
3147 struct mlx5_wqe *wqe; in mlx5_tx_schedule_send() local
3264 loc->wqe_last = wqe; in mlx5_tx_packet_multi_tso()
3348 loc->wqe_last = wqe; in mlx5_tx_packet_multi_send()
3351 dseg = &wqe->dseg[0]; in mlx5_tx_packet_multi_send()
3551 loc->wqe_last = wqe; in mlx5_tx_packet_multi_inline()
3758 loc->wqe_last = wqe; in mlx5_tx_burst_tso()
4631 loc->wqe_last = wqe; in mlx5_tx_burst_single_send()
4693 loc->wqe_last = wqe; in mlx5_tx_burst_single_send()
4734 loc->wqe_last = wqe; in mlx5_tx_burst_single_send()
[all …]
mlx5_rxtx.h
618 mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe, in mlx5_tx_dbrec_cond_wmb() argument
622 volatile uint64_t *src = ((volatile uint64_t *)wqe); in mlx5_tx_dbrec_cond_wmb()
642 mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe) in mlx5_tx_dbrec() argument
644 mlx5_tx_dbrec_cond_wmb(txq, wqe, 1); in mlx5_tx_dbrec()
753 volatile struct mlx5_wqe_data_seg *wqe = in mprq_buf_replace() local
764 wqe->addr = rte_cpu_to_be_64((uintptr_t)addr); in mprq_buf_replace()
767 wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr); in mprq_buf_replace()
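
Note: mlx5_tx_dbrec_cond_wmb() treats the WQE as an array of 64-bit words (the src cast above) and performs the classic two-step doorbell: update the doorbell record, fence, then write the WQE's leading control words to the device doorbell. The sketch below shows that shape with C11 fences standing in for rte_wmb() and the compiler barrier; the toy_txq fields are assumptions, not the real mlx5_txq_data layout:

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical queue state; the real mlx5_txq_data tracks much more. */
struct toy_txq {
    volatile uint32_t *dbrec;   /* doorbell record in host memory */
    volatile uint64_t *db_addr; /* mapped device doorbell register */
    uint16_t wqe_ci;            /* current WQE index */
};

/* Ring the doorbell for a freshly written WQE. `cond` selects whether a
 * full write barrier is needed, mirroring mlx5_tx_dbrec_cond_wmb(). */
static void toy_tx_dbrec_cond_wmb(struct toy_txq *txq,
                                  volatile uint64_t *wqe, int cond)
{
    volatile uint64_t *src = wqe; /* first 8 bytes: control segment */

    /* Step 1: publish the new index in the doorbell record. */
    *txq->dbrec = txq->wqe_ci;
    if (cond)
        atomic_thread_fence(memory_order_seq_cst); /* ~rte_wmb() */
    else
        atomic_signal_fence(memory_order_seq_cst); /* compiler barrier */
    /* Step 2: kick the device with the WQE's first word. */
    *txq->db_addr = *src;
}

int main(void)
{
    static uint64_t fake_wqe[8];
    static uint32_t fake_dbrec;
    static uint64_t fake_db;
    struct toy_txq txq = { &fake_dbrec, &fake_db, 1 };

    toy_tx_dbrec_cond_wmb(&txq, fake_wqe, 1);
    return 0;
}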
mlx5_txpp.c
200 struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes; in mlx5_txpp_fill_wqe_rearm_queue() local
209 cs = &wqe[i + 0].cseg; in mlx5_txpp_fill_wqe_rearm_queue()
221 cs = &wqe[i + 1].cseg; in mlx5_txpp_fill_wqe_rearm_queue()
369 struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes; in mlx5_txpp_fill_wqe_clock_queue() local
370 struct mlx5_wqe_cseg *cs = &wqe->cseg; in mlx5_txpp_fill_wqe_clock_queue()
393 struct mlx5_wqe_eseg *es = &wqe->eseg; in mlx5_txpp_fill_wqe_clock_queue()
/f-stack/dpdk/drivers/regex/mlx5/
mlx5_regex_fastpath.c
113 uint8_t *wqe = (uint8_t *)sq->wqe + wqe_offset; in prep_one() local
116 set_wqe_ctrl_seg((struct mlx5_wqe_ctrl_seg *)wqe, sq->pi, in prep_one()
119 set_regex_ctrl_seg(wqe + 12, 0, op->group_id0, op->group_id1, in prep_one()
123 (struct mlx5_wqe_data_seg *)(wqe + in prep_one()
140 uint8_t *wqe = (uint8_t *)sq->wqe + wqe_offset; in send_doorbell() local
141 ((struct mlx5_wqe_ctrl_seg *)wqe)->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; in send_doorbell()
148 *doorbell_addr = *(volatile uint64_t *)wqe; in send_doorbell()
304 uint8_t *wqe = (uint8_t *)sq->wqe; in setup_sqs() local
310 (wqe + MLX5_REGEX_WQE_METADATA_OFFSET), in setup_sqs()
314 (wqe + MLX5_REGEX_WQE_SCATTER_OFFSET), in setup_sqs()
[all …]
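
Note: the regex driver keeps sq->wqe as a raw byte ring: prep_one() finds a slot by byte offset, writes the control segment at offset 0 and the regex control segment 12 bytes in, and send_doorbell() kicks the device with the slot's first 8 bytes. A sketch of that byte-offset addressing; the slot size and all offsets other than 12 are invented:

#include <stdint.h>
#include <string.h>

/* Hypothetical fixed slot size; the real driver derives wqe_offset from
 * the queue's pi and descriptor count. */
#define TOY_WQE_SIZE 64u

struct toy_regex_sq {
    uint8_t *wqe; /* the SQ ring buffer, as in struct mlx5_regex_sq */
    uint16_t pi;  /* producer index */
};

static void toy_prep_one(struct toy_regex_sq *sq, uint32_t ctrl_word,
                         uint32_t regex_word)
{
    size_t wqe_offset = (size_t)sq->pi * TOY_WQE_SIZE;
    uint8_t *wqe = sq->wqe + wqe_offset; /* slot located by byte offset */

    /* Control segment at the start of the slot, regex control segment
     * 12 bytes in, as in prep_one() above. */
    memcpy(wqe, &ctrl_word, sizeof(ctrl_word));
    memcpy(wqe + 12, &regex_word, sizeof(regex_word));
    sq->pi++;
}

static void toy_send_doorbell(struct toy_regex_sq *sq,
                              volatile uint64_t *doorbell_addr)
{
    uint8_t *wqe = sq->wqe; /* first slot's control segment */
    uint64_t first8;

    /* As in send_doorbell(): the device is kicked with the WQE's
     * first 8 bytes. */
    memcpy(&first8, wqe, sizeof(first8));
    *doorbell_addr = first8;
}

int main(void)
{
    static uint8_t ring[16 * TOY_WQE_SIZE];
    static uint64_t fake_doorbell;
    struct toy_regex_sq sq = { ring, 0 };

    toy_prep_one(&sq, 0x11111111u, 0x22222222u);
    toy_send_doorbell(&sq, &fake_doorbell);
    return 0;
}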
mlx5_regex_control.c
221 sq->wqe = buf; in regex_ctrl_create_sq()
304 if (sq->wqe) { in regex_ctrl_destroy_sq()
305 rte_free((void *)(uintptr_t)sq->wqe); in regex_ctrl_destroy_sq()
306 sq->wqe = NULL; in regex_ctrl_destroy_sq()
mlx5_regex.h
23 uint8_t *wqe; /* The SQ ring buffer. */ member
/f-stack/freebsd/contrib/octeon-sdk/
cvmx-raid.h
105 …uint64_t wqe : 1; /**< Indicates whether RAD submits a work queue entry or write… member
/f-stack/dpdk/drivers/event/octeontx2/
otx2_evdev.h
246 struct nix_wqe_hdr_s *wqe = (struct nix_wqe_hdr_s *)get_work1; in otx2_wqe_to_mbuf() local
252 otx2_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag, in otx2_wqe_to_mbuf()
otx2_tim_worker.c
38 entry->wqe = tim->ev.u64; in tim_format_event()
otx2_tim_worker.h
557 if (entry->wqe != tim->ev.u64) { in tim_rm_entry()
573 entry->wqe = 0; in tim_rm_entry()
otx2_tim_evdev.h
112 uint64_t wqe; member
/f-stack/dpdk/drivers/net/mlx4/
mlx4_txq.c
193 elt->wqe = NULL; in mlx4_txq_free_elts()
473 (&(*txq->elts)[0])->wqe = in mlx4_tx_queue_setup()
mlx4_rxtx.h
83 volatile struct mlx4_wqe_ctrl_seg *wqe; /**< SQ WQE. */ member
mlx4_rxtx.c
905 ctrl = elt->wqe; in mlx4_tx_burst()
1044 elt->wqe = ctrl; in mlx4_tx_burst()
/f-stack/dpdk/drivers/net/hinic/
hinic_pmd_rx.c
267 hinic_prepare_rq_wqe(void *wqe, __rte_unused u16 pi, dma_addr_t buf_addr, in hinic_prepare_rq_wqe() argument
270 struct hinic_rq_wqe *rq_wqe = wqe; in hinic_prepare_rq_wqe()
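
Note: hinic_prepare_rq_wqe() takes the slot as a void * and immediately casts it to the typed struct hinic_rq_wqe before filling in the receive buffer. A final hedged sketch of that prepare step, with an invented two-field RQ WQE since the excerpt does not show the real layout:

#include <stdint.h>

typedef uint64_t toy_dma_addr_t;

/* Hypothetical receive-queue WQE: one buffer descriptor. The real
 * struct hinic_rq_wqe carries more (control and completion sections). */
struct toy_rq_wqe {
    uint64_t buf_addr; /* DMA address of the posted receive buffer */
    uint32_t buf_len;
};

/* Same shape as hinic_prepare_rq_wqe(): take the slot as void *, cast
 * to the typed WQE, and fill in the buffer the NIC should DMA into. */
static void toy_prepare_rq_wqe(void *wqe, toy_dma_addr_t buf_addr,
                               uint32_t buf_len)
{
    struct toy_rq_wqe *rq_wqe = wqe;

    rq_wqe->buf_addr = buf_addr;
    rq_wqe->buf_len = buf_len;
}

int main(void)
{
    struct toy_rq_wqe slot;

    toy_prepare_rq_wqe(&slot, 0x1000ULL, 2048u);
    return (int)(slot.buf_len != 2048u);
}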