| /f-stack/dpdk/drivers/event/octeontx/ |
| ssovf_worker.h |
     65  uint64_t bytes_left = wqe->s.w1.len - wqe->s.w5.size;  in ssovf_octeontx_wqe_xtract_mseg()
     67  nb_segs = wqe->s.w0.bufs;  in ssovf_octeontx_wqe_xtract_mseg()
    103  ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];  in ssovf_octeontx_wqe_to_pkt()
    106  mbuf->pkt_len = wqe->s.w1.len;  in ssovf_octeontx_wqe_to_pkt()
    110  wqe->w[2]);  in ssovf_octeontx_wqe_to_pkt()
    113  mbuf->nb_segs = wqe->s.w0.bufs;  in ssovf_octeontx_wqe_to_pkt()
    114  mbuf->data_len = wqe->s.w5.size;  in ssovf_octeontx_wqe_to_pkt()
    115  ssovf_octeontx_wqe_xtract_mseg(wqe, mbuf);  in ssovf_octeontx_wqe_to_pkt()
    122  if (likely(wqe->s.w2.vv)) {  in ssovf_octeontx_wqe_to_pkt()
    126  mbuf->data_off + wqe->s.w4.vlptr + 2)));  in ssovf_octeontx_wqe_to_pkt()
    [all …]
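The hits above cover the OCTEON TX event path that turns a hardware work-queue entry (WQE) into an rte_mbuf: w1.len carries the total packet length, w0.bufs the buffer count, and w5.size the bytes held by the first buffer, so the multi-segment extractor distributes the remainder across the chained segments. A minimal sketch of that split, assuming hypothetical toy_* stand-ins for the real WQE/mbuf layouts and an assumed fixed per-buffer capacity:

    #include <stdint.h>

    /* Hypothetical, simplified stand-ins for the WQE words and mbuf chain. */
    struct toy_wqe {
        uint16_t bufs;      /* cf. wqe->s.w0.bufs */
        uint32_t len;       /* cf. wqe->s.w1.len  */
        uint32_t first_sz;  /* cf. wqe->s.w5.size */
    };
    struct toy_seg {
        uint32_t data_len;
        struct toy_seg *next;
    };

    enum { TOY_SEG_SZ = 2048 };  /* assumed per-buffer capacity */

    /* Distribute the bytes not held by the first buffer over the rest;
     * the caller guarantees bufs >= 1, as the hardware does for the
     * real WQE. */
    static void
    toy_xtract_mseg(const struct toy_wqe *wqe, struct toy_seg *first)
    {
        uint64_t bytes_left = wqe->len - wqe->first_sz;  /* cf. hit 65 */
        uint16_t nb_segs = wqe->bufs;                    /* cf. hit 67 */
        struct toy_seg *seg = first->next;

        while (--nb_segs && seg != NULL) {
            seg->data_len = bytes_left > TOY_SEG_SZ ?
                            TOY_SEG_SZ : (uint32_t)bytes_left;
            bytes_left -= seg->data_len;
            seg = seg->next;
        }
    }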
|
| timvf_worker.h |
    167  if (entry->wqe != tim->ev.u64) {  in timvf_rem_entry()
    180  entry->w0 = entry->wqe = 0;  in timvf_rem_entry()
|
| timvf_evdev.h |
    146  uint64_t wqe;  member
|
| timvf_worker.c |
     36  entry->wqe = tim->ev.u64;  in timvf_format_event()
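Together, the three timvf hits show the arm/cancel protocol for these software timers: arming stores the 64-bit event word into the bucket entry (timvf_format_event), and cancel clears the entry only if the stored word still matches the caller's event, which guards against a slot that has already fired and been recycled (timvf_rem_entry). A hedged sketch of the pattern, with hypothetical stand-in types:

    #include <stdint.h>

    struct toy_tim_entry { uint64_t w0; uint64_t wqe; };
    struct toy_event     { uint64_t u64; };

    /* Arm: cf. entry->wqe = tim->ev.u64 in timvf_format_event(). */
    static void
    toy_format_event(const struct toy_event *ev, struct toy_tim_entry *entry)
    {
        entry->wqe = ev->u64;
    }

    /* Cancel: cf. timvf_rem_entry(); refuses if the slot was recycled. */
    static int
    toy_rem_entry(struct toy_tim_entry *entry, const struct toy_event *ev)
    {
        if (entry->wqe != ev->u64)
            return -1;              /* already fired or reused */
        entry->w0 = entry->wqe = 0;
        return 0;
    }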
|
| /f-stack/dpdk/drivers/net/hinic/base/ |
| hinic_pmd_cmdq.c |
    121  #define WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))  argument
    292  wqe_lcmd = &wqe->wqe_lcmd;  in cmdq_prepare_wqe_ctrl()
    310  WQE_HEADER(wqe)->header_info =  in cmdq_prepare_wqe_ctrl()
    396  struct hinic_cmdq_wqe *wqe)  in clear_wqe_complete_bit() argument
    408  wqe_lcmd = &wqe->wqe_lcmd;  in clear_wqe_complete_bit()
    411  inline_wqe = &wqe->inline_wqe;  in clear_wqe_complete_bit()
    715  struct hinic_cmdq_wqe *wqe;  in hinic_cmdq_poll_msg() local
    727  if (wqe == NULL) {  in hinic_cmdq_poll_msg()
    741  wqe_lcmd = &wqe->wqe_lcmd;  in hinic_cmdq_poll_msg()
    797  memset(&wqe, 0, sizeof(wqe));  in cmdq_sync_cmd_direct_resp()
    [all …]
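The hinic command queue treats each WQE as a raw buffer whose leading bytes are reinterpreted as a header via the WQE_HEADER() cast macro; preparation zeroes the buffer and then writes header_info through that cast. A minimal sketch with a hypothetical reduced header (the real struct hinic_cmdq_header carries more state):

    #include <stdint.h>
    #include <string.h>

    struct toy_cmdq_header { uint32_t header_info; };

    #define TOY_WQE_HEADER(wqe) ((struct toy_cmdq_header *)(wqe))  /* cf. hit 121 */

    static void
    toy_prepare_wqe(void *wqe, size_t wqe_size, uint32_t header_info)
    {
        memset(wqe, 0, wqe_size);                        /* cf. hit 797 */
        TOY_WQE_HEADER(wqe)->header_info = header_info;  /* cf. hit 310 */
    }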
|
| hinic_pmd_wq.h |
     61  #define WQE_SHADOW_PAGE(wq, wqe) \  argument
     62  (u16)(((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
     65  #define WQE_IN_RANGE(wqe, start, end) \  argument
     66  (((unsigned long)(wqe) >= (unsigned long)(start)) && \
     67  ((unsigned long)(wqe) < (unsigned long)(end))
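WQE_SHADOW_PAGE() derives a shadow-page index from pointer arithmetic, and WQE_IN_RANGE() bounds-checks a WQE pointer against the ring: a WQE assembled in the shadow area (for example one that wraps past the ring end) fails the range test and must be handled separately. A usage sketch under those assumptions:

    #include <stddef.h>
    #include <stdint.h>

    #define TOY_WQE_IN_RANGE(wqe, start, end) \
        (((unsigned long)(wqe) >= (unsigned long)(start)) && \
         ((unsigned long)(wqe) < (unsigned long)(end)))

    /* Nonzero when the WQE lives directly in the ring; a shadow (wrapping)
     * WQE fails the test and has to be copied back piecewise. */
    static int
    toy_wqe_is_direct(const void *wqe, const void *ring, size_t ring_len)
    {
        return TOY_WQE_IN_RANGE(wqe, ring, (const uint8_t *)ring + ring_len);
    }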
|
| /f-stack/dpdk/drivers/net/mlx5/ |
| mlx5_flow_age.c |
    212  volatile struct mlx5_aso_wqe *restrict wqe;  in mlx5_aso_init_sq() local
    218  for (i = 0, wqe = &sq->wqes[0]; i < size; ++i, ++wqe) {  in mlx5_aso_init_sq()
    220  (sizeof(*wqe) >> 4));  in mlx5_aso_init_sq()
    226  wqe->aso_cseg.operand_masks = rte_cpu_to_be_32  in mlx5_aso_init_sq()
    232  wqe->aso_cseg.data_mask = RTE_BE64(UINT64_MAX);  in mlx5_aso_init_sq()
    379  volatile struct mlx5_aso_wqe *wqe;  in mlx5_aso_sq_enqueue_burst() local
    392  wqe = &sq->wqes[sq->head & mask];  in mlx5_aso_sq_enqueue_burst()
    399  wqe->general_cseg.misc =  in mlx5_aso_sq_enqueue_burst()
    404  wqe->general_cseg.opcode = rte_cpu_to_be_32  in mlx5_aso_sq_enqueue_burst()
    444  DRV_LOG(ERR, "%08X %08X %08X %08X", wqe[i], wqe[i + 1],  in mlx5_aso_dump_err_objs()
    [all …]
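mlx5_aso_init_sq() walks the whole WQE ring once at setup and pre-fills the fields that never change per request (the operand masks, the all-ones data mask), so the enqueue path only patches the per-request words. A hedged sketch of that init loop with a hypothetical reduced WQE:

    #include <stdint.h>

    struct toy_aso_wqe {
        uint32_t operand_masks;  /* cf. wqe->aso_cseg.operand_masks */
        uint64_t data_mask;      /* cf. wqe->aso_cseg.data_mask     */
    };

    static void
    toy_aso_init_sq(struct toy_aso_wqe *wqes, int size)
    {
        struct toy_aso_wqe *wqe;
        int i;

        /* cf. for (i = 0, wqe = &sq->wqes[0]; i < size; ++i, ++wqe) */
        for (i = 0, wqe = &wqes[0]; i < size; ++i, ++wqe) {
            wqe->operand_masks = UINT32_MAX;  /* real value built with
                                               * rte_cpu_to_be_32()   */
            wqe->data_mask = UINT64_MAX;      /* cf. RTE_BE64(UINT64_MAX) */
        }
    }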
|
| mlx5_rxtx.c |
   1413  rte_prefetch0(wqe);  in mlx5_rx_burst()
   3147  struct mlx5_wqe *wqe;  in mlx5_tx_schedule_send() local
   3264  loc->wqe_last = wqe;  in mlx5_tx_packet_multi_tso()
   3348  loc->wqe_last = wqe;  in mlx5_tx_packet_multi_send()
   3351  dseg = &wqe->dseg[0];  in mlx5_tx_packet_multi_send()
   3551  loc->wqe_last = wqe;  in mlx5_tx_packet_multi_inline()
   3758  loc->wqe_last = wqe;  in mlx5_tx_burst_tso()
   4631  loc->wqe_last = wqe;  in mlx5_tx_burst_single_send()
   4693  loc->wqe_last = wqe;  in mlx5_tx_burst_single_send()
   4734  loc->wqe_last = wqe;  in mlx5_tx_burst_single_send()
   [all …]
|
| mlx5_rxtx.h |
    618  mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,  in mlx5_tx_dbrec_cond_wmb() argument
    622  volatile uint64_t *src = ((volatile uint64_t *)wqe);  in mlx5_tx_dbrec_cond_wmb()
    642  mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)  in mlx5_tx_dbrec() argument
    644  mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);  in mlx5_tx_dbrec()
    753  volatile struct mlx5_wqe_data_seg *wqe =  in mprq_buf_replace() local
    764  wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);  in mprq_buf_replace()
    767  wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);  in mprq_buf_replace()
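mlx5_tx_dbrec_cond_wmb() is the publish step: it stores the new producer index into the doorbell record, optionally fences so the NIC cannot observe the index before the WQE contents, then writes the first 8 bytes of the last WQE (the head of its control segment) to the doorbell register. A hedged sketch with bare volatile pointers standing in for the txq fields; the real code uses rte_wmb() and a compiler barrier rather than the C11-style fence shown here:

    #include <stdint.h>

    static inline void
    toy_tx_dbrec_cond_wmb(volatile uint32_t *db_rec,     /* doorbell record */
                          volatile uint64_t *db_reg,     /* doorbell register */
                          const volatile uint64_t *wqe,  /* last posted WQE  */
                          uint32_t wqe_ci, int mandatory_barrier)
    {
        *db_rec = wqe_ci;               /* publish new producer index      */
        if (mandatory_barrier)          /* order WQE stores before the ring */
            __atomic_thread_fence(__ATOMIC_RELEASE);
        *db_reg = *wqe;                 /* ring: first 8B of control seg   */
    }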
|
| mlx5_txpp.c |
    200  struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;  in mlx5_txpp_fill_wqe_rearm_queue() local
    209  cs = &wqe[i + 0].cseg;  in mlx5_txpp_fill_wqe_rearm_queue()
    221  cs = &wqe[i + 1].cseg;  in mlx5_txpp_fill_wqe_rearm_queue()
    369  struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;  in mlx5_txpp_fill_wqe_clock_queue() local
    370  struct mlx5_wqe_cseg *cs = &wqe->cseg;  in mlx5_txpp_fill_wqe_clock_queue()
    393  struct mlx5_wqe_eseg *es = &wqe->eseg;  in mlx5_txpp_fill_wqe_clock_queue()
|
| /f-stack/dpdk/drivers/regex/mlx5/ |
| mlx5_regex_fastpath.c |
    113  uint8_t *wqe = (uint8_t *)sq->wqe + wqe_offset;  in prep_one() local
    116  set_wqe_ctrl_seg((struct mlx5_wqe_ctrl_seg *)wqe, sq->pi,  in prep_one()
    119  set_regex_ctrl_seg(wqe + 12, 0, op->group_id0, op->group_id1,  in prep_one()
    123  (struct mlx5_wqe_data_seg *)(wqe +  in prep_one()
    140  uint8_t *wqe = (uint8_t *)sq->wqe + wqe_offset;  in send_doorbell() local
    141  ((struct mlx5_wqe_ctrl_seg *)wqe)->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;  in send_doorbell()
    148  *doorbell_addr = *(volatile uint64_t *)wqe;  in send_doorbell()
    304  uint8_t *wqe = (uint8_t *)sq->wqe;  in setup_sqs() local
    310  (wqe + MLX5_REGEX_WQE_METADATA_OFFSET),  in setup_sqs()
    314  (wqe + MLX5_REGEX_WQE_SCATTER_OFFSET),  in setup_sqs()
    [all …]
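The regex fast path treats sq->wqe as a plain byte ring: prep_one() writes the control segment (and, at small fixed offsets, the regex control and data segments) at the request's byte offset, and send_doorbell() copies the first 8 bytes of that control segment to the doorbell register. A simplified sketch with an assumed 64-byte WQE stride:

    #include <stdint.h>
    #include <string.h>

    enum { TOY_WQE_SIZE = 64 };  /* assumed WQE stride */

    /* Write one request's control segment at its byte offset in the ring. */
    static void
    toy_prep_one(uint8_t *sq_wqe, size_t wqe_offset, const void *ctrl,
                 size_t ctrl_len)
    {
        uint8_t *wqe = sq_wqe + wqe_offset;  /* cf. hit 113 */
        memcpy(wqe, ctrl, ctrl_len);         /* cf. set_wqe_ctrl_seg() */
        /* real code also fills the regex control segment at wqe + 12 and
         * the data segments further in, cf. hits 119 and 123 */
    }

    /* Ring the doorbell with the first 8 bytes of the control segment. */
    static void
    toy_send_doorbell(volatile uint64_t *doorbell_addr, const uint8_t *wqe)
    {
        *doorbell_addr = *(const volatile uint64_t *)wqe;  /* cf. hit 148 */
    }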
|
| mlx5_regex_control.c |
    221  sq->wqe = buf;  in regex_ctrl_create_sq()
    304  if (sq->wqe) {  in regex_ctrl_destroy_sq()
    305  rte_free((void *)(uintptr_t)sq->wqe);  in regex_ctrl_destroy_sq()
    306  sq->wqe = NULL;  in regex_ctrl_destroy_sq()
|
| mlx5_regex.h |
     23  uint8_t *wqe; /* The SQ ring buffer. */  member
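Creation stores the ring buffer in sq->wqe and teardown frees it exactly once, NULLing the pointer so regex_ctrl_destroy_sq() stays idempotent. A sketch of that owner-pointer pattern, using the standard allocator in place of the real rte_free()/ring allocation:

    #include <stdlib.h>
    #include <stdint.h>

    struct toy_sq { uint8_t *wqe; /* the SQ ring buffer */ };

    static int
    toy_create_sq(struct toy_sq *sq, size_t len)
    {
        sq->wqe = calloc(1, len);   /* cf. sq->wqe = buf; (hit 221) */
        return sq->wqe != NULL ? 0 : -1;
    }

    static void
    toy_destroy_sq(struct toy_sq *sq)
    {
        if (sq->wqe) {              /* cf. hits 304-306 */
            free(sq->wqe);
            sq->wqe = NULL;         /* safe to call destroy twice */
        }
    }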
|
| /f-stack/freebsd/contrib/octeon-sdk/ |
| cvmx-raid.h |
    105  …uint64_t wqe : 1; /**< Indicates whether RAD submits a work queue entry or write… member
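The RAD control word uses a 1-bit field to choose the completion action: submit a work-queue entry or perform a plain memory write. A hypothetical bitfield sketch; the actual bit position and the neighboring fields are defined by the OCTEON hardware, not by this example:

    #include <stdint.h>

    union toy_rad_cword {
        uint64_t u64;
        struct {
            uint64_t reserved : 63;  /* remaining control bits (placeholder) */
            uint64_t wqe      : 1;   /* 1: submit a work-queue entry on
                                      * completion, 0: plain memory write */
        } s;
    };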
|
| /f-stack/dpdk/drivers/event/octeontx2/ |
| otx2_evdev.h |
    246  struct nix_wqe_hdr_s *wqe = (struct nix_wqe_hdr_s *)get_work1;  in otx2_wqe_to_mbuf() local
    252  otx2_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,  in otx2_wqe_to_mbuf()
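otx2_wqe_to_mbuf() reinterprets the raw 64-bit work word as a NIX WQE header and passes it on as a CQE header, which works because the two hardware layouts share their leading words. A minimal cast sketch with a hypothetical header type:

    #include <stdint.h>

    struct toy_wqe_hdr { uint32_t tag; };  /* hypothetical shared header */

    static void
    toy_wqe_to_mbuf(uint64_t get_work1)
    {
        struct toy_wqe_hdr *wqe =
            (struct toy_wqe_hdr *)(uintptr_t)get_work1;  /* cf. hit 246 */
        (void)wqe;  /* real code: otx2_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag, ...) */
    }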
|
| otx2_tim_worker.c |
     38  entry->wqe = tim->ev.u64;  in tim_format_event()
|
| otx2_tim_worker.h |
    557  if (entry->wqe != tim->ev.u64) {  in tim_rm_entry()
    573  entry->wqe = 0;  in tim_rm_entry()
|
| otx2_tim_evdev.h |
    112  uint64_t wqe;  member
|
| /f-stack/dpdk/drivers/net/mlx4/ |
| mlx4_txq.c |
    193  elt->wqe = NULL;  in mlx4_txq_free_elts()
    473  (&(*txq->elts)[0])->wqe =  in mlx4_tx_queue_setup()
|
| mlx4_rxtx.h |
     83  volatile struct mlx4_wqe_ctrl_seg *wqe; /**< SQ WQE. */  member
|
| mlx4_rxtx.c |
    905  ctrl = elt->wqe;  in mlx4_tx_burst()
   1044  elt->wqe = ctrl;  in mlx4_tx_burst()
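Across the three mlx4 hits, each Tx element caches a pointer to its SQ control segment: teardown clears it (mlx4_txq_free_elts), setup seeds element 0 (mlx4_tx_queue_setup), and every burst loads the cached slot to build the WQE, then stores the slot the next send will use (mlx4_tx_burst). A hedged sketch with hypothetical types:

    #include <stddef.h>

    struct toy_ctrl_seg { unsigned int srcrb_flags; };
    struct toy_txq_elt  { volatile struct toy_ctrl_seg *wqe; };

    static void
    toy_tx_one(struct toy_txq_elt *elt,
               volatile struct toy_ctrl_seg *next_ctrl)
    {
        volatile struct toy_ctrl_seg *ctrl = elt->wqe;  /* cf. hit 905  */
        (void)ctrl;          /* the send WQE is built at *ctrl */
        elt->wqe = next_ctrl;                           /* cf. hit 1044 */
    }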
|
| /f-stack/dpdk/drivers/net/hinic/ |
| hinic_pmd_rx.c |
    267  hinic_prepare_rq_wqe(void *wqe, __rte_unused u16 pi, dma_addr_t buf_addr,  in hinic_prepare_rq_wqe() argument
    270  struct hinic_rq_wqe *rq_wqe = wqe;  in hinic_prepare_rq_wqe()
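hinic_prepare_rq_wqe() fills a receive-queue WQE with the DMA address of a freshly posted buffer; the first hit is the prototype, the second the cast from the raw WQE pointer. A sketch with a hypothetical reduced WQE that, like the real one, splits the address into hi/lo words:

    #include <stdint.h>

    typedef uint64_t toy_dma_addr_t;

    /* Hypothetical RQ WQE layout; the real hinic_rq_wqe also carries
     * completion-section descriptors. */
    struct toy_rq_wqe {
        uint32_t buf_hi;
        uint32_t buf_lo;
    };

    static void
    toy_prepare_rq_wqe(void *wqe, toy_dma_addr_t buf_addr)
    {
        struct toy_rq_wqe *rq_wqe = wqe;  /* cf. hit 270 */
        rq_wqe->buf_hi = (uint32_t)(buf_addr >> 32);
        rq_wqe->buf_lo = (uint32_t)buf_addr;
    }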
|