| /f-stack/dpdk/drivers/net/enic/base/ |
| vnic_wq.c |
|     14  if (!wq->ctrl)  in vnic_wq_get_ctrl()
|     38  wq->head_idx = 0;  in vnic_wq_alloc_bufs()
|     39  wq->tail_idx = 0;  in vnic_wq_alloc_bufs()
|     49  vdev = wq->vdev;  in vnic_wq_free()
|     53  rte_free(wq->bufs);  in vnic_wq_free()
|     54  wq->ctrl = NULL;  in vnic_wq_free()
|     63  wq->index = index;  in vnic_wq_alloc()
|     64  wq->vdev = vdev;  in vnic_wq_alloc()
|     80  vnic_wq_free(wq);  in vnic_wq_alloc()
|    106  wq->tail_idx = wq->head_idx;  in vnic_wq_init_start()
|    [all …]
|
| vnic_wq.h |
|     55  static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)  in vnic_wq_desc_avail() argument
|     58  return wq->ring.desc_avail;  in vnic_wq_desc_avail()
|     61  static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)  in vnic_wq_desc_used() argument
|     64  return wq->ring.desc_count - wq->ring.desc_avail - 1;  in vnic_wq_desc_used()
|    149  void vnic_wq_free(struct vnic_wq *wq);
|    156  void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
|    159  void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error);
|    160  unsigned int vnic_wq_error_status(struct vnic_wq *wq);
|    161  void vnic_wq_enable(struct vnic_wq *wq);
|    162  int vnic_wq_disable(struct vnic_wq *wq);
|    [all …]
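The `vnic_wq_desc_used()` hit above is the classic one-slot-reserved ring computation: of `desc_count` descriptors, at most `desc_count - 1` can ever be outstanding, so an equal head and tail always means "empty" rather than "full". A minimal stand-alone sketch of that arithmetic, using simplified stand-in fields rather than the driver's real `struct vnic_wq`:

```c
/*
 * Sketch of the enic WQ ring accounting; field names are simplified
 * stand-ins. One descriptor stays permanently unused so that a full
 * ring is distinguishable from an empty one.
 */
#include <assert.h>

struct ring {
	unsigned int desc_count; /* total slots in the ring */
	unsigned int desc_avail; /* slots currently free for posting */
};

static unsigned int ring_desc_used(const struct ring *r)
{
	return r->desc_count - r->desc_avail - 1; /* as in vnic_wq_desc_used() */
}

int main(void)
{
	struct ring r = { .desc_count = 64, .desc_avail = 63 };

	assert(ring_desc_used(&r) == 0);  /* freshly initialized: empty */
	r.desc_avail -= 10;               /* post 10 descriptors */
	assert(ring_desc_used(&r) == 10);
	return 0;
}
```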
|
| /f-stack/dpdk/drivers/net/hinic/base/ |
| hinic_pmd_wq.c |
|     11  dma_free_coherent(hwdev, wq->wq_buf_size, (void *)wq->queue_buf_vaddr,  in free_wq_pages()
|     14  wq->queue_buf_paddr = 0;  in free_wq_pages()
|     15  wq->queue_buf_vaddr = 0;  in free_wq_pages()
|     70  wq->cons_idx = 0;  in hinic_wq_allocate()
|     71  wq->prod_idx = 0;  in hinic_wq_allocate()
|     72  wq->delta = q_depth;  in hinic_wq_allocate()
|     93  if ((wq->delta + num_wqebbs) > wq->q_depth)  in hinic_read_wqe()
|    124  wq[i].cons_idx = 0;  in hinic_cmdq_alloc()
|    151  wq->cons_idx = 0;  in hinic_wq_wqe_pg_clear()
|    152  wq->prod_idx = 0;  in hinic_wq_wqe_pg_clear()
|    [all …]
|
| hinic_pmd_wq.h |
|     10  #define WQ_SIZE(wq) (u32)((u64)(wq)->q_depth * (wq)->wqebb_size)  argument
|     12  #define WQE_PAGE_NUM(wq, idx) (((idx) >> ((wq)->wqebbs_per_page_shift)) & \  argument
|     13      ((wq)->num_q_pages - 1))
|     15  #define WQE_PAGE_OFF(wq, idx) ((u64)((wq)->wqebb_size) * \  argument
|     20  #define WQ_PAGE_ADDR(wq, idx) \  argument
|     36  #define WQ_BASE_VADDR(wqs, wq) \  argument
|     40  #define WQ_BASE_PADDR(wqs, wq) (((wqs)->page_paddr[(wq)->page_idx]) \  argument
|     43  #define WQ_BASE_ADDR(wqs, wq) \  argument
|     59  #define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)  argument
|     63      / (wq)->max_wqe_size)
|    [all …]
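These macros assume power-of-two geometry throughout: the queue depth, the number of queue pages, and the WQEBBs per page are all powers of two, so every wrap and page lookup reduces to a shift and a mask. A self-contained model with made-up sizes (the constants below are illustrative, not hinic's):

```c
/*
 * Sketch of the hinic index math: a free-running index is masked into
 * the ring, then split into (page number, byte offset within page).
 * All names and sizes here are stand-ins for the driver's definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define WQEBB_SIZE            64u    /* bytes per work-queue element block */
#define WQEBBS_PER_PAGE_SHIFT 6u     /* 2^6 = 64 WQEBBs per queue page */
#define NUM_Q_PAGES           4u     /* power of two */
#define Q_DEPTH               256u   /* power of two */
#define MASK                  (Q_DEPTH - 1)

#define MASKED_IDX(idx) ((idx) & MASK)                      /* ring wrap */
#define PAGE_NUM(idx)   (((idx) >> WQEBBS_PER_PAGE_SHIFT) & (NUM_Q_PAGES - 1))
#define PAGE_OFF(idx)   ((uint64_t)WQEBB_SIZE * \
			 ((idx) & ((1u << WQEBBS_PER_PAGE_SHIFT) - 1)))

int main(void)
{
	uint32_t prod_idx = 300;              /* free-running, has wrapped */
	uint32_t idx = MASKED_IDX(prod_idx);  /* 300 & 255 = 44 */

	printf("slot %u -> page %u, byte offset %llu\n", idx, PAGE_NUM(idx),
	       (unsigned long long)PAGE_OFF(idx));
	return 0;
}
```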
|
| hinic_pmd_nicio.c |
|    152  struct hinic_wq *wq = sq->wq;  in hinic_sq_prepare_ctxt() local
|    159  ci_start = (u16)(wq->cons_idx);  in hinic_sq_prepare_ctxt()
|    160  pi_start = (u16)(wq->prod_idx);  in hinic_sq_prepare_ctxt()
|    212  struct hinic_wq *wq = rq->wq;  in hinic_rq_prepare_ctxt() local
|    219  ci_start = (u16)(wq->cons_idx);  in hinic_rq_prepare_ctxt()
|    679  return (wq->delta) - 1;  in hinic_get_sq_free_wqebbs()
|    687  return (wq->delta) - 1;  in hinic_get_rq_free_wqebbs()
|    695  return (wq->cons_idx) & wq->mask;  in hinic_get_sq_local_ci()
|    707  sq->wq->delta += num_wqebbs;  in hinic_return_sq_wqe()
|    733  rq->wq->delta += num_wqebbs;  in hinic_return_rq_wqe()
|    [all …]
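The `delta` field visible here is hinic's free-WQEBB counter: it starts at the queue depth, shrinks as WQEBBs are reserved at the producer side, and grows back as completions return them; one WQEBB is held back, hence the `delta - 1` in the free-count helpers. A minimal model of that bookkeeping (simplified names, not the driver's exact struct):

```c
/*
 * Model of hinic's delta-based occupancy tracking, inferred from the
 * snippets above. prod_idx/cons_idx are free-running and masked into
 * the ring only when a slot address is needed.
 */
#include <assert.h>

struct wq {
	unsigned int q_depth;   /* power of two */
	unsigned int mask;      /* q_depth - 1 */
	unsigned int prod_idx;  /* monotonically increasing */
	unsigned int cons_idx;  /* monotonically increasing */
	unsigned int delta;     /* free WQEBBs */
};

static unsigned int wq_free_wqebbs(const struct wq *wq)
{
	return wq->delta - 1;           /* as in hinic_get_sq_free_wqebbs() */
}

static int wq_get_wqebbs(struct wq *wq, unsigned int n)
{
	if (wq_free_wqebbs(wq) < n)
		return -1;              /* ring full */
	wq->prod_idx += n;
	wq->delta -= n;
	return 0;
}

static void wq_return_wqebbs(struct wq *wq, unsigned int n)
{
	wq->cons_idx += n;              /* advance consumer */
	wq->delta += n;                 /* as in hinic_return_sq_wqe() */
}

int main(void)
{
	struct wq wq = { .q_depth = 256, .mask = 255,
			 .prod_idx = 0, .cons_idx = 0, .delta = 256 };

	assert(wq_free_wqebbs(&wq) == 255);
	assert(wq_get_wqebbs(&wq, 4) == 0);  /* reserve a 4-WQEBB WQE */
	assert(wq_free_wqebbs(&wq) == 251);
	wq_return_wqebbs(&wq, 4);            /* completion returns them */
	assert(wq.delta == 256);             /* idle again */
	return 0;
}
```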
|
| hinic_pmd_cmdq.c |
|    144  #define WQE_NUM_WQEBBS(wqe_size, wq) \  argument
|    145      ((u16)(ALIGN((u32)(wqe_size), (wq)->wqebb_size) / (wq)->wqebb_size))
|    159  struct hinic_wq *wq = cmdq->wq;  in hinic_cmdq_idle() local
|    161  return ((wq->delta) == wq->q_depth ? true : false);  in hinic_cmdq_idle()
|    422  hinic_put_wqe(cmdq->wq, num_wqebbs);  in clear_wqe_complete_bit()
|    490  cmdq->wq = wq;  in init_cmdq()
|    689  struct hinic_wq *wq = cmdq->wq;  in cmdq_init_queue_ctxt() local
|    693  u16 start_ci = (u16)(wq->cons_idx);  in cmdq_init_queue_ctxt()
|    777  struct hinic_wq *wq = cmdq->wq;  in cmdq_sync_cmd_direct_resp() local
|    801  if (next_prod_idx >= wq->q_depth) {  in cmdq_sync_cmd_direct_resp()
|    [all …]
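`WQE_NUM_WQEBBS()` is a round-up division: a WQE of `wqe_size` bytes occupies `ceil(wqe_size / wqebb_size)` basic blocks. The `delta == q_depth` test in `hinic_cmdq_idle()` follows from the same accounting, since the queue is idle exactly when every WQEBB is free again. A stand-alone version of the macro, with an assumed `ALIGN()` that rounds up to a multiple:

```c
/* Round-up division from bytes to WQEBBs, mirroring WQE_NUM_WQEBBS(). */
#include <assert.h>
#include <stdint.h>

#define ALIGN(x, a)            (((x) + (a) - 1) / (a) * (a))
#define WQE_NUM_WQEBBS(sz, bb) ((uint16_t)(ALIGN((uint32_t)(sz), (bb)) / (bb)))

int main(void)
{
	assert(WQE_NUM_WQEBBS(64, 64) == 1);   /* exactly one block */
	assert(WQE_NUM_WQEBBS(65, 64) == 2);   /* spills into a second */
	assert(WQE_NUM_WQEBBS(192, 64) == 3);
	return 0;
}
```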
|
| hinic_pmd_nicio.h |
|    173  struct hinic_wq *wq;  member
|    183  struct hinic_wq *wq;  member
|
| /f-stack/dpdk/drivers/net/mlx5/ |
| mlx5_txpp.c |
|    123  if (wq->sq)  in mlx5_txpp_destroy_send_queue()
|    127  if (wq->sq_buf)  in mlx5_txpp_destroy_send_queue()
|    129  if (wq->cq)  in mlx5_txpp_destroy_send_queue()
|    135  memset(wq, 0, sizeof(*wq));  in mlx5_txpp_destroy_send_queue()
|    171  cs.w32[1] = wq->wqes[ci & (wq->sq_size - 1)].ctrl[1];  in mlx5_txpp_doorbell_rearm_queue()
|    174  *wq->sq_dbrec = rte_cpu_to_be_32(wq->sq_ci);  in mlx5_txpp_doorbell_rearm_queue()
|    291  wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);  in mlx5_txpp_create_rearm_queue()
|    345  wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +  in mlx5_txpp_create_rearm_queue()
|    531  wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);  in mlx5_txpp_create_clock_queue()
|    598  wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +  in mlx5_txpp_create_clock_queue()
|    [all …]
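The rearm path shows the usual mlx5 doorbell shape: a free-running counter is masked into a power-of-two WQE array, and the producer value is published to a doorbell record in big-endian byte order for the NIC to poll. A heavily simplified, hypothetical sketch of that pattern; the types and the byte-swap helper are assumptions, and the real code also works on the WQE control segment it reads at line 171:

```c
/* Simplified doorbell-rearm pattern; not mlx5's actual structures. */
#include <stdint.h>

/* assumes a little-endian host; the device expects big-endian */
#define cpu_to_be32(x) __builtin_bswap32(x)

struct wqe { uint32_t ctrl[4]; };

struct send_queue {
	struct wqe *wqes;            /* sq_size entries, sq_size power of two */
	uint32_t sq_size;
	uint32_t sq_ci;              /* free-running consumer counter */
	volatile uint32_t *sq_dbrec; /* doorbell record the NIC polls */
};

static void doorbell_rearm(struct send_queue *wq)
{
	/* mask the free-running counter into the ring, as at line 171 */
	struct wqe *w = &wq->wqes[wq->sq_ci & (wq->sq_size - 1)];

	(void)w; /* the real code inspects this WQE's control segment */

	/* publish the counter, byte-swapped to big-endian (line 174) */
	*wq->sq_dbrec = cpu_to_be32(wq->sq_ci);
}
```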
|
| /f-stack/dpdk/drivers/net/enic/ |
| enic_rxtx.c |
|    344  tail_idx = wq->tail_idx;  in enic_free_wq_bufs()
|    369  wq->tail_idx = tail_idx;  in enic_free_wq_bufs()
|    461  head_idx = wq->head_idx;  in enic_xmit_pkts()
|    535  wq->cq_pend++;  in enic_xmit_pkts()
|    539  wq->cq_pend = 0;  in enic_xmit_pkts()
|    555  wq->cq_pend++;  in enic_xmit_pkts()
|    561  wq->cq_pend = 0;  in enic_xmit_pkts()
|    585  wq->head_idx = head_idx;  in enic_xmit_pkts()
|    644  struct vnic_wq *wq;  in enic_simple_xmit_pkts() local
|    656  head_idx = wq->head_idx;  in enic_simple_xmit_pkts()
|    [all …]
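The `cq_pend` counting suggests batched completion requests: instead of asking the hardware for a completion per packet, the driver counts descriptors posted since the last request and only sets a "generate completion" flag once the count reaches a threshold. An illustrative sketch; the threshold and flag names are assumptions, not enic's actual values:

```c
/* Batched completion requests; names and threshold are hypothetical. */
#define CQ_THRESHOLD 32          /* descriptors per completion request */

struct desc { unsigned int flags; };
#define DESC_F_CQ_ENTRY 0x1      /* "post a completion for this desc" */

struct txq { unsigned int cq_pend; };

static void post_desc(struct txq *wq, struct desc *d)
{
	wq->cq_pend++;
	if (wq->cq_pend >= CQ_THRESHOLD) {
		d->flags |= DESC_F_CQ_ENTRY;   /* ask for one completion */
		wq->cq_pend = 0;               /* restart the batch count */
	}
}
```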
|
| enic_main.c |
|    505  struct vnic_wq *wq;  in enic_prep_wq_for_simple_tx() local
|    513  wq = &enic->wq[queue_idx];  in enic_prep_wq_for_simple_tx()
|   1013  struct vnic_wq *wq;  in enic_free_wq() local
|   1019  wq = (struct vnic_wq *)txq;  in enic_free_wq()
|   1022  vnic_wq_free(wq);  in enic_free_wq()
|   1031  struct vnic_wq *wq;  in enic_alloc_wq() local
|   1049  wq = &enic->wq[queue_idx];  in enic_alloc_wq()
|   1050  wq->socket_id = socket_id;  in enic_alloc_wq()
|   1070  vnic_wq_free(wq);  in enic_alloc_wq()
|   1082  if (!wq->cqmsg_rz)  in enic_alloc_wq()
|    [all …]
|
| enic.h |
|    157  struct vnic_wq *wq;  member
|    301  static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)  in enic_cq_wq() argument
|    303  return enic->rq_count + wq;  in enic_cq_wq()
|    426  unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
|    427  void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
|    432  void enic_post_wq_index(struct vnic_wq *wq);
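`enic_cq_wq()` implies a flat completion-queue numbering: the first `rq_count` CQs belong to receive queues and the WQ (transmit) CQs follow. A tiny illustration with made-up counts:

```c
/* Flat CQ numbering: RQ CQs first, then WQ CQs, as in enic_cq_wq(). */
#include <assert.h>

struct enic_layout { unsigned int rq_count; };

static unsigned int cq_for_wq(const struct enic_layout *e, unsigned int wq)
{
	return e->rq_count + wq;   /* WQ CQs sit after all RQ CQs */
}

int main(void)
{
	struct enic_layout e = { .rq_count = 4 };

	assert(cq_for_wq(&e, 0) == 4);   /* first TX queue -> CQ 4 */
	assert(cq_for_wq(&e, 3) == 7);
	return 0;
}
```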
|
| enic_vf_representor.c |
|     48  struct vnic_wq *wq;  in enic_vf_dev_tx_queue_setup() local
|     60  wq = &pf->wq[vf->pf_wq_idx];  in enic_vf_dev_tx_queue_setup()
|     61  wq->offloads = tx_conf->offloads |  in enic_vf_dev_tx_queue_setup()
|     63  eth_dev->data->tx_queues[0] = (void *)wq;  in enic_vf_dev_tx_queue_setup()
|    192  vnic_wq_init(&pf->wq[index], cq_idx, 1, 0);  in enic_vf_dev_start()
|    203  (uint64_t)pf->wq[index].cqmsg_rz->iova);  in enic_vf_dev_start()
|    205  vnic_wq_enable(&pf->wq[index]);  in enic_vf_dev_start()
|    257  vnic_wq_disable(&pf->wq[vf->pf_wq_idx]);  in enic_vf_dev_stop()
|    258  vnic_wq_clean(&pf->wq[vf->pf_wq_idx], enic_free_wq_buf);  in enic_vf_dev_stop()
|
| enic_ethdev.c |
|    136  if (!enic->wq[index].ctrl)  in enicpmd_dev_setup_intr()
|    171  struct vnic_wq *wq;  in enicpmd_dev_tx_queue_setup() local
|    178  wq = &enic->wq[queue_idx];  in enicpmd_dev_tx_queue_setup()
|    179  wq->offloads = tx_conf->offloads |  in enicpmd_dev_tx_queue_setup()
|    181  eth_dev->data->tx_queues[queue_idx] = (void *)wq;  in enicpmd_dev_tx_queue_setup()
|    911  struct vnic_wq *wq = &enic->wq[tx_queue_id];  in enicpmd_dev_txq_info_get() local
|    914  qinfo->nb_desc = wq->ring.desc_count;  in enicpmd_dev_txq_info_get()
|    916  qinfo->conf.offloads = wq->offloads;  in enicpmd_dev_txq_info_get()
|
| /f-stack/dpdk/drivers/net/mlx5/linux/ |
| mlx5_verbs.c |
|    321  if (rxq_obj->wq) {  in mlx5_rxq_ibv_wq_create()
|    336  rxq_obj->wq = NULL;  in mlx5_rxq_ibv_wq_create()
|    340  return rxq_obj->wq;  in mlx5_rxq_ibv_wq_create()
|    414  if (!tmpl->wq) {  in mlx5_rxq_ibv_obj_new()
|    429  obj.rwq.in = tmpl->wq;  in mlx5_rxq_ibv_obj_new()
|    447  if (tmpl->wq)  in mlx5_rxq_ibv_obj_new()
|    533  wq[i] = rxq_ctrl->obj->wq;  in mlx5_ibv_ind_table_new()
|    538  wq[i] = wq[j];  in mlx5_ibv_ind_table_new()
|    542  .ind_tbl = wq,  in mlx5_ibv_ind_table_new()
|    691  if (rxq->wq)  in mlx5_rxq_ibv_obj_drop_release()
|    [all …]
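The `wq[i] = wq[j]` line is the standard RSS indirection-table padding: verbs wants a power-of-two table, so when the RX queue count is not a power of two the tail of the table repeats the existing WQs in round-robin order. A sketch with plain pointers standing in for `struct ibv_wq *`:

```c
/* Power-of-two indirection-table padding by wrapping, wq[i] = wq[j]. */
#include <stdio.h>

static unsigned int roundup_pow2(unsigned int n)
{
	unsigned int p = 1;
	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	void *rx_wq[3] = { (void *)0xa, (void *)0xb, (void *)0xc };
	unsigned int n = 3, size = roundup_pow2(n);   /* 3 queues -> table of 4 */
	void *ind_tbl[8];
	unsigned int i, j;

	for (i = 0; i != n; ++i)
		ind_tbl[i] = rx_wq[i];
	for (j = 0; i != size; ++j, ++i)   /* pad the tail by wrapping */
		ind_tbl[i] = ind_tbl[j];

	for (i = 0; i != size; ++i)
		printf("slot %u -> wq %p\n", i, ind_tbl[i]);
	return 0;
}
```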
|
| /f-stack/dpdk/drivers/net/mlx4/ |
| mlx4_rxq.c |
|    205  ind_tbl[i] = rxq->wq;  in mlx4_rss_attach()
|    363  struct ibv_wq *wq;  in mlx4_rss_init() local
|    403  if (wq) {  in mlx4_rss_init()
|    404  wq_num = wq->wq_num;  in mlx4_rss_init()
|    410  if (!wq) {  in mlx4_rss_init()
|    540  if (!wq) {  in mlx4_rxq_attach()
|    546  (wq,  in mlx4_rxq_attach()
|    558  mlxdv.rwq.in = wq;  in mlx4_rxq_attach()
|    608  rxq->wq = wq;  in mlx4_rxq_attach()
|    627  if (wq)  in mlx4_rxq_attach()
|    [all …]
|
| mlx4_glue.c |
|    218  mlx4_glue_destroy_wq(struct ibv_wq *wq)  in mlx4_glue_destroy_wq() argument
|    220  return ibv_destroy_wq(wq);  in mlx4_glue_destroy_wq()
|    223  mlx4_glue_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr)  in mlx4_glue_modify_wq() argument
|    225  return ibv_modify_wq(wq, wq_attr);  in mlx4_glue_modify_wq()
|
| mlx4_glue.h |
|     78  int (*destroy_wq)(struct ibv_wq *wq);
|     79  int (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr);
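The glue files wrap each libibverbs entry point behind a function pointer, so the PMD can avoid a hard link-time dependency on rdma-core and fill the table via `dlopen` when built that way. A minimal sketch of the pattern for the two WQ calls above; the `dlopen` plumbing is omitted and the struct is a cut-down stand-in for mlx4's full glue table:

```c
/*
 * Cut-down glue-table pattern. In a real build these declarations come
 * from <infiniband/verbs.h>; they are repeated here to keep the sketch
 * self-contained.
 */
struct ibv_wq;       /* opaque, provided by rdma-core */
struct ibv_wq_attr;

extern int ibv_destroy_wq(struct ibv_wq *wq);
extern int ibv_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *attr);

struct glue_table {
	int (*destroy_wq)(struct ibv_wq *wq);
	int (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *attr);
};

/* thin wrappers, one per verbs entry point, as in mlx4_glue.c */
static int glue_destroy_wq(struct ibv_wq *wq)
{
	return ibv_destroy_wq(wq);
}

static int glue_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *attr)
{
	return ibv_modify_wq(wq, attr);
}

const struct glue_table glue = {
	.destroy_wq = glue_destroy_wq,
	.modify_wq  = glue_modify_wq,
};
```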
|
| mlx4.c |
|    619  struct ibv_wq *wq = NULL;  in mlx4_hw_rss_sup() local
|    641  wq = cq ? mlx4_glue->create_wq  in mlx4_hw_rss_sup()
|    650  ind = wq ? mlx4_glue->create_rwq_ind_table  in mlx4_hw_rss_sup()
|    654  .ind_tbl = &wq,  in mlx4_hw_rss_sup()
|    683  if (wq)  in mlx4_hw_rss_sup()
|    684  claim_zero(mlx4_glue->destroy_wq(wq));  in mlx4_hw_rss_sup()
|
| /f-stack/dpdk/drivers/net/hinic/ |
| hinic_pmd_tx.h |
|     11  #define HINIC_GET_WQ_HEAD(txq) ((txq)->wq->queue_buf_vaddr)
|     14      ((txq)->wq->queue_buf_vaddr + (txq)->wq->wq_buf_size)
|    109  struct hinic_wq *wq;  member
|
| hinic_pmd_tx.c |
|    185  (txq)->wq->cons_idx += wqebb_cnt; \
|    186  (txq)->wq->delta += wqebb_cnt; \
|    675  struct hinic_wq *wq = txq->wq;  in hinic_get_sq_wqe() local
|    678  cur_pi = MASKED_WQE_IDX(wq, wq->prod_idx);  in hinic_get_sq_wqe()
|    682  wq->prod_idx += wqebb_cnt;  in hinic_get_sq_wqe()
|    683  wq->delta -= wqebb_cnt;  in hinic_get_sq_wqe()
|   1108  prod_idx = MASKED_SQ_IDX(sq, sq->wq->prod_idx);  in hinic_sq_write_db()
|   1333  sq->wq = &nic_io->sq_wq[q_id];  in hinic_create_sq()
|   1354  if (qp->sq.wq == NULL)  in hinic_destroy_sq()
|   1358  hinic_wq_free(nic_io->hwdev, qp->sq.wq);  in hinic_destroy_sq()
|    [all …]
|
| hinic_pmd_rx.c |
|     21  ((rxq)->wq->mask)
|     24  (((rxq)->wq->cons_idx) & HINIC_GET_RQ_WQE_MASK(rxq))
|     31  (rxq)->wq->cons_idx += (wqebb_cnt); \
|     32  (rxq)->wq->delta += (wqebb_cnt); \
|    232  rq->wq = &nic_io->rq_wq[q_id];  in hinic_create_rq()
|    256  if (qp->rq.wq == NULL)  in hinic_destroy_rq()
|    262  hinic_wq_free(nic_io->hwdev, qp->rq.wq);  in hinic_destroy_rq()
|    263  qp->rq.wq = NULL;  in hinic_destroy_rq()
|    928  rq_wqe = WQ_WQE_ADDR(rxq->wq, (u32)pi);  in hinic_rearm_rxq_mbuf()
|    937  rxq->wq->prod_idx += rearm_wqebbs;  in hinic_rearm_rxq_mbuf()
|    [all …]
|
| /f-stack/freebsd/kern/ |
| uipc_ktls.c |
|   1615  struct ktls_wq *wq;  in ktls_check_rx() local
|   1647  mtx_lock(&wq->mtx);  in ktls_check_rx()
|   1652  wakeup(wq);  in ktls_check_rx()
|   1938  struct ktls_wq *wq;  in ktls_enqueue_to_free() local
|   1944  mtx_lock(&wq->mtx);  in ktls_enqueue_to_free()
|   1949  wakeup(wq);  in ktls_enqueue_to_free()
|   1955  struct ktls_wq *wq;  in ktls_enqueue() local
|   1974  mtx_lock(&wq->mtx);  in ktls_enqueue()
|   1979  wakeup(wq);  in ktls_enqueue()
|   2148  mtx_lock(&wq->mtx);  in ktls_work_thread()
|    [all …]
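The ktls hits trace a classic kernel work-queue handoff: producers append under `wq->mtx` and `wakeup()` the worker thread sleeping on the queue address. A simplified, untested sketch in FreeBSD style; the `struct work` and `STAILQ` below are stand-ins for the real per-record ktls queues, and queue/mutex initialization is omitted:

```c
/* Producer/worker handoff on a mutex-protected queue, FreeBSD style. */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/systm.h>

struct work { STAILQ_ENTRY(work) link; };

struct ktls_wq_sketch {
	struct mtx mtx;
	STAILQ_HEAD(, work) head;
};

static void
enqueue_work(struct ktls_wq_sketch *wq, struct work *w)
{
	mtx_lock(&wq->mtx);
	STAILQ_INSERT_TAIL(&wq->head, w, link);
	mtx_unlock(&wq->mtx);
	wakeup(wq);                        /* kick the worker thread */
}

static struct work *
dequeue_work(struct ktls_wq_sketch *wq)
{
	struct work *w;

	mtx_lock(&wq->mtx);
	while ((w = STAILQ_FIRST(&wq->head)) == NULL)
		mtx_sleep(wq, &wq->mtx, 0, "ktlswq", 0);  /* wait for work */
	STAILQ_REMOVE_HEAD(&wq->head, link);
	mtx_unlock(&wq->mtx);
	return (w);
}
```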
|
| /f-stack/dpdk/drivers/raw/ioat/ |
| idxd_vdev.c |
|     74  int dev, wq, bytes = -1;  in idxd_rawdev_parse_wq() local
|     75  int read = sscanf(value, "%d.%d%n", &dev, &wq, &bytes);  in idxd_rawdev_parse_wq()
|     82  if (dev >= UINT8_MAX || wq >= UINT8_MAX) {  in idxd_rawdev_parse_wq()
|     88  args->wq_id = wq;  in idxd_rawdev_parse_wq()
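The `"%d.%d%n"` format is the standard trick for validating that a `dev.wq` string was consumed in full: `%n` records how many characters `sscanf` used (without counting as a conversion), so trailing junk can be rejected by comparing against `strlen()`. A stand-alone version of the check:

```c
/* Parse "dev.wq" and reject partial matches via the %n position. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int parse_dev_wq(const char *value, uint8_t *dev_id, uint8_t *wq_id)
{
	int dev, wq, bytes = -1;

	if (sscanf(value, "%d.%d%n", &dev, &wq, &bytes) != 2)
		return -1;                        /* not two integers */
	if (bytes != (int)strlen(value))
		return -1;                        /* trailing characters */
	if (dev < 0 || dev >= UINT8_MAX || wq < 0 || wq >= UINT8_MAX)
		return -1;                        /* out of range */
	*dev_id = (uint8_t)dev;
	*wq_id = (uint8_t)wq;
	return 0;
}

int main(void)
{
	uint8_t d, w;

	printf("\"0.3\"   -> %d\n", parse_dev_wq("0.3", &d, &w));   /* 0 (ok) */
	printf("\"0.3xy\" -> %d\n", parse_dev_wq("0.3xy", &d, &w)); /* -1 */
	return 0;
}
```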
|
| /f-stack/freebsd/contrib/ena-com/ |
| ena_plat.h |
|    235  #define ena_wait_event_t struct { struct cv wq; struct mtx mtx; }
|    238  cv_init(&((waitqueue).wq), "cv"); \
|    248  cv_destroy(&((comp_ctx->wait_event).wq)); \
|    254  cv_init(&((waitqueue).wq), (waitqueue).wq.cv_description)
|    258  cv_timedwait(&((waitqueue).wq), &((waitqueue).mtx), \
|    265  cv_broadcast(&((waitqueue).wq)); \
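The ena wait-event macros pair a condition variable with a mutex in the usual FreeBSD way: the waiter holds the mutex across `cv_timedwait()` (which returns `EWOULDBLOCK` on timeout) and the signaler broadcasts under the same mutex. A hedged sketch of how such an event is typically driven; the `done` flag is illustrative, and the real completion context carries more state:

```c
/* Condition-variable wait event, FreeBSD kernel style. */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

struct wait_event {
	struct cv wq;
	struct mtx mtx;
	int done;
};

static void
wait_event_init(struct wait_event *ev)
{
	cv_init(&ev->wq, "cv");
	mtx_init(&ev->mtx, "wq mtx", NULL, MTX_DEF);
	ev->done = 0;
}

static int
wait_event_timedwait(struct wait_event *ev, int timo_ticks)
{
	int rc = 0;

	mtx_lock(&ev->mtx);
	while (!ev->done && rc == 0)
		rc = cv_timedwait(&ev->wq, &ev->mtx, timo_ticks);
	mtx_unlock(&ev->mtx);
	return (rc);            /* 0 on wakeup, EWOULDBLOCK on timeout */
}

static void
wait_event_signal(struct wait_event *ev)
{
	mtx_lock(&ev->mtx);
	ev->done = 1;
	cv_broadcast(&ev->wq);
	mtx_unlock(&ev->mtx);
}
```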
|
| /f-stack/dpdk/drivers/common/mlx5/ |
| mlx5_devx_cmds.c |
|    905  MLX5_SET(wq, wq_ctx, wq_type, wq_attr->wq_type);  in devx_cmd_fill_wq_data()
|    908  MLX5_SET(wq, wq_ctx, cd_slave, wq_attr->cd_slave);  in devx_cmd_fill_wq_data()
|    912  MLX5_SET(wq, wq_ctx, lwm, wq_attr->lwm);  in devx_cmd_fill_wq_data()
|    913  MLX5_SET(wq, wq_ctx, pd, wq_attr->pd);  in devx_cmd_fill_wq_data()
|    914  MLX5_SET(wq, wq_ctx, uar_page, wq_attr->uar_page);  in devx_cmd_fill_wq_data()
|    923  MLX5_SET(wq, wq_ctx, log_hairpin_num_packets,  in devx_cmd_fill_wq_data()
|    926  MLX5_SET(wq, wq_ctx, single_wqe_log_num_of_strides,  in devx_cmd_fill_wq_data()
|    980  wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);  in mlx5_devx_cmd_create_rq()
|   1032  wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);  in mlx5_devx_cmd_modify_rq()
|   1033  MLX5_SET(wq, wq_ctx, lwm, rq_attr->lwm);  in mlx5_devx_cmd_modify_rq()
|    [all …]
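`MLX5_SET(wq, wq_ctx, field, val)` writes one named field of the device's WQ context, whose layout the PRM defines at bit granularity inside a big-endian blob; `MLX5_ADDR_OF()` locates the embedded `wq` block inside the RQ context. A heavily simplified illustration of the idea; the real macros derive offsets from generated headers in `mlx5_prm.h`, and the field descriptors below are invented for the example:

```c
/* Bit-granular field write into a big-endian device context (sketch). */
#include <stdint.h>

/* hypothetical descriptor: bit offset and width inside the context */
struct field { unsigned int bit_off, bit_sz; };

static const struct field wq_type_fld = { .bit_off = 0,  .bit_sz = 4  };
static const struct field wq_pd_fld   = { .bit_off = 32, .bit_sz = 24 };

/* assumes a little-endian host; the device context is big-endian */
static uint32_t be32(uint32_t x) { return __builtin_bswap32(x); }

static void set_field(uint32_t *ctx, struct field f, uint32_t val)
{
	uint32_t *dw = ctx + f.bit_off / 32;           /* containing dword */
	unsigned int shift = 32 - (f.bit_off % 32) - f.bit_sz;
	uint32_t mask = (f.bit_sz == 32) ? ~0u : ((1u << f.bit_sz) - 1);
	uint32_t cur = be32(*dw);

	cur &= ~(mask << shift);                       /* clear old bits */
	cur |= (val & mask) << shift;                  /* splice new value */
	*dw = be32(cur);
}

int main(void)
{
	uint32_t wq_ctx[4] = { 0 };

	set_field(wq_ctx, wq_type_fld, 1);      /* e.g. a cyclic WQ type */
	set_field(wq_ctx, wq_pd_fld, 0x1234);   /* protection domain id */
	return 0;
}
```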
|