/dpdk/drivers/net/ice/base/
ice_controlq.c
      82  if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)   in ice_check_sq_alive()
      83  return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |   in ice_check_sq_alive()
      85  (cq->num_sq_entries | cq->sq.len_ena_mask);   in ice_check_sq_alive()
     104  cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,   in ice_alloc_ctrlq_sq_ring()
     346  if (!cq->num_sq_entries || !cq->sq_buf_size) {   in ice_init_sq()
     370  cq->sq.count = cq->num_sq_entries;   in ice_init_sq()
     408  if (!cq->num_rq_entries || !cq->rq_buf_size) {   in ice_init_rq()
     432  cq->rq.count = cq->num_rq_entries;   in ice_init_rq()
     617  !cq->rq_buf_size || !cq->sq_buf_size) {   in ice_init_ctrlq()
    1033  if (cq->sq.next_to_use == cq->sq.count)   in ice_sq_send_cmd_nolock()
    [all …]
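
The ice_check_sq_alive() hits are worth pausing on: the send queue counts as alive only if the hardware LEN register still holds the configured entry count with its enable bit set, since a firmware reset clears that register. A minimal sketch of the check, where reg_read(), LEN_MASK and LEN_ENA are stand-ins (assumptions, not the driver's real rd32() or mask values):

    #include <stdbool.h>
    #include <stdint.h>

    #define LEN_MASK 0x3ffu      /* assumed: register bits holding the ring length */
    #define LEN_ENA  0x80000000u /* assumed: "queue enabled" bit */

    /* Stand-in for the driver's rd32(): a plain volatile MMIO read. */
    static inline uint32_t reg_read(const volatile uint32_t *reg)
    {
        return *reg;
    }

    /* Alive iff hardware still reports the configured length with the
     * enable bit set. */
    static bool sq_alive(const volatile uint32_t *len_reg, uint32_t num_entries)
    {
        return (reg_read(len_reg) & (LEN_MASK | LEN_ENA)) ==
               (num_entries | LEN_ENA);
    }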
|
/dpdk/drivers/net/enic/base/
vnic_cq.c
      10  void vnic_cq_free(struct vnic_cq *cq)   in vnic_cq_free()
      12  vnic_dev_free_desc_ring(cq->vdev, &cq->ring);   in vnic_cq_free()
      14  cq->ctrl = NULL;   in vnic_cq_free()
      25  cq->index = index;   in vnic_cq_alloc()
      26  cq->vdev = vdev;   in vnic_cq_alloc()
      29  if (!cq->ctrl) {   in vnic_cq_alloc()
      53  iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);   in vnic_cq_init()
      70  cq->to_clean = 0;   in vnic_cq_clean()
      71  cq->last_color = 0;   in vnic_cq_clean()
      73  iowrite32(0, &cq->ctrl->cq_head);   in vnic_cq_clean()
    [all …]
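
vnic_cq_clean() shows the reset half of the lifecycle: rewind the software cursor and the color expectation, then zero the hardware head pointer so the ring restarts from slot 0. A sketch under assumed field names, with mmio_write32() standing in for the driver's iowrite32():

    #include <stdint.h>

    struct cq_ctrl {                  /* assumed slice of the register block */
        volatile uint32_t ring_size;
        volatile uint32_t cq_head;
    };

    struct cq_sw {
        unsigned int to_clean;        /* next slot software will service */
        unsigned int last_color;      /* color marking "already seen" entries */
        struct cq_ctrl *ctrl;
    };

    static inline void mmio_write32(uint32_t val, volatile uint32_t *reg)
    {
        *reg = val;                   /* stand-in for iowrite32() */
    }

    static void cq_clean(struct cq_sw *cq)
    {
        cq->to_clean = 0;             /* restart servicing at slot 0 */
        cq->last_color = 0;           /* reset the color expectation */
        mmio_write32(0, &cq->ctrl->cq_head);
        /* the real vnic_cq_clean() resets further registers hidden
         * behind the [all …] truncation above */
    }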
|
vnic_cq.h
      64  void vnic_cq_free(struct vnic_cq *cq);
      65  int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
      68  void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
      73  void vnic_cq_clean(struct vnic_cq *cq);
      74  int vnic_cq_mem_size(struct vnic_cq *cq, unsigned int desc_count,
|
/dpdk/drivers/vdpa/mlx5/
mlx5_vdpa_event.c
      68  memset(cq, 0, sizeof(*cq));   in mlx5_vdpa_cq_destroy()
      82  cq->arm_sn++;   in mlx5_vdpa_cq_arm()
      83  cq->armed = 1;   in mlx5_vdpa_cq_arm()
     106  cq->cq_obj.cq->obj,   in mlx5_vdpa_cq_create()
     130  container_of(cq, struct mlx5_vdpa_event_qp, cq);   in mlx5_vdpa_cq_poll()
     158  cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);   in mlx5_vdpa_cq_poll()
     173  cq = &priv->virtqs[i].eqp.cq;   in mlx5_vdpa_arm_all_cqs()
     174  if (cq->cq_obj.cq && !cq->armed)   in mlx5_vdpa_arm_all_cqs()
     207  if (cq->cq_obj.cq) {   in mlx5_vdpa_queue_complete()
     226  struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;   in mlx5_vdpa_queues_complete()
    [all …]
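
The mlx5_vdpa_cq_arm() lines show the bookkeeping used when a CQ switches from polling to event mode: each arm bumps a sequence number that gets encoded into the arm doorbell, letting a fresh arm be told apart from a stale one, while the armed flag stops mlx5_vdpa_arm_all_cqs() from arming a CQ twice. A reduced sketch (field names follow the hits above; the doorbell encoding itself is omitted):

    #include <stdint.h>

    struct vdpa_cq {
        uint32_t cq_ci;   /* consumer index */
        uint16_t arm_sn;  /* arm sequence number, bumped on every arm */
        int armed;        /* set until the next completion event fires */
    };

    static void cq_arm(struct vdpa_cq *cq)
    {
        cq->arm_sn++;
        /* a real driver now writes (arm_sn, cq_ci) to the CQ arm
         * doorbell so hardware raises exactly one event */
        cq->armed = 1;
    }

    static void arm_if_needed(struct vdpa_cq *cq)
    {
        if (!cq->armed)   /* mirrors the check in mlx5_vdpa_arm_all_cqs() */
            cq_arm(cq);
    }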
|
/dpdk/drivers/regex/mlx5/
mlx5_regex_control.c
      58  regex_ctrl_destroy_cq(struct mlx5_regex_cq *cq)   in regex_ctrl_destroy_cq()
      60  mlx5_devx_cq_destroy(&cq->cq_obj);   in regex_ctrl_destroy_cq()
      61  memset(cq, 0, sizeof(*cq));   in regex_ctrl_destroy_cq()
      84  cq->ci = 0;   in regex_ctrl_create_cq()
      85  ret = mlx5_devx_cq_create(priv->cdev->ctx, &cq->cq_obj, cq->log_nb_desc,   in regex_ctrl_create_cq()
      89  memset(cq, 0, sizeof(*cq));   in regex_ctrl_create_cq()
     138  .cqn = qp->cq.cq_obj.cq->id,   in regex_ctrl_create_hw_qp()
     221  qp->cq.log_nb_desc = log_desc + (!!priv->has_umr);   in mlx5_regex_qp_setup()
     236  ret = regex_ctrl_create_cq(priv, &qp->cq);   in mlx5_regex_qp_setup()
     269  regex_ctrl_destroy_cq(&qp->cq);   in mlx5_regex_qp_setup()
    [all …]
|
mlx5_regex_fastpath.c
      52  cq_size_get(struct mlx5_regex_cq *cq)   in cq_size_get()
      54  return (1U << cq->log_nb_desc);   in cq_size_get()
     485  poll_one(struct mlx5_regex_cq *cq)   in poll_one()
     490  next_cqe_offset = (cq->ci & (cq_size_get(cq) - 1));   in poll_one()
     491  cqe = (volatile struct mlx5_cqe *)(cq->cq_obj.cqes + next_cqe_offset);   in poll_one()
     494  int ret = check_cqe(cqe, cq_size_get(cq), cq->ci);   in poll_one()
     529  struct mlx5_regex_cq *cq = &queue->cq;   in mlx5_regexdev_dequeue()
     533  while ((cqe = poll_one(cq))) {   in mlx5_regexdev_dequeue()
     556  cq->ci = (cq->ci + 1) & 0xffffff;   in mlx5_regexdev_dequeue()
     558  cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->ci);   in mlx5_regexdev_dequeue()
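
Taken together, poll_one() and mlx5_regexdev_dequeue() are a compact example of the mlx5 consumer pattern: locate the next CQE by masking the consumer index against the power-of-two ring size, test ownership, advance the 24-bit index, and publish progress through the big-endian doorbell record. A self-contained sketch; the ownership test is modeled on mlx5's check_cqe() parity rule, and the per-CQE doorbell write is a simplification (the real loop batches it after draining):

    #include <endian.h>   /* htobe32(); assumes a Linux/glibc host */
    #include <stdbool.h>
    #include <stdint.h>

    struct cqe { uint8_t op_own; };   /* opcode + ownership bit, as in mlx5 */

    struct cq {
        uint32_t ci;                  /* consumer index; low 24 bits valid */
        uint32_t log_nb_desc;         /* ring holds 1u << log_nb_desc CQEs */
        volatile struct cqe *cqes;
        volatile uint32_t *db_rec;    /* doorbell record read by hardware */
    };

    /* Software owns a CQE when its ownership bit matches the parity of
     * the current pass over the ring (modeled on check_cqe()). */
    static bool cqe_owned_by_sw(const volatile struct cqe *c,
                                uint32_t ci, uint32_t ring_sz)
    {
        return (c->op_own & 1u) == !!(ci & ring_sz);
    }

    static volatile struct cqe *cq_poll_one(struct cq *cq)
    {
        uint32_t ring_sz = 1u << cq->log_nb_desc;
        volatile struct cqe *c = &cq->cqes[cq->ci & (ring_sz - 1)];

        if (!cqe_owned_by_sw(c, cq->ci, ring_sz))
            return NULL;                  /* hardware still owns it */
        cq->ci = (cq->ci + 1) & 0xffffff; /* 24-bit wraparound */
        cq->db_rec[0] = htobe32(cq->ci);  /* publish progress to HW */
        return c;
    }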
|
/dpdk/drivers/net/mlx5/
mlx5_rxtx_vec_sse.h
     136  rte_prefetch0((void *)(cq + pos + i));   in rxq_cq_decompress_v()
     253  (cq->pkt_info & 0x3) << 6;   in rxq_cq_decompress_v()
     349  rte_prefetch0((void *)(cq + pos + 8));   in rxq_cq_decompress_v()
     350  mcq = (void *)(cq + pos);   in rxq_cq_decompress_v()
     352  cq[inv++].op_own = MLX5_CQE_INVALIDATE;   in rxq_cq_decompress_v()
     357  cq[inv].op_own = MLX5_CQE_INVALIDATE;   in rxq_cq_decompress_v()
     617  &cq[pos + p3].sop_drop_qpn);   in rxq_cq_process_v()
     621  &cq[pos + p2].sop_drop_qpn);   in rxq_cq_process_v()
     629  &cq[pos + p1].sop_drop_qpn);   in rxq_cq_process_v()
     632  &cq[pos].sop_drop_qpn);   in rxq_cq_process_v()
    [all …]
|
mlx5_flow_aso.c
     221  sq_attr.cqn = sq->cq.cq_obj.cq->id;   in mlx5_aso_sq_create()
     427  struct mlx5_aso_cq *cq = &sq->cq;   in mlx5_aso_cqe_err_handle()
     428  uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1);   in mlx5_aso_cqe_err_handle()
     432  cq->errors++;   in mlx5_aso_cqe_err_handle()
     529  struct mlx5_aso_cq *cq = &sq->cq;   in mlx5_aso_completion_handle()
     558  cq->cq_ci++;   in mlx5_aso_completion_handle()
     564  cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);   in mlx5_aso_completion_handle()
     751  struct mlx5_aso_cq *cq = &sq->cq;   in mlx5_aso_mtr_completion_handle()
     791  cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);   in mlx5_aso_mtr_completion_handle()
    1116  struct mlx5_aso_cq *cq = &sq->cq;   in mlx5_aso_ct_completion_handle()
    [all …]
|
mlx5_rxtx_vec_altivec.h
     367  (cq->pkt_info & 0x3) << 6;   in rxq_cq_decompress_v()
     926  &cq[pos + p3].sop_drop_qpn, 0LL};   in rxq_cq_process_v()
     950  &cq[pos].sop_drop_qpn, 0LL};   in rxq_cq_process_v()
     960  &cq[pos + p3].pkt_info;   in rxq_cq_process_v()
     962  &cq[pos + p2].pkt_info;   in rxq_cq_process_v()
     977  &cq[pos + p3].rsvd4[2], 0LL};   in rxq_cq_process_v()
     980  &cq[pos + p2].rsvd4[2], 0LL};   in rxq_cq_process_v()
    1023  &cq[pos + p1].pkt_info;   in rxq_cq_process_v()
    1025  &cq[pos].pkt_info;   in rxq_cq_process_v()
    1040  &cq[pos + p1].rsvd4[2], 0LL};   in rxq_cq_process_v()
    [all …]
|
mlx5_rxtx_vec.c
     294  volatile struct mlx5_cqe *cq;   in rxq_burst_v()
     304  cq = &(*rxq->cqes)[cq_idx];   in rxq_burst_v()
     305  rte_prefetch0(cq);   in rxq_burst_v()
     306  rte_prefetch0(cq + 1);   in rxq_burst_v()
     307  rte_prefetch0(cq + 2);   in rxq_burst_v()
     308  rte_prefetch0(cq + 3);   in rxq_burst_v()
     444  cq = &(*rxq->cqes)[cq_idx];   in rxq_burst_mprq_v()
     445  rte_prefetch0(cq);   in rxq_burst_mprq_v()
     446  rte_prefetch0(cq + 1);   in rxq_burst_mprq_v()
     447  rte_prefetch0(cq + 2);   in rxq_burst_mprq_v()
    [all …]
|
mlx5_rxtx_vec_neon.h
     150  rte_prefetch0((void *)(cq + pos + i));   in rxq_cq_decompress_v()
     269  (cq->pkt_info & 0x3) << 6;   in rxq_cq_decompress_v()
     359  rte_prefetch0((void *)(cq + pos + 8));   in rxq_cq_decompress_v()
     360  mcq = (void *)&(cq + pos)->pkt_info;   in rxq_cq_decompress_v()
     362  cq[inv++].op_own = MLX5_CQE_INVALIDATE;   in rxq_cq_decompress_v()
     367  cq[inv].op_own = MLX5_CQE_INVALIDATE;   in rxq_cq_decompress_v()
     636  p0 = (void *)&cq[pos].pkt_info;   in rxq_cq_process_v()
     653  rte_prefetch_non_temporal(&cq[next]);   in rxq_cq_process_v()
     654  rte_prefetch_non_temporal(&cq[next + 1]);   in rxq_cq_process_v()
     655  rte_prefetch_non_temporal(&cq[next + 2]);   in rxq_cq_process_v()
    [all …]
|
/dpdk/drivers/net/ionic/
ionic_dev.c
     341  struct ionic_cq *cq = &qcq->cq;   in ionic_dev_cmd_adminq_init()
     371  cq->num_descs = num_descs;   in ionic_cq_init()
     372  cq->size_mask = num_descs - 1;   in ionic_cq_init()
     373  cq->tail_idx = 0;   in ionic_cq_init()
     374  cq->done_color = 1;   in ionic_cq_init()
     382  cq->base = base;   in ionic_cq_map()
     383  cq->base_pa = base_pa;   in ionic_cq_map()
     395  while (cb(cq, cq->tail_idx, cb_arg)) {   in ionic_cq_service()
     396  cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);   in ionic_cq_service()
     397  if (cq->tail_idx == 0)   in ionic_cq_service()
    [all …]
|
ionic_rxtx.c
      71  struct ionic_cq *cq = &txq->qcq.cq;   in ionic_tx_flush()
      81  cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);   in ionic_tx_flush()
      84  if ((cq->tail_idx & 0x3) == 0)   in ionic_tx_flush()
      87  if (cq->tail_idx == 0)   in ionic_tx_flush()
      88  cq->done_color = !cq->done_color;   in ionic_tx_flush()
     768  struct ionic_cq *cq = &rxq->qcq.cq;   in ionic_rx_clean()
    1056  struct ionic_cq *cq = &rxq->qcq.cq;   in ionic_rxq_service()
    1069  cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);   in ionic_rxq_service()
    1071  if (cq->tail_idx == 0)   in ionic_rxq_service()
    1072  cq->done_color = !cq->done_color;   in ionic_rxq_service()
    [all …]
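
ionic_tx_flush() and ionic_rxq_service() both use the "done color" scheme visible above: hardware writes each completion with a color bit that flips on every pass over the ring, so software never reads a hardware index; it only compares colors and toggles its expectation when tail_idx wraps to 0. A sketch under assumed field and type names:

    #include <stdbool.h>
    #include <stdint.h>

    struct ionic_like_cqe {
        uint8_t color;            /* assumed: bit 0 carries the color */
    };

    struct ionic_like_cq {
        uint32_t tail_idx;
        uint32_t size_mask;       /* num_descs - 1, power-of-two ring */
        bool done_color;          /* color software expects this pass */
        volatile struct ionic_like_cqe *base;
    };

    /* Service one completion if hardware has produced it. */
    static bool cq_service_one(struct ionic_like_cq *cq)
    {
        volatile struct ionic_like_cqe *cqe = &cq->base[cq->tail_idx];

        if ((cqe->color & 1u) != cq->done_color)
            return false;             /* not written yet this pass */
        /* ... hand the completion to the caller here ... */
        cq->tail_idx = (cq->tail_idx + 1) & cq->size_mask;
        if (cq->tail_idx == 0)
            cq->done_color = !cq->done_color; /* new pass, new color */
        return true;
    }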
|
/dpdk/drivers/net/enic/
enic_rxtx.c
      47  struct vnic_cq *cq;   in enic_recv_pkts_common()
      59  cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];   in enic_recv_pkts_common()
      63  color = cq->last_color;   in enic_recv_pkts_common()
     203  cq->to_clean = cq_idx;   in enic_recv_pkts_common()
     253  struct vnic_cq *cq;   in enic_noscatter_recv_pkts()
     262  cq = &enic->cq[enic_cq_rq(enic, rq->index)];   in enic_noscatter_recv_pkts()
     483  uint8_t eop, cq;   in enic_xmit_pkts()
     567  cq = 0;   in enic_xmit_pkts()
     569  cq = 1;   in enic_xmit_pkts()
     587  cq = 0;   in enic_xmit_pkts()
    [all …]
|
enic_rxtx_vec_avx2.c
      47  struct vnic_cq *cq;   in enic_noscatter_vec_recv_pkts()
      54  cq = &enic->cq[enic_cq_rq(enic, rq->index)];   in enic_noscatter_vec_recv_pkts()
      55  cq_idx = cq->to_clean;   in enic_noscatter_vec_recv_pkts()
      69  max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx);   in enic_noscatter_vec_recv_pkts()
      72  color = cq->last_color;   in enic_noscatter_vec_recv_pkts()
      73  cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx;   in enic_noscatter_vec_recv_pkts()
     773  nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx;   in enic_noscatter_vec_recv_pkts()
     780  if (unlikely(cq_idx == cq->ring.desc_count)) {   in enic_noscatter_vec_recv_pkts()
     782  cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;   in enic_noscatter_vec_recv_pkts()
     784  cq->to_clean = cq_idx;   in enic_noscatter_vec_recv_pkts()
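
A detail that keeps the vectorized loop above simple: the burst is clamped so it never crosses the end of the CQ ring (the RTE_MIN at line 69), which lets the inner loop walk a raw descriptor pointer; the wrap and the last_color flip happen once, afterwards. A sketch of those two steps:

    #include <stdint.h>

    /* Clamp a burst so it stays within the current pass over the ring. */
    static inline uint32_t clamp_burst(uint32_t want, uint32_t desc_count,
                                       uint32_t idx)
    {
        uint32_t room = desc_count - idx;  /* slots left before the ring end */
        return want < room ? want : room;
    }

    /* Advance the cleanup index after n completions; on a full pass,
     * wrap to 0 and flip the color expectation (the XOR with
     * CQ_DESC_COLOR_MASK_NOSHIFT in the original). */
    static inline uint32_t advance_clean_idx(uint32_t idx, uint32_t n,
                                             uint32_t desc_count,
                                             uint8_t *last_color)
    {
        idx += n;
        if (idx == desc_count) {
            idx = 0;
            *last_color ^= 1u;  /* assumed single-bit color mask */
        }
        return idx;
    }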
|
/dpdk/drivers/event/sw/
sw_evdev_scheduler.c
      53  int cq = fid->cq;   in sw_schedule_atomic_to_cq()
      55  if (cq < 0) {   in sw_schedule_atomic_to_cq()
      61  cq = qid->cq_map[cq_idx];   in sw_schedule_atomic_to_cq()
      70  cq = test_cq;   in sw_schedule_atomic_to_cq()
      75  fid->cq = cq; /* this pins early */   in sw_schedule_atomic_to_cq()
      90  sw->cq_ring_space[cq]--;   in sw_schedule_atomic_to_cq()
      98  qid->to_port[cq]++;   in sw_schedule_atomic_to_cq()
     137  uint32_t cq;   in sw_schedule_parallel_to_cq()
     160  sw->cq_ring_space[cq]--;   in sw_schedule_parallel_to_cq()
     172  sw->ports[cq].cq_buf[sw->ports[cq].cq_buf_count++] = *qe;   in sw_schedule_parallel_to_cq()
    [all …]
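
The sw_schedule_atomic_to_cq() hits capture how the software eventdev implements atomic queues: the flow-ID table remembers which CQ (port) a flow was first scheduled to (fid->cq = cq, "this pins early"), so all later events of that flow land on the same CQ until it drains, preserving per-flow ordering without locks. A reduced sketch with hypothetical types (the real scheduler also checks cq_ring_space before committing):

    #include <stdint.h>

    #define NO_CQ (-1)

    struct flow_id_entry {
        int cq;               /* pinned CQ, or NO_CQ if unpinned */
        uint32_t pkt_count;   /* events in flight on that CQ */
    };

    /* Pick the CQ for an atomic-flow event: reuse the pin if one exists,
     * otherwise pick a mapped CQ (round-robin here) and pin it. */
    static int schedule_atomic(struct flow_id_entry *fid,
                               const uint8_t *cq_map, unsigned int cq_map_count,
                               unsigned int *rr)
    {
        int cq = fid->cq;

        if (cq == NO_CQ) {
            cq = cq_map[(*rr)++ % cq_map_count];
            fid->cq = cq;     /* pin: later events follow this flow */
        }
        fid->pkt_count++;
        return cq;
    }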
|
/dpdk/drivers/common/cnxk/
roc_nix_queue.c
     129  aq->rq.cq = rq->qid;   in nix_rq_cn9k_cfg()
     183  aq->rq_mask.cq = ~aq->rq_mask.cq;   in nix_rq_cn9k_cfg()
     346  aq->rq_mask.cq = ~aq->rq_mask.cq;   in nix_rq_cfg()
     471  if (cq == NULL)   in roc_nix_cq_init()
     479  cq->qmask = cq->nb_desc - 1;   in roc_nix_cq_init()
     482  cq->wdata = (uint64_t)cq->qid << 32;   in roc_nix_cq_init()
     503  cq_ctx = &aq->cq;   in roc_nix_cq_init()
     514  cq_ctx = &aq->cq;   in roc_nix_cq_init()
     576  if (cq == NULL)   in roc_nix_cq_fini()
     593  aq->cq.ena = 0;   in roc_nix_cq_fini()
    [all …]
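
roc_nix_cq_init() precomputes two datapath constants: qmask = nb_desc - 1, which only works if the descriptor count is a power of two, and wdata with the queue ID preloaded into the upper 32 bits so a status query can OR in its remaining fields and issue a single 64-bit operation. A sketch of that precomputation (the power-of-two check is an assumption about the caller's contract, not a line shown above):

    #include <stdint.h>

    struct nix_cq {
        uint16_t qid;
        uint32_t nb_desc;   /* must be a power of two */
        uint32_t qmask;
        uint64_t wdata;
    };

    static int cq_precompute(struct nix_cq *cq)
    {
        if (cq->nb_desc == 0 || (cq->nb_desc & (cq->nb_desc - 1)))
            return -1;                    /* not a power of two */
        cq->qmask = cq->nb_desc - 1;      /* idx & qmask == idx % nb_desc */
        cq->wdata = (uint64_t)cq->qid << 32; /* queue id preloaded for HW ops */
        return 0;
    }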
|
cnxk_telemetry_nix.c
     125  node->cqs[cq->qid] = cq;   in nix_tel_node_add_cq()
     236  CNXK_TEL_DICT_INT(d, cq, qid);   in cnxk_tel_nix_cq()
     237  CNXK_TEL_DICT_INT(d, cq, nb_desc);   in cnxk_tel_nix_cq()
     238  CNXK_TEL_DICT_PTR(d, cq, roc_nix);   in cnxk_tel_nix_cq()
     239  CNXK_TEL_DICT_PTR(d, cq, door);   in cnxk_tel_nix_cq()
     240  CNXK_TEL_DICT_PTR(d, cq, status);   in cnxk_tel_nix_cq()
     241  CNXK_TEL_DICT_PTR(d, cq, wdata);   in cnxk_tel_nix_cq()
     243  CNXK_TEL_DICT_INT(d, cq, qmask);   in cnxk_tel_nix_cq()
     797  int cq;   in cnxk_nix_tel_handle_info_x()
     802  cq = strtol(tok, NULL, 10);   in cnxk_nix_tel_handle_info_x()
    [all …]
|
roc_nix_debug.c
     343  *ctx_p = &rsp->cq;   in nix_q_ctx_get()
     365  *ctx_p = &rsp->cq;   in nix_q_ctx_get()
     374  ctx->sqe_way_mask, ctx->cq);   in nix_cn9k_lf_sq_dump()
     436  ctx->sqe_way_mask, ctx->cq);   in nix_lf_sq_dump()
     760  cq->tag, cq->q, cq->node, cq->cqe_type);   in roc_nix_cqe_dump()
     833  roc_nix_cq_dump(struct roc_nix_cq *cq)   in roc_nix_cq_dump()
     835  nix_dump("nix_cq@%p", cq);   in roc_nix_cq_dump()
     836  nix_dump(" qid = %d", cq->qid);   in roc_nix_cq_dump()
     837  nix_dump(" qnb_desc = %d", cq->nb_desc);   in roc_nix_cq_dump()
     838  nix_dump(" roc_nix = %p", cq->roc_nix);   in roc_nix_cq_dump()
    [all …]
|
/dpdk/drivers/net/mlx4/
mlx4_txq.c
     237  struct mlx4_cq *cq = &txq->mcq;   in mlx4_txq_fill_dv_obj_info()
     239  struct mlx4dv_cq *dcq = mlxdv->cq.out;   in mlx4_txq_fill_dv_obj_info()
     258  cq->buf = dcq->buf.buf;   in mlx4_txq_fill_dv_obj_info()
     259  cq->cqe_cnt = dcq->cqe_cnt;   in mlx4_txq_fill_dv_obj_info()
     260  cq->set_ci_db = dcq->set_ci_db;   in mlx4_txq_fill_dv_obj_info()
     411  if (!txq->cq) {   in mlx4_tx_queue_setup()
     418  .send_cq = txq->cq,   in mlx4_tx_queue_setup()
     419  .recv_cq = txq->cq,   in mlx4_tx_queue_setup()
     481  mlxdv.cq.in = txq->cq;   in mlx4_tx_queue_setup()
     482  mlxdv.cq.out = &dv_cq;   in mlx4_tx_queue_setup()
    [all …]
|
mlx4_rxq.c
     362  struct ibv_cq *cq;   in mlx4_rss_init()
     389  if (!cq) {   in mlx4_rss_init()
     401  .cq = cq,   in mlx4_rss_init()
     490  MLX4_ASSERT(rxq->cq);   in mlx4_rxq_attach()
     519  if (!cq) {   in mlx4_rxq_attach()
     536  .cq = cq,   in mlx4_rxq_attach()
     556  mlxdv.cq.in = cq;   in mlx4_rxq_attach()
     557  mlxdv.cq.out = &dv_cq;   in mlx4_rxq_attach()
     607  rxq->cq = cq;   in mlx4_rxq_attach()
     629  if (cq)   in mlx4_rxq_attach()
    [all …]
|
mlx4_prm.h
     134  mlx4_get_cqe(struct mlx4_cq *cq, uint32_t index)   in mlx4_get_cqe()
     136  return (volatile struct mlx4_cqe *)(cq->buf +   in mlx4_get_cqe()
     137  ((index & (cq->cqe_cnt - 1)) <<   in mlx4_get_cqe()
     138  (5 + cq->cqe_64)) +   in mlx4_get_cqe()
     139  (cq->cqe_64 << 5));   in mlx4_get_cqe()
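
mlx4_get_cqe() packs a hardware quirk into one expression: mlx4 CQEs are 32 or 64 bytes, and in 64-byte mode the meaningful half is the second 32 bytes. With cqe_64 equal to 0 or 1, (index & (cqe_cnt - 1)) << (5 + cqe_64) selects the slot (stride 32 or 64 bytes) and cqe_64 << 5 adds the 32-byte offset. The same arithmetic, spelled out:

    #include <stdint.h>

    /* Byte offset of the CQE for a given index inside the CQ buffer.
     * cqe_cnt is a power of two; cqe_64 is 1 for 64-byte CQEs, else 0. */
    static inline uintptr_t cqe_offset(uint32_t index, uint32_t cqe_cnt,
                                       unsigned int cqe_64)
    {
        return ((uintptr_t)(index & (cqe_cnt - 1)) << (5 + cqe_64))
               + ((uintptr_t)cqe_64 << 5);
    }

    /* Example: index 3 with 64-byte CQEs: the slot starts at 3 * 64 = 192
     * and the meaningful half 32 bytes in, so cqe_offset() returns 224. */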
|
mlx4_intr.c
     231  struct mlx4_cq *cq = &rxq->mcq;   in mlx4_arm_cq()
     233  uint32_t sn = cq->arm_sn & MLX4_CQ_DB_GEQ_N_MASK;   in mlx4_arm_cq()
     234  uint32_t ci = cq->cons_index & MLX4_CQ_DB_CI_MASK;   in mlx4_arm_cq()
     237  *cq->arm_db = rte_cpu_to_be_32(sn << 28 | cmd | ci);   in mlx4_arm_cq()
     243  doorbell = sn << 28 | cmd | cq->cqn;   in mlx4_arm_cq()
     246  rte_write64(rte_cpu_to_be_64(doorbell), cq->cq_db_reg);   in mlx4_arm_cq()
     336  ret = mlx4_glue->get_cq_event(rxq->cq->channel, &ev_cq,   in mlx4_rx_intr_disable()
     344  else if (ev_cq != rxq->cq)   in mlx4_rx_intr_disable()
     354  mlx4_glue->ack_cq_events(rxq->cq, 1);   in mlx4_rx_intr_disable()
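
mlx4_arm_cq() builds both of its doorbell words the same way: an arm sequence number in the top bits, command bits selecting the event type, and an index in the low bits, which is the consumer index for the in-memory arm record and the CQ number for the UAR register. A sketch of the composition (mask widths are inferred from the shifts above and are assumptions):

    #include <stdint.h>

    #define CQ_DB_SN_MASK 0x3u        /* assumed: 2-bit arm sequence number */
    #define CQ_DB_CI_MASK 0xffffffu   /* assumed: 24-bit consumer index */

    /* Compose the 32-bit arm word: sequence number in bits 31:28, the
     * event-type command, and the consumer index (or CQN for the UAR
     * copy) in the low 24 bits. */
    static inline uint32_t cq_arm_word(uint32_t arm_sn, uint32_t cmd,
                                       uint32_t ci)
    {
        return ((arm_sn & CQ_DB_SN_MASK) << 28) | cmd | (ci & CQ_DB_CI_MASK);
    }

The same word is written twice in the original: once big-endian to the in-memory arm record, so hardware can distinguish a re-arm from a stale arm, and once (with the CQN) to the UAR doorbell register.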
|
mlx4_glue.c
     130  mlx4_glue_destroy_cq(struct ibv_cq *cq)   in mlx4_glue_destroy_cq()
     132  return ibv_destroy_cq(cq);   in mlx4_glue_destroy_cq()
     136  mlx4_glue_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq,   in mlx4_glue_get_cq_event()
     139  return ibv_get_cq_event(channel, cq, cq_context);   in mlx4_glue_get_cq_event()
     143  mlx4_glue_ack_cq_events(struct ibv_cq *cq, unsigned int nevents)   in mlx4_glue_ack_cq_events()
     145  ibv_ack_cq_events(cq, nevents);   in mlx4_glue_ack_cq_events()
|
/dpdk/drivers/common/mlx5/
mlx5_common_devx.c
      27  mlx5_devx_cq_destroy(struct mlx5_devx_cq *cq)   in mlx5_devx_cq_destroy()
      29  if (cq->cq)   in mlx5_devx_cq_destroy()
      30  claim_zero(mlx5_devx_cmd_destroy(cq->cq));   in mlx5_devx_cq_destroy()
      31  if (cq->umem_obj)   in mlx5_devx_cq_destroy()
      32  claim_zero(mlx5_os_umem_dereg(cq->umem_obj));   in mlx5_devx_cq_destroy()
      33  if (cq->umem_buf)   in mlx5_devx_cq_destroy()
      34  mlx5_free((void *)(uintptr_t)cq->umem_buf);   in mlx5_devx_cq_destroy()
      82  struct mlx5_devx_obj *cq = NULL;   in mlx5_devx_cq_create()
     134  cq = mlx5_devx_cmd_create_cq(ctx, attr);   in mlx5_devx_cq_create()
     135  if (!cq) {   in mlx5_devx_cq_create()
    [all …]
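
mlx5_devx_cq_destroy() tears resources down in strict reverse order of creation: the firmware CQ object first (it references the umem), then the umem registration, then the buffer. Each step is guarded with a NULL check so the same function can serve as the error-unwind path of mlx5_devx_cq_create(). A generic sketch of the pattern with hypothetical destroy helpers:

    #include <stdlib.h>

    struct devx_like_cq {
        void *cq_obj;     /* firmware object handle */
        void *umem_obj;   /* memory registration handle */
        void *umem_buf;   /* the ring memory itself */
    };

    /* Hypothetical per-resource destructors (no-op stubs here). */
    static void fw_obj_destroy(void *obj)  { (void)obj; }
    static void umem_dereg(void *umem)     { (void)umem; }

    static void cq_destroy(struct devx_like_cq *cq)
    {
        /* Reverse creation order; the NULL checks make this safe to
         * call from any point of a failed create. */
        if (cq->cq_obj)
            fw_obj_destroy(cq->cq_obj);
        if (cq->umem_obj)
            umem_dereg(cq->umem_obj);
        free(cq->umem_buf);   /* free(NULL) is a no-op */
    }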
|