Searched refs:cq (Results 1 – 25 of 80) sorted by relevance

/f-stack/dpdk/drivers/net/ice/base/
ice_controlq.c
67 if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) in ice_check_sq_alive()
68 return (rd32(hw, cq->sq.len) & (cq->sq.len_mask | in ice_check_sq_alive()
70 (cq->num_sq_entries | cq->sq.len_ena_mask); in ice_check_sq_alive()
89 cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries, in ice_alloc_ctrlq_sq_ring()
331 if (!cq->num_sq_entries || !cq->sq_buf_size) { in ice_init_sq()
355 cq->sq.count = cq->num_sq_entries; in ice_init_sq()
393 if (!cq->num_rq_entries || !cq->rq_buf_size) { in ice_init_rq()
417 cq->rq.count = cq->num_rq_entries; in ice_init_rq()
598 !cq->rq_buf_size || !cq->sq_buf_size) { in ice_init_ctrlq()
983 if (cq->sq.next_to_use == cq->sq.count) in ice_sq_send_cmd_nolock()
[all …]
/f-stack/dpdk/drivers/net/enic/base/
vnic_cq.c
10 void vnic_cq_free(struct vnic_cq *cq) in vnic_cq_free() argument
12 vnic_dev_free_desc_ring(cq->vdev, &cq->ring); in vnic_cq_free()
14 cq->ctrl = NULL; in vnic_cq_free()
25 cq->index = index; in vnic_cq_alloc()
26 cq->vdev = vdev; in vnic_cq_alloc()
29 if (!cq->ctrl) { in vnic_cq_alloc()
53 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); in vnic_cq_init()
70 cq->to_clean = 0; in vnic_cq_clean()
71 cq->last_color = 0; in vnic_cq_clean()
73 iowrite32(0, &cq->ctrl->cq_head); in vnic_cq_clean()
[all …]
vnic_cq.h
64 void vnic_cq_free(struct vnic_cq *cq);
65 int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
68 void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
73 void vnic_cq_clean(struct vnic_cq *cq);
74 int vnic_cq_mem_size(struct vnic_cq *cq, unsigned int desc_count,
/f-stack/dpdk/drivers/vdpa/mlx5/
mlx5_vdpa_event.c
100 if (cq->cq) in mlx5_vdpa_cq_destroy()
101 claim_zero(mlx5_devx_cmd_destroy(cq->cq)); in mlx5_vdpa_cq_destroy()
106 memset(cq, 0, sizeof(*cq)); in mlx5_vdpa_cq_destroy()
173 if (!cq->cq) in mlx5_vdpa_cq_create()
232 cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci); in mlx5_vdpa_cq_poll()
247 cq = &priv->virtqs[i].eqp.cq; in mlx5_vdpa_arm_all_cqs()
248 if (cq->cq && !cq->armed) in mlx5_vdpa_arm_all_cqs()
292 cq = &priv->virtqs[i].eqp.cq; in mlx5_vdpa_poll_handle()
293 if (cq->cq && !cq->armed) { in mlx5_vdpa_poll_handle()
372 (int)virtq->index, cq->cq->id, in mlx5_vdpa_interrupt_handler()
[all …]
/f-stack/dpdk/drivers/regex/mlx5/
mlx5_regex_control.c
58 if (cq->cqe_umem) { in regex_ctrl_destroy_cq()
62 if (cq->cqe) { in regex_ctrl_destroy_cq()
64 cq->cqe = NULL; in regex_ctrl_destroy_cq()
67 mlx5_release_dbr(&priv->dbrpgs, cq->dbr_umem, cq->dbr_offset); in regex_ctrl_destroy_cq()
70 if (cq->obj) { in regex_ctrl_destroy_cq()
72 cq->obj = NULL; in regex_ctrl_destroy_cq()
118 cq->cqe = buf; in regex_ctrl_create_cq()
124 cq->ci = 0; in regex_ctrl_create_cq()
137 if (!cq->obj) { in regex_ctrl_create_cq()
144 if (cq->cqe_umem) in regex_ctrl_create_cq()
[all …]
mlx5_regex_fastpath.c
43 cq_size_get(struct mlx5_regex_cq *cq) in cq_size_get() argument
45 return (1U << cq->log_nb_desc); in cq_size_get()
221 poll_one(struct mlx5_regex_cq *cq) in poll_one() argument
226 next_cqe_offset = (cq->ci & (cq_size_get(cq) - 1)); in poll_one()
227 cqe = (volatile struct mlx5_cqe *)(cq->cqe + next_cqe_offset); in poll_one()
230 int ret = check_cqe(cqe, cq_size_get(cq), cq->ci); in poll_one()
265 struct mlx5_regex_cq *cq = &queue->cq; in mlx5_regexdev_dequeue() local
269 while ((cqe = poll_one(cq))) { in mlx5_regexdev_dequeue()
286 cq->ci = (cq->ci + 1) & 0xffffff; in mlx5_regexdev_dequeue()
288 cq->dbr[0] = rte_cpu_to_be_32(cq->ci); in mlx5_regexdev_dequeue()
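The fastpath lines above size the CQ from a log2 field (1U << cq->log_nb_desc) and locate the next CQE by masking a free-running consumer index, wrapping ci at 24 bits before writing the doorbell record. Below is a minimal standalone sketch of that indexing scheme; demo_cq and its helpers are illustrative stand-ins, not the driver's struct mlx5_regex_cq.

/* Standalone sketch of the log2-sized CQ indexing used above; the struct
 * and helpers are illustrative stand-ins, not the mlx5 regex driver types. */
#include <stdint.h>
#include <stdio.h>

struct demo_cq {
	uint8_t  log_nb_desc;  /* ring holds 1 << log_nb_desc entries */
	uint32_t ci;           /* free-running consumer index */
};

static uint32_t demo_cq_size(const struct demo_cq *cq)
{
	return 1U << cq->log_nb_desc;
}

/* Map the consumer index to a slot: a power-of-two size lets the driver
 * use a mask instead of a modulo. */
static uint32_t demo_cq_slot(const struct demo_cq *cq)
{
	return cq->ci & (demo_cq_size(cq) - 1);
}

int main(void)
{
	struct demo_cq cq = { .log_nb_desc = 4, .ci = 0 };

	for (int i = 0; i < 20; i++) {
		printf("ci=%u slot=%u\n", cq.ci, demo_cq_slot(&cq));
		/* The driver also truncates ci to 24 bits before writing the
		 * doorbell record; mirrored here for illustration. */
		cq.ci = (cq.ci + 1) & 0xffffff;
	}
	return 0;
}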
/f-stack/dpdk/drivers/net/mlx5/
mlx5_flow_age.c
23 if (cq->cq) in mlx5_aso_cq_destroy()
24 claim_zero(mlx5_devx_cmd_destroy(cq->cq)); in mlx5_aso_cq_destroy()
25 if (cq->umem_obj) in mlx5_aso_cq_destroy()
29 memset(cq, 0, sizeof(*cq)); in mlx5_aso_cq_destroy()
89 cq->cq = mlx5_devx_cmd_create_cq(ctx, &attr); in mlx5_aso_cq_create()
90 if (!cq->cq) in mlx5_aso_cq_create()
197 if (sq->cq.cq) in mlx5_aso_destroy_sq()
297 attr.cqn = sq->cq.cq->id; in mlx5_aso_sq_create()
457 struct mlx5_aso_cq *cq = &sq->cq; in mlx5_aso_cqe_err_handle() local
559 struct mlx5_aso_cq *cq = &sq->cq; in mlx5_aso_completion_handle() local
[all …]
mlx5_rxtx_vec_sse.h
253 (cq->pkt_info & 0x3) << 6; in rxq_cq_decompress_v()
350 mcq = (void *)(cq + pos); in rxq_cq_decompress_v()
357 cq[inv].op_own = MLX5_CQE_INVALIDATE; in rxq_cq_decompress_v()
617 &cq[pos + p3].sop_drop_qpn); in rxq_cq_process_v()
621 &cq[pos + p2].sop_drop_qpn); in rxq_cq_process_v()
629 &cq[pos + p1].sop_drop_qpn); in rxq_cq_process_v()
632 &cq[pos].sop_drop_qpn); in rxq_cq_process_v()
773 cq[pos].flow_table_metadata; in rxq_cq_process_v()
775 cq[pos + p1].flow_table_metadata; in rxq_cq_process_v()
777 cq[pos + p2].flow_table_metadata; in rxq_cq_process_v()
[all …]
mlx5_rxtx_vec_altivec.h
367 (cq->pkt_info & 0x3) << 6; in rxq_cq_decompress_v()
926 &cq[pos + p3].sop_drop_qpn, 0LL}; in rxq_cq_process_v()
950 &cq[pos].sop_drop_qpn, 0LL}; in rxq_cq_process_v()
960 &cq[pos + p3].pkt_info; in rxq_cq_process_v()
962 &cq[pos + p2].pkt_info; in rxq_cq_process_v()
977 &cq[pos + p3].rsvd3[9], 0LL}; in rxq_cq_process_v()
980 &cq[pos + p2].rsvd3[9], 0LL}; in rxq_cq_process_v()
1023 &cq[pos + p1].pkt_info; in rxq_cq_process_v()
1025 &cq[pos].pkt_info; in rxq_cq_process_v()
1040 &cq[pos + p1].rsvd3[9], 0LL}; in rxq_cq_process_v()
[all …]
mlx5_rxtx_vec.c
287 volatile struct mlx5_cqe *cq; in rxq_burst_v() local
297 cq = &(*rxq->cqes)[cq_idx]; in rxq_burst_v()
298 rte_prefetch0(cq); in rxq_burst_v()
299 rte_prefetch0(cq + 1); in rxq_burst_v()
300 rte_prefetch0(cq + 2); in rxq_burst_v()
301 rte_prefetch0(cq + 3); in rxq_burst_v()
437 cq = &(*rxq->cqes)[cq_idx]; in rxq_burst_mprq_v()
438 rte_prefetch0(cq); in rxq_burst_mprq_v()
439 rte_prefetch0(cq + 1); in rxq_burst_mprq_v()
440 rte_prefetch0(cq + 2); in rxq_burst_mprq_v()
[all …]
mlx5_rxtx_vec_neon.h
150 rte_prefetch0((void *)(cq + pos + i)); in rxq_cq_decompress_v()
269 (cq->pkt_info & 0x3) << 6; in rxq_cq_decompress_v()
359 rte_prefetch0((void *)(cq + pos + 8)); in rxq_cq_decompress_v()
360 mcq = (void *)&(cq + pos)->pkt_info; in rxq_cq_decompress_v()
362 cq[inv++].op_own = MLX5_CQE_INVALIDATE; in rxq_cq_decompress_v()
367 cq[inv].op_own = MLX5_CQE_INVALIDATE; in rxq_cq_decompress_v()
636 p0 = (void *)&cq[pos].pkt_info; in rxq_cq_process_v()
653 rte_prefetch_non_temporal(&cq[next]); in rxq_cq_process_v()
654 rte_prefetch_non_temporal(&cq[next + 1]); in rxq_cq_process_v()
655 rte_prefetch_non_temporal(&cq[next + 2]); in rxq_cq_process_v()
[all …]
/f-stack/dpdk/drivers/net/ionic/
ionic_dev.c
338 struct ionic_cq *cq = &qcq->cq; in ionic_dev_cmd_adminq_init() local
374 cq->lif = lif; in ionic_cq_init()
375 cq->bound_intr = intr; in ionic_cq_init()
378 cq->tail_idx = 0; in ionic_cq_init()
379 cq->done_color = 1; in ionic_cq_init()
387 cq->base = base; in ionic_cq_map()
394 cq->bound_q = q; in ionic_cq_bind()
395 q->bound_cq = cq; in ionic_cq_bind()
407 while (cb(cq, cq->tail_idx, cb_arg)) { in ionic_cq_service()
408 cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); in ionic_cq_service()
[all …]
ionic_rxtx.c
82 cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); in ionic_tx_flush()
85 if ((cq->tail_idx & 0x3) == 0) in ionic_tx_flush()
88 if (cq->tail_idx == 0) in ionic_tx_flush()
89 cq->done_color = !cq->done_color; in ionic_tx_flush()
147 ionic_tx_flush(&txq->cq); in ionic_dev_tx_queue_stop()
497 struct ionic_cq *cq = &txq->cq; in ionic_xmit_pkts() local
506 ionic_tx_flush(cq); in ionic_xmit_pkts()
1004 cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); in ionic_rxq_service()
1006 if (cq->tail_idx == 0) in ionic_rxq_service()
1007 cq->done_color = !cq->done_color; in ionic_rxq_service()
[all …]
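The ionic lines above advance cq->tail_idx under a power-of-two mask and flip done_color every time the index wraps, so a completion is valid only while its color bit matches the expected color. Below is a self-contained sketch of that convention; every type and value here is hypothetical, not the ionic driver's.

/* Illustrative color-bit completion queue, loosely modeled on the ionic
 * tail_idx/done_color handling above; all types here are hypothetical. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_comp {
	uint8_t color; /* hardware writes the current generation color */
};

struct demo_cq {
	struct demo_comp *base;
	uint32_t num_descs;  /* power of two */
	uint32_t tail_idx;
	bool done_color;
};

/* Consume completions until the color bit no longer matches. */
static unsigned int demo_cq_service(struct demo_cq *cq)
{
	unsigned int work = 0;
	struct demo_comp *comp = &cq->base[cq->tail_idx];

	while ((comp->color & 0x1) == cq->done_color) {
		work++;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
		/* On wrap, the expected color flips so stale entries from the
		 * previous lap are not mistaken for new completions. */
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;
		comp = &cq->base[cq->tail_idx];
	}
	return work;
}

int main(void)
{
	struct demo_comp ring[8] = { {1}, {1}, {1}, {0} };
	struct demo_cq cq = { ring, 8, 0, true };

	printf("serviced %u completions\n", demo_cq_service(&cq));
	return 0;
}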
/f-stack/dpdk/drivers/event/sw/
sw_evdev_scheduler.c
53 int cq = fid->cq; in sw_schedule_atomic_to_cq() local
55 if (cq < 0) { in sw_schedule_atomic_to_cq()
61 cq = qid->cq_map[cq_idx]; in sw_schedule_atomic_to_cq()
70 cq = test_cq; in sw_schedule_atomic_to_cq()
75 fid->cq = cq; /* this pins early */ in sw_schedule_atomic_to_cq()
90 sw->cq_ring_space[cq]--; in sw_schedule_atomic_to_cq()
98 qid->to_port[cq]++; in sw_schedule_atomic_to_cq()
137 uint32_t cq; in sw_schedule_parallel_to_cq() local
160 sw->cq_ring_space[cq]--; in sw_schedule_parallel_to_cq()
172 sw->ports[cq].cq_buf[sw->ports[cq].cq_buf_count++] = *qe; in sw_schedule_parallel_to_cq()
[all …]
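In the software event device, cq names a consumer queue (a port's ring) rather than a hardware completion queue: an atomic flow with no pinned cq picks one from qid->cq_map and stays there so per-flow ordering holds. A minimal sketch of that pinning decision follows; the structures and the round-robin policy are illustrative only, not the sw PMD's.

/* Sketch of atomic-flow pinning to a consumer queue, in the spirit of
 * sw_schedule_atomic_to_cq() above; the structures are illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_NUM_CQS 4

struct demo_fid {
	int32_t cq;        /* pinned consumer queue, or -1 if unpinned */
	uint32_t pcount;   /* events in flight on the pinned cq */
};

struct demo_qid {
	uint8_t cq_map[DEMO_NUM_CQS]; /* round-robin candidate cqs */
	uint32_t cq_next;
};

static int demo_pick_cq(struct demo_fid *fid, struct demo_qid *qid,
			const uint32_t *cq_space)
{
	if (fid->cq < 0) {
		/* Unpinned flow: pick the next candidate cq with ring space
		 * and pin the flow there so ordering is preserved. */
		for (unsigned int i = 0; i < DEMO_NUM_CQS; i++) {
			uint8_t cand = qid->cq_map[qid->cq_next];
			qid->cq_next = (qid->cq_next + 1) % DEMO_NUM_CQS;
			if (cq_space[cand] > 0) {
				fid->cq = cand; /* this pins early */
				break;
			}
		}
	}
	if (fid->cq >= 0)
		fid->pcount++;
	return fid->cq;
}

int main(void)
{
	uint32_t space[DEMO_NUM_CQS] = { 0, 2, 2, 2 };
	struct demo_qid qid = { { 0, 1, 2, 3 }, 0 };
	struct demo_fid fid = { .cq = -1 };

	printf("flow pinned to cq %d\n", demo_pick_cq(&fid, &qid, space));
	printf("second event stays on cq %d\n", demo_pick_cq(&fid, &qid, space));
	return 0;
}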
/f-stack/dpdk/drivers/net/enic/
enic_rxtx.c
58 struct vnic_cq *cq; in enic_recv_pkts() local
66 cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)]; in enic_recv_pkts()
69 color = cq->last_color; in enic_recv_pkts()
189 cq->to_clean = cq_idx; in enic_recv_pkts()
227 struct vnic_cq *cq; in enic_noscatter_recv_pkts() local
236 cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_noscatter_recv_pkts()
452 uint8_t eop, cq; in enic_xmit_pkts() local
536 cq = 0; in enic_xmit_pkts()
538 cq = 1; in enic_xmit_pkts()
556 cq = 0; in enic_xmit_pkts()
[all …]
enic_rxtx_vec_avx2.c
47 struct vnic_cq *cq; in enic_noscatter_vec_recv_pkts() local
54 cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_noscatter_vec_recv_pkts()
55 cq_idx = cq->to_clean; in enic_noscatter_vec_recv_pkts()
69 max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx); in enic_noscatter_vec_recv_pkts()
72 color = cq->last_color; in enic_noscatter_vec_recv_pkts()
73 cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx; in enic_noscatter_vec_recv_pkts()
773 nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx; in enic_noscatter_vec_recv_pkts()
780 if (unlikely(cq_idx == cq->ring.desc_count)) { in enic_noscatter_vec_recv_pkts()
782 cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT; in enic_noscatter_vec_recv_pkts()
784 cq->to_clean = cq_idx; in enic_noscatter_vec_recv_pkts()
/f-stack/dpdk/drivers/net/mlx4/
mlx4_prm.h
134 mlx4_get_cqe(struct mlx4_cq *cq, uint32_t index) in mlx4_get_cqe() argument
136 return (volatile struct mlx4_cqe *)(cq->buf + in mlx4_get_cqe()
137 ((index & (cq->cqe_cnt - 1)) << in mlx4_get_cqe()
138 (5 + cq->cqe_64)) + in mlx4_get_cqe()
139 (cq->cqe_64 << 5)); in mlx4_get_cqe()
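mlx4_get_cqe() above converts a CQE index into a byte offset: the shift is 5 for 32-byte CQEs and 6 when cqe_64 is set, and the extra (cqe_64 << 5) skips to the second half of a 64-byte entry, which is where the reported fields sit. The standalone sketch below reproduces the same arithmetic; demo_cq is a hypothetical stand-in for struct mlx4_cq and nothing touches hardware.

/* Illustration of the CQE offset math in mlx4_get_cqe(); demo_cq is a
 * hypothetical stand-in for struct mlx4_cq. */
#include <stdint.h>
#include <stdio.h>

struct demo_cq {
	uint32_t cqe_cnt; /* power of two */
	uint32_t cqe_64;  /* 1 if CQEs are 64 bytes, 0 if 32 bytes */
};

static uint32_t demo_cqe_offset(const struct demo_cq *cq, uint32_t index)
{
	/* Wrap the index, scale by the CQE size (32 << cqe_64), then skip
	 * the first 32 bytes of a 64-byte CQE to reach the reported data. */
	return ((index & (cq->cqe_cnt - 1)) << (5 + cq->cqe_64)) +
	       (cq->cqe_64 << 5);
}

int main(void)
{
	struct demo_cq cq32 = { .cqe_cnt = 256, .cqe_64 = 0 };
	struct demo_cq cq64 = { .cqe_cnt = 256, .cqe_64 = 1 };

	printf("32B CQE, index 3 -> offset %u\n", demo_cqe_offset(&cq32, 3));
	printf("64B CQE, index 3 -> offset %u\n", demo_cqe_offset(&cq64, 3));
	return 0;
}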
mlx4_txq.c
210 struct mlx4_cq *cq = &txq->mcq; in mlx4_txq_fill_dv_obj_info() local
212 struct mlx4dv_cq *dcq = mlxdv->cq.out; in mlx4_txq_fill_dv_obj_info()
231 cq->buf = dcq->buf.buf; in mlx4_txq_fill_dv_obj_info()
232 cq->cqe_cnt = dcq->cqe_cnt; in mlx4_txq_fill_dv_obj_info()
233 cq->set_ci_db = dcq->set_ci_db; in mlx4_txq_fill_dv_obj_info()
383 if (!txq->cq) { in mlx4_tx_queue_setup()
390 .send_cq = txq->cq, in mlx4_tx_queue_setup()
391 .recv_cq = txq->cq, in mlx4_tx_queue_setup()
453 mlxdv.cq.in = txq->cq; in mlx4_tx_queue_setup()
454 mlxdv.cq.out = &dv_cq; in mlx4_tx_queue_setup()
[all …]
mlx4_rxq.c
362 struct ibv_cq *cq; in mlx4_rss_init() local
389 if (!cq) { in mlx4_rss_init()
401 .cq = cq, in mlx4_rss_init()
490 MLX4_ASSERT(rxq->cq); in mlx4_rxq_attach()
519 if (!cq) { in mlx4_rxq_attach()
536 .cq = cq, in mlx4_rxq_attach()
556 mlxdv.cq.in = cq; in mlx4_rxq_attach()
557 mlxdv.cq.out = &dv_cq; in mlx4_rxq_attach()
607 rxq->cq = cq; in mlx4_rxq_attach()
629 if (cq) in mlx4_rxq_attach()
[all …]
mlx4_intr.c
225 struct mlx4_cq *cq = &rxq->mcq; in mlx4_arm_cq() local
227 uint32_t sn = cq->arm_sn & MLX4_CQ_DB_GEQ_N_MASK; in mlx4_arm_cq()
228 uint32_t ci = cq->cons_index & MLX4_CQ_DB_CI_MASK; in mlx4_arm_cq()
231 *cq->arm_db = rte_cpu_to_be_32(sn << 28 | cmd | ci); in mlx4_arm_cq()
237 doorbell = sn << 28 | cmd | cq->cqn; in mlx4_arm_cq()
240 rte_write64(rte_cpu_to_be_64(doorbell), cq->cq_db_reg); in mlx4_arm_cq()
327 ret = mlx4_glue->get_cq_event(rxq->cq->channel, &ev_cq, in mlx4_rx_intr_disable()
335 else if (ev_cq != rxq->cq) in mlx4_rx_intr_disable()
345 mlx4_glue->ack_cq_events(rxq->cq, 1); in mlx4_rx_intr_disable()
mlx4_glue.c
130 mlx4_glue_destroy_cq(struct ibv_cq *cq) in mlx4_glue_destroy_cq() argument
132 return ibv_destroy_cq(cq); in mlx4_glue_destroy_cq()
136 mlx4_glue_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq, in mlx4_glue_get_cq_event() argument
139 return ibv_get_cq_event(channel, cq, cq_context); in mlx4_glue_get_cq_event()
143 mlx4_glue_ack_cq_events(struct ibv_cq *cq, unsigned int nevents) in mlx4_glue_ack_cq_events() argument
145 ibv_ack_cq_events(cq, nevents); in mlx4_glue_ack_cq_events()
mlx4_glue.h
54 int (*destroy_cq)(struct ibv_cq *cq);
56 struct ibv_cq **cq, void **cq_context);
57 void (*ack_cq_events)(struct ibv_cq *cq, unsigned int nevents);
/f-stack/tools/netstat/
route.c
459 char *cq, *cqlim; in fmt_sockaddr() local
461 cq = buf; in fmt_sockaddr()
463 cqlim = cq + sizeof(buf) - sizeof(" ffff"); in fmt_sockaddr()
464 snprintf(cq, sizeof(buf), "(%d)", sa->sa_family); in fmt_sockaddr()
465 cq += strlen(cq); in fmt_sockaddr()
466 while (s < slim && cq < cqlim) { in fmt_sockaddr()
467 snprintf(cq, sizeof(" ff"), " %02x", *s++); in fmt_sockaddr()
468 cq += strlen(cq); in fmt_sockaddr()
470 snprintf(cq, sizeof("ff"), "%02x", *s++); in fmt_sockaddr()
471 cq += strlen(cq); in fmt_sockaddr()
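fmt_sockaddr() above prints the address family tag and then the raw sockaddr bytes as space-separated hex, advancing cq by strlen() after each snprintf() and stopping once cq reaches cqlim so the fixed buffer cannot overflow. The self-contained sketch below reproduces that append loop; the sample bytes and buffer size are arbitrary.

/* Standalone sketch of the bounded hex-append loop used in fmt_sockaddr();
 * the sample bytes and buffer size are arbitrary. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const unsigned char sa_bytes[] = { 0x10, 0x02, 0x1f, 0x90,
					   0x7f, 0x00, 0x00, 0x01 };
	const unsigned char *s = sa_bytes;
	const unsigned char *slim = sa_bytes + sizeof(sa_bytes);
	char buf[32];
	char *cq = buf;
	/* Reserve room for one final " ffff"-sized token plus the NUL. */
	char *cqlim = cq + sizeof(buf) - sizeof(" ffff");

	snprintf(cq, sizeof(buf), "(%d)", 2 /* e.g. AF_INET */);
	cq += strlen(cq);
	while (s < slim && cq < cqlim) {
		snprintf(cq, sizeof(" ff"), " %02x", *s++);
		cq += strlen(cq);
	}
	printf("%s\n", buf);
	return 0;
}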
/f-stack/dpdk/drivers/net/af_xdp/
rte_eth_af_xdp.c
98 struct xsk_ring_cons cq; member
417 xsk_ring_cons__release(cq, n); in pull_umem_cq()
440 cq); in kick_tx()
457 struct xsk_ring_cons *cq = &txq->pair->cq; in af_xdp_tx_zc() local
468 kick_tx(txq, cq); in af_xdp_tx_zc()
493 kick_tx(txq, cq); in af_xdp_tx_zc()
517 kick_tx(txq, cq); in af_xdp_tx_zc()
539 struct xsk_ring_cons *cq = &txq->pair->cq; in af_xdp_tx_cp() local
551 kick_tx(txq, cq); in af_xdp_tx_cp()
574 kick_tx(txq, cq); in af_xdp_tx_cp()
[all …]
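In the AF_XDP PMD, cq is the UMEM completion ring through which the kernel hands transmitted frame addresses back to user space; pull_umem_cq() and kick_tx() above drain it with the xsk_ring_cons helpers. The function-only sketch below shows that drain step, assuming libbpf's <bpf/xsk.h> helpers (newer setups ship them in libxdp's <xdp/xsk.h>); the buffer-recycling policy is illustrative, not the PMD's.

/* Sketch: drain up to 'max' completed TX buffer addresses from an AF_XDP
 * completion ring. Assumes libbpf's xsk helpers; the recycling policy is
 * illustrative, not the af_xdp PMD's. */
#include <bpf/xsk.h>

static unsigned int demo_pull_umem_cq(struct xsk_ring_cons *cq,
				      unsigned int max, __u64 *addrs)
{
	__u32 idx_cq = 0;
	unsigned int i;
	unsigned int n = xsk_ring_cons__peek(cq, max, &idx_cq);

	for (i = 0; i < n; i++) {
		/* Each completion entry is the UMEM address of a frame the
		 * kernel has finished transmitting; hand it back to the pool. */
		addrs[i] = *xsk_ring_cons__comp_addr(cq, idx_cq++);
	}
	if (n > 0)
		xsk_ring_cons__release(cq, n);
	return n;
}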
/f-stack/freebsd/contrib/rdma/krping/
krping.c
175 struct ib_cq *cq; member
364 BUG_ON(cb->cq != cq); in krping_cq_event_handler()
629 init_attr.send_cq = cb->cq; in krping_create_qp()
630 init_attr.recv_cq = cb->cq; in krping_create_qp()
649 ib_destroy_cq(cb->cq); in krping_free_qp()
671 if (IS_ERR(cb->cq)) { in krping_setup_qp()
673 ret = PTR_ERR(cb->cq); in krping_setup_qp()
694 ib_destroy_cq(cb->cq); in krping_setup_qp()
947 ib_req_notify_cq(cb->cq, in rlat_test()
1619 ne = ib_poll_cq(cb->cq, 1, &wc); in krping_rlat_test_client()
[all …]
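krping polls its completion queue with the kernel verbs call ib_poll_cq() and re-arms it with ib_req_notify_cq(). The userspace libibverbs API follows the same shape, and the function-only sketch below drains a CQ with ibv_poll_cq(); the error handling is illustrative, not krping's.

/* Sketch: drain a userspace verbs CQ with ibv_poll_cq(), the libibverbs
 * counterpart of the kernel ib_poll_cq() calls in krping.c. The error
 * handling is illustrative only. */
#include <stdio.h>
#include <infiniband/verbs.h>

static int demo_drain_cq(struct ibv_cq *cq)
{
	struct ibv_wc wc;
	int total = 0;
	int ne;

	/* ibv_poll_cq() returns the number of completions written to wc,
	 * 0 when the CQ is empty, or a negative value on error. */
	while ((ne = ibv_poll_cq(cq, 1, &wc)) > 0) {
		if (wc.status != IBV_WC_SUCCESS) {
			fprintf(stderr, "bad completion: status %d\n", wc.status);
			return -1;
		}
		total++;
	}
	return ne < 0 ? ne : total;
}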
