Searched refs:cqe (Results 1 – 25 of 34) sorted by relevance

/f-stack/dpdk/drivers/net/mlx4/
mlx4_rxtx.c
309 volatile struct mlx4_cqe *cqe; in mlx4_txq_complete() local
331 (volatile struct mlx4_err_cqe *)cqe; in mlx4_txq_complete()
1158 flags = (rte_be_to_cpu_32(cqe->status) & in mlx4_cqe_flags()
1161 flags |= (rte_be_to_cpu_32(cqe->vlan_my_qpn) & in mlx4_cqe_flags()
1184 volatile struct mlx4_cqe *cqe = NULL; in mlx4_cq_poll_one() local
1199 ret = rte_be_to_cpu_32(cqe->byte_cnt); in mlx4_cq_poll_one()
1202 *out = cqe; in mlx4_cq_poll_one()
1232 volatile struct mlx4_cqe *cqe; in mlx4_rx_burst() local
1265 len = mlx4_cq_poll_one(rxq, &cqe); in mlx4_rx_burst()
1282 pkt->hash.rss = cqe->immed_rss_invalid; in mlx4_rx_burst()
[all …]
mlx4_glue.c
123 mlx4_glue_create_cq(struct ibv_context *context, int cqe, void *cq_context, in mlx4_glue_create_cq() argument
126 return ibv_create_cq(context, cqe, cq_context, channel, comp_vector); in mlx4_glue_create_cq()
mlx4_glue.h
50 struct ibv_cq *(*create_cq)(struct ibv_context *context, int cqe,
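
In both glue hits the `cqe` argument is simply the requested number of completion-queue entries, passed straight through to libibverbs. A minimal sketch of the underlying call, assuming an already-opened `ctx` and with error handling trimmed:

    #include <infiniband/verbs.h>

    /* Request a completion queue with room for at least 256 entries; no
     * completion channel, completion vector 0.  The provider may round the
     * size up and reports the granted depth back in cq->cqe. */
    static struct ibv_cq *demo_create_cq(struct ibv_context *ctx)
    {
            return ibv_create_cq(ctx, 256, NULL, NULL, 0);
    }
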
/f-stack/dpdk/drivers/regex/mlx5/
mlx5_regex_fastpath.c
223 volatile struct mlx5_cqe *cqe; in poll_one() local
227 cqe = (volatile struct mlx5_cqe *)(cq->cqe + next_cqe_offset); in poll_one()
230 int ret = check_cqe(cqe, cq_size_get(cq), cq->ci); in poll_one()
240 return cqe; in poll_one()
266 volatile struct mlx5_cqe *cqe; in mlx5_regexdev_dequeue() local
269 while ((cqe = poll_one(cq))) { in mlx5_regexdev_dequeue()
271 = (rte_be_to_cpu_16(cqe->wqe_counter) + 1) & in mlx5_regexdev_dequeue()
273 size_t sqid = cqe->rsvd3[2]; in mlx5_regexdev_dequeue()
mlx5_regex_control.c
62 if (cq->cqe) { in regex_ctrl_destroy_cq()
63 rte_free((void *)(uintptr_t)cq->cqe); in regex_ctrl_destroy_cq()
64 cq->cqe = NULL; in regex_ctrl_destroy_cq()
118 cq->cqe = buf; in regex_ctrl_create_cq()
120 cq->cqe[i].op_own = 0xff; in regex_ctrl_create_cq()
mlx5_regex.h
36 volatile struct mlx5_cqe *cqe; /* The CQ ring buffer. */ member
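
The regex_ctrl_create_cq() hit above writes 0xff into every entry's op_own when the ring is created: marking each slot as invalid and hardware-owned up front means the poll path cannot mistake stale memory for a finished completion. A rough sketch of that initialization pattern, with an invented struct standing in only loosely for the real 64-byte mlx5 CQE:

    #include <stdint.h>
    #include <stdlib.h>

    struct demo_cqe {
            uint8_t rsvd[63];
            uint8_t op_own;   /* opcode in the high nibble, owner bit in bit 0 */
    };

    /* Allocate a CQ ring and mark every entry invalid/HW-owned. */
    static struct demo_cqe *demo_cq_ring_alloc(size_t n_entries)
    {
            struct demo_cqe *ring = calloc(n_entries, sizeof(*ring));

            if (ring == NULL)
                    return NULL;
            for (size_t i = 0; i < n_entries; i++)
                    ring[i].op_own = 0xff;   /* invalid opcode + owner bit set */
            return ring;
    }
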
/f-stack/dpdk/drivers/net/mlx5/
mlx5_flow_age.c
434 mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe) in mlx5_aso_dump_err_objs() argument
440 DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1], in mlx5_aso_dump_err_objs()
441 cqe[i + 2], cqe[i + 3]); in mlx5_aso_dump_err_objs()
459 volatile struct mlx5_err_cqe *cqe = in mlx5_aso_cqe_err_handle() local
463 idx = rte_be_to_cpu_16(cqe->wqe_counter) & (1u << sq->log_desc_n); in mlx5_aso_cqe_err_handle()
464 mlx5_aso_dump_err_objs((volatile uint32_t *)cqe, in mlx5_aso_cqe_err_handle()
560 volatile struct mlx5_cqe *restrict cqe; in mlx5_aso_completion_handle() local
574 cqe = &cq->cqes[idx]; in mlx5_aso_completion_handle()
575 ret = check_cqe(cqe, cq_size, cq->cq_ci); in mlx5_aso_completion_handle()
mlx5_rxtx.c
464 volatile struct mlx5_cqe *cqe; in rx_queue_count() local
485 op_own = cqe->op_own; in rx_queue_count()
1214 op_own = cqe->op_own; in mlx5_rx_poll_len()
1329 mark = cqe->sop_drop_qpn; in rxq_cq_to_mbuf()
1344 cqe->flow_table_metadata; in rxq_cq_to_mbuf()
1412 rte_prefetch0(cqe); in mlx5_rx_burst()
1449 if (cqe->lro_num_seg > 1) { in mlx5_rx_burst()
1694 cqe->wqe_counter : in mlx5_rx_burst_mprq()
1722 if (cqe->lro_num_seg > 1) { in mlx5_rx_burst_mprq()
2118 cqe->wqe_counter); in mlx5_tx_handle_completion()
[all …]
mlx5_txpp.c
187 struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes; in mlx5_txpp_fill_cqe_rearm_queue() local
191 cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK; in mlx5_txpp_fill_cqe_rearm_queue()
192 ++cqe; in mlx5_txpp_fill_cqe_rearm_queue()
687 uint64_t *cqe = (uint64_t *)from; in mlx5_atomic_read_cqe()
698 tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED); in mlx5_atomic_read_cqe()
699 op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED); in mlx5_atomic_read_cqe()
701 if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED)) in mlx5_atomic_read_cqe()
703 if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED)) in mlx5_atomic_read_cqe()
810 volatile struct mlx5_cqe *cqe; in mlx5_txpp_handle_rearm_queue() local
812 cqe = &wq->cqes[cq_ci & (MLX5_TXPP_REARM_CQ_SIZE - 1)]; in mlx5_txpp_handle_rearm_queue()
[all …]
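
The mlx5_atomic_read_cqe() hits above read the CQE as two 64-bit words with relaxed atomic loads and retry until both words re-read unchanged, giving a consistent snapshot without locking against the hardware writer. A hedged sketch of that re-read loop, with illustrative names rather than the driver's:

    #include <stdint.h>

    /* Snapshot two adjacent 64-bit words of a CQE; retry until neither
     * word changed between reads, so the pair is a consistent view even
     * while the device keeps rewriting the entry. */
    static void demo_read_cqe_consistent(const uint64_t *cqe, uint64_t out[2])
    {
            uint64_t a, b;

            do {
                    a = __atomic_load_n(&cqe[0], __ATOMIC_RELAXED);
                    b = __atomic_load_n(&cqe[1], __ATOMIC_RELAXED);
            } while (a != __atomic_load_n(&cqe[0], __ATOMIC_RELAXED) ||
                     b != __atomic_load_n(&cqe[1], __ATOMIC_RELAXED));
            out[0] = a;
            out[1] = b;
    }
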
mlx5_txq.c
133 volatile struct mlx5_cqe *cqe; in txq_sync_cq() local
138 cqe = &txq->cqes[txq->cq_ci & txq->cqe_m]; in txq_sync_cq()
139 ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci); in txq_sync_cq()
151 cqe = &txq->cqes[i]; in txq_sync_cq()
152 cqe->op_own = MLX5_CQE_INVALIDATE; in txq_sync_cq()
mlx5_rxq.c
472 volatile struct mlx5_cqe *cqe; in rxq_sync_cq() local
477 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask]; in rxq_sync_cq()
478 ret = check_cqe(cqe, cqe_n, rxq->cq_ci); in rxq_sync_cq()
486 if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) { in rxq_sync_cq()
491 rxq->cq_ci += rte_be_to_cpu_32(cqe->byte_cnt); in rxq_sync_cq()
496 cqe = &(*rxq->cqes)[i]; in rxq_sync_cq()
497 cqe->op_own = MLX5_CQE_INVALIDATE; in rxq_sync_cq()
mlx5_devx.c
1201 struct mlx5_cqe *cqe; in mlx5_txq_create_devx_cq_resources() local
1287 cqe = (struct mlx5_cqe *)txq_obj->cq_buf; in mlx5_txq_create_devx_cq_resources()
1289 cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK; in mlx5_txq_create_devx_cq_resources()
1290 ++cqe; in mlx5_txq_create_devx_cq_resources()
/f-stack/dpdk/drivers/net/qede/
qede_rxtx.c
1375 cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0])); in qede_rx_process_tpa_cont_cqe()
1378 cqe->len_list[0]); in qede_rx_process_tpa_cont_cqe()
1389 cqe->len_list[0]); in qede_rx_process_tpa_end_cqe()
1393 rx_mb->nb_segs = cqe->num_of_bds; in qede_rx_process_tpa_end_cqe()
1397 " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason, in qede_rx_process_tpa_end_cqe()
1527 union eth_rx_cqe *cqe; in qede_recv_pkts_regular() local
1584 cqe = in qede_recv_pkts_regular()
1749 union eth_rx_cqe *cqe; in qede_recv_pkts() local
1809 cqe = in qede_recv_pkts()
2747 orig_cqe = cqe;
[all …]
/f-stack/dpdk/drivers/net/qede/base/
ecore_sp_api.h
41 struct eth_slow_path_rx_cqe *cqe);
ecore_spq.c
480 *cqe, in ecore_cqe_completion()
484 return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol); in ecore_cqe_completion()
490 return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL); in ecore_cqe_completion()
494 struct eth_slow_path_rx_cqe *cqe) in ecore_eth_cqe_completion() argument
498 rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH); in ecore_eth_cqe_completion()
502 cqe->ramrod_cmd_id); in ecore_eth_cqe_completion()
eth_common.h
513 union eth_rx_cqe cqe /* CQE data itself */; member
/f-stack/dpdk/drivers/common/mlx5/
mlx5_common.h
190 check_cqe(volatile struct mlx5_cqe *cqe, const uint16_t cqes_n, in check_cqe() argument
194 const uint8_t op_own = cqe->op_own; in check_cqe()
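
check_cqe() is the common validity test the mlx5 poll loops above call. A hedged sketch of what an owner-bit check of this shape typically does (the real implementation may differ in detail): the opcode sits in the high nibble of op_own, the owner bit in bit 0, and hardware flips that bit on every lap of the ring, so software accepts an entry only when the bit matches the parity implied by its own consumer index.

    #include <stdint.h>

    enum demo_cqe_status { DEMO_CQE_HW_OWN, DEMO_CQE_SW_OWN, DEMO_CQE_ERR };

    #define DEMO_CQE_OPCODE(op_own)  ((op_own) >> 4)
    #define DEMO_CQE_OWNER(op_own)   ((op_own) & 0x1)
    #define DEMO_OPCODE_INVALID      0xf   /* illustrative value */
    #define DEMO_OPCODE_RESP_ERR     0xd   /* illustrative value */

    static enum demo_cqe_status
    demo_check_cqe(uint8_t op_own, uint16_t cq_size /* power of two */,
                   uint16_t ci)
    {
            unsigned int lap_parity = (ci / cq_size) & 0x1;

            if (DEMO_CQE_OPCODE(op_own) == DEMO_OPCODE_INVALID ||
                DEMO_CQE_OWNER(op_own) != lap_parity)
                    return DEMO_CQE_HW_OWN;   /* not written for us yet */
            if (DEMO_CQE_OPCODE(op_own) == DEMO_OPCODE_RESP_ERR)
                    return DEMO_CQE_ERR;
            return DEMO_CQE_SW_OWN;
    }
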
/f-stack/dpdk/drivers/net/hinic/
hinic_pmd_rx.c
995 struct hinic_rq_cqe cqe; in hinic_recv_pkts() local
1012 hinic_rq_cqe_be_to_cpu32(&cqe, (volatile void *)rx_cqe); in hinic_recv_pkts()
1013 vlan_len = cqe.vlan_len; in hinic_recv_pkts()
1047 offload_type = cqe.offload_type; in hinic_recv_pkts()
1054 rxm->ol_flags |= hinic_rx_csum(cqe.status, rxq); in hinic_recv_pkts()
1057 rss_hash = cqe.rss_hash; in hinic_recv_pkts()
1062 lro_num = HINIC_GET_RX_NUM_LRO(cqe.status); in hinic_recv_pkts()
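
The hinic receive path first snapshots the big-endian hardware CQE into a local, host-order copy (hinic_rq_cqe_be_to_cpu32) and only then decodes fields from it, so parsing does not race with the device rewriting the ring slot. A sketch of that copy-and-byteswap pattern using DPDK's rte_be_to_cpu_32(); the struct layout is invented for illustration:

    #include <stddef.h>
    #include <stdint.h>
    #include <rte_byteorder.h>

    struct demo_rq_cqe {
            uint32_t status;
            uint32_t vlan_len;
            uint32_t offload_type;
            uint32_t rss_hash;
    };

    /* Copy a big-endian CQE out of the DMA-visible ring into a local,
     * host-order struct, one 32-bit word at a time. */
    static void demo_cqe_be_to_cpu(struct demo_rq_cqe *dst,
                                   const volatile void *src)
    {
            const volatile uint32_t *s = src;
            uint32_t *d = (uint32_t *)dst;
            size_t n = sizeof(*dst) / sizeof(uint32_t);

            for (size_t i = 0; i < n; i++)
                    d[i] = rte_be_to_cpu_32(s[i]);
    }
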
/f-stack/freebsd/contrib/device-tree/Bindings/mmc/
brcm,sdhci-brcmstb.txt
41 supports-cqe;
mmc-controller.yaml
261 supports-cqe:
267 disable-cqe-dcmd:
/f-stack/freebsd/contrib/ena-com/
ena_com.c
487 struct ena_admin_acq_entry *cqe) in ena_com_handle_single_admin_completion() argument
492 cmd_id = cqe->acq_common_descriptor.command & in ena_com_handle_single_admin_completion()
504 comp_ctx->comp_status = cqe->acq_common_descriptor.status; in ena_com_handle_single_admin_completion()
507 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size); in ena_com_handle_single_admin_completion()
515 struct ena_admin_acq_entry *cqe = NULL; in ena_com_handle_admin_completion() local
523 cqe = &admin_queue->cq.entries[head_masked]; in ena_com_handle_admin_completion()
526 while ((READ_ONCE8(cqe->acq_common_descriptor.flags) & in ena_com_handle_admin_completion()
532 ena_com_handle_single_admin_completion(admin_queue, cqe); in ena_com_handle_admin_completion()
541 cqe = &admin_queue->cq.entries[head_masked]; in ena_com_handle_admin_completion()
/f-stack/dpdk/drivers/net/ena/base/
ena_com.c
445 struct ena_admin_acq_entry *cqe) in ena_com_handle_single_admin_completion() argument
450 cmd_id = cqe->acq_common_descriptor.command & in ena_com_handle_single_admin_completion()
461 comp_ctx->comp_status = cqe->acq_common_descriptor.status; in ena_com_handle_single_admin_completion()
464 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size); in ena_com_handle_single_admin_completion()
472 struct ena_admin_acq_entry *cqe = NULL; in ena_com_handle_admin_completion() local
480 cqe = &admin_queue->cq.entries[head_masked]; in ena_com_handle_admin_completion()
483 while ((READ_ONCE8(cqe->acq_common_descriptor.flags) & in ena_com_handle_admin_completion()
489 ena_com_handle_single_admin_completion(admin_queue, cqe); in ena_com_handle_admin_completion()
498 cqe = &admin_queue->cq.entries[head_masked]; in ena_com_handle_admin_completion()
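
Both ena_com.c copies poll the admin completion queue with a phase bit: each entry's flags byte carries a phase that the device toggles every time it wraps the queue, and software consumes entries only while that bit matches the phase it expects. A loose sketch of that loop with illustrative types (the real driver also issues a read barrier between checking the phase and reading the rest of the entry):

    #include <stdint.h>

    #define DEMO_PHASE_MASK 0x1   /* illustrative bit position */

    struct demo_acq_entry { uint8_t flags; /* ... payload ... */ };

    struct demo_admin_cq {
            struct demo_acq_entry *entries;
            uint16_t q_depth;   /* power of two */
            uint16_t head;
            uint8_t  phase;
    };

    static void demo_handle_admin_completions(struct demo_admin_cq *cq)
    {
            uint16_t masked = cq->head & (cq->q_depth - 1);
            volatile struct demo_acq_entry *e = &cq->entries[masked];

            while ((e->flags & DEMO_PHASE_MASK) == cq->phase) {
                    /* ... dispatch the completion carried by this entry ... */
                    cq->head++;
                    masked = cq->head & (cq->q_depth - 1);
                    e = &cq->entries[masked];
                    if (masked == 0)
                            cq->phase = !cq->phase;   /* wrapped: expect the other phase */
            }
    }
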
/f-stack/freebsd/contrib/device-tree/src/arm64/qcom/
qcs404-evb.dtsi
220 supports-cqe;
/f-stack/dpdk/drivers/common/mlx5/linux/
mlx5_glue.h
155 struct ibv_cq *(*create_cq)(struct ibv_context *context, int cqe,
mlx5_glue.c
103 mlx5_glue_create_cq(struct ibv_context *context, int cqe, void *cq_context, in mlx5_glue_create_cq() argument
106 return ibv_create_cq(context, cqe, cq_context, channel, comp_vector); in mlx5_glue_create_cq()
