/f-stack/dpdk/drivers/net/mlx5/
mlx5_rxtx_vec.h
     55  S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0);
     57  S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) ==
     58          offsetof(struct mlx5_cqe, pkt_info) + 12);
     59  S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) + 11 ==
     60          offsetof(struct mlx5_cqe, hdr_type_etc));
     61  S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, vlan_info) ==
     62          offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
     64          offsetof(struct mlx5_cqe, byte_cnt));
     66          RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
     67  S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, op_own) ==
    [all …]
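These asserts pin the CQE field offsets at compile time, so the C struct cannot silently drift from the layout the NIC writes into the completion ring. A minimal sketch of the technique; the field names are borrowed from the asserts above, but the descriptor, its types, and its padding are assumptions, not the real mlx5_cqe definition:

```c
#include <assert.h>   /* static_assert (C11) */
#include <stddef.h>   /* offsetof */
#include <stdint.h>

/* Hypothetical descriptor used only to illustrate offset pinning;
 * this is NOT the real mlx5_cqe layout. */
struct hw_desc {
	uint8_t  pkt_info;     /* expected at byte 0 */
	uint8_t  rsvd[11];
	uint32_t rx_hash_res;  /* expected 12 bytes in */
	uint16_t hdr_type_etc;
	uint16_t vlan_info;    /* expected right after hdr_type_etc */
};

/* If a header edit or compiler padding ever moves a field, the
 * build fails here instead of the driver misreading the CQ ring. */
static_assert(offsetof(struct hw_desc, pkt_info) == 0,
	      "pkt_info must start the descriptor");
static_assert(offsetof(struct hw_desc, rx_hash_res) ==
	      offsetof(struct hw_desc, pkt_info) + 12,
	      "rx_hash_res must sit 12 bytes into the descriptor");
static_assert(offsetof(struct hw_desc, vlan_info) ==
	      offsetof(struct hw_desc, hdr_type_etc) + 2,
	      "vlan_info must immediately follow hdr_type_etc");
```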
|
mlx5_rxtx_vec_neon.h (all matches in rxq_cq_process_v())
    798  (container_of(p0, struct mlx5_cqe,
    803  (container_of(p1, struct mlx5_cqe,
    808  (container_of(p2, struct mlx5_cqe,
    813  (container_of(p3, struct mlx5_cqe,
    820  struct mlx5_cqe, pkt_info)->timestamp));
    823  struct mlx5_cqe, pkt_info)->timestamp));
    826  struct mlx5_cqe, pkt_info)->timestamp));
    837  container_of(p0, struct mlx5_cqe,
    840  container_of(p1, struct mlx5_cqe,
    843  container_of(p2, struct mlx5_cqe,
    [all …]
|
mlx5_txpp.c
    187  struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;  in mlx5_txpp_fill_cqe_rearm_queue()
    253  umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;  in mlx5_txpp_create_rearm_queue()
    273  cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?  in mlx5_txpp_create_rearm_queue()
    491  umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_CLKQ_SIZE;  in mlx5_txpp_create_clock_queue()
    511  cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?  in mlx5_txpp_create_clock_queue()
    731  struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;  in mlx5_txpp_update_timestamp()
    810  volatile struct mlx5_cqe *cqe;  in mlx5_txpp_handle_rearm_queue()
   1143  struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;  in mlx5_txpp_read_clock()
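The rearm and clock queues size their umems as sizeof(struct mlx5_cqe) times the ring depth, and derive the DevX cqe_size attribute from the compiled struct width (DPDK builds the CQE as 64 or 128 bytes depending on the cache-line configuration). A hedged sketch of both computations; the constant names and attribute encodings here are illustrative stand-ins, not the driver's:

```c
#include <stddef.h>
#include <stdint.h>

struct cqe { uint8_t raw[64]; };   /* illustrative 64-byte CQE */

#define REARM_CQ_SIZE 4u           /* stand-in for MLX5_TXPP_REARM_CQ_SIZE */

/* Illustrative encodings; the real values come from the PRM headers. */
enum cqe_size_attr { CQE_SIZE_64B = 0, CQE_SIZE_128B = 1 };

/* Line 253's computation: the umem must hold the whole CQE ring. */
static inline size_t
rearm_umem_size(void)
{
	return sizeof(struct cqe) * REARM_CQ_SIZE;
}

/* Line 273's ternary: tell the device which CQE stride it writes. */
static inline enum cqe_size_attr
pick_cqe_size(void)
{
	return (sizeof(struct cqe) == 128) ? CQE_SIZE_128B : CQE_SIZE_64B;
}
```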
|
mlx5_devx.c
    492  cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);  in mlx5_rxq_create_devx_cq_resources()
    499  rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;  in mlx5_rxq_create_devx_cq_resources()
   1201  struct mlx5_cqe *cqe;  in mlx5_txq_create_devx_cq_resources()
   1235  cqe_n * sizeof(struct mlx5_cqe),  in mlx5_txq_create_devx_cq_resources()
   1248  cqe_n * sizeof(struct mlx5_cqe),  in mlx5_txq_create_devx_cq_resources()
   1266  cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?  in mlx5_txq_create_devx_cq_resources()
   1287  cqe = (struct mlx5_cqe *)txq_obj->cq_buf;  in mlx5_txq_create_devx_cq_resources()
   1450  txq_data->cqes = (volatile struct mlx5_cqe *)txq_obj->cq_buf;  in mlx5_txq_devx_obj_new()
|
mlx5_flow_age.c
     61  umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;  in mlx5_aso_cq_create()
     85  attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;  in mlx5_aso_cq_create()
    560  volatile struct mlx5_cqe *restrict cqe;  in mlx5_aso_completion_handle()
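Here (and again in mlx5_vdpa_event.c below) a single umem carries the CQE ring immediately followed by two doorbell records, so the doorbell offset equals the ring's byte size. A sketch of that layout arithmetic, with illustrative type names:

```c
#include <stddef.h>
#include <stdint.h>

struct cqe { uint8_t raw[64]; };   /* illustrative 64-byte CQE */
typedef uint32_t db_rec_t;         /* stand-in doorbell record word */

/* Ring plus two doorbell records in one registered region
 * (line 61's umem_size). */
static inline size_t
cq_umem_size(uint16_t cq_size)
{
	return sizeof(struct cqe) * cq_size + sizeof(db_rec_t) * 2;
}

/* The doorbells start right after the ring (line 85's db_umem_offset). */
static inline size_t
cq_db_offset(uint16_t cq_size)
{
	return sizeof(struct cqe) * cq_size;
}
```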
|
mlx5_rxtx.c
     91  rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
     95  volatile struct mlx5_cqe *cqe,
    104  volatile struct mlx5_cqe *__rte_restrict cqe,
    109  volatile struct mlx5_cqe *__rte_restrict cqe,
    464  volatile struct mlx5_cqe *cqe;  in rx_queue_count()
   1011  volatile struct mlx5_cqe *cqe;  in mlx5_rx_err_handle()
   1303  volatile struct mlx5_cqe *cqe,  in rxq_cq_to_mbuf()
   1395  volatile struct mlx5_cqe *cqe =  in mlx5_rx_burst()
   2035  volatile struct mlx5_cqe *last_cqe,  in mlx5_tx_comp_flush()
   2069  volatile struct mlx5_cqe *last_cqe = NULL;  in mlx5_tx_handle_completion()
    [all …]
|
mlx5_rxtx_vec.c
    287  volatile struct mlx5_cqe *cq;  in rxq_burst_v()
    427  volatile struct mlx5_cqe *cq;  in rxq_burst_mprq_v()
|
mlx5_rxtx.h
    148  volatile struct mlx5_cqe(*cqes)[];
    266  volatile struct mlx5_cqe *cqes; /* Completion queue. */
|
mlx5_rxtx_vec_sse.h
     73  rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,  in rxq_cq_decompress_v()
    520  rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,  in rxq_cq_process_v()
|
mlx5.h
    487  volatile struct mlx5_cqe *cqes;
    637  volatile struct mlx5_cqe *cqes;
|
mlx5_rxtx_vec_altivec.h
     76  rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,  in rxq_cq_decompress_v()
    780  rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,  in rxq_cq_process_v()
|
mlx5_txq.c
    133  volatile struct mlx5_cqe *cqe;  in txq_sync_cq()
|
mlx5_rxq.c
    472  volatile struct mlx5_cqe *cqe;  in rxq_sync_cq()
|
/f-stack/dpdk/drivers/regex/mlx5/
mlx5_regex_fastpath.c
    220  static inline volatile struct mlx5_cqe *
    223  volatile struct mlx5_cqe *cqe;  in poll_one()
    227  cqe = (volatile struct mlx5_cqe *)(cq->cqe + next_cqe_offset);  in poll_one()
    266  volatile struct mlx5_cqe *cqe;  in mlx5_regexdev_dequeue()
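poll_one() walks the ring by adding an offset derived from the consumer index to the CQ base pointer; with a power-of-two ring depth the wrap reduces to a mask. A sketch of that indexing under assumed field and constant names (the real struct lives in mlx5_regex.h):

```c
#include <stdint.h>

struct cqe { uint8_t raw[64]; };   /* illustrative 64-byte CQE */

#define CQ_LOG_NB_DESC 4u                 /* ring holds 1 << 4 CQEs */
#define CQ_NB_DESC     (1u << CQ_LOG_NB_DESC)

struct cq {
	volatile struct cqe *cqe;         /* CQ ring buffer */
	uint32_t ci;                      /* consumer index */
};

/* Mask the consumer index into the power-of-two ring, in the
 * spirit of line 227's cq->cqe + next_cqe_offset. */
static inline volatile struct cqe *
next_cqe(struct cq *cq)
{
	return &cq->cqe[cq->ci & (CQ_NB_DESC - 1)];
}
```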
|
mlx5_regex.h
     36  volatile struct mlx5_cqe *cqe; /* The CQ ring buffer. */
|
mlx5_regex_control.c
    112  buf = rte_calloc(NULL, 1, sizeof(struct mlx5_cqe) * cq_size, 4096);  in regex_ctrl_create_cq()
    122  sizeof(struct mlx5_cqe) *  in regex_ctrl_create_cq()
|
/f-stack/dpdk/drivers/vdpa/mlx5/
mlx5_vdpa.h
     53  volatile struct mlx5_cqe *cqes;
|
mlx5_vdpa_event.c
    145  umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;  in mlx5_vdpa_cq_create()
    168  attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;  in mlx5_vdpa_cq_create()
|
/f-stack/dpdk/drivers/common/mlx5/
mlx5_common.h
    190  check_cqe(volatile struct mlx5_cqe *cqe, const uint16_t cqes_n,  in check_cqe()
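check_cqe() is the validity test shared by all the pollers above: the hardware toggles the CQE owner bit on every pass around the ring, so a fresh entry is one whose owner bit matches the wrap parity of the consumer index (the opcode check for invalid/error CQEs is omitted in this sketch). A minimal sketch of the ownership half, with assumed names:

```c
#include <stdbool.h>
#include <stdint.h>

#define OWNER_MASK 0x1u   /* stand-in for the PRM owner-bit mask */

/* cqes_n is the power-of-two ring size; (ci & cqes_n) changes
 * every time the consumer index wraps, matching the bit the
 * hardware flips once per pass around the ring. */
static inline bool
cqe_is_owned_by_sw(uint8_t op_own, uint16_t cqes_n, uint16_t ci)
{
	const uint8_t expected = (ci & cqes_n) ? OWNER_MASK : 0;

	return (op_own & OWNER_MASK) == expected;
}
```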
|
mlx5_prm.h
    377  struct mlx5_cqe {
|
/f-stack/dpdk/drivers/net/mlx5/linux/
mlx5_verbs.c
    409  rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;  in mlx5_rxq_ibv_obj_new()
    999  txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;  in mlx5_txq_ibv_obj_new()
|