
Searched refs:sq (Results 1 – 25 of 54) sorted by relevance


/f-stack/dpdk/drivers/net/mlx5/
mlx5_flow_age.c
193 if (sq->sq) { in mlx5_aso_destroy_sq()
194 mlx5_devx_cmd_destroy(sq->sq); in mlx5_aso_destroy_sq()
195 sq->sq = NULL; in mlx5_aso_destroy_sq()
200 memset(sq, 0, sizeof(*sq)); in mlx5_aso_destroy_sq()
310 sq->sq = mlx5_devx_cmd_create_sq(ctx, &attr); in mlx5_aso_sq_create()
311 if (!sq->sq) { in mlx5_aso_sq_create()
326 sq->sqn = sq->sq->id; in mlx5_aso_sq_create()
387 max = RTE_MIN(size - (uint16_t)(sq->head - sq->tail), n - sq->next); in mlx5_aso_sq_enqueue_burst()
392 wqe = &sq->wqes[sq->head & mask]; in mlx5_aso_sq_enqueue_burst()
393 rte_prefetch0(&sq->wqes[(sq->head + 1) & mask]); in mlx5_aso_sq_enqueue_burst()
[all …]
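
Aside: the mlx5_flow_age.c matches show the standard power-of-two ring idiom — head and tail are free-running 16-bit counters, head & mask selects the physical slot, and the unsigned difference (uint16_t)(head - tail) gives occupancy even after the counters wrap. A minimal standalone sketch of that idiom (the struct ring type and names below are hypothetical, not the mlx5 structures):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 16u                 /* must be a power of two */
    #define RING_MASK (RING_SIZE - 1u)

    struct ring {
            uint16_t head;                /* free-running producer counter */
            uint16_t tail;                /* free-running consumer counter */
            int slot[RING_SIZE];
    };

    /* Correct across uint16_t wraparound: the subtraction is unsigned. */
    static uint16_t ring_used(const struct ring *r)
    {
            return (uint16_t)(r->head - r->tail);
    }

    static int ring_push(struct ring *r, int v)
    {
            if (ring_used(r) == RING_SIZE)
                    return -1;                    /* full */
            r->slot[r->head & RING_MASK] = v;     /* mask picks the slot */
            r->head++;                            /* counter never resets */
            return 0;
    }

    int main(void)
    {
            struct ring r = { .head = 65530, .tail = 65530 };
            for (int i = 0; i < 10; i++)
                    ring_push(&r, i);
            printf("used = %u\n", ring_used(&r)); /* 10, despite the wrap */
            return 0;
    }
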
mlx5_trigger.c
218 struct mlx5_devx_obj *sq; in mlx5_hairpin_auto_bind() local
262 sq = txq_ctrl->obj->sq; in mlx5_hairpin_auto_bind()
293 ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr); in mlx5_hairpin_auto_bind()
298 rq_attr.hairpin_peer_sq = sq->id; in mlx5_hairpin_auto_bind()
370 if (txq_ctrl->obj == NULL || txq_ctrl->obj->sq == NULL) { in mlx5_hairpin_queue_peer_update()
377 peer_info->qp_id = txq_ctrl->obj->sq->id; in mlx5_hairpin_queue_peer_update()
470 if (txq_ctrl->obj == NULL || txq_ctrl->obj->sq == NULL) { in mlx5_hairpin_queue_peer_bind()
507 ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq, &sq_attr); in mlx5_hairpin_queue_peer_bind()
616 if (!txq_ctrl->obj || !txq_ctrl->obj->sq) { in mlx5_hairpin_queue_peer_unbind()
625 ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq, &sq_attr); in mlx5_hairpin_queue_peer_unbind()
[all …]
mlx5_txpp.c
123 if (wq->sq) in mlx5_txpp_destroy_send_queue()
124 claim_zero(mlx5_devx_cmd_destroy(wq->sq)); in mlx5_txpp_destroy_send_queue()
211 cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2); in mlx5_txpp_fill_wqe_rearm_queue()
223 cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2); in mlx5_txpp_fill_wqe_rearm_queue()
339 wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr); in mlx5_txpp_create_rearm_queue()
340 if (!wq->sq) { in mlx5_txpp_create_rearm_queue()
352 ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr); in mlx5_txpp_create_rearm_queue()
387 cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | in mlx5_txpp_fill_wqe_clock_queue()
592 wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr); in mlx5_txpp_create_clock_queue()
593 if (!wq->sq) { in mlx5_txpp_create_clock_queue()
[all …]
/f-stack/freebsd/kern/
subr_sleepqueue.c
286 return (sq); in sleepq_lookup()
341 if (sq == NULL) { in sleepq_add()
431 if (sq == NULL) in sleepq_sleepcnt()
727 if (sq == NULL) in sleepq_type()
867 sq = mem; in sleepq_dtor()
885 sq = mem; in sleepq_init()
910 if (sq == NULL) in sleepq_signal()
967 if (sq == NULL) in sleepq_broadcast()
1082 MPASS(sq != NULL); in sleepq_remove()
1131 MPASS(sq != NULL); in sleepq_abort()
[all …]
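
Aside: these matches are FreeBSD's sleepqueue(9) implementation. A hedged sketch of the consumer-side pattern it supports, based on the documented KPI (the wait channel, mutex, and function bodies below are illustrative, not code from this tree):

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>
    #include <sys/sleepqueue.h>

    /* Sleep until wake_all() is called on the same wait channel. */
    static void
    wait_on(void *wchan, struct mtx *m)
    {
            sleepq_lock(wchan);             /* lock the sleepqueue chain */
            sleepq_add(wchan, &m->lock_object, "examp", SLEEPQ_SLEEP, 0);
            mtx_unlock(m);                  /* safe: already on the queue */
            sleepq_wait(wchan, 0);          /* block until signalled */
            mtx_lock(m);
    }

    static void
    wake_all(void *wchan)
    {
            sleepq_lock(wchan);
            /* nonzero return asks for a swapper kick; ignored here */
            (void)sleepq_broadcast(wchan, SLEEPQ_SLEEP, 0, 0);
            sleepq_release(wchan);
    }
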
kern_sig.c
311 struct proc *p = sq->sq_proc; in sigqueue_get()
356 sigqueue_t *sq; in sigqueue_take() local
361 p = sq->sq_proc; in sigqueue_take()
380 struct proc *p = sq->sq_proc; in sigqueue_add()
401 si->ksi_sigq = sq; in sigqueue_add()
425 ksi->ksi_sigq = sq; in sigqueue_add()
448 sigqueue_flush(sigqueue_t *sq) in sigqueue_flush() argument
450 struct proc *p = sq->sq_proc; in sigqueue_flush()
465 SIGEMPTYSET(sq->sq_signals); in sigqueue_flush()
466 SIGEMPTYSET(sq->sq_kill); in sigqueue_flush()
[all …]
/f-stack/dpdk/drivers/regex/mlx5/
mlx5_regex_control.c
221 sq->wqe = buf; in regex_ctrl_create_sq()
224 sq->ci = 0; in regex_ctrl_create_sq()
225 sq->pi = 0; in regex_ctrl_create_sq()
249 if (!sq->obj) { in regex_ctrl_create_sq()
264 if (sq->wqe_umem) in regex_ctrl_create_sq()
269 mlx5_release_dbr(&priv->dbrpgs, sq->dbr_umem, sq->dbr_offset); in regex_ctrl_create_sq()
304 if (sq->wqe) { in regex_ctrl_destroy_sq()
306 sq->wqe = NULL; in regex_ctrl_destroy_sq()
309 mlx5_release_dbr(&priv->dbrpgs, sq->dbr_umem, sq->dbr_offset); in regex_ctrl_destroy_sq()
312 if (sq->obj) { in regex_ctrl_destroy_sq()
[all …]
mlx5_regex_fastpath.c
106 size_t wqe_offset = (sq->pi & (sq_size_get(sq) - 1)) * MLX5_SEND_WQE_BB; in prep_one()
131 sq->db_pi = sq->pi; in prep_one()
132 sq->pi = (sq->pi + 1) & MLX5_REGEX_MAX_WQE_INDEX; in prep_one()
138 size_t wqe_offset = (sq->db_pi & (sq_size_get(sq) - 1)) * in send_doorbell()
145 sq->dbr[MLX5_SND_DBR] = rte_cpu_to_be_32((sq->db_pi + 1) & in send_doorbell()
154 return ((uint16_t)(sq->pi - sq->ci) < sq_size_get(sq)); in can_send()
173 sq = &queue->sqs[sqid]; in mlx5_regexdev_enqueue()
174 while (can_send(sq)) { in mlx5_regexdev_enqueue()
175 job_id = job_id_get(sqid, sq_size_get(sq), sq->pi); in mlx5_regexdev_enqueue()
281 sq->ci); in mlx5_regexdev_dequeue()
[all …]
/f-stack/freebsd/contrib/ck/include/
ck_sequence.h
42 ck_sequence_init(struct ck_sequence *sq) in ck_sequence_init() argument
45 ck_pr_store_uint(&sq->sequence, 0); in ck_sequence_init()
50 ck_sequence_read_begin(const struct ck_sequence *sq) in ck_sequence_read_begin() argument
55 version = ck_pr_load_uint(&sq->sequence); in ck_sequence_read_begin()
77 ck_sequence_read_retry(const struct ck_sequence *sq, unsigned int version) in ck_sequence_read_retry() argument
85 return ck_pr_load_uint(&sq->sequence) != version; in ck_sequence_read_retry()
97 ck_sequence_write_begin(struct ck_sequence *sq) in ck_sequence_write_begin() argument
104 ck_pr_store_uint(&sq->sequence, sq->sequence + 1); in ck_sequence_write_begin()
113 ck_sequence_write_end(struct ck_sequence *sq) in ck_sequence_write_end() argument
121 ck_pr_store_uint(&sq->sequence, sq->sequence + 1); in ck_sequence_write_end()
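
Aside: ck_sequence is Concurrency Kit's seqlock. The usual pattern built from the functions matched above — readers loop until a version check confirms no writer raced, writers bump the counter around each update. Sketch only; the shared point data and function names are illustrative:

    #include <ck_sequence.h>

    static struct ck_sequence seq;
    static struct { unsigned int x, y; } point;  /* shared data */

    void setup(void)
    {
            ck_sequence_init(&seq);
    }

    /* Readers retry until they observe a stable (even, unchanged) version. */
    void read_point(unsigned int *x, unsigned int *y)
    {
            unsigned int version;
            do {
                    version = ck_sequence_read_begin(&seq);
                    *x = point.x;
                    *y = point.y;
            } while (ck_sequence_read_retry(&seq, version));
    }

    /* Writers must already be serialized against each other (e.g. a lock). */
    void write_point(unsigned int x, unsigned int y)
    {
            ck_sequence_write_begin(&seq);   /* odd version: readers retry */
            point.x = x;
            point.y = y;
            ck_sequence_write_end(&seq);     /* even again: snapshot valid */
    }
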
/f-stack/dpdk/drivers/net/ice/base/
ice_controlq.c
67 if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) in ice_check_sq_alive()
68 return (rd32(hw, cq->sq.len) & (cq->sq.len_mask | in ice_check_sq_alive()
207 cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head; in ice_alloc_sq_bufs()
786 struct ice_ctl_q_ring *sq = &cq->sq; in ice_clean_sq() local
870 return rd32(hw, cq->sq.head) == cq->sq.next_to_use; in ice_sq_done()
937 details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use); in ice_sq_send_cmd_nolock()
955 desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use); in ice_sq_send_cmd_nolock()
963 dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use]; in ice_sq_send_cmd_nolock()
983 if (cq->sq.next_to_use == cq->sq.count) in ice_sq_send_cmd_nolock()
985 wr32(hw, cq->sq.tail, cq->sq.next_to_use); in ice_sq_send_cmd_nolock()
[all …]
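
Aside: the ice_controlq.c matches implement the usual tail-doorbell/head-poll handshake — software writes a descriptor at next_to_use, bumps the tail register (wr32), then ice_sq_done()-style code polls the head register until hardware has consumed the entry. A self-contained model of that handshake (all names below are hypothetical stand-ins, not the real ice structs):

    #include <stdbool.h>
    #include <stdint.h>

    struct ctlq_model {
            uint16_t next_to_use;          /* next free descriptor slot */
            uint16_t count;                /* ring size */
            volatile uint16_t hw_head;     /* models rd32(hw, cq->sq.head) */
            volatile uint16_t hw_tail;     /* models wr32(hw, cq->sq.tail, v) */
    };

    static void ctlq_post(struct ctlq_model *q)
    {
            /* ...descriptor written at slot next_to_use here... */
            q->next_to_use++;
            if (q->next_to_use == q->count)
                    q->next_to_use = 0;    /* wrap, as in ice_sq_send_cmd_nolock */
            q->hw_tail = q->next_to_use;   /* doorbell: publish new work */
    }

    static bool ctlq_sq_done(const struct ctlq_model *q)
    {
            /* hardware advances head past each descriptor it consumes */
            return q->hw_head == q->next_to_use;
    }
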
/f-stack/dpdk/drivers/net/mvneta/
mvneta_rxtx.c
149 sq->ent[sq->head].cookie = (uint64_t)buf; in mvneta_fill_shadowq()
150 sq->ent[sq->head].addr = buf ? in mvneta_fill_shadowq()
153 sq->head = (sq->head + 1) & MRVL_NETA_TX_SHADOWQ_MASK; in mvneta_fill_shadowq()
154 sq->size++; in mvneta_fill_shadowq()
385 if (sq->size) in mvneta_tx_pkt_burst()
427 sq->head = (MRVL_NETA_TX_SHADOWQ_SIZE + sq->head - 1) & in mvneta_tx_pkt_burst()
429 addr = cookie_addr_high | sq->ent[sq->head].cookie; in mvneta_tx_pkt_burst()
558 addr = sq->ent[sq->head].cookie; in mvneta_tx_sg_pkt_burst()
920 while (sq->tail != sq->head) { in mvneta_tx_queue_flush()
922 sq->ent[sq->tail].cookie; in mvneta_tx_queue_flush()
[all …]
/f-stack/dpdk/drivers/net/thunderx/
nicvf_rxtx.c
90 sq->head = curr_head; in nicvf_single_pool_free_xmited_buffers()
91 sq->xmit_bufs -= j; in nicvf_single_pool_free_xmited_buffers()
114 sq->xmit_bufs -= n; in nicvf_multi_pool_free_xmited_buffers()
121 return ((sq->head - sq->tail - 1) & sq->qlen_mask); in nicvf_free_tx_desc()
134 sq->xmit_bufs > sq->tx_free_thresh) { in nicvf_free_xmitted_buffers()
138 sq->pool_free(sq); in nicvf_free_xmitted_buffers()
157 tail = sq->tail; in nicvf_xmit_pkts()
174 sq->tail = tail; in nicvf_xmit_pkts()
175 sq->xmit_bufs += i; in nicvf_xmit_pkts()
197 tail = sq->tail; in nicvf_xmit_pkts_multiseg()
[all …]
nicvf_rxtx.h
111 void nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq);
112 void nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq);
/f-stack/dpdk/drivers/net/mlx4/
mlx4_rxtx.c
263 uint32_t stamp = sq->stamp; in mlx4_txq_stamp_freed_wqe()
277 sq->stamp = stamp; in mlx4_txq_stamp_freed_wqe()
305 struct mlx4_sq *sq) in mlx4_txq_complete() argument
348 sq->remain_size += mlx4_txq_stamp_freed_wqe(sq, first_txbb, in mlx4_txq_complete()
407 struct mlx4_sq *sq = &txq->msq; in mlx4_tx_burst_tso_get_params() local
637 if (thdr.to >= sq->eob) in mlx4_tx_burst_fill_tso_hdr()
638 thdr.vto = sq->buf; in mlx4_tx_burst_fill_tso_hdr()
654 if (thdr.to >= sq->eob) in mlx4_tx_burst_fill_tso_hdr()
655 thdr.vto = sq->buf; in mlx4_tx_burst_fill_tso_hdr()
778 sq->buf; in mlx4_tx_burst_segs()
[all …]
mlx4_txq.c
209 struct mlx4_sq *sq = &txq->msq; in mlx4_txq_fill_dv_obj_info() local
215 sq->size = (uint32_t)dqp->rq.offset - (uint32_t)dqp->sq.offset; in mlx4_txq_fill_dv_obj_info()
216 sq->buf = (uint8_t *)dqp->buf.buf + dqp->sq.offset; in mlx4_txq_fill_dv_obj_info()
217 sq->eob = sq->buf + sq->size; in mlx4_txq_fill_dv_obj_info()
218 uint32_t headroom_size = 2048 + (1 << dqp->sq.wqe_shift); in mlx4_txq_fill_dv_obj_info()
220 sq->remain_size = sq->size - headroom_size; in mlx4_txq_fill_dv_obj_info()
222 sq->stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL | in mlx4_txq_fill_dv_obj_info()
225 sq->uar_mmap_offset = dqp->uar_mmap_offset; in mlx4_txq_fill_dv_obj_info()
227 sq->uar_mmap_offset = -1; /* Make mmap() fail. */ in mlx4_txq_fill_dv_obj_info()
229 sq->db = dqp->sdb; in mlx4_txq_fill_dv_obj_info()
[all …]
/f-stack/freebsd/net80211/
ieee80211_superg.c
645 if (sq->depth == 0) { in ieee80211_ff_age()
652 head = sq->head; in ieee80211_ff_age()
662 sq->depth--; in ieee80211_ff_age()
665 sq->tail = NULL; in ieee80211_ff_age()
682 if (sq->tail != NULL) { in stageq_add()
686 sq->head = m; in stageq_add()
694 sq->tail = m; in stageq_add()
695 sq->depth++; in stageq_add()
712 if (sq->tail == m) in stageq_remove()
713 sq->tail = mprev; in stageq_remove()
[all …]
/f-stack/dpdk/drivers/net/octeontx2/
otx2_ethdev.c
931 sq->qidx = txq->sq; in nix_sq_init()
936 sq->sq.smq = smq; in nix_sq_init()
937 sq->sq.smq_rr_quantum = rr_quantum; in nix_sq_init()
939 sq->sq.sqe_stype = NIX_STYPE_STF; in nix_sq_init()
940 sq->sq.ena = 1; in nix_sq_init()
942 sq->sq.sqe_stype = NIX_STYPE_STP; in nix_sq_init()
943 sq->sq.sqb_aura = in nix_sq_init()
951 sq->sq.qint_idx = txq->sq % dev->qints; in nix_sq_init()
959 sq->qidx = txq->sq; in nix_sq_init()
1195 send_hdr->w0.sq = txq->sq; in otx2_nix_form_default_desc()
[all …]
otx2_ethdev_irq.c
159 nix_lf_sq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t sq) in nix_lf_sq_irq_get_and_clear() argument
161 return nix_lf_q_irq_get_and_clear(dev, sq, NIX_LF_SQ_OP_INT, ~0x1ff00); in nix_lf_sq_irq_get_and_clear()
194 int q, cq, rq, sq; in nix_lf_q_irq() local
233 sq = q % dev->qints; in nix_lf_q_irq()
234 irq = nix_lf_sq_irq_get_and_clear(dev, sq); in nix_lf_q_irq()
237 otx2_err("SQ=%d NIX_SQINT_LMT_ERR", sq); in nix_lf_q_irq()
241 otx2_err("SQ=%d NIX_SQINT_MNQ_ERR", sq); in nix_lf_q_irq()
245 otx2_err("SQ=%d NIX_SQINT_SEND_ERR", sq); in nix_lf_q_irq()
249 otx2_err("SQ=%d NIX_SQINT_SQB_ALLOC_FAIL", sq); in nix_lf_q_irq()
otx2_stats.c
359 otx2_mbox_memcpy(&aq->sq, &rsp->sq, sizeof(rsp->sq)); in nix_queue_stats_reset()
361 aq->sq.octs = 0; in nix_queue_stats_reset()
362 aq->sq.pkts = 0; in nix_queue_stats_reset()
363 aq->sq.drop_octs = 0; in nix_queue_stats_reset()
364 aq->sq.drop_pkts = 0; in nix_queue_stats_reset()
otx2_tm.c
993 uint16_t sq = txq->sq; in nix_txq_flush_sq_spin() local
1050 uint16_t sq; in otx2_nix_sq_flush_pre() local
1056 sq = txq->sq; in otx2_nix_sq_flush_pre()
1097 sq = sibling->id; in otx2_nix_sq_flush_pre()
1142 uint16_t sq, s_sq; in otx2_nix_sq_flush_post() local
1147 sq = txq->sq; in otx2_nix_sq_flush_post()
1162 if (sibling->id == sq) in otx2_nix_sq_flush_post()
1219 req->qidx = sq; in nix_sq_sched_data()
1225 req->sq.smq = smq; in nix_sq_sched_data()
1510 uint16_t sq; in nix_tm_alloc_resources() local
[all …]
/f-stack/dpdk/drivers/net/mvpp2/
mrvl_ethdev.c
179 sq->ent[sq->head].buff.cookie = (uint64_t)buf; in mrvl_fill_shadowq()
180 sq->ent[sq->head].buff.addr = buf ? in mrvl_fill_shadowq()
183 sq->ent[sq->head].bpool = in mrvl_fill_shadowq()
188 sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK; in mrvl_fill_shadowq()
189 sq->size++; in mrvl_fill_shadowq()
788 while (sq->tail != sq->head) { in mrvl_flush_tx_shadow_queues()
790 sq->ent[sq->tail].buff.cookie; in mrvl_flush_tx_shadow_queues()
793 sq->tail = (sq->tail + 1) & in mrvl_flush_tx_shadow_queues()
796 memset(sq, 0, sizeof(*sq)); in mrvl_flush_tx_shadow_queues()
2450 entry = &sq->ent[sq->tail + num]; in mrvl_free_sent_buffers()
[all …]
/f-stack/dpdk/drivers/net/hinic/
hinic_pmd_tx.c
43 #define MASKED_SQ_IDX(sq, idx) ((idx) & (sq)->wq->mask) argument
674 struct hinic_sq *sq = txq->sq; in hinic_get_sq_wqe() local
693 sq->owner = !sq->owner; in hinic_get_sq_wqe()
1108 prod_idx = MASKED_SQ_IDX(sq, sq->wq->prod_idx); in hinic_sq_write_db()
1305 struct hinic_sq *sq = &qp->sq; in hinic_create_sq() local
1309 sq->sq_depth = sq_depth; in hinic_create_sq()
1332 sq->q_id = q_id; in hinic_create_sq()
1334 sq->owner = 1; in hinic_create_sq()
1336 sq->db_addr = db_addr; in hinic_create_sq()
1354 if (qp->sq.wq == NULL) in hinic_destroy_sq()
[all …]
/f-stack/dpdk/drivers/net/hinic/base/
hinic_pmd_nicio.c
149 static void hinic_sq_prepare_ctxt(struct hinic_sq *sq, u16 global_qpn, in hinic_sq_prepare_ctxt() argument
152 struct hinic_wq *wq = sq->wq; in hinic_sq_prepare_ctxt()
305 hinic_sq_prepare_ctxt(&qp->sq, global_qpn, &sq_ctxt[i]); in init_sq_ctxts()
702 struct hinic_sq *sq = &nic_io->qps[q_id].sq; in hinic_return_sq_wqe() local
704 if (owner != sq->owner) in hinic_return_sq_wqe()
705 sq->owner = owner; in hinic_return_sq_wqe()
707 sq->wq->delta += num_wqebbs; in hinic_return_sq_wqe()
708 sq->wq->prod_idx -= num_wqebbs; in hinic_return_sq_wqe()
715 struct hinic_sq *sq = &nic_io->qps[q_id].sq; in hinic_update_sq_local_ci() local
717 hinic_put_wqe(sq->wq, wqebb_cnt); in hinic_update_sq_local_ci()
/f-stack/freebsd/sys/
sleepqueue.h
96 void sleepq_free(struct sleepqueue *sq);
101 int sleepq_remove_matching(struct sleepqueue *sq, int queue,
/f-stack/freebsd/contrib/ena-com/
ena_com.c
122 struct ena_com_admin_sq *sq = &admin_queue->sq; in ena_com_admin_init_sq() local
128 if (!sq->entries) { in ena_com_admin_init_sq()
133 sq->head = 0; in ena_com_admin_init_sq()
134 sq->tail = 0; in ena_com_admin_init_sq()
135 sq->phase = 1; in ena_com_admin_init_sq()
137 sq->db_addr = NULL; in ena_com_admin_init_sq()
295 admin_queue->sq.phase = !admin_queue->sq.phase; in __ena_com_submit_admin_cmd()
1730 struct ena_com_admin_sq *sq = &admin_queue->sq; in ena_com_admin_destroy() local
1737 if (sq->entries) in ena_com_admin_destroy()
1739 sq->dma_addr, sq->mem_handle); in ena_com_admin_destroy()
[all …]
/f-stack/dpdk/drivers/net/ena/base/
ena_com.c
85 struct ena_com_admin_sq *sq = &queue->sq; in ena_com_admin_init_sq() local
88 ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr, in ena_com_admin_init_sq()
91 if (!sq->entries) { in ena_com_admin_init_sq()
96 sq->head = 0; in ena_com_admin_init_sq()
97 sq->tail = 0; in ena_com_admin_init_sq()
98 sq->phase = 1; in ena_com_admin_init_sq()
100 sq->db_addr = NULL; in ena_com_admin_init_sq()
254 admin_queue->sq.phase = !admin_queue->sq.phase; in __ena_com_submit_admin_cmd()
1662 struct ena_com_admin_sq *sq = &admin_queue->sq; in ena_com_admin_destroy() local
1675 if (sq->entries) in ena_com_admin_destroy()
[all …]
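
Aside: the two ena_com.c hits are the FreeBSD contrib and DPDK base copies of Amazon's shared ena-com HAL. Both initialize sq->phase to 1 and flip it when the tail wraps — the classic phase-bit scheme for telling fresh descriptors from stale ones without a shared index. A hedged sketch of the scheme (types and names are illustrative, not the ENA structures):

    #include <stdbool.h>
    #include <stdint.h>

    #define SQ_DEPTH 64u

    struct admin_sq_model {
            uint16_t tail;                  /* free-running producer index */
            uint8_t phase;                  /* phase of the current lap, starts at 1 */
            uint8_t entry_phase[SQ_DEPTH];  /* phase bit stamped into each entry */
    };

    static void sq_submit(struct admin_sq_model *sq)
    {
            /* ...fill the descriptor at tail % SQ_DEPTH here... */
            sq->entry_phase[sq->tail % SQ_DEPTH] = sq->phase;
            sq->tail++;
            if ((sq->tail % SQ_DEPTH) == 0)
                    sq->phase = !sq->phase; /* wrapped: flip for the next lap */
    }

    /* A consumer accepts an entry only if its phase matches what it expects,
     * so leftovers from the previous lap are never mistaken for new work. */
    static bool entry_is_new(const struct admin_sq_model *sq, uint16_t idx,
                             uint8_t expected_phase)
    {
            return sq->entry_phase[idx % SQ_DEPTH] == expected_phase;
    }
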
