/dpdk/drivers/net/mlx5/

mlx5_flow_aso.c
    79   memset(sq, 0, sizeof(*sq));  in mlx5_aso_destroy_sq()
    240  sq->sqn = sq->sq_obj.sq->id;  in mlx5_aso_sq_create()
    323  sq = &sh->mtrmng->pools_mng.sq;  in mlx5_aso_queue_uninit()
    359  max = RTE_MIN(size - (uint16_t)(sq->head - sq->tail), n - sq->next);  in mlx5_aso_sq_enqueue_burst()
    364  wqe = &sq->sq_obj.aso_wqes[sq->head & mask];  in mlx5_aso_sq_enqueue_burst()
    370  sq->elts[sq->head & mask].pool = pool;  in mlx5_aso_sq_enqueue_burst()
    663  wqe = &sq->sq_obj.aso_wqes[sq->head & mask];  in mlx5_aso_mtr_sq_enqueue_single()
    667  sq->elts[sq->head & mask].mtr = aso_mtr;  in mlx5_aso_mtr_sq_enqueue_single()
    762  max = (uint16_t)(sq->head - sq->tail);  in mlx5_aso_mtr_completion_handle()
    904  sq->elts[sq->head & mask].ct = ct;  in mlx5_aso_ct_sq_enqueue_single()
    [all …]

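The mlx5 ASO send queue hits above follow the standard single-producer ring arithmetic: head and tail are free-running 16-bit counters, the free space is size - (uint16_t)(head - tail), and a slot is addressed as head & mask with mask = size - 1. A minimal sketch of that bookkeeping follows; the struct and helper names are invented for illustration and are not mlx5's actual mlx5_aso_sq layout.

    #include <stdint.h>

    struct ring_sq {
        uint16_t head;  /* free-running producer counter */
        uint16_t tail;  /* free-running consumer counter */
        uint16_t size;  /* number of slots, power of two */
        void **elts;    /* per-slot bookkeeping, as in sq->elts[] */
    };

    /* Slots still free; the subtraction wraps naturally in 16 bits. */
    static inline uint16_t sq_free_slots(const struct ring_sq *sq)
    {
        return sq->size - (uint16_t)(sq->head - sq->tail);
    }

    /* Enqueue up to n items, returning how many actually fit. */
    static inline uint16_t sq_enqueue_burst(struct ring_sq *sq, void **items, uint16_t n)
    {
        const uint16_t mask = sq->size - 1;
        uint16_t i;

        if (n > sq_free_slots(sq))
            n = sq_free_slots(sq);
        for (i = 0; i < n; i++)
            sq->elts[sq->head++ & mask] = items[i];
        return i;
    }

mlx5_aso_sq_enqueue_burst() additionally caps the burst by the objects remaining in the pool (n - sq->next) and fills one ASO WQE per slot, but the occupancy arithmetic is the same.
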
mlx5_trigger.c
    272  struct mlx5_devx_obj *sq;  in mlx5_hairpin_auto_bind() local
    314  sq = txq_ctrl->obj->sq;  in mlx5_hairpin_auto_bind()
    346  ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);  in mlx5_hairpin_auto_bind()
    351  rq_attr.hairpin_peer_sq = sq->id;  in mlx5_hairpin_auto_bind()
    422  if (txq_ctrl->obj == NULL || txq_ctrl->obj->sq == NULL) {  in mlx5_hairpin_queue_peer_update()
    429  peer_info->qp_id = txq_ctrl->obj->sq->id;  in mlx5_hairpin_queue_peer_update()
    520  if (txq_ctrl->obj == NULL || txq_ctrl->obj->sq == NULL) {  in mlx5_hairpin_queue_peer_bind()
    557  ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq, &sq_attr);  in mlx5_hairpin_queue_peer_bind()
    661  if (!txq_ctrl->obj || !txq_ctrl->obj->sq) {  in mlx5_hairpin_queue_peer_unbind()
    670  ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq, &sq_attr);  in mlx5_hairpin_queue_peer_unbind()
    [all …]

/dpdk/drivers/common/cnxk/

roc_nix_queue.c
    638  sq->nb_desc = PLT_MAX(256U, sq->nb_desc);  in sqb_pool_populate()
    720  aq->sq.max_sqe_size = sq->max_sqe_sz;  in sq_cn9k_init()
    722  aq->sq.max_sqe_size = sq->max_sqe_sz;  in sq_cn9k_init()
    732  aq->sq.sso_ena = !!sq->sso_ena;  in sq_cn9k_init()
    733  aq->sq.cq_ena = !!sq->cq_ena;  in sq_cn9k_init()
    734  aq->sq.cq = sq->cqid;  in sq_cn9k_init()
    841  aq->sq.max_sqe_size = sq->max_sqe_sz;  in sq_init()
    843  aq->sq.max_sqe_size = sq->max_sqe_sz;  in sq_init()
    849  aq->sq.sso_ena = !!sq->sso_ena;  in sq_init()
    850  aq->sq.cq_ena = !!sq->cq_ena;  in sq_init()
    [all …]

cnxk_telemetry_nix.c
    135  node = nix_tel_node_get(sq->roc_nix);  in nix_tel_node_add_sq()
    139  node->sqs[sq->qid] = sq;  in nix_tel_node_add_sq()
    252  CNXK_TEL_DICT_INT(d, sq, qid);  in cnxk_tel_nix_sq()
    254  CNXK_TEL_DICT_INT(d, sq, nb_desc);  in cnxk_tel_nix_sq()
    256  CNXK_TEL_DICT_PTR(d, sq, roc_nix);  in cnxk_tel_nix_sq()
    260  CNXK_TEL_DICT_PTR(d, sq, io_addr);  in cnxk_tel_nix_sq()
    262  CNXK_TEL_DICT_PTR(d, sq, sqe_mem);  in cnxk_tel_nix_sq()
    263  CNXK_TEL_DICT_PTR(d, sq, fc);  in cnxk_tel_nix_sq()
    813  int sq;  in cnxk_nix_tel_handle_info_x() local
    818  sq = strtol(tok, NULL, 10);  in cnxk_nix_tel_handle_info_x()
    [all …]

roc_nix_tm_ops.c
    70   *(volatile uint64_t *)sq->fc = sq->nb_sqb_bufs;  in roc_nix_tm_sq_aura_fc()
    427  struct roc_nix_sq *sq;  in roc_nix_tm_hierarchy_disable() local
    472  sq = nix->sqs[i];  in roc_nix_tm_hierarchy_disable()
    473  if (!sq)  in roc_nix_tm_hierarchy_disable()
    485  sq = nix->sqs[i];  in roc_nix_tm_hierarchy_disable()
    486  if (!sq)  in roc_nix_tm_hierarchy_disable()
    524  sq = nix->sqs[i];  in roc_nix_tm_hierarchy_disable()
    525  if (!sq)  in roc_nix_tm_hierarchy_disable()
    537  (*(uint64_t *)sq->fc != sq->nb_sqb_bufs))  in roc_nix_tm_hierarchy_disable()
    558  struct roc_nix_sq *sq;  in roc_nix_tm_hierarchy_enable() local
    [all …]

roc_nix_tm.c
    529  uint16_t qid = sq->qid;  in roc_nix_tm_sq_flush_spin()
    534  timeout = (sq->nb_desc * roc_nix_max_pkt_len(sq->roc_nix) * 8 * 1E5);  in roc_nix_tm_sq_flush_spin()
    568  (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {  in roc_nix_tm_sq_flush_spin()
    580  roc_nix_tm_dump(sq->roc_nix);  in roc_nix_tm_sq_flush_spin()
    606  qid = sq->qid;  in nix_tm_sq_flush_pre()
    656  sq = nix->sqs[qid];  in nix_tm_sq_flush_pre()
    657  if (!sq)  in nix_tm_sq_flush_pre()
    667  rc = roc_nix_tm_sq_flush_spin(sq);  in nix_tm_sq_flush_pre()
    718  qid = sq->qid;  in nix_tm_sq_flush_post()
    811  aq->sq.smq = smq;  in nix_tm_sq_sched_conf()
    [all …]

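roc_nix_tm_sq_flush_spin() waits for the SQ to drain by polling the flow-control word at sq->fc until it climbs back to sq->nb_sqb_bufs, with a timeout scaled from the queue depth and maximum packet length; on timeout it dumps the TM state via roc_nix_tm_dump(). The drain-poll idiom, reduced to a generic sketch (not the ROC data types, and with a plain microsecond sleep standing in for the driver's delay helper):

    #include <stdbool.h>
    #include <stdint.h>
    #include <unistd.h>

    struct drain_sq {
        volatile uint64_t *fc;   /* flow-control word the hardware updates */
        uint64_t nb_sqb_bufs;    /* value *fc reaches once the SQ is empty */
    };

    /* Poll until every SQB buffer is accounted for, or the budget expires. */
    static bool sq_wait_drained(const struct drain_sq *sq, uint64_t timeout_us)
    {
        while (timeout_us--) {
            if (*sq->fc == sq->nb_sqb_bufs)
                return true;
            usleep(1);
        }
        return false;
    }
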
roc_nix_irq.c
    194  nix_lf_sq_irq_get_and_clear(struct nix *nix, uint16_t sq)  in nix_lf_sq_irq_get_and_clear() argument
    196  return nix_lf_q_irq_get_and_clear(nix, sq, NIX_LF_SQ_OP_INT, ~0x1ff00);  in nix_lf_sq_irq_get_and_clear()
    254  int q, cq, rq, sq;  in nix_lf_q_irq() local
    294  sq = q % nix->qints;  in nix_lf_q_irq()
    295  irq = nix_lf_sq_irq_get_and_clear(nix, sq);  in nix_lf_q_irq()
    300  plt_err("SQ=%d NIX_SQINT_LMT_ERR, errcode %x", sq, rc);  in nix_lf_q_irq()
    305  plt_err("SQ=%d NIX_SQINT_MNQ_ERR, errcode %x", sq, rc);  in nix_lf_q_irq()
    310  plt_err("SQ=%d NIX_SQINT_SEND_ERR, errcode %x", sq, rc);  in nix_lf_q_irq()
    315  plt_err("SQ=%d NIX_SQINT_SQB_ALLOC_FAIL", sq);  in nix_lf_q_irq()

roc_nix_stats.c
    189  aq->sq.octs = 0;  in nix_stat_tx_queue_reset()
    190  aq->sq.pkts = 0;  in nix_stat_tx_queue_reset()
    191  aq->sq.drop_octs = 0;  in nix_stat_tx_queue_reset()
    192  aq->sq.drop_pkts = 0;  in nix_stat_tx_queue_reset()
    208  aq->sq.octs = 0;  in nix_stat_tx_queue_reset()
    209  aq->sq.pkts = 0;  in nix_stat_tx_queue_reset()
    210  aq->sq.drop_octs = 0;  in nix_stat_tx_queue_reset()
    211  aq->sq.drop_pkts = 0;  in nix_stat_tx_queue_reset()

roc_nix_debug.c
    341  *ctx_p = &rsp->sq;  in nix_q_ctx_get()
    363  *ctx_p = &rsp->sq;  in nix_q_ctx_get()
    668  int sq = nix->nb_tx_queues;  in roc_nix_queues_ctx_dump() local
    702  for (q = 0; q < sq; q++) {  in roc_nix_queues_ctx_dump()
    847  roc_nix_sq_dump(struct roc_nix_sq *sq)  in roc_nix_sq_dump() argument
    849  nix_dump("nix_sq@%p", sq);  in roc_nix_sq_dump()
    850  nix_dump(" qid = %d", sq->qid);  in roc_nix_sq_dump()
    852  nix_dump(" nb_desc = %d", sq->nb_desc);  in roc_nix_sq_dump()
    854  nix_dump(" roc_nix= %p", sq->roc_nix);  in roc_nix_sq_dump()
    860  nix_dump(" sqe_mem = %p", sq->sqe_mem);  in roc_nix_sq_dump()
    [all …]

roc_nix_priv.h
    371  int nix_tm_leaf_data_get(struct nix *nix, uint16_t sq, uint32_t *rr_quantum,
    373  int nix_tm_sq_flush_pre(struct roc_nix_sq *sq);
    374  int nix_tm_sq_flush_post(struct roc_nix_sq *sq);
    402  int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
    471  int nix_tel_node_add_sq(struct roc_nix_sq *sq);

/dpdk/drivers/net/ice/base/

ice_controlq.c
    82    if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)  in ice_check_sq_alive()
    83    return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |  in ice_check_sq_alive()
    222   cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;  in ice_alloc_sq_bufs()
    836   struct ice_ctl_q_ring *sq = &cq->sq;  in ice_clean_sq() local
    920   return rd32(hw, cq->sq.head) == cq->sq.next_to_use;  in ice_sq_done()
    987   details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);  in ice_sq_send_cmd_nolock()
    1005  desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);  in ice_sq_send_cmd_nolock()
    1013  dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];  in ice_sq_send_cmd_nolock()
    1033  if (cq->sq.next_to_use == cq->sq.count)  in ice_sq_send_cmd_nolock()
    1035  wr32(hw, cq->sq.tail, cq->sq.next_to_use);  in ice_sq_send_cmd_nolock()
    [all …]

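The ice control-queue hits show the usual admin-queue handshake: the command descriptor is written at next_to_use, the index is advanced with a wrap at count, the new value is written to the tail register as a doorbell, and the command is considered consumed once the head register catches up (ice_sq_done()). A simplified sketch of that sequence; the register helpers below are stand-ins for rd32()/wr32() over an invented register file, not the ice register map:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t fake_regs[64];                      /* stand-in MMIO space */
    static uint32_t reg_read(uint32_t reg) { return fake_regs[reg]; }
    static void reg_write(uint32_t reg, uint32_t val) { fake_regs[reg] = val; }

    struct ctlq_ring {
        uint16_t next_to_use;  /* software producer index */
        uint16_t count;        /* descriptors in the ring */
        uint32_t head_reg;     /* hardware consumer index register (assumed) */
        uint32_t tail_reg;     /* doorbell register (assumed) */
    };

    static void ctlq_post(struct ctlq_ring *sq)
    {
        /* ...descriptor at index sq->next_to_use is filled in here... */
        if (++sq->next_to_use == sq->count)
            sq->next_to_use = 0;                  /* wrap around the ring */
        reg_write(sq->tail_reg, sq->next_to_use); /* notify the firmware */
    }

    static bool ctlq_send_done(const struct ctlq_ring *sq)
    {
        /* Firmware advances head as it consumes descriptors. */
        return reg_read(sq->head_reg) == sq->next_to_use;
    }
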
/dpdk/drivers/net/mvneta/

mvneta_rxtx.c
    149  sq->ent[sq->head].cookie = (uint64_t)buf;  in mvneta_fill_shadowq()
    150  sq->ent[sq->head].addr = buf ?  in mvneta_fill_shadowq()
    153  sq->head = (sq->head + 1) & MRVL_NETA_TX_SHADOWQ_MASK;  in mvneta_fill_shadowq()
    154  sq->size++;  in mvneta_fill_shadowq()
    385  if (sq->size)  in mvneta_tx_pkt_burst()
    427  sq->head = (MRVL_NETA_TX_SHADOWQ_SIZE + sq->head - 1) &  in mvneta_tx_pkt_burst()
    429  addr = cookie_addr_high | sq->ent[sq->head].cookie;  in mvneta_tx_pkt_burst()
    558  addr = sq->ent[sq->head].cookie;  in mvneta_tx_sg_pkt_burst()
    931  while (sq->tail != sq->head) {  in mvneta_tx_queue_flush()
    933  sq->ent[sq->tail].cookie;  in mvneta_tx_queue_flush()
    [all …]

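mvneta (and mvpp2 further below) keep a software "shadow queue" beside the hardware TX queue: each transmitted buffer's cookie and DMA address are recorded at head, head advances under a power-of-two mask, and on completion or flush everything between tail and head is walked to release the buffers. A self-contained sketch of the pattern, with made-up sizes and an injected free callback rather than the drivers' actual types:

    #include <stdint.h>

    #define SHADOWQ_SIZE 1024u                 /* must be a power of two */
    #define SHADOWQ_MASK (SHADOWQ_SIZE - 1)

    struct shadow_ent {
        uint64_t cookie;   /* opaque handle, e.g. the mbuf pointer */
        uint64_t addr;     /* DMA address handed to hardware */
    };

    struct shadowq {
        uint16_t head, tail;
        uint16_t size;                      /* entries currently in flight */
        struct shadow_ent ent[SHADOWQ_SIZE];
    };

    static void shadowq_fill(struct shadowq *sq, void *buf, uint64_t dma_addr)
    {
        sq->ent[sq->head].cookie = (uint64_t)(uintptr_t)buf;
        sq->ent[sq->head].addr = dma_addr;
        sq->head = (sq->head + 1) & SHADOWQ_MASK;
        sq->size++;
    }

    static void shadowq_flush(struct shadowq *sq, void (*free_buf)(void *))
    {
        while (sq->tail != sq->head) {
            free_buf((void *)(uintptr_t)sq->ent[sq->tail].cookie);
            sq->tail = (sq->tail + 1) & SHADOWQ_MASK;
            sq->size--;
        }
    }
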
/dpdk/drivers/net/thunderx/

nicvf_rxtx.c
    90   sq->head = curr_head;  in nicvf_single_pool_free_xmited_buffers()
    91   sq->xmit_bufs -= j;  in nicvf_single_pool_free_xmited_buffers()
    114  sq->xmit_bufs -= n;  in nicvf_multi_pool_free_xmited_buffers()
    121  return ((sq->head - sq->tail - 1) & sq->qlen_mask);  in nicvf_free_tx_desc()
    134  sq->xmit_bufs > sq->tx_free_thresh) {  in nicvf_free_xmitted_buffers()
    138  sq->pool_free(sq);  in nicvf_free_xmitted_buffers()
    157  tail = sq->tail;  in nicvf_xmit_pkts()
    174  sq->tail = tail;  in nicvf_xmit_pkts()
    175  sq->xmit_bufs += i;  in nicvf_xmit_pkts()
    197  tail = sq->tail;  in nicvf_xmit_pkts_multiseg()
    [all …]

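nicvf_free_tx_desc() (line 121 above) computes the free descriptor count as (head - tail - 1) & qlen_mask; judging from the other hits, head tracks completions and tail tracks submissions here. The extra -1 keeps one slot permanently unused so that a full ring and an empty ring do not both look like head == tail. Distilled:

    #include <stdint.h>

    /* One slot is held in reserve: an empty ring reports qlen_mask free
     * entries and a full ring reports 0, so the two states never collide. */
    static inline uint32_t tx_free_desc(uint32_t head, uint32_t tail, uint32_t qlen_mask)
    {
        return (head - tail - 1) & qlen_mask;
    }

For example, with qlen_mask == 7, head == tail yields 7 free entries, while a producer that has queued 7 entries ahead of head yields 0.
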
/dpdk/drivers/net/mlx4/

mlx4_rxtx.c
    263  uint32_t stamp = sq->stamp;  in mlx4_txq_stamp_freed_wqe()
    277  sq->stamp = stamp;  in mlx4_txq_stamp_freed_wqe()
    305  struct mlx4_sq *sq)  in mlx4_txq_complete() argument
    348  sq->remain_size += mlx4_txq_stamp_freed_wqe(sq, first_txbb,  in mlx4_txq_complete()
    407  struct mlx4_sq *sq = &txq->msq;  in mlx4_tx_burst_tso_get_params() local
    637  if (thdr.to >= sq->eob)  in mlx4_tx_burst_fill_tso_hdr()
    638  thdr.vto = sq->buf;  in mlx4_tx_burst_fill_tso_hdr()
    654  if (thdr.to >= sq->eob)  in mlx4_tx_burst_fill_tso_hdr()
    655  thdr.vto = sq->buf;  in mlx4_tx_burst_fill_tso_hdr()
    778  sq->buf;  in mlx4_tx_burst_segs()
    [all …]

mlx4_txq.c
    236  struct mlx4_sq *sq = &txq->msq;  in mlx4_txq_fill_dv_obj_info() local
    242  sq->size = (uint32_t)dqp->rq.offset - (uint32_t)dqp->sq.offset;  in mlx4_txq_fill_dv_obj_info()
    243  sq->buf = (uint8_t *)dqp->buf.buf + dqp->sq.offset;  in mlx4_txq_fill_dv_obj_info()
    244  sq->eob = sq->buf + sq->size;  in mlx4_txq_fill_dv_obj_info()
    245  uint32_t headroom_size = 2048 + (1 << dqp->sq.wqe_shift);  in mlx4_txq_fill_dv_obj_info()
    247  sq->remain_size = sq->size - headroom_size;  in mlx4_txq_fill_dv_obj_info()
    249  sq->stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL |  in mlx4_txq_fill_dv_obj_info()
    252  sq->uar_mmap_offset = dqp->uar_mmap_offset;  in mlx4_txq_fill_dv_obj_info()
    254  sq->uar_mmap_offset = -1; /* Make mmap() fail. */  in mlx4_txq_fill_dv_obj_info()
    256  sq->db = dqp->sdb;  in mlx4_txq_fill_dv_obj_info()
    [all …]

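mlx4_txq_fill_dv_obj_info() derives the send-queue geometry from the verbs QP layout: the SQ spans the buffer from dqp->sq.offset up to dqp->rq.offset, eob marks the end of that region for wrap-around, and a headroom of 2048 bytes plus one WQE is withheld from remain_size. A worked example with assumed numbers (16 KiB SQ region, 64-byte basic WQE):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Assumed layout values, for illustration only. */
        uint32_t sq_offset = 0;      /* dqp->sq.offset */
        uint32_t rq_offset = 16384;  /* dqp->rq.offset, RQ follows the SQ */
        uint32_t wqe_shift = 6;      /* 1 << 6 = 64-byte basic WQE */

        uint32_t size = rq_offset - sq_offset;        /* 16384-byte SQ */
        uint32_t headroom = 2048 + (1u << wqe_shift); /* 2112 bytes reserved */
        uint32_t remain = size - headroom;            /* 14272 bytes usable */

        printf("size=%u headroom=%u remain_size=%u\n", size, headroom, remain);
        return 0;
    }
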
/dpdk/drivers/net/mvpp2/

mrvl_ethdev.c
    302   sq->ent[sq->head].buff.cookie = (uint64_t)buf;  in mrvl_fill_shadowq()
    303   sq->ent[sq->head].buff.addr = buf ?  in mrvl_fill_shadowq()
    306   sq->ent[sq->head].bpool =  in mrvl_fill_shadowq()
    311   sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;  in mrvl_fill_shadowq()
    1023  while (sq->tail != sq->head) {  in mrvl_flush_tx_shadow_queues()
    1025  sq->ent[sq->tail].buff.cookie;  in mrvl_flush_tx_shadow_queues()
    1028  sq->tail = (sq->tail + 1) &  in mrvl_flush_tx_shadow_queues()
    1031  memset(sq, 0, sizeof(*sq));  in mrvl_flush_tx_shadow_queues()
    2770  entry = &sq->ent[sq->tail + num];  in mrvl_free_sent_buffers()
    2891  addr = sq->ent[sq->head].buff.cookie;  in mrvl_tx_pkt_burst()
    [all …]

/dpdk/drivers/common/mlx5/

mlx5_common_devx.c
    164  mlx5_devx_sq_destroy(struct mlx5_devx_sq *sq)  in mlx5_devx_sq_destroy() argument
    166  if (sq->sq)  in mlx5_devx_sq_destroy()
    167  claim_zero(mlx5_devx_cmd_destroy(sq->sq));  in mlx5_devx_sq_destroy()
    168  if (sq->umem_obj)  in mlx5_devx_sq_destroy()
    169  claim_zero(mlx5_os_umem_dereg(sq->umem_obj));  in mlx5_devx_sq_destroy()
    170  if (sq->umem_buf)  in mlx5_devx_sq_destroy()
    171  mlx5_free((void *)(uintptr_t)sq->umem_buf);  in mlx5_devx_sq_destroy()
    209  struct mlx5_devx_obj *sq = NULL;  in mlx5_devx_sq_create() local
    253  sq = mlx5_devx_cmd_create_sq(ctx, attr);  in mlx5_devx_sq_create()
    254  if (!sq) {  in mlx5_devx_sq_create()
    [all …]

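mlx5_devx_sq_destroy() releases the three pieces a DevX SQ is assembled from, in the reverse order of their creation: the SQ DevX object, then the registered umem, then the host buffer backing it, with each pointer checked because creation can fail part-way. The shape of that teardown, using placeholder resource types rather than the real DevX/umem calls:

    #include <stdlib.h>

    struct devx_sq {
        void *sq_obj;    /* hardware object created last */
        void *umem_obj;  /* memory registration created second */
        void *umem_buf;  /* host buffer allocated first */
    };

    /* Hypothetical release helpers standing in for the DevX/umem calls. */
    static void obj_destroy(void *obj) { (void)obj; }
    static void umem_dereg(void *umem) { (void)umem; }

    static void devx_sq_destroy(struct devx_sq *sq)
    {
        /* Tear down in reverse creation order; each piece may be absent
         * if creation failed part-way, so every pointer is checked. */
        if (sq->sq_obj)
            obj_destroy(sq->sq_obj);
        if (sq->umem_obj)
            umem_dereg(sq->umem_obj);
        if (sq->umem_buf)
            free(sq->umem_buf);
    }
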
mlx5_common_devx.h
    26  struct mlx5_devx_obj *sq; /* The SQ DevX object. */  member
    80  void mlx5_devx_sq_destroy(struct mlx5_devx_sq *sq);

/dpdk/drivers/net/hinic/

hinic_pmd_tx.c
    43    #define MASKED_SQ_IDX(sq, idx) ((idx) & (sq)->wq->mask)  argument
    674   struct hinic_sq *sq = txq->sq;  in hinic_get_sq_wqe() local
    693   sq->owner = !sq->owner;  in hinic_get_sq_wqe()
    1108  prod_idx = MASKED_SQ_IDX(sq, sq->wq->prod_idx);  in hinic_sq_write_db()
    1305  struct hinic_sq *sq = &qp->sq;  in hinic_create_sq() local
    1309  sq->sq_depth = sq_depth;  in hinic_create_sq()
    1332  sq->q_id = q_id;  in hinic_create_sq()
    1334  sq->owner = 1;  in hinic_create_sq()
    1336  sq->db_addr = db_addr;  in hinic_create_sq()
    1354  if (qp->sq.wq == NULL)  in hinic_destroy_sq()
    [all …]

/dpdk/drivers/event/cnxk/

cnxk_eventdev_adptr.c
    336  cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)  in cnxk_sso_sqb_aura_limit_edit() argument
    340  if (sq->nb_sqb_bufs != nb_sqb_bufs) {  in cnxk_sso_sqb_aura_limit_edit()
    342  sq->aura_handle,  in cnxk_sso_sqb_aura_limit_edit()
    343  RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));  in cnxk_sso_sqb_aura_limit_edit()
    347  sq->nb_sqb_bufs = RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs);  in cnxk_sso_sqb_aura_limit_edit()
    534  struct roc_nix_sq *sq;  in cnxk_sso_tx_adapter_queue_add() local
    544  sq = &cnxk_eth_dev->sqs[tx_queue_id];  in cnxk_sso_tx_adapter_queue_add()
    545  cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);  in cnxk_sso_tx_adapter_queue_add()
    567  struct roc_nix_sq *sq;  in cnxk_sso_tx_adapter_queue_del() local
    576  sq = &cnxk_eth_dev->sqs[tx_queue_id];  in cnxk_sso_tx_adapter_queue_del()
    [all …]

/dpdk/drivers/net/hinic/base/

hinic_pmd_nicio.c
    149  static void hinic_sq_prepare_ctxt(struct hinic_sq *sq, u16 global_qpn,  in hinic_sq_prepare_ctxt() argument
    152  struct hinic_wq *wq = sq->wq;  in hinic_sq_prepare_ctxt()
    305  hinic_sq_prepare_ctxt(&qp->sq, global_qpn, &sq_ctxt[i]);  in init_sq_ctxts()
    702  struct hinic_sq *sq = &nic_io->qps[q_id].sq;  in hinic_return_sq_wqe() local
    704  if (owner != sq->owner)  in hinic_return_sq_wqe()
    705  sq->owner = owner;  in hinic_return_sq_wqe()
    707  sq->wq->delta += num_wqebbs;  in hinic_return_sq_wqe()
    708  sq->wq->prod_idx -= num_wqebbs;  in hinic_return_sq_wqe()
    715  struct hinic_sq *sq = &nic_io->qps[q_id].sq;  in hinic_update_sq_local_ci() local
    717  hinic_put_wqe(sq->wq, wqebb_cnt);  in hinic_update_sq_local_ci()

/dpdk/drivers/net/ena/base/

ena_com.c
    86    struct ena_com_admin_sq *sq = &admin_queue->sq;  in ena_com_admin_init_sq() local
    89    ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, sq->entries, sq->dma_addr,  in ena_com_admin_init_sq()
    92    if (!sq->entries) {  in ena_com_admin_init_sq()
    97    sq->head = 0;  in ena_com_admin_init_sq()
    98    sq->tail = 0;  in ena_com_admin_init_sq()
    99    sq->phase = 1;  in ena_com_admin_init_sq()
    101   sq->db_addr = NULL;  in ena_com_admin_init_sq()
    259   admin_queue->sq.phase = !admin_queue->sq.phase;  in __ena_com_submit_admin_cmd()
    1694  struct ena_com_admin_sq *sq = &admin_queue->sq;  in ena_com_admin_destroy() local
    1701  if (sq->entries)  in ena_com_admin_destroy()
    [all …]

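ena_com initializes the admin submission queue with head = tail = 0 and phase = 1, and flips the phase whenever the tail wraps; the phase bit written into each descriptor lets the device distinguish newly written entries from stale ones left over from the previous pass around the ring. A rough sketch of that convention with an invented descriptor layout, not the real ENA admin descriptor format:

    #include <stdint.h>

    struct admin_desc {
        uint32_t payload;
        uint8_t flags;          /* bit 0 carries the phase (invented layout) */
    };

    struct admin_sq {
        struct admin_desc *entries;
        uint16_t tail;          /* next slot to write */
        uint16_t q_depth;
        uint8_t phase;          /* starts at 1, toggles on every wrap */
    };

    static void admin_sq_submit(struct admin_sq *sq, uint32_t payload)
    {
        struct admin_desc *desc = &sq->entries[sq->tail];

        desc->payload = payload;
        desc->flags = sq->phase & 0x1;  /* device ignores entries carrying the old phase */

        if (++sq->tail == sq->q_depth) {
            sq->tail = 0;
            sq->phase ^= 1;             /* wrapped: the next pass uses the other phase */
        }
    }
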
/dpdk/drivers/net/cnxk/

cn9k_ethdev.c
    163  send_hdr_w0.sq = qid;  in nix_form_default_desc()
    177  struct roc_nix_sq *sq;  in cn9k_nix_tx_queue_setup() local
    189  sq = &dev->sqs[qid];  in cn9k_nix_tx_queue_setup()
    192  txq->fc_mem = sq->fc;  in cn9k_nix_tx_queue_setup()
    193  txq->lmt_addr = sq->lmt_addr;  in cn9k_nix_tx_queue_setup()
    194  txq->io_addr = sq->io_addr;  in cn9k_nix_tx_queue_setup()
    195  txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;  in cn9k_nix_tx_queue_setup()
    196  txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;  in cn9k_nix_tx_queue_setup()

cnxk_ethdev.c
    420  struct roc_nix_sq *sq;  in cnxk_nix_tx_queue_setup() local
    438  sq = &dev->sqs[qid];  in cnxk_nix_tx_queue_setup()
    439  sq->qid = qid;  in cnxk_nix_tx_queue_setup()
    440  sq->nb_desc = nb_desc;  in cnxk_nix_tx_queue_setup()
    443  rc = roc_nix_sq_init(&dev->nix, sq);  in cnxk_nix_tx_queue_setup()
    454  rc |= roc_nix_sq_fini(sq);  in cnxk_nix_tx_queue_setup()
    467  qid, sq->fc, dev->tx_offloads, sq->lmt_addr,  in cnxk_nix_tx_queue_setup()
    468  sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);  in cnxk_nix_tx_queue_setup()
    482  struct roc_nix_sq *sq;  in cnxk_nix_tx_queue_release() local
    495  sq = &dev->sqs[qid];  in cnxk_nix_tx_queue_release()
    [all …]

cn10k_ethdev.c
    163  send_hdr_w0.sq = qid;  in nix_form_default_desc()
    178  struct roc_nix_sq *sq;  in cn10k_nix_tx_queue_setup() local
    190  sq = &dev->sqs[qid];  in cn10k_nix_tx_queue_setup()
    193  txq->fc_mem = sq->fc;  in cn10k_nix_tx_queue_setup()
    196  txq->io_addr = sq->io_addr;  in cn10k_nix_tx_queue_setup()
    197  txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;  in cn10k_nix_tx_queue_setup()
    198  txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;  in cn10k_nix_tx_queue_setup()