/f-stack/freebsd/arm64/broadcom/genet/

  if_genetreg.h
    131: #define GENET_RX_DMA_PROD_INDEX(qid)   (GENET_RX_DMA_RINGBASE(qid) + 0x08)
    132: #define GENET_RX_DMA_CONS_INDEX(qid)   (GENET_RX_DMA_RINGBASE(qid) + 0x0c)
    141: #define GENET_RX_DMA_END_ADDR_LO(qid)  (GENET_RX_DMA_RINGBASE(qid) + 0x1c)
    142: #define GENET_RX_DMA_END_ADDR_HI(qid)  (GENET_RX_DMA_RINGBASE(qid) + 0x20)
    147: #define GENET_RX_DMA_READ_PTR_LO(qid)  (GENET_RX_DMA_RINGBASE(qid) + 0x2c)
    148: #define GENET_RX_DMA_READ_PTR_HI(qid)  (GENET_RX_DMA_RINGBASE(qid) + 0x30)
    151: #define GENET_TX_DMA_READ_PTR_LO(qid)  (GENET_TX_DMA_RINGBASE(qid) + 0x00)
    153: #define GENET_TX_DMA_CONS_INDEX(qid)   (GENET_TX_DMA_RINGBASE(qid) + 0x08)
    154: #define GENET_TX_DMA_PROD_INDEX(qid)   (GENET_TX_DMA_RINGBASE(qid) + 0x0c)
    213: #define GENET_RX_DMA_CTRL_RBUF_EN(qid) __BIT((qid) + 1)
    [all …]
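These macros index a per-queue bank of RX/TX DMA ring registers by adding a fixed offset to a qid-dependent ring base. Below is a minimal sketch of that addressing pattern; the `GENET_RX_DMA_RINGBASE()` base/stride values and the `regs_write32()` helper are placeholders for the driver's real definitions and its `WR4()` accessor, not copied from if_genetreg.h:

```c
#include <stdint.h>

/* Placeholder base and per-ring stride, not the driver's actual values. */
#define GENET_RX_DMA_RINGBASE(qid)    (0x2000 + (qid) * 0x40)
#define GENET_RX_DMA_PROD_INDEX(qid)  (GENET_RX_DMA_RINGBASE(qid) + 0x08)
#define GENET_RX_DMA_CONS_INDEX(qid)  (GENET_RX_DMA_RINGBASE(qid) + 0x0c)

/* Hypothetical MMIO write helper standing in for the driver's WR4(sc, reg, val). */
static inline void
regs_write32(volatile uint32_t *regs, uint32_t off, uint32_t val)
{
	regs[off / sizeof(uint32_t)] = val;
}

/* Zero the producer and consumer indices of RX ring 'qid', the same kind of
 * per-ring reset gen_init_rxring() performs in if_genet.c. */
static void
rx_ring_reset_indices(volatile uint32_t *regs, int qid)
{
	regs_write32(regs, GENET_RX_DMA_PROD_INDEX(qid), 0);
	regs_write32(regs, GENET_RX_DMA_CONS_INDEX(qid), 0);
}
```

The point of the pattern is that every ring exposes the same register layout at a qid-scaled offset, so one set of macros serves all queues.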

  if_genet.c
    663: q->hwindex = qid;  [in gen_init_txring()]
    675: WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);  [in gen_init_txring()]
    676: WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);  [in gen_init_txring()]
    677: WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),  [in gen_init_txring()]
    682: WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),  [in gen_init_txring()]
    695: val |= GENET_TX_DMA_CTRL_RBUF_EN(qid);  [in gen_init_txring()]
    713: q->hwindex = qid;  [in gen_init_rxring()]
    721: WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);  [in gen_init_rxring()]
    723: WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),  [in gen_init_rxring()]
    728: WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),  [in gen_init_rxring()]
    [all …]

/f-stack/dpdk/drivers/event/sw/

  sw_evdev_scheduler.c
    46: uint32_t qid_id = qid->id;  [in sw_schedule_atomic_to_cq()]
    57: if (qid->cq_next_tx >= qid->cq_num_mapped_cqs)  [in sw_schedule_atomic_to_cq()]
    58: qid->cq_next_tx = 0;  [in sw_schedule_atomic_to_cq()]
    97: qid->stats.tx_pkts++;  [in sw_schedule_atomic_to_cq()]
    98: qid->to_port[cq]++;  [in sw_schedule_atomic_to_cq()]
    162: qid->stats.tx_pkts++;  [in sw_schedule_parallel_to_cq()]
    224: int type = qid->type;  [in sw_schedule_qid_to_cq()]
    287: entry = &qid->reorder_buffer[qid->reorder_buffer_index];  [in sw_schedule_reorder()]
    333: qid->reorder_buffer_index %= qid->window_size;  [in sw_schedule_reorder()]
    445: qid->stats.rx_pkts++;  [in __pull_port_lb()]
    [all …]

  sw_evdev.c
    206: p->hist_list[i].qid = -1;  [in sw_port_setup()]
    240: qid->fids[i] = fid;  [in qid_init()]
    242: qid->id = idx;  [in qid_init()]
    243: qid->type = type;  [in qid_init()]
    295: qid->cq_next_tx = 0;  [in qid_init()]
    298: qid->initialized = 1;  [in qid_init()]
    326: memset(qid, 0, sizeof(*qid));  [in sw_queue_release()]
    362: if (!qid->initialized)  [in sw_init_qid_iqs()]
    470: if (!qid->iq[j].head)  [in sw_clean_qid_iqs()]
    719: qid->stats.rx_pkts, qid->stats.rx_dropped,  [in sw_dump()]
    [all …]

  sw_evdev_xstats.c
    106: case rx: return qid->stats.rx_pkts;  [in get_qid_stat()]
    114: infl += qid->fids[i].pcount;  [in get_qid_stat()]
    148: if (qid->fids[i].cq == port)  [in get_qid_port_stat()]
    154: return qid->to_port[port];  [in get_qid_port_stat()]
    317: for (qid = 0; qid < sw->qid_count; qid++) {  [in sw_xstats_init()]
    324: .obj_idx = qid,  [in sw_xstats_init()]
    330: qid, qid_stats[i]);  [in sw_xstats_init()]
    336: .obj_idx = qid,  [in sw_xstats_init()]
    344: qid, iq,  [in sw_xstats_init()]
    352: .obj_idx = qid,  [in sw_xstats_init()]
    [all …]

/f-stack/freebsd/contrib/alpine-hal/

  al_hal_udma_debug.c
    208: al_dbg("M2S Q[%d] status regs:\n", qid);  [in al_udma_regs_m2s_q_print()]
    215: al_dbg("M2S Q[%d] regs:\n", qid);  [in al_udma_regs_m2s_q_print()]
    230: AL_UDMA_PRINT_REG(udma, " ", "\n", m2s, m2s_q[qid],  [in al_udma_regs_m2s_q_print()]
    325: al_dbg("S2M Q[%d] status regs:\n", qid);  [in al_udma_regs_s2m_q_print()]
    330: al_dbg("S2M Q[%d] regs:\n", qid);  [in al_udma_regs_s2m_q_print()]
    409: if (qid >= DMA_MAX_Q)  [in al_udma_q_struct_print()]
    412: queue = &udma->udma_q[qid];  [in al_udma_q_struct_print()]
    414: al_dbg("Q[%d] struct:\n", qid);  [in al_udma_q_struct_print()]
    435: al_dbg(" qid = %d\n", (uint32_t)queue->qid);  [in al_udma_q_struct_print()]
    449: if (qid >= DMA_MAX_Q)  [in al_udma_ring_print()]
    [all …]

  al_hal_udma_main.c
    282: if (qid >= udma->num_of_queues) {  [in al_udma_q_init()]
    283: al_err("udma: invalid queue id (%d)\n", qid);  [in al_udma_q_init()]
    304: q_params->size, qid);  [in al_udma_q_init()]
    308: udma_q = &udma->udma_q[qid];  [in al_udma_q_init()]
    312: &udma->udma_regs->m2s.m2s_q[qid];  [in al_udma_q_init()]
    315: &udma->udma_regs->s2m.s2m_q[qid];  [in al_udma_q_init()]
    342: udma_q->qid = qid;  [in al_udma_q_init()]
    354: udma_q->udma->name, udma_q->qid,  [in al_udma_q_init()]
    422: udma_q->udma->name, udma_q->qid, __func__);  [in al_udma_q_reset()]
    447: if (unlikely(qid >= udma->num_of_queues)) {  [in al_udma_q_handle_get()]
    [all …]

  al_hal_udma_regs_gen.h
    262: #define UDMA_GEN_TGTID_CFG_TGTID_SHIFT(qid)  (((qid) & 0x1) ? 16 : 0)
    263: #define UDMA_GEN_TGTID_CFG_TGTID_MASK(qid)   (((qid) & 0x1) ? 0xFFFF0000 : 0x0000FFFF)
    297: #define UDMA_GEN_TGTADDR_CFG_SHIFT(qid)      (((qid) & 0x1) ? 16 : 0)
    298: #define UDMA_GEN_TGTADDR_CFG_MASK(qid)       (((qid) & 0x1) ? 0xFFFF0000 : 0x0000FFFF)
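These shift/mask macros pack two queues into each 32-bit configuration word: even queue ids own bits 15:0, odd queue ids own bits 31:16. A minimal sketch of how such a field can be updated for a given qid follows; only the two macros come from the header, the `tgtid_cfg_set()` helper is a hypothetical illustration:

```c
#include <stdint.h>

#define UDMA_GEN_TGTID_CFG_TGTID_SHIFT(qid) (((qid) & 0x1) ? 16 : 0)
#define UDMA_GEN_TGTID_CFG_TGTID_MASK(qid)  (((qid) & 0x1) ? 0xFFFF0000 : 0x0000FFFF)

/* Replace the 16-bit target-id field owned by queue 'qid' inside the shared
 * 32-bit register value 'reg' and return the updated value. */
static uint32_t
tgtid_cfg_set(uint32_t reg, int qid, uint16_t tgtid)
{
	reg &= ~UDMA_GEN_TGTID_CFG_TGTID_MASK(qid);
	reg |= (uint32_t)tgtid << UDMA_GEN_TGTID_CFG_TGTID_SHIFT(qid);
	return reg;
}
```

al_hal_udma_config.c (next entry) applies the same per-queue read-modify-write idea when it programs the target-id configuration.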

  al_hal_udma_config.c
    1121: uint32_t qid)  [in al_udma_gen_tgtid_conf_queue_set()]
    1131: (conf->tx_q_conf[qid].desc_en << qid) <<  [in al_udma_gen_tgtid_conf_queue_set()]
    1133: (conf->tx_q_conf[qid].desc_en << qid) <<  [in al_udma_gen_tgtid_conf_queue_set()]
    1138: (conf->tx_q_conf[qid].queue_en << qid) <<  [in al_udma_gen_tgtid_conf_queue_set()]
    1140: (conf->tx_q_conf[qid].queue_en << qid) <<  [in al_udma_gen_tgtid_conf_queue_set()]
    1145: (conf->rx_q_conf[qid].desc_en << qid) <<  [in al_udma_gen_tgtid_conf_queue_set()]
    1147: (conf->rx_q_conf[qid].desc_en << qid) <<  [in al_udma_gen_tgtid_conf_queue_set()]
    1152: (conf->rx_q_conf[qid].queue_en << qid) <<  [in al_udma_gen_tgtid_conf_queue_set()]
    1154: (conf->rx_q_conf[qid].queue_en << qid) <<  [in al_udma_gen_tgtid_conf_queue_set()]
    1157: switch (qid) {  [in al_udma_gen_tgtid_conf_queue_set()]
    [all …]

/f-stack/dpdk/lib/librte_latencystats/

  rte_latencystats.c
    124: uint16_t qid __rte_unused,  [in add_time_stamps()]
    157: uint16_t qid __rte_unused,  [in calc_latency()]
    220: uint16_t qid;  [in rte_latencystats_init()]
    277: for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {  [in rte_latencystats_init()]
    278: cbs = &rx_cbs[pid][qid];  [in rte_latencystats_init()]
    284: "qid=%d\n", pid, qid);  [in rte_latencystats_init()]
    286: for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {  [in rte_latencystats_init()]
    287: cbs = &tx_cbs[pid][qid];  [in rte_latencystats_init()]
    303: uint16_t qid;  [in rte_latencystats_uninit()]
    321: for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {  [in rte_latencystats_uninit()]
    [all …]
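rte_latencystats_init() walks every RX and TX queue of each port and hangs a callback on it, keyed by qid. A minimal sketch of that per-qid registration loop using the public ethdev API; `count_cb()` and `register_rx_cbs()` are illustrative stand-ins, not the library's internal timestamping callbacks:

```c
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative RX callback: passes the burst through untouched. The real
 * latencystats callbacks timestamp mbufs and compute per-packet latency. */
static uint16_t
count_cb(uint16_t pid __rte_unused, uint16_t qid __rte_unused,
    struct rte_mbuf **pkts __rte_unused, uint16_t nb_pkts,
    uint16_t max_pkts __rte_unused, void *arg __rte_unused)
{
	return nb_pkts;
}

/* Attach the callback to every RX queue of port 'pid', mirroring the
 * qid loop shown above from rte_latencystats_init(). */
static int
register_rx_cbs(uint16_t pid)
{
	struct rte_eth_dev_info dev_info;
	uint16_t qid;

	if (rte_eth_dev_info_get(pid, &dev_info) != 0)
		return -1;
	for (qid = 0; qid < dev_info.nb_rx_queues; qid++)
		if (rte_eth_add_rx_callback(pid, qid, count_cb, NULL) == NULL)
			return -1;
	return 0;
}
```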

/f-stack/freebsd/mips/nlm/hal/

  fmn.c
    184: return nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid));  [in nlm_cms_get_onchip_queue()]
    191: rdval = nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid));  [in nlm_cms_set_onchip_queue()]
    201: val = nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid));  [in nlm_cms_per_queue_level_intr()]
    208: nlm_write_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid), val);  [in nlm_cms_per_queue_level_intr()]
    216: val = nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid));  [in nlm_cms_per_queue_timer_intr()]
    223: nlm_write_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid), val);  [in nlm_cms_per_queue_timer_intr()]
    227: int nlm_cms_outputq_intr_check(uint64_t base, int qid)  [in nlm_cms_outputq_intr_check()]
    230: val = nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid));  [in nlm_cms_outputq_intr_check()]
    235: void nlm_cms_outputq_clr_intr(uint64_t base, int qid)  [in nlm_cms_outputq_clr_intr()]
    238: val = nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid));  [in nlm_cms_outputq_clr_intr()]
    [all …]

  fmn.h
    216: extern int nlm_cms_get_oc_space(int qsize, int max_queues, int qid, int *ocbase, int *ocstart, int …
    218: extern int nlm_cms_config_onchip_queue (uint64_t base, uint64_t cms_spill_base, int qid, int spill_…
    220: extern uint64_t nlm_cms_get_onchip_queue (uint64_t base, int qid);
    221: extern void nlm_cms_set_onchip_queue (uint64_t base, int qid, uint64_t val);
    222: extern void nlm_cms_per_queue_level_intr(uint64_t base, int qid, int sub_type, int intr_val);
    224: extern void nlm_cms_per_queue_timer_intr(uint64_t base, int qid, int sub_type, int intr_val);
    226: extern int nlm_cms_outputq_intr_check(uint64_t base, int qid);
    227: extern void nlm_cms_outputq_clr_intr(uint64_t base, int qid);
    243: extern int nlm_cms_alloc_spill_q(uint64_t base, int qid, uint64_t spill_base,
    245: extern int nlm_cms_alloc_onchip_q(uint64_t base, int qid, int nsegs);

/f-stack/dpdk/drivers/raw/ioat/

  idxd_pci.c
    25: uint16_t qid = idxd->qid;  [in idxd_pci_dev_command()]
    29: qid = (1 << qid);  [in idxd_pci_dev_command()]
    75: idxd->qid, err_code);  [in idxd_pci_dev_stop()]
    88: IOAT_PMD_WARN("WQ %d already enabled", idxd->qid);  [in idxd_pci_dev_start()]
    100: idxd->qid, err_code);  [in idxd_pci_dev_start()]
    104: IOAT_PMD_DEBUG("Work queue %d enabled OK", idxd->qid);  [in idxd_pci_dev_start()]
    243: int qid, ret = 0;  [in idxd_rawdev_probe_pci()]
    258: for (qid = 0; qid < nb_wqs; qid++) {  [in idxd_rawdev_probe_pci()]
    263: idxd.qid = qid;  [in idxd_rawdev_probe_pci()]
    265: qid * IDXD_PORTAL_SIZE);  [in idxd_rawdev_probe_pci()]
    [all …]

/f-stack/dpdk/drivers/net/failsafe/

  failsafe_intr.c
    271: uint16_t qid;  [in failsafe_eth_rx_intr_ctl_subdevice()]
    299: for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {  [in failsafe_eth_rx_intr_ctl_subdevice()]
    300: fsrxq = fsdev->data->rx_queues[qid];  [in failsafe_eth_rx_intr_ctl_subdevice()]
    306: pid, qid, epfd, rc);  [in failsafe_eth_rx_intr_ctl_subdevice()]
    326: int qid;  [in failsafe_rx_intr_install_subdevice()]
    340: for (qid = 0; qid < ETH(sdev)->data->nb_rx_queues; qid++) {  [in failsafe_rx_intr_install_subdevice()]
    341: if (rxq[qid]->enable_events) {  [in failsafe_rx_intr_install_subdevice()]
    343: qid);  [in failsafe_rx_intr_install_subdevice()]
    366: int qid;  [in failsafe_rx_intr_uninstall_subdevice()]
    371: for (qid = 0; qid < ETH(sdev)->data->nb_rx_queues; qid++) {  [in failsafe_rx_intr_uninstall_subdevice()]
    [all …]

  failsafe_rxtx.c
    97: sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];  [in failsafe_rx_burst()]
    125: sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];  [in failsafe_rx_burst_fast()]
    153: sub_txq = ETH(sdev)->data->tx_queues[txq->qid];  [in failsafe_tx_burst()]
    173: sub_txq = ETH(sdev)->data->tx_queues[txq->qid];  [in failsafe_tx_burst_fast()]

/f-stack/tools/ipfw/

  altq.c
    105: if (pfioc.altq.qid == 0)  [in altq_fetch()]
    125: return altq->qid;  [in altq_name_to_qid()]
    129: altq_qid_to_name(u_int32_t qid)  [in altq_qid_to_name()]
    135: if (qid == altq->qid)  [in altq_qid_to_name()]
    148: qname = altq_qid_to_name(altqptr->qid);  [in print_altq_cmd()]
    150: bprintf(bp, " altq ?<%u>", altqptr->qid);  [in print_altq_cmd()]

/f-stack/freebsd/mips/nlm/

  cms.c
    124: int src, qid, i;  [in xlp_cms_credit_setup()]
    146: for (qid = 0; qid < maxqid; qid++)  [in xlp_cms_credit_setup()]
    147: nlm_cms_setup_credits(cmsbase, qid,  [in xlp_cms_credit_setup()]
    159: int qid, maxqid, src;  [in xlp_msgring_cpu_init()]
    166: for (qid = 0; qid < maxqid; qid++)  [in xlp_msgring_cpu_init()]
    167: nlm_cms_setup_credits(cmsbase, qid, src, credit);  [in xlp_msgring_cpu_init()]
    261: int i, qid;  [in xlp_cms_enable_intr()]
    266: qid = (i + (cpu * 4)) & 0x7f;  [in xlp_cms_enable_intr()]
    267: nlm_cms_per_queue_level_intr(cmsbase, qid, type, watermark);  [in xlp_cms_enable_intr()]
    268: nlm_cms_per_queue_timer_intr(cmsbase, qid, 0x1, 0);  [in xlp_cms_enable_intr()]

/f-stack/dpdk/lib/librte_vhost/

  rte_vdpa_dev.h
    50: int (*get_notify_area)(int vid, int qid,
    59: int (*get_stats)(struct rte_vdpa_device *dev, int qid,
    63: int (*reset_stats)(struct rte_vdpa_device *dev, int qid);
    118: rte_vhost_host_notifier_ctrl(int vid, uint16_t qid, bool enable);
    135: rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m);

  rte_vdpa.h
    120: rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m);
    170: rte_vdpa_get_stats(struct rte_vdpa_device *dev, uint16_t qid,
    183: rte_vdpa_reset_stats(struct rte_vdpa_device *dev, uint16_t qid);

  vdpa.c
    133: rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)  [in rte_vdpa_relay_vring_used()]
    149: if (qid >= dev->nr_vring)  [in rte_vdpa_relay_vring_used()]
    156: vq = dev->virtqueue[qid];  [in rte_vdpa_relay_vring_used()]
    276: rte_vdpa_get_stats(struct rte_vdpa_device *dev, uint16_t qid,  [in rte_vdpa_get_stats()]
    284: return dev->ops->get_stats(dev, qid, stats, n);  [in rte_vdpa_get_stats()]
    288: rte_vdpa_reset_stats(struct rte_vdpa_device *dev, uint16_t qid)  [in rte_vdpa_reset_stats()]
    295: return dev->ops->reset_stats(dev, qid);  [in rte_vdpa_reset_stats()]

/f-stack/dpdk/lib/librte_pdump/

  rte_pdump.c
    127: uint16_t qid;  [in pdump_register_rx_callbacks()]
    130: qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;  [in pdump_register_rx_callbacks()]
    131: for (; qid < end_q; qid++) {  [in pdump_register_rx_callbacks()]
    132: cbs = &rx_cbs[port][qid];  [in pdump_register_rx_callbacks()]
    138: port, qid);  [in pdump_register_rx_callbacks()]
    159: port, qid);  [in pdump_register_rx_callbacks()]
    182: uint16_t qid;  [in pdump_register_tx_callbacks()]
    186: for (; qid < end_q; qid++) {  [in pdump_register_tx_callbacks()]
    187: cbs = &tx_cbs[port][qid];  [in pdump_register_tx_callbacks()]
    193: port, qid);  [in pdump_register_tx_callbacks()]
    [all …]
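A pdump request can target either one queue or every queue on a port (RTE_PDUMP_ALL_QUEUES); the registration code turns that choice into a half-open qid range before looping. A small self-contained sketch of that range selection; the helper and the ALL_QUEUES stand-in below are illustrative, not the library's own definitions:

```c
#include <stdint.h>

#define ALL_QUEUES UINT16_MAX  /* stand-in for RTE_PDUMP_ALL_QUEUES */

/* Resolve the half-open qid range [*first, *end) that a request covers:
 * a single queue, or all nb_queues queues of the port. */
static void
resolve_qid_range(uint16_t queue, uint16_t nb_queues,
    uint16_t *first, uint16_t *end)
{
	*first = (queue == ALL_QUEUES) ? 0 : queue;
	*end = (queue == ALL_QUEUES) ? nb_queues : (uint16_t)(queue + 1);
}
```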

/f-stack/dpdk/drivers/vdpa/ifc/

  ifcvf_vdpa.c
    435: uint32_t qid, q_num;  [in notify_relay()]
    454: for (qid = 0; qid < q_num; qid++) {  [in notify_relay()]
    474: qid = events[i].data.u32;  [in notify_relay()]
    490: ifcvf_notify_queue(hw, qid);  [in notify_relay()]
    707: rte_vdpa_relay_vring_used(internal->vid, qid, &internal->m_vring[qid]);  [in update_used_ring()]
    717: uint16_t qid, q_num;  [in vring_relay()]
    735: for (qid = 0; qid < q_num; qid++) {  [in vring_relay()]
    745: for (qid = 0; qid < q_num; qid += 2) {  [in vring_relay()]
    748: ev.data.u64 = 1 | qid << 1 |  [in vring_relay()]
    759: for (qid = 0; qid < q_num; qid++)  [in vring_relay()]
    [all …]

/f-stack/dpdk/drivers/event/opdl/

  opdl_test.c
    42: uint8_t qid[MAX_QIDS];  [struct member]
    164: t->qid[i] = i;  [in create_queues_type()]
    256: ev.queue_id = t->qid[0];  [in ordered_basic()]
    297: deq_ev[i].queue_id = t->qid[1];  [in ordered_basic()]
    395: ev.queue_id = t->qid[0];  [in atomic_basic()]
    592: t->qid[0]);  [in single_link_w_stats()]
    603: t->qid[1]);  [in single_link_w_stats()]
    625: ev.queue_id = t->qid[0];  [in single_link_w_stats()]
    656: deq_ev[i].queue_id = t->qid[1];  [in single_link_w_stats()]
    755: uint8_t qid,  [in populate_event_burst()]
    [all …]

/f-stack/freebsd/kern/

  subr_gtaskqueue.c
    672: int cpu, qid, error;  [in taskqgroup_attach()]
    683: qid = taskqgroup_find(qgroup, uniq);  [in taskqgroup_attach()]
    684: qgroup->tqg_queue[qid].tgc_cnt++;  [in taskqgroup_attach()]
    686: gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;  [in taskqgroup_attach()]
    688: cpu = qgroup->tqg_queue[qid].tgc_cpu;  [in taskqgroup_attach()]
    703: int i, qid, error;  [in taskqgroup_attach_cpu()]
    711: for (i = 0, qid = -1; i < qgroup->tqg_cnt; i++)  [in taskqgroup_attach_cpu()]
    713: qid = i;  [in taskqgroup_attach_cpu()]
    716: if (qid == -1) {  [in taskqgroup_attach_cpu()]
    721: qgroup->tqg_queue[qid].tgc_cnt++;  [in taskqgroup_attach_cpu()]
    [all …]

/f-stack/dpdk/app/test/

  test_event_crypto_adapter.c
    617: uint8_t qid;  [in configure_eventdev()]
    645: qid = TEST_APP_EV_QUEUE_ID;  [in configure_eventdev()]
    646: ret = rte_event_queue_setup(evdev, qid, NULL);  [in configure_eventdev()]
    647: TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d\n", qid);  [in configure_eventdev()]
    655: qid = TEST_CRYPTO_EV_QUEUE_ID;  [in configure_eventdev()]
    656: ret = rte_event_queue_setup(evdev, qid, &queue_conf);  [in configure_eventdev()]
    669: qid = TEST_APP_EV_QUEUE_ID;  [in configure_eventdev()]
    805: uint8_t qid;  [in test_crypto_adapter_conf()]
    811: qid = TEST_CRYPTO_EV_QUEUE_ID;  [in test_crypto_adapter_conf()]
    813: params.crypto_event_port_id, &qid, NULL, 1);  [in test_crypto_adapter_conf()]
    [all …]
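The test first sets up event queues by qid and then links one of them to an event port. A minimal sketch of that setup-then-link sequence using the public eventdev API; the helper name is hypothetical, and passing NULL selects the default queue configuration as the excerpt above does for the application queue:

```c
#include <rte_eventdev.h>

/* Create event queue 'qid' on device 'evdev' with the default
 * configuration, then link it to event port 'port_id'. */
static int
setup_and_link_queue(uint8_t evdev, uint8_t qid, uint8_t port_id)
{
	int ret = rte_event_queue_setup(evdev, qid, NULL);

	if (ret < 0)
		return ret;
	/* One queue, default priority; the call returns the number of links made. */
	ret = rte_event_port_link(evdev, port_id, &qid, NULL, 1);
	return (ret == 1) ? 0 : -1;
}
```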