/f-stack/dpdk/drivers/event/sw/
iq_chunk.h
     28  return iq->count;                         in iq_count()
     62  iq->tail = iq->head;                      in iq_init()
     63  iq->head_idx = 0;                         in iq_init()
     64  iq->tail_idx = 0;                         in iq_init()
     65  iq->count = 0;                            in iq_init()
     71  iq->tail->events[iq->tail_idx++] = *ev;   in iq_enqueue()
     72  iq->count++;                              in iq_enqueue()
     89  iq->head_idx++;                           in iq_pop()
     90  iq->count--;                              in iq_pop()
    103  return &iq->head->events[iq->head_idx];   in iq_peek()
    [all …]

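The iq_chunk.h matches above outline a chunked FIFO: events live in fixed-size chunks linked from head to tail, head_idx/tail_idx track positions inside the end chunks, and count tracks the total. Below is a minimal self-contained sketch of that pattern; the chunk size, the explicit next pointer, and the malloc/free chunk management are simplifying assumptions (the real driver recycles chunks from a preallocated pool and stores the link inside the chunk itself).

```c
#include <stdint.h>
#include <stdlib.h>

#define IQ_CHUNK_SIZE 8          /* events per chunk; sized differently in the driver */

struct event { uint64_t data; }; /* stand-in for struct rte_event */

struct iq_chunk {
	struct event events[IQ_CHUNK_SIZE];
	struct iq_chunk *next;
};

struct sw_iq {
	struct iq_chunk *head, *tail;
	uint16_t head_idx, tail_idx; /* positions inside the head/tail chunks */
	uint32_t count;
};

static void iq_init(struct sw_iq *iq)
{
	iq->head = calloc(1, sizeof(*iq->head));
	iq->tail = iq->head;         /* one chunk serves both ends when empty */
	iq->head_idx = 0;
	iq->tail_idx = 0;
	iq->count = 0;
}

/* Lazily retire the head chunk once it is fully consumed and a successor
 * exists; called before every read of the head slot. */
static void iq_advance_head(struct sw_iq *iq)
{
	if (iq->head_idx == IQ_CHUNK_SIZE && iq->head->next != NULL) {
		struct iq_chunk *done = iq->head;
		iq->head = done->next;
		iq->head_idx = 0;
		free(done);          /* the driver returns chunks to its pool */
	}
}

static void iq_enqueue(struct sw_iq *iq, const struct event *ev)
{
	if (iq->tail_idx == IQ_CHUNK_SIZE) {   /* tail chunk full: chain a new one */
		struct iq_chunk *c = calloc(1, sizeof(*c));
		iq->tail->next = c;
		iq->tail = c;
		iq->tail_idx = 0;
	}
	iq->tail->events[iq->tail_idx++] = *ev;
	iq->count++;
}

/* Valid only while iq->count > 0. */
static const struct event *iq_peek(struct sw_iq *iq)
{
	iq_advance_head(iq);
	return &iq->head->events[iq->head_idx];
}

/* Valid only while iq->count > 0. */
static struct event iq_pop(struct sw_iq *iq)
{
	iq_advance_head(iq);
	struct event ev = iq->head->events[iq->head_idx++];
	iq->count--;
	return ev;
}

static uint32_t iq_count(const struct sw_iq *iq) { return iq->count; }
```

Callers are expected to check iq_count() before peek/pop, which is why the accessors skip empty-queue checks.
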
sw_evdev_scheduler.c
     48  iq_dequeue_burst(sw, &qid->iq[iq_num], qes, count);       in sw_schedule_atomic_to_cq()
    135  const struct rte_event *qe = iq_peek(&qid->iq[iq_num]);   in sw_schedule_parallel_to_cq()
    173  iq_pop(sw, &qid->iq[iq_num]);                             in sw_schedule_parallel_to_cq()
    198  struct sw_iq *iq = &qid->iq[iq_num];                      in sw_schedule_dir_to_cq() (local)
    199  uint32_t ret = iq_dequeue_burst(sw, iq,                   in sw_schedule_dir_to_cq()
    232  uint32_t count = iq_count(&qid->iq[iq_num]);              in sw_schedule_qid_to_cq()
    310  struct sw_iq *iq = &q->iq[dest_iq];                       in sw_schedule_reorder() (local)
    315  iq_enqueue(sw, iq, qe);                                   in sw_schedule_reorder()
    443  iq_enqueue(sw, &qid->iq[iq_num], qe);                     in __pull_port_lb()
    488  struct sw_iq *iq = &qid->iq[iq_num];                      in sw_schedule_pull_port_dir() (local)
    [all …]

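The scheduler matches show a peek-before-pop discipline: sw_schedule_parallel_to_cq() reads the head event with iq_peek() (line 135) and only iq_pop()s it (line 173) after placement succeeds, so back-pressure leaves the event queued. Here is a runnable toy version of that loop; the plain ring buffers and every name below stand in for the driver's IQ/CQ structures and are illustrative only.

```c
#include <stdint.h>
#include <stdio.h>

#define CQ_DEPTH 4   /* toy consumer-queue capacity */

struct ev { uint32_t flow_id; uint64_t data; };

/* Plain ring buffers stand in for the driver's IQ and CQ structures. */
struct ring { struct ev slots[16]; uint32_t head, tail; };

static uint32_t ring_count(const struct ring *r) { return r->tail - r->head; }
static const struct ev *ring_peek(const struct ring *r) { return &r->slots[r->head % 16]; }
static void ring_pop(struct ring *r) { r->head++; }
static void ring_push(struct ring *r, const struct ev *e) { r->slots[r->tail++ % 16] = *e; }

/* Peek-then-pop: inspect the head event, confirm the consumer queue can
 * take it, and only then remove it from the IQ.  When the CQ is full the
 * event stays at the head of the IQ for the next scheduling pass. */
static uint32_t schedule_iq_to_cq(struct ring *iq, struct ring *cq, uint32_t limit)
{
	uint32_t done = 0;

	while (done < limit && ring_count(iq) > 0) {
		const struct ev *qe = ring_peek(iq);

		if (ring_count(cq) >= CQ_DEPTH)
			break;                  /* back-pressure */
		ring_push(cq, qe);
		ring_pop(iq);
		done++;
	}
	return done;
}

int main(void)
{
	struct ring iq = {0}, cq = {0};

	for (uint32_t i = 0; i < 10; i++)
		ring_push(&iq, &(struct ev){ .flow_id = i, .data = i });

	uint32_t n = schedule_iq_to_cq(&iq, &cq, 8);
	printf("scheduled %u events, %u still queued\n", n, ring_count(&iq));
	return 0;
}
```
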
sw_evdev.c
    366  iq_init(sw, &qid->iq[j]);                 in sw_init_qid_iqs()
    377  if (iq_count(&sw->qids[i].iq[j]))         in sw_qids_empty()
    437  while (iq_count(iq) > 0) {                in sw_drain_queue()
    440  iq_dequeue_burst(sw, iq, &ev, 1);         in sw_drain_queue()
    470  if (!qid->iq[j].head)                     in sw_clean_qid_iqs()
    473  qid->iq[j].head = NULL;                   in sw_clean_qid_iqs()
    747  uint32_t iq;                              in sw_dump() (local)
    749  for (iq = 0; iq < SW_IQS_MAX; iq++) {     in sw_dump()
    750  if (!qid->iq[iq].head) {                  in sw_dump()
    755  uint32_t used = iq_count(&qid->iq[iq]);   in sw_dump()
    [all …]

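sw_clean_qid_iqs() (lines 470/473 above) uses a NULL head as the "already cleaned" marker, so the teardown can run more than once safely. A minimal sketch of that idempotent-cleanup pattern, with free() standing in for the driver's chunk pool and the struct names invented for illustration:

```c
#include <stddef.h>
#include <stdlib.h>

struct chunk { struct chunk *next; };
struct sw_iq_sketch { struct chunk *head; };

/* Idempotent teardown in the style of sw_clean_qid_iqs(): a NULL head
 * marks an IQ whose chunks were already returned, so a second pass is
 * harmless. */
static void clean_iq(struct sw_iq_sketch *iq)
{
	if (!iq->head)              /* already cleaned (line 470 above) */
		return;

	struct chunk *c = iq->head;
	while (c) {
		struct chunk *next = c->next;
		free(c);            /* the driver returns chunks to its pool */
		c = next;
	}
	iq->head = NULL;            /* guard against a second pass (line 473) */
}
```
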
sw_evdev_xstats.c
    130  case iq_used: return iq_count(&qid->iq[iq_idx]);   in get_qid_iq_stat()
    255  unsigned int i, port, qid, iq, bkt, stat = 0;      in sw_xstats_init() (local)
    332  for (iq = 0; iq < SW_IQS_MAX; iq++)                in sw_xstats_init()
    339  .extra_arg = iq,                                   in sw_xstats_init()
    344  qid, iq,                                           in sw_xstats_init()

sw_evdev.h
    116  struct sw_iq iq[SW_IQS_MAX];   (member)

/f-stack/dpdk/drivers/raw/octeontx2_ep/
otx2_ep_enqdeq.c
    100  iq->base_addr_dma = iq->iq_mz->iova;                   in sdp_init_instr_queue()
    101  iq->base_addr = (uint8_t *)iq->iq_mz->addr;            in sdp_init_instr_queue()
    146  iq->iqcmd_64B = (conf->iq.instr_type == 64);           in sdp_init_instr_queue()
    439  iq->req_list[iq->host_write_index].buf = buf;          in sdp_iqreq_add()
    440  iq->req_list[iq->host_write_index].reqtype = reqtype;  in sdp_iqreq_add()
    456  while (iq->flush_index != iq->otx_read_index) {        in sdp_flush_iq()
    458  sdp_iqreq_delete(sdpvf, iq, iq->flush_index);          in sdp_flush_iq()
    460  sdp_incr_index(iq->flush_index, 1, iq->nb_desc);       in sdp_flush_iq()
    475  otx2_write64(iq->fill_cnt, iq->doorbell_reg);          in sdp_ring_doorbell()
    510  sdp_incr_index(iq->host_write_index, 1, iq->nb_desc);  in post_iqcmd()
    [all …]

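The enqueue path above batches doorbell writes: each posted command bumps host_write_index modulo nb_desc (line 510) and increments fill_cnt, and a single register write of fill_cnt later announces the whole batch (sdp_ring_doorbell(), line 475). A compilable sketch of that pattern follows; the volatile pointer stands in for otx2_write64() on a mapped BAR register, and the struct layout is an assumption pieced together from the fragments.

```c
#include <stdint.h>

struct instr_queue {
	uint32_t host_write_index;        /* next free command slot */
	uint32_t fill_cnt;                /* commands posted since the last doorbell */
	uint32_t nb_desc;                 /* ring size in descriptors */
	volatile uint64_t *doorbell_reg;  /* mapped device register */
};

/* Wrap-around increment used throughout the SDP queue code. */
static uint32_t incr_index(uint32_t idx, uint32_t n, uint32_t nb_desc)
{
	return (idx + n) % nb_desc;
}

/* Post one command slot; the doorbell is deliberately not rung here so
 * several posts can be announced with a single register write. */
static void post_iqcmd(struct instr_queue *iq)
{
	/* ...the command itself would be copied into the ring here... */
	iq->host_write_index = incr_index(iq->host_write_index, 1, iq->nb_desc);
	iq->fill_cnt++;
}

/* Announce the whole batch: write the count and reset it, mirroring
 * sdp_ring_doorbell() (otx2_write64() in the driver). */
static void ring_doorbell(struct instr_queue *iq)
{
	*iq->doorbell_reg = iq->fill_cnt;
	iq->fill_cnt = 0;
}
```
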
otx2_ep_vf.c
    223  otx2_write64(iq->base_addr_dma, sdpvf->hw_addr +       in sdp_vf_setup_iq_regs()
    225  otx2_write64(iq->nb_desc, sdpvf->hw_addr +             in sdp_vf_setup_iq_regs()
    231  iq->doorbell_reg = (uint8_t *)sdpvf->hw_addr +         in sdp_vf_setup_iq_regs()
    233  iq->inst_cnt_reg = (uint8_t *)sdpvf->hw_addr +         in sdp_vf_setup_iq_regs()
    237  iq_no, iq->doorbell_reg, iq->inst_cnt_reg);            in sdp_vf_setup_iq_regs()
    240  iq->reset_instr_cnt = rte_read32(iq->inst_cnt_reg);    in sdp_vf_setup_iq_regs()
    413  sdp_vf_update_read_index(struct sdp_instr_queue *iq)   in sdp_vf_update_read_index() (argument)
    415  uint32_t new_idx = rte_read32(iq->inst_cnt_reg);       in sdp_vf_update_read_index()
    421  if (iq->reset_instr_cnt < new_idx)                     in sdp_vf_update_read_index()
    422  new_idx -= iq->reset_instr_cnt;                        in sdp_vf_update_read_index()
    [all …]

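sdp_vf_update_read_index() derives the software read pointer from a cumulative hardware completion counter: the value latched into reset_instr_cnt at queue setup (line 240) is subtracted from fresh reads so pre-existing counts don't shift the index. A sketch of that calculation; the final modulo-nb_desc reduction is an assumption about the elided tail of the function.

```c
#include <stdint.h>

struct instr_queue {
	volatile uint32_t *inst_cnt_reg;  /* cumulative completion counter (hardware) */
	uint32_t reset_instr_cnt;         /* counter value latched at queue setup */
	uint32_t nb_desc;
};

/* Turn the hardware's ever-growing completion count into a ring index.
 * The counter is not cleared when the queue is set up, so the baseline
 * captured in reset_instr_cnt is subtracted out first. */
static uint32_t update_read_index(struct instr_queue *iq)
{
	uint32_t new_idx = *iq->inst_cnt_reg;     /* rte_read32() in the driver */

	if (iq->reset_instr_cnt < new_idx)
		new_idx -= iq->reset_instr_cnt;   /* drop the pre-existing count */

	return new_idx % iq->nb_desc;             /* counter never runs backward; the ring wraps */
}
```
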
otx2_ep_rawdev.h
    383  uint64_t iq;              (member)
    391  struct sdp_iq_config iq;  (member)
    412  uint32_t (*update_iq_read_idx)(struct sdp_instr_queue *iq);

otx2_ep_rawdev.c
     35  .iq = {

/f-stack/dpdk/drivers/net/liquidio/base/
lio_23xx_reg.h
     32  #define CN23XX_SLI_IQ_PKT_CONTROL64(iq) \   (argument)
     33      (CN23XX_SLI_PKT_INPUT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET))
     35  #define CN23XX_SLI_IQ_BASE_ADDR64(iq) \     (argument)
     36      (CN23XX_SLI_PKT_INSTR_BADDR_START64 + ((iq) * CN23XX_IQ_OFFSET))
     38  #define CN23XX_SLI_IQ_SIZE(iq) \            (argument)
     39      (CN23XX_SLI_PKT_INSTR_FIFO_RSIZE_START + ((iq) * CN23XX_IQ_OFFSET))
     41  #define CN23XX_SLI_IQ_DOORBELL(iq) \        (argument)
     42      (CN23XX_SLI_PKT_INSTR_BADDR_DBELL_START + ((iq) * CN23XX_IQ_OFFSET))
     44  #define CN23XX_SLI_IQ_INSTR_COUNT64(iq) \   (argument)
     45      (CN23XX_SLI_PKT_IN_DONE_CNTS_START64 + ((iq) * CN23XX_IQ_OFFSET))

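These macros all follow one layout rule: per-queue copies of a CSR sit at a fixed stride, so queue n's register lives at start + n * CN23XX_IQ_OFFSET. A self-contained illustration of the scheme; the offsets below are placeholders, not the CN23XX's real values.

```c
#include <stdio.h>

/* Placeholder offsets: the addressing scheme, not the real register map. */
#define SLI_PKT_INSTR_BADDR_START64      0x10010UL
#define SLI_PKT_INSTR_BADDR_DBELL_START  0x10020UL
#define IQ_OFFSET                        0x20000UL  /* stride between queues */

#define SLI_IQ_BASE_ADDR64(iq) \
	(SLI_PKT_INSTR_BADDR_START64 + ((iq) * IQ_OFFSET))
#define SLI_IQ_DOORBELL(iq) \
	(SLI_PKT_INSTR_BADDR_DBELL_START + ((iq) * IQ_OFFSET))

int main(void)
{
	for (unsigned int q = 0; q < 3; q++)
		printf("iq%u: base reg @ 0x%lx, doorbell @ 0x%lx\n",
		       q, SLI_IQ_BASE_ADDR64(q), SLI_IQ_DOORBELL(q));
	return 0;
}
```
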
lio_23xx_vf.c
    179  struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];        in cn23xx_vf_setup_iq_regs() (local)
    186  iq->base_addr_dma);                                              in cn23xx_vf_setup_iq_regs()
    187  lio_write_csr(lio_dev, CN23XX_SLI_IQ_SIZE(iq_no), iq->nb_desc);  in cn23xx_vf_setup_iq_regs()
    192  iq->doorbell_reg = (uint8_t *)lio_dev->hw_addr +                 in cn23xx_vf_setup_iq_regs()
    194  iq->inst_cnt_reg = (uint8_t *)lio_dev->hw_addr +                 in cn23xx_vf_setup_iq_regs()
    197  iq_no, iq->doorbell_reg, iq->inst_cnt_reg);                      in cn23xx_vf_setup_iq_regs()
    202  pkt_in_done = rte_read64(iq->inst_cnt_reg);                      in cn23xx_vf_setup_iq_regs()
    207  rte_write64(pkt_in_done, iq->inst_cnt_reg);                      in cn23xx_vf_setup_iq_regs()
    310  if (lio_dev->io_qmask.iq & (1ULL << q_no)) {                     in cn23xx_vf_enable_io_queues()

lio_23xx_vf.h
     16  .iq = {

lio_hw_defs.h
     70  #define LIO_IQ_INSTR_TYPE(cfg) ((cfg)->default_config->iq.instr_type)

/f-stack/dpdk/drivers/net/liquidio/
lio_rxtx.c
    703  iq->base_addr_dma = iq->iq_mz->iova;                        in lio_init_instr_queue()
    704  iq->base_addr = (uint8_t *)iq->iq_mz->addr;                 in lio_init_instr_queue()
    724  iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,    in lio_init_instr_queue()
    729  iq->fill_cnt = 0;                                           in lio_init_instr_queue()
    881  rte_write32(iq->fill_cnt, iq->doorbell_reg);                in lio_ring_doorbell()
    894  iqptr = iq->base_addr + (cmdsize * iq->host_write_index);   in copy_cmd_into_iq()
    924  iq->host_write_index = lio_incr_index(iq->host_write_index, 1,   in post_command2()
    926  iq->fill_cnt++;                                             in post_command2()
   1045  iq->lio_read_index = (iq->lio_read_index +                  in lio_update_read_index()
   1047  iq->nb_desc;                                                in lio_update_read_index()
    [all …]

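copy_cmd_into_iq() (line 894) locates a command slot with plain pointer arithmetic, base_addr + cmdsize * host_write_index, where cmdsize is 32 or 64 bytes depending on the configured instruction type. A compilable sketch of the post path under those assumptions; the struct layout is inferred from the fragments, not the driver's actual definition.

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct instr_queue {
	uint8_t *base_addr;        /* ring of nb_desc fixed-size command slots */
	uint32_t host_write_index; /* next slot to fill */
	uint32_t fill_cnt;         /* slots filled since the last doorbell write */
	uint32_t nb_desc;
	uint32_t cmdsize;          /* 32 or 64 bytes, per the iq config */
};

static uint32_t incr_index(uint32_t idx, uint32_t n, uint32_t nb_desc)
{
	return (idx + n) % nb_desc;
}

/* The slot address is plain pointer arithmetic on the ring base,
 * base_addr + cmdsize * host_write_index (line 894 above). */
static void copy_cmd_into_iq(struct instr_queue *iq, const void *cmd)
{
	uint8_t *iqptr = iq->base_addr + (size_t)iq->cmdsize * iq->host_write_index;

	memcpy(iqptr, cmd, iq->cmdsize);
	iq->host_write_index = incr_index(iq->host_write_index, 1, iq->nb_desc);
	iq->fill_cnt++;   /* later flushed to the doorbell, as in lio_ring_doorbell() */
}
```
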
lio_struct.h
    396  uint64_t iq;              (member)
    503  struct lio_iq_config iq;  (member)

lio_rxtx.h
    730  int lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq);

/f-stack/dpdk/drivers/net/cxgbe/
sge.c
   1797  iq->size = cxgbe_roundup(iq->size, 16);          in t4_sge_alloc_rxq()
   1800  queue_id, socket_id, iq->size, iq->iqe_len,      in t4_sge_alloc_rxq()
   1802  if (!iq->desc)                                   in t4_sge_alloc_rxq()
   1903  iq->cur_desc = iq->desc;                         in t4_sge_alloc_rxq()
   1904  iq->cidx = 0;                                    in t4_sge_alloc_rxq()
   1905  iq->gts_idx = 0;                                 in t4_sge_alloc_rxq()
   1906  iq->gen = 1;                                     in t4_sge_alloc_rxq()
   1907  iq->next_intr_params = iq->intr_params;          in t4_sge_alloc_rxq()
   1913  iq->stat = (void *)&iq->desc[iq->size * 8];      in t4_sge_alloc_rxq()
   1989  iq->abs_id = 0;                                  in t4_sge_alloc_rxq()
    [all …]

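The queue-reset block (lines 1903-1907) points at a generation-bit scheme: gen starts at 1, software treats a descriptor as valid only while its generation bit matches, and the expected bit flips on every wrap so last lap's stale entries are never re-consumed. A minimal sketch of that convention; the descriptor layout and bit position here are illustrative, not the hardware's.

```c
#include <stdbool.h>
#include <stdint.h>

struct iq_desc { uint8_t payload[63]; uint8_t gen; };  /* gen bit in the last byte */

struct ingress_queue {
	struct iq_desc *desc;      /* descriptor ring of `size` entries */
	struct iq_desc *cur_desc;  /* next descriptor to consume */
	uint32_t cidx;             /* consumer index */
	uint32_t size;
	uint8_t gen;               /* generation bit software expects; starts at 1 */
};

/* A descriptor is ready once hardware has written it with the currently
 * expected generation bit; stale entries from the previous lap carry the
 * opposite bit and are never mistaken for new ones. */
static bool iq_entry_ready(const struct ingress_queue *iq)
{
	return (iq->cur_desc->gen & 1) == iq->gen;
}

static void iq_advance(struct ingress_queue *iq)
{
	if (++iq->cidx == iq->size) {
		iq->cidx = 0;
		iq->cur_desc = iq->desc;
		iq->gen ^= 1;      /* wrap: expect the opposite generation */
	} else {
		iq->cur_desc++;
	}
}
```
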
cxgbe_filter.c
    132  unsigned int iq;                 in get_filter_steerq() (local)
    141  iq = 0;                          in get_filter_steerq()
    147  if (fs->iq < pi->n_rx_qsets)     in get_filter_steerq()
    149  fs->iq].rspq.abs_id;             in get_filter_steerq()
    151  iq = fs->iq;                     in get_filter_steerq()
    154  return iq;                       in get_filter_steerq()
    535  V_RSS_QUEUE(f->fs.iq) |          in mk_act_open_req6()
    604  unsigned int iq;                 in cxgbe_set_hash_filter() (local)
    623  f->fs.iq = iq;                   in cxgbe_set_hash_filter()
   1007  unsigned int fidx, iq;           in cxgbe_set_filter() (local)
    [all …]

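get_filter_steerq() resolves the user-supplied fs->iq into an ingress queue id: values inside the port's qset range map through to the queue's absolute id, larger values pass through unchanged. A sketch reconstructed from the visible fragments; the dirsteer flag, the struct names, and the pass-through interpretation are assumptions.

```c
struct rspq { unsigned int abs_id; };
struct rxq  { struct rspq rspq; };

struct port_info_sketch {
	struct rxq *rxq;            /* this port's rx queue sets */
	unsigned int n_rx_qsets;
};

struct filter_spec_sketch {
	unsigned int dirsteer;      /* nonzero: steer matched packets to fs->iq */
	unsigned int iq;            /* queue requested by the user */
};

/* Resolve the filter's ingress queue: 0 when not steering; a per-port qset
 * index maps through to the queue's absolute id; anything outside the
 * port's range is passed through as-is. */
static unsigned int get_filter_steerq(const struct port_info_sketch *pi,
				      const struct filter_spec_sketch *fs)
{
	unsigned int iq;

	if (!fs->dirsteer)
		iq = 0;
	else if (fs->iq < pi->n_rx_qsets)
		iq = pi->rxq[fs->iq].rspq.abs_id;
	else
		iq = fs->iq;

	return iq;
}
```
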
cxgbe_filter.h
    102  uint32_t iq:10; /* ingress queue */   (member)

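fs->iq is a 10-bit bitfield, so ingress queue ids are capped at 1023 and an unchecked assignment silently truncates. A small sketch of a range-checked setter for a field like this; the surrounding struct and its neighbor fields are illustrative.

```c
#include <stdint.h>

struct filter_tuple_sketch {
	uint32_t iq:10;    /* ingress queue: 10 bits => ids 0..1023 */
	uint32_t prio:1;   /* illustrative neighbor field */
	uint32_t rsvd:21;
};

/* Writing 1024 into a 10-bit field would silently truncate to 0,
 * so validate the range before assigning. */
static int set_filter_iq(struct filter_tuple_sketch *t, unsigned int iq)
{
	if (iq >= (1u << 10))
		return -1;     /* does not fit in the bitfield */
	t->iq = iq;
	return 0;
}
```
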
cxgbe_flow.c
    901  fs->iq = q->index;   in cxgbe_rtef_parse_actions()

/f-stack/dpdk/doc/guides/nics/
cxgbe.rst
    623  t5nex0: PCIe x8, 2 ports, 14 MSI-X interrupts, 31 eq, 13 iq