/dpdk/drivers/net/ark/

ark_ethdev_rx.c
      98  ark_mpu_set_producer(queue->mpu, queue->seed_index);  in eth_ark_rx_hw_setup()
     110  ark_mpu_set_producer(queue->mpu, queue->seed_index);  in eth_ark_rx_update_cons_index()
     191  if (queue->reserve_q == 0 || queue->paddress_q == 0) {  in eth_ark_dev_rx_queue_setup()
     273  mbuf = queue->reserve_q[cons_index & queue->queue_mask];  in eth_ark_recv_pkts()
     367  mbuf = queue->reserve_q[cons_index & queue->queue_mask];  in eth_ark_rx_jumbo()
     418  ark_mpu_set_producer(queue->mpu, queue->seed_index);  in eth_ark_rx_start_queue()
     455  uint32_t seed_m = queue->seed_index & queue->queue_mask;  in eth_ark_rx_seed_mbufs()
     472  queue->seed_index - queue->cons_index);  in eth_ark_rx_seed_mbufs()
     638  ark_mpu_dump(queue->mpu, name, queue->phys_qid);  in ark_ethdev_rx_dump()
     639  ark_mpu_dump_setup(queue->mpu, queue->phys_qid);  in ark_ethdev_rx_dump()
    [all …]
|
ark_ethdev_tx.c
      82  tx_idx = queue->prod_index & queue->queue_mask;  in eth_ark_tx_desc_fill()
      94  tx_idx = queue->prod_index & queue->queue_mask;  in eth_ark_tx_desc_fill()
     101  tx_idx = queue->prod_index & queue->queue_mask;  in eth_ark_tx_desc_fill()
     192  ark_mpu_set_producer(queue->mpu, queue->prod_index);  in eth_ark_xmit_pkts()
     207  (queue->prod_index - queue->free_index);  in eth_ark_tx_jumbo()
     277  queue->bufs =  in eth_ark_tx_queue_setup()
     283  if (queue->meta_q == 0 || queue->bufs == 0) {  in eth_ark_tx_queue_setup()
     365  queue->cons_index = queue->prod_index;  in eth_ark_tx_queue_release()
     383  while (queue->cons_index != queue->prod_index) {  in eth_ark_tx_queue_stop()
     422  meta = &queue->meta_q[queue->free_index & queue->queue_mask];  in free_completed_tx()
    [all …]
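Both ark files keep free-running producer/consumer indices and fold them into a power-of-two ring with queue_mask (queue->prod_index & queue->queue_mask, cons_index & queue->queue_mask). A minimal, self-contained sketch of that masked-index idiom; the struct layout, names, and sizes are illustrative, not the driver's actual definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Ring sized to a power of two so indices are masked, not taken
     * modulo the size. prod/cons grow without bound; unsigned
     * wraparound keeps prod - cons equal to the occupancy. */
    #define RING_SIZE 8u                    /* must be a power of two */
    #define RING_MASK (RING_SIZE - 1)

    struct ring {
        uint32_t prod_index;                /* next slot to fill */
        uint32_t cons_index;                /* next slot to drain */
        int slots[RING_SIZE];
    };

    static int ring_put(struct ring *r, int v)
    {
        if (r->prod_index - r->cons_index == RING_SIZE)
            return -1;                      /* full */
        r->slots[r->prod_index++ & RING_MASK] = v;
        return 0;
    }

    static int ring_get(struct ring *r, int *v)
    {
        if (r->cons_index == r->prod_index)
            return -1;                      /* empty */
        *v = r->slots[r->cons_index++ & RING_MASK];
        return 0;
    }

    int main(void)
    {
        struct ring r = { 0 };
        int v;

        ring_put(&r, 42);
        if (ring_get(&r, &v) == 0)
            printf("got %d\n", v);
        return 0;
    }

Letting the indices run free and comparing their difference avoids a separate empty/full flag: equal indices mean empty, a difference of RING_SIZE means full.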
|
/dpdk/drivers/event/opdl/

opdl_evdev_init.c
     299  struct opdl_queue *queue = &device->queue[q_id];  in opdl_add_deps()  local
     415  struct opdl_queue *queue = &device->queue[i];  in build_all_dependencies()  local
     472  struct opdl_queue *queue = &device->queue[i];  in check_queues_linked()  local
     709  struct opdl_queue *queue = &device->queue[port->queue_id];  in initialise_all_other_ports()  local
     733  queue->ports[queue->nb_ports] = port;  in initialise_all_other_ports()
     751  queue->ports[queue->nb_ports] = port;  in initialise_all_other_ports()
     778  queue->ports[queue->nb_ports] = port;  in initialise_all_other_ports()
     799  queue = &device->queue[next_qid];  in initialise_all_other_ports()
     808  queue->ports[queue->nb_ports] = port;  in initialise_all_other_ports()
     901  queue = &device->queue[port->queue_id];  in initialise_queue_zero_ports()
    [all …]
|
/dpdk/drivers/net/pfe/

pfe_hif_lib.c
     161  queue->base, queue->size);  in hif_lib_client_init_rx_buffers()
     235  queue->base, queue->size);  in hif_lib_client_init_tx_buffers()
     346  struct rx_queue_desc *desc = queue->base + queue->read_idx;  in hif_lib_event_handler_start()
     407  desc = queue->base + queue->read_idx;  in hif_lib_receive_pkt()
     467  queue->read_idx = (queue->read_idx + 1) &  in hif_lib_receive_pkt()
     497  struct tx_queue_desc *desc = queue->base + queue->write_idx;  in hif_lib_xmit_pkt()
     513  queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);  in hif_lib_xmit_pkt()
     523  struct tx_queue_desc *desc = queue->base + queue->read_idx;  in hif_lib_tx_get_next_complete()
     526  qno, queue->read_idx, queue->tx_pending);  in hif_lib_tx_get_next_complete()
     531  if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {  in hif_lib_tx_get_next_complete()
    [all …]
|
/dpdk/drivers/common/qat/

qat_qp.c
     212  snprintf(queue->memz_name, sizeof(queue->memz_name),  in qat_queue_create()
     216  queue->hw_bundle_number, queue->hw_queue_number);  in qat_queue_create()
     243  queue->head = 0;  in qat_queue_create()
     244  queue->tail = 0;  in qat_queue_create()
     257  queue->memz_name,  in qat_queue_create()
     350  queue->hw_queue_number, queue->memz_name);  in qat_queue_delete()
     355  memset(queue->base_addr, 0x7F, queue->queue_size);  in qat_queue_delete()
     621  tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);  in qat_enqueue_op_burst()
     878  ((uint8_t *)queue->base_addr + queue->head);  in qat_cq_dequeue_response()
     900  queue->head = adf_modulo(queue->head + queue->msg_size,  in qat_cq_dequeue_response()
    [all …]
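Unlike the element-indexed ark rings, the qat hits treat head and tail as byte offsets into the ring, stepping by msg_size and wrapping with adf_modulo(offset + msg_size, modulo_mask). A small sketch of that byte-offset variant; adf_modulo is re-declared locally as the plain mask its use implies, and the sizes are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define QUEUE_SIZE 4096u   /* ring size in bytes, power of two */
    #define MSG_SIZE     64u   /* bytes per message slot */

    /* Local stand-in for the driver's helper: wrap a byte offset. */
    static uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
    {
        return data & modulo_mask;
    }

    int main(void)
    {
        uint32_t modulo_mask = QUEUE_SIZE - 1;
        uint32_t tail = QUEUE_SIZE - MSG_SIZE;   /* last slot */

        /* Stepping past the end wraps back to offset 0. */
        tail = adf_modulo(tail + MSG_SIZE, modulo_mask);
        printf("tail after wrap: %u\n", tail);   /* prints 0 */
        return 0;
    }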
|
/dpdk/app/test-pmd/

util.c
     279  (unsigned int) queue);  in dump_pkt_burst()
     340  uint16_t queue;  in add_tx_md_callback()  local
     350  for (queue = 0; queue < dev_info.nb_tx_queues; queue++)  in add_tx_md_callback()
     352  ports[portid].tx_set_md_cb[queue] =  in add_tx_md_callback()
     361  uint16_t queue;  in remove_tx_md_callback()  local
     371  for (queue = 0; queue < dev_info.nb_tx_queues; queue++)  in remove_tx_md_callback()
     374  ports[portid].tx_set_md_cb[queue]);  in remove_tx_md_callback()
     396  uint16_t queue;  in add_tx_dynf_callback()  local
     406  for (queue = 0; queue < dev_info.nb_tx_queues; queue++)  in add_tx_dynf_callback()
     417  uint16_t queue;  in remove_tx_dynf_callback()  local
    [all …]
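The add/remove helpers above walk every configured Tx queue of a port and install or remove a callback on each, saving the returned handle per queue. A hedged sketch of the install half using the public ethdev API; tx_mark_pkts and the cb[] handle array are hypothetical stand-ins for testpmd's callback and its tx_set_md_cb storage:

    #include <rte_ethdev.h>

    /* Pass-through Tx callback: returns how many packets to transmit. */
    static uint16_t
    tx_mark_pkts(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
                 uint16_t nb_pkts, void *user_param)
    {
        (void)port; (void)queue; (void)pkts; (void)user_param;
        return nb_pkts;                 /* keep the whole burst */
    }

    static const struct rte_eth_rxtx_callback *cb[RTE_MAX_QUEUES_PER_PORT];

    static void
    add_tx_callbacks(uint16_t portid)
    {
        struct rte_eth_dev_info dev_info;
        uint16_t queue;

        if (rte_eth_dev_info_get(portid, &dev_info) != 0)
            return;
        /* One callback per Tx queue; the handle is kept so a later
         * rte_eth_remove_tx_callback() pass can undo this. */
        for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
            cb[queue] = rte_eth_add_tx_callback(portid, queue,
                                                tx_mark_pkts, NULL);
    }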
|
bpf_cmd.c
      59  uint16_t queue;  member
     109  rc = rte_bpf_eth_rx_elf_load(res->port, res->queue, &prm,  in cmd_operate_bpf_ld_parsed()
     113  rc = rte_bpf_eth_tx_elf_load(res->port, res->queue, &prm,  in cmd_operate_bpf_ld_parsed()
     129  TOKEN_NUM_INITIALIZER(struct cmd_bpf_ld_result, queue, RTE_UINT16);
     157  uint16_t queue;  member
     169  rte_bpf_eth_rx_unload(res->port, res->queue);  in cmd_operate_bpf_unld_parsed()
     171  rte_bpf_eth_tx_unload(res->port, res->queue);  in cmd_operate_bpf_unld_parsed()
     185  TOKEN_NUM_INITIALIZER(struct cmd_bpf_unld_result, queue, RTE_UINT16);
|
/dpdk/lib/pdump/

rte_pdump.c
      42  uint16_t queue;  member
     121  p = rte_pcapng_copy(port_id, queue,  in pdump_copy()
     145  pdump_rx(uint16_t port, uint16_t queue,  in pdump_rx()  argument
     158  pdump_tx(uint16_t port, uint16_t queue,  in pdump_tx()  argument
     178  qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;  in pdump_register_rx_callbacks()
     236  qid = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;  in pdump_register_tx_callbacks()
     321  queue = p->queue;  in set_pdump_rxtx_cbs()
     334  if (queue == RTE_PDUMP_ALL_QUEUES) {  in set_pdump_rxtx_cbs()
     367  end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;  in set_pdump_rxtx_cbs()
     377  end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_tx_q : queue + 1;  in set_pdump_rxtx_cbs()
    [all …]
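Lines 178 and 367/377 of rte_pdump.c show how a single-queue request and the RTE_PDUMP_ALL_QUEUES wildcard collapse into one [start_q, end_q) loop. A standalone sketch of just that expansion; the constant is defined locally so the example compiles without DPDK headers, and its value here is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for the library's "all queues" wildcard. */
    #define RTE_PDUMP_ALL_QUEUES UINT16_MAX

    int main(void)
    {
        uint16_t queue = RTE_PDUMP_ALL_QUEUES;   /* user's request */
        uint16_t nb_rx_q = 4;                    /* queues on the port */

        uint16_t start_q = (queue == RTE_PDUMP_ALL_QUEUES) ? 0 : queue;
        uint16_t end_q =
            (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;

        for (uint16_t q = start_q; q < end_q; q++)
            printf("register capture callback on queue %u\n", q);
        return 0;
    }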
|
rte_pdump.h
      79  rte_pdump_enable(uint16_t port, uint16_t queue, uint32_t flags,
     113  rte_pdump_enable_bpf(uint16_t port_id, uint16_t queue,
     137  rte_pdump_disable(uint16_t port, uint16_t queue, uint32_t flags);
     164  rte_pdump_enable_by_deviceid(char *device_id, uint16_t queue,
     200  rte_pdump_enable_bpf_by_deviceid(const char *device_id, uint16_t queue,
     226  rte_pdump_disable_by_deviceid(char *device_id, uint16_t queue,
|
/dpdk/lib/bpf/

bpf_pkt.c
      35  uint16_t queue;  member
     131  if (cbi->port == port && cbi->queue == queue)  in bpf_eth_cbh_find()
     143  cbi = bpf_eth_cbh_find(cbh, port, queue);  in bpf_eth_cbh_add()
     150  cbi->queue = queue;  in bpf_eth_cbh_add()
     475  bpf_eth_unload(cbh, port, queue);  in rte_bpf_eth_rx_unload()
     486  bpf_eth_unload(cbh, port, queue);  in rte_bpf_eth_tx_unload()
     506  queue >= RTE_MAX_QUEUES_PER_PORT)  in bpf_eth_elf_load()
     516  __func__, port, queue);  in bpf_eth_elf_load()
     528  __func__, port, queue);  in bpf_eth_elf_load()
     534  bc = bpf_eth_cbh_add(cbh, port, queue);  in bpf_eth_elf_load()
    [all …]
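bpf_pkt.c keeps one callback-info record per (port, queue) pair and finds or creates it with a linear scan (bpf_eth_cbh_find / bpf_eth_cbh_add). A simplified sketch of that find-or-add idiom; the fixed-size table, MAX_CBS, and struct cb_info are illustrative stand-ins for the library's own bookkeeping:

    #include <stdint.h>
    #include <stddef.h>

    #define MAX_CBS 64

    struct cb_info {
        uint16_t port;
        uint16_t queue;
        int in_use;
    };

    static struct cb_info cbs[MAX_CBS];

    /* Linear scan for the record registered for (port, queue). */
    static struct cb_info *
    cb_find(uint16_t port, uint16_t queue)
    {
        for (size_t i = 0; i < MAX_CBS; i++)
            if (cbs[i].in_use && cbs[i].port == port &&
                    cbs[i].queue == queue)
                return &cbs[i];
        return NULL;
    }

    /* Reuse an existing record, else claim a free slot. */
    static struct cb_info *
    cb_add(uint16_t port, uint16_t queue)
    {
        struct cb_info *cbi = cb_find(port, queue);

        if (cbi != NULL)
            return cbi;
        for (size_t i = 0; i < MAX_CBS; i++) {
            if (!cbs[i].in_use) {
                cbs[i] = (struct cb_info){ port, queue, 1 };
                return &cbs[i];
            }
        }
        return NULL;                    /* table full */
    }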
|
/dpdk/drivers/event/dlb2/pf/base/

dlb2_resource.c
    1442  return queue;  in dlb2_get_ldb_queue_from_id()
    1448  return queue;  in dlb2_get_ldb_queue_from_id()
    2593  idx = queue->id.vdev_id * max_ports + queue->id.virt_id;  in dlb2_domain_disable_dir_queue_write_perms()
    3683  if (!queue) {  in dlb2_verify_create_ldb_queue_args()
    3951  &queue);  in dlb2_hw_create_ldb_queue()
    5044  &queue);  in dlb2_hw_create_dir_queue()
    5248  if (!queue || !queue->configured) {  in dlb2_verify_map_qid_args()
    5558  if (!queue || !queue->configured) {  in dlb2_verify_unmap_qid_args()
    5651  &queue);  in dlb2_hw_unmap_qid()
    5981  if (!queue) {  in dlb2_hw_get_dir_queue_depth()
    [all …]
|
/dpdk/drivers/crypto/bcmfs/

bcmfs_qp.c
      43  if (queue == NULL) {  in bcmfs_queue_delete()
      48  queue_pair_id, queue->q_type, queue->memz_name);  in bcmfs_queue_delete()
      50  mz = rte_memzone_lookup(queue->memz_name);  in bcmfs_queue_delete()
      53  memset(queue->base_addr, 0x9B, queue->queue_size);  in bcmfs_queue_delete()
      57  status, queue->memz_name);  in bcmfs_queue_delete()
      60  queue->memz_name);  in bcmfs_queue_delete()
     128  queue->q_type = qtype;  in bcmfs_queue_create()
     133  snprintf(queue->memz_name, sizeof(queue->memz_name),  in bcmfs_queue_create()
     146  queue->base_phys_addr);  in bcmfs_queue_create()
     152  queue->base_phys_addr = qp_mz->iova;  in bcmfs_queue_create()
    [all …]
|
/dpdk/drivers/net/nfb/

nfb_tx.c
      16  if (txq->queue == NULL) {  in nfb_eth_tx_queue_start()
      21  ret = ndp_queue_start(txq->queue);  in nfb_eth_tx_queue_start()
      37  if (txq->queue == NULL) {  in nfb_eth_tx_queue_stop()
      42  ret = ndp_queue_stop(txq->queue);  in nfb_eth_tx_queue_stop()
      90  txq->queue = ndp_open_tx_queue(nfb, tx_queue_id);  in nfb_eth_tx_queue_init()
      91  if (txq->queue == NULL)  in nfb_eth_tx_queue_init()
     109  if (txq->queue != NULL) {  in nfb_eth_tx_queue_release()
     110  ndp_close_tx_queue(txq->queue);  in nfb_eth_tx_queue_release()
     112  txq->queue = NULL;  in nfb_eth_tx_queue_release()
|
nfb_rx.c
      21  if (rxq->queue == NULL) {  in nfb_eth_rx_queue_start()
      26  ret = ndp_queue_start(rxq->queue);  in nfb_eth_rx_queue_start()
      42  if (rxq->queue == NULL) {  in nfb_eth_rx_queue_stop()
      47  ret = ndp_queue_stop(rxq->queue);  in nfb_eth_rx_queue_stop()
     107  rxq->queue = ndp_open_rx_queue(nfb, rx_queue_id);  in nfb_eth_rx_queue_init()
     108  if (rxq->queue == NULL)  in nfb_eth_rx_queue_init()
     130  if (rxq->queue != NULL) {  in nfb_eth_rx_queue_release()
     131  ndp_close_rx_queue(rxq->queue);  in nfb_eth_rx_queue_release()
     133  rxq->queue = NULL;  in nfb_eth_rx_queue_release()
|
nfb_tx.h
      19  struct ndp_queue *queue; /* tx queue */  member
     123  nfb_eth_ndp_tx(void *queue,  in nfb_eth_ndp_tx()  argument
     129  struct ndp_tx_queue *ndp = queue;  in nfb_eth_ndp_tx()
     142  if (unlikely(ndp->queue == NULL)) {  in nfb_eth_ndp_tx()
     152  num_tx = ndp_tx_burst_get(ndp->queue, packets, nb_pkts);  in nfb_eth_ndp_tx()
     191  ndp_tx_burst_flush(ndp->queue);  in nfb_eth_ndp_tx()
|
/dpdk/examples/vhost/

virtio_net.c
      25  struct vhost_queue *queue;  in vs_vhost_net_setup()  local
      46  queue = &dev->queues[i];  in vs_vhost_net_setup()
      48  queue->last_used_idx = 0;  in vs_vhost_net_setup()
      49  queue->last_avail_idx = 0;  in vs_vhost_net_setup()
     190  struct vhost_queue *queue;  in vs_enqueue_pkts()  local
     198  vr = &queue->vr;  in vs_enqueue_pkts()
     370  struct vhost_queue *queue;  in vs_dequeue_pkts()  local
     379  vr = &queue->vr;  in vs_dequeue_pkts()
     382  queue->last_avail_idx;  in vs_dequeue_pkts()
     434  queue->last_avail_idx += i;  in vs_dequeue_pkts()
    [all …]
|
/dpdk/drivers/event/dsw/

dsw_evdev.c
     113  queue->schedule_type = RTE_SCHED_TYPE_ATOMIC;  in dsw_queue_setup()
     118  queue->schedule_type = conf->schedule_type;  in dsw_queue_setup()
     121  queue->num_serving_ports = 0;  in dsw_queue_setup()
     147  queue->serving_ports[queue->num_serving_ports] = port_id;  in queue_add_port()
     148  queue->num_serving_ports++;  in queue_add_port()
     157  if (queue->serving_ports[i] == port_id) {  in queue_remove_port()
     160  queue->serving_ports[i] =  in queue_remove_port()
     161  queue->serving_ports[last_idx];  in queue_remove_port()
     162  queue->num_serving_ports--;  in queue_remove_port()
     259  rte_rand() % queue->num_serving_ports;  in initial_flow_to_port_assignment()
    [all …]
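Lines 157-162 of dsw_evdev.c remove a port from serving_ports[] by overwriting it with the last element and shrinking the count: O(1) unordered removal once the entry is found. A minimal sketch, with illustrative sizes and types:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PORTS 8

    struct queue {
        uint8_t serving_ports[MAX_PORTS];
        uint16_t num_serving_ports;
    };

    /* Swap-with-last removal: order is not preserved, but no
     * elements need to be shifted. */
    static void
    queue_remove_port(struct queue *queue, uint8_t port_id)
    {
        uint16_t i;

        for (i = 0; i < queue->num_serving_ports; i++) {
            if (queue->serving_ports[i] == port_id) {
                uint16_t last_idx = queue->num_serving_ports - 1;

                queue->serving_ports[i] =
                    queue->serving_ports[last_idx];
                queue->num_serving_ports--;
                break;
            }
        }
    }

    int main(void)
    {
        struct queue q = {
            .serving_ports = { 3, 5, 7 },
            .num_serving_ports = 3,
        };

        queue_remove_port(&q, 5);
        /* Prints "ports left: 3 7" — 7 moved into 5's slot. */
        printf("ports left: %u %u\n",
               q.serving_ports[0], q.serving_ports[1]);
        return 0;
    }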
|
/dpdk/lib/power/

rte_power_pmd_mgmt.c
      36  union queue {  union
      46  union queue queue;  member
      69  queue_equal(const union queue *l, const union queue *r)  in queue_equal()
      75  queue_copy(union queue *dst, const union queue *src)  in queue_copy()
      86  if (queue_equal(&cur->queue, q))  in queue_list_find()
      93  queue_list_add(struct pmd_core_cfg *cfg, const union queue *q)  in queue_list_add()
     106  queue_copy(&qle->queue, q);  in queue_list_add()
     114  queue_list_take(struct pmd_core_cfg *cfg, const union queue *q)  in queue_list_take()
     138  const union queue *q = &qle->queue;  in get_monitor_addresses()
     402  const union queue *q = &entry->queue;  in cfg_queues_stopped()
    [all …]
|
/dpdk/drivers/net/pcap/

pcap_ethdev.c
     111  } queue[RTE_PMD_PCAP_MAX_QUEUES];  member
    1082  tx->queue[0].pcap = pcap;  in open_rx_tx_iface()
    1083  tx->queue[0].name = iface;  in open_rx_tx_iface()
    1084  tx->queue[0].type = key;  in open_rx_tx_iface()
    1135  pmd->queue[qid].pcap,  in open_rx_iface()
    1296  struct devargs_queue *queue = &rx_queues->queue[i];  in eth_from_pcaps_common()  local
    1305  struct devargs_queue *queue = &tx_queues->queue[i];  in eth_from_pcaps_common()  local
    1377  if (pcaps->queue[0].pcap)  in eth_release_pcaps()
    1386  if (dumpers->queue[i].pcap)  in eth_release_pcaps()
    1391  if (pcaps->queue[i].pcap)  in eth_release_pcaps()
    [all …]
|
/dpdk/doc/guides/eventdevs/

dlb2.rst
      56  queue's scheduling types are controlled by the event queue configuration.
      68  dictates the queue's scheduling type.
     112  whether it is a directed or load-balanced queue.
     222  used to enqueue to a directed queue.
     237  queue A.
     250  queue priority established at queue creation time.
     279  a. Setup queue(s). The reconfigured queue(s) lose their previous port links.
     285  configuration (including port->queue links) at this time.
     290  3. Setup queue or setup port
     304  - An LDB queue allocated N atomic buffer entries
    [all …]
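The guide's point that the queue's scheduling type is "controlled by the event queue configuration" comes down to the schedule_type field passed at queue setup. A hedged sketch using the generic eventdev API; the ids and counts are illustrative, and DLB2-specific tuning is not shown:

    #include <rte_eventdev.h>

    /* Configure one queue as atomic; ordered or parallel queues
     * differ only in the schedule_type value. */
    static int
    setup_atomic_queue(uint8_t dev_id, uint8_t queue_id)
    {
        struct rte_event_queue_conf conf = {
            .schedule_type = RTE_SCHED_TYPE_ATOMIC,
            .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
            .nb_atomic_flows = 1024,
            .nb_atomic_order_sequences = 1024,
        };

        return rte_event_queue_setup(dev_id, queue_id, &conf);
    }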
|
/dpdk/doc/guides/tools/

testeventdev.rst
     109  Enable queue priority.
     240  order queue test operation.
     247  to the ordered queue. The worker receives the events from ordered queue and
     248  forwards to atomic queue. Since the events from an ordered queue can be
     278  Example command to run order queue test:
     382  perf queue test operation.
     445  Example command to run perf queue test:
     479  ``all types queue`` eventdev scheme.
     509  perf all types queue test operation.
     616  pipeline queue test operation.
    [all …]
|
/dpdk/doc/guides/prog_guide/

event_ethernet_rx_adapter.rst
      79  queue is passed in using a ``struct rte_event_eth_rx_adapter_queue_conf``
     161  Getting Adapter queue config
     169  Getting and resetting Adapter queue stats
     174  This function reports queue level stats only when queue level event buffer is
     178  reset queue level stats when queue level event buffer is in use.
     190  has to enable Rx queue interrupts when configuring the ethernet device
     192  of zero when adding the Rx queue to the adapter.
     196  service function dequeues the port id and queue id from the ring buffer,
     197  invokes the ``rte_eth_rx_burst()`` to receive packets on the queue and
     233  port and queue identifier in the ``rte_event_vector::port`` and
    [all …]
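Lines 196-197 describe the adapter's service function: take a (port, queue) pair, poll it with rte_eth_rx_burst(), and enqueue the received packets as events. A much-simplified sketch of that loop body; BURST, the fixed target queue id, and the omission of the adapter's buffering, vectorization, and enqueue-retry handling are all simplifications:

    #include <rte_ethdev.h>
    #include <rte_eventdev.h>

    #define BURST 32

    static void
    poll_one_queue(uint8_t evdev, uint8_t ev_port, uint16_t eth_port,
                   uint16_t rx_queue, uint8_t target_queue_id)
    {
        struct rte_mbuf *pkts[BURST];
        struct rte_event ev[BURST];
        uint16_t n, i;

        /* Poll the ethdev queue, then wrap each mbuf in an event. */
        n = rte_eth_rx_burst(eth_port, rx_queue, pkts, BURST);
        for (i = 0; i < n; i++) {
            ev[i] = (struct rte_event){
                .queue_id = target_queue_id,
                .sched_type = RTE_SCHED_TYPE_ATOMIC,
                .event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER,
                .op = RTE_EVENT_OP_NEW,
            };
            ev[i].mbuf = pkts[i];
        }
        /* A real implementation retries events the device back-pressures. */
        rte_event_enqueue_burst(evdev, ev_port, ev, n);
    }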
|
/dpdk/drivers/event/dpaa2/

dpaa2_eventdev_selftest.c
      46  uint8_t queue;  member
     231  attr->queue = queue;  in update_event_and_validation_attr()
     241  ev->queue_id = queue;  in update_event_and_validation_attr()
     334  attr->queue, ev->queue_id);  in validate_event()
     660  uint8_t queue = (uint8_t)i;  in test_queue_to_port_single_link()  local
     670  queue /* queue */,  in test_queue_to_port_single_link()
     707  uint8_t queue, port;  in test_queue_to_port_multi_link()  local
     738  for (queue = 0; queue < nr_queues; queue++) {  in test_queue_to_port_multi_link()
     739  port = queue & 0x1;  in test_queue_to_port_multi_link()
     742  queue, port);  in test_queue_to_port_multi_link()
    [all …]
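The selftest's single-link case links each queue to one port, and the multi-link variant spreads queues across two ports via port = queue & 0x1. A sketch of one such link using the public eventdev API; the ids and priority here are illustrative:

    #include <rte_eventdev.h>

    /* Link a single queue to a port at normal priority. */
    static int
    link_queue_to_port(uint8_t dev_id, uint8_t port, uint8_t queue)
    {
        uint8_t queues[] = { queue };
        uint8_t priorities[] = { RTE_EVENT_DEV_PRIORITY_NORMAL };

        /* Returns the number of links established: 1 on success. */
        return rte_event_port_link(dev_id, port, queues, priorities, 1);
    }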
|
/dpdk/lib/node/

ethdev_tx.c
      19  uint16_t port, queue;  in ethdev_tx_node_process()  local
      24  queue = ctx->queue;  in ethdev_tx_node_process()
      26  count = rte_eth_tx_burst(port, queue, (struct rte_mbuf **)objs,  in ethdev_tx_node_process()
      56  ctx->queue = graph->id;  in ethdev_tx_node_init()
|
/dpdk/lib/eventdev/

eventdev_trace_points.c
      14  lib.eventdev.queue.setup)
      51  lib.eventdev.rx.adapter.queue.add)
      54  lib.eventdev.rx.adapter.queue.del)
      70  lib.eventdev.tx.adapter.queue.add)
      73  lib.eventdev.tx.adapter.queue.del)
     114  lib.eventdev.crypto.queue.add)
     117  lib.eventdev.crypto.queue.del)
|