
Searched for refs:nb_queues (results 1 – 25 of 85, sorted by relevance)


/f-stack/dpdk/drivers/crypto/octeontx2/
otx2_cryptodev.c
53 uint16_t nb_queues; in otx2_cpt_pci_probe() local
82 ret = otx2_cpt_available_queues_get(dev, &nb_queues); in otx2_cpt_pci_probe()
89 nb_queues = RTE_MIN(nb_queues, OTX2_CPT_MAX_QUEUES_PER_VF); in otx2_cpt_pci_probe()
91 if (nb_queues == 0) { in otx2_cpt_pci_probe()
96 vf->max_queues = nb_queues; in otx2_cpt_pci_probe()
otx2_cryptodev_mbox.c
47 uint16_t *nb_queues) in otx2_cpt_available_queues_get() argument
60 *nb_queues = rsp->cpt; in otx2_cpt_available_queues_get()
65 otx2_cpt_queues_attach(const struct rte_cryptodev *dev, uint8_t nb_queues) in otx2_cpt_queues_attach() argument
76 req->cptlfs = nb_queues; in otx2_cpt_queues_attach()
82 vf->nb_queues = nb_queues; in otx2_cpt_queues_attach()
101 vf->nb_queues = 0; in otx2_cpt_queues_detach()
122 for (i = 0; i < vf->nb_queues; i++) in otx2_cpt_msix_offsets_get()
otx2_cryptodev_mbox.h
16 uint16_t *nb_queues);
18 int otx2_cpt_queues_attach(const struct rte_cryptodev *dev, uint8_t nb_queues);
otx2_cryptodev_hw_access.c
55 for (i = 0; i < vf->nb_queues; i++) { in otx2_cpt_err_intr_unregister()
93 for (i = 0; i < vf->nb_queues; i++) { in otx2_cpt_err_intr_register()
101 for (i = 0; i < vf->nb_queues; i++) { in otx2_cpt_err_intr_register()
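The crypto VF probe above follows a discover/clamp/attach pattern. A minimal sketch pieced together from the matched lines (otx2_cpt_available_queues_get(), OTX2_CPT_MAX_QUEUES_PER_VF and the vf struct are driver internals shown in the listing, not public API):

    uint16_t nb_queues;
    int ret;

    /* ask the admin function how many CPT LFs this VF can use */
    ret = otx2_cpt_available_queues_get(dev, &nb_queues);
    if (ret)
        return ret;

    /* clamp to the per-VF ceiling; nothing available is a probe failure */
    nb_queues = RTE_MIN(nb_queues, OTX2_CPT_MAX_QUEUES_PER_VF);
    if (nb_queues == 0)
        return -ENODEV;

    vf->max_queues = nb_queues;  /* consumed later by queue-pair setup */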
/f-stack/dpdk/drivers/raw/ioat/
dpdk_idxd_cfg.py
47 nb_queues = min(queues, max_queues)
48 if queues > nb_queues:
52 for q in range(nb_queues):
59 "size": int(max_tokens / nb_queues)})
63 for q in range(nb_queues):
/f-stack/dpdk/lib/librte_eventdev/
rte_event_eth_tx_adapter.c
102 uint32_t nb_queues; member
138 uint16_t nb_queues; member
697 if (txa->nb_queues) { in txa_service_adapter_free()
699 txa->nb_queues); in txa_service_adapter_free()
725 int nb_queues; in txa_service_queue_add() local
732 nb_queues -= tdi->nb_queues; in txa_service_queue_add()
791 tdi->nb_queues++; in txa_service_queue_add()
792 txa->nb_queues++; in txa_service_queue_add()
822 nb_queues = txa->nb_queues; in txa_service_queue_del()
823 if (nb_queues == 0) in txa_service_queue_del()
[all …]
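txa_service_queue_add() and txa_service_queue_del() above maintain the adapter's nb_queues counters behind the public TX adapter API. A minimal caller-side sketch, assuming an adapter already created with rte_event_eth_tx_adapter_create() (adapter_id and eth_port_id are placeholders):

    #include <rte_event_eth_tx_adapter.h>

    /* register one ethdev TX queue with the adapter; -1 registers all queues */
    int ret = rte_event_eth_tx_adapter_queue_add(adapter_id, eth_port_id, -1);
    if (ret < 0)
        return ret;  /* adapter rejected the queue */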
rte_eventdev.c
222 dev->data->nb_queues = 0; in rte_event_dev_queue_config()
224 "nb_queues %u", nb_queues); in rte_event_dev_queue_config()
231 for (i = nb_queues; i < old_nb_queues; i++) in rte_event_dev_queue_config()
237 sizeof(queues_cfg[0]) * nb_queues, in rte_event_dev_queue_config()
241 " nb_queues %u", nb_queues); in rte_event_dev_queue_config()
246 if (nb_queues > old_nb_queues) { in rte_event_dev_queue_config()
259 dev->data->nb_queues = nb_queues; in rte_event_dev_queue_config()
839 *attr_value = dev->data->nb_queues; in rte_event_dev_attr_get()
968 nb_links = dev->data->nb_queues; in rte_event_port_link()
979 if (queues[i] >= dev->data->nb_queues) { in rte_event_port_link()
[all …]
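rte_event_dev_queue_config() above grows or shrinks the per-device queues_cfg[] array whenever an application reconfigures with a different nb_queues, and rte_event_port_link() rejects queue ids at or above dev->data->nb_queues. A minimal caller-side sketch of where that count originates (event device 0 assumed present):

    #include <rte_eventdev.h>

    struct rte_event_dev_info info;
    struct rte_event_dev_config cfg = {0};

    rte_event_dev_info_get(0, &info);
    cfg.nb_event_queues = info.max_event_queues;  /* becomes dev->data->nb_queues */
    cfg.nb_event_ports = info.max_event_ports;
    cfg.nb_events_limit = info.max_num_events;
    cfg.nb_event_queue_flows = info.max_event_queue_flows;
    cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
    cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
    cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
    if (rte_event_dev_configure(0, &cfg) < 0)
        return -1;  /* configuration rejected by the PMD */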
/f-stack/dpdk/examples/ioat/
ioatfwd.c
38 uint16_t nb_queues; member
81 static uint16_t nb_queues = 1; variable
194 "Rx Queues = %d, ", nb_queues); in print_stats()
271 for (j = 0; j < cfg.ports[i].nb_queues; j++) { in print_stats()
390 for (i = 0; i < rx_config->nb_queues; i++) { in ioat_rx_port()
448 for (i = 0; i < tx_config->nb_queues; i++) { in ioat_tx_port()
646 nb_queues = atoi(optarg); in ioat_parse_args()
647 if (nb_queues == 0 || nb_queues > MAX_RX_QUEUES_COUNT) { in ioat_parse_args()
748 for (j = 0; j < cfg.ports[i].nb_queues; j++) { in assign_rawdevs()
855 for (i = 0; i < nb_queues; i++) { in port_init()
[all …]
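In this sample application, nb_queues comes from a command-line option (bounded by MAX_RX_QUEUES_COUNT above), and ioat_rx_port() polls every configured queue of a port. A minimal sketch of that RX loop, assuming the per-port config struct shown above and a MAX_PKT_BURST constant:

    struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
    uint16_t i, nb_rx;

    for (i = 0; i < rx_config->nb_queues; i++) {
        /* poll one RX queue of the port owned by this config */
        nb_rx = rte_eth_rx_burst(rx_config->rxtx_port, i,
                                 pkts_burst, MAX_PKT_BURST);
        if (nb_rx == 0)
            continue;
        /* ... enqueue the burst to the ioat/rawdev copy stage ... */
    }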
/f-stack/dpdk/app/test/
test_eventdev.c
783 int ret, nb_queues, i; in test_eventdev_link() local
795 nb_queues = queue_count; in test_eventdev_link()
796 for (i = 0; i < nb_queues; i++) { in test_eventdev_link()
802 priorities, nb_queues); in test_eventdev_link()
811 int ret, nb_queues, i; in test_eventdev_unlink() local
822 nb_queues = queue_count; in test_eventdev_unlink()
823 for (i = 0; i < nb_queues; i++) in test_eventdev_unlink()
853 for (i = 0; i < nb_queues; i++) in test_eventdev_link_get()
864 for (i = 0; i < nb_queues; i++) { in test_eventdev_link_get()
869 nb_queues); in test_eventdev_link_get()
[all …]
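The link/unlink tests above build explicit queue and priority arrays for rte_event_port_link(). A minimal sketch of the same call pattern (dev_id and port_id assumed already set up):

    uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
    uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
    int i;

    for (i = 0; i < nb_queues; i++) {
        queues[i] = i;
        priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
    }
    /* returns the number of links established; fewer than nb_queues is an error */
    if (rte_event_port_link(dev_id, port_id, queues, priorities,
                            nb_queues) != nb_queues)
        return -1;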
/f-stack/dpdk/drivers/net/af_packet/
rte_eth_af_packet.c
71 unsigned nb_queues; member
366 for (i = 0; i < internal->nb_queues; i++) { in eth_stats_reset()
371 for (i = 0; i < internal->nb_queues; i++) { in eth_stats_reset()
658 nb_queues, in rte_pmd_init_internals()
662 nb_queues, in rte_pmd_init_internals()
669 for (q = 0; q < nb_queues; q++) { in rte_pmd_init_internals()
721 for (q = 0; q < nb_queues; q++) { in rte_pmd_init_internals()
855 (*internals)->nb_queues = nb_queues; in rte_pmd_init_internals()
859 data->nb_rx_queues = (uint16_t)nb_queues; in rte_pmd_init_internals()
860 data->nb_tx_queues = (uint16_t)nb_queues; in rte_pmd_init_internals()
[all …]
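In the af_packet PMD, a single nb_queues value is reported as both nb_rx_queues and nb_tx_queues (the two assignments above), i.e. the PMD only supports symmetric queue pairs. A hedged usage sketch; qpairs is the devarg name as I recall it for this PMD, so verify against the driver documentation:

    #include <rte_bus_vdev.h>

    /* create a 4-queue-pair af_packet vdev bound to eth0 */
    if (rte_vdev_init("net_af_packet0", "iface=eth0,qpairs=4") < 0)
        return -1;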
/f-stack/dpdk/app/test-eventdev/
test_pipeline_common.c
29 pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues) in pipeline_opt_dump() argument
35 evt_dump("nb_evdev_queues", "%d", nb_queues); in pipeline_opt_dump()
106 pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues) in pipeline_opt_check() argument
144 if (nb_queues > EVT_MAX_QUEUES) { in pipeline_opt_check()
169 uint8_t nb_queues = 1; in pipeline_ethdev_setup() local
240 if (rte_eth_dev_configure(i, nb_queues, nb_queues, in pipeline_ethdev_setup()
273 uint8_t *queue_arr, uint8_t nb_queues, in pipeline_event_port_setup() argument
297 nb_queues) != nb_queues) in pipeline_event_port_setup()
test_pipeline_atq.c
292 int nb_queues; in pipeline_atq_eventdev_setup() local
303 nb_queues = rte_eth_dev_count_avail(); in pipeline_atq_eventdev_setup()
310 tx_evqueue_id[prod] = nb_queues; in pipeline_atq_eventdev_setup()
311 nb_queues++; in pipeline_atq_eventdev_setup()
317 ret = evt_configure_eventdev(opt, nb_queues, nb_ports); in pipeline_atq_eventdev_setup()
329 for (queue = 0; queue < nb_queues; queue++) { in pipeline_atq_eventdev_setup()
365 ret = pipeline_event_port_setup(test, opt, NULL, nb_queues, in pipeline_atq_eventdev_setup()
test_perf_atq.c
158 uint8_t nb_queues; in perf_atq_eventdev_setup() local
169 nb_queues = atq_nb_event_queues(opt); in perf_atq_eventdev_setup()
178 ret = evt_configure_eventdev(opt, nb_queues, nb_ports); in perf_atq_eventdev_setup()
191 for (queue = 0; queue < nb_queues; queue++) { in perf_atq_eventdev_setup()
209 ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues, in perf_atq_eventdev_setup()
test_perf_queue.c
160 int nb_queues; in perf_queue_eventdev_setup() local
170 nb_queues = perf_queue_nb_event_queues(opt); in perf_queue_eventdev_setup()
179 ret = evt_configure_eventdev(opt, nb_queues, nb_ports); in perf_queue_eventdev_setup()
191 for (queue = 0; queue < nb_queues; queue++) { in perf_queue_eventdev_setup()
225 nb_queues, &p_conf); in perf_queue_eventdev_setup()
test_perf_common.h
143 int perf_opt_check(struct evt_options *opt, uint64_t nb_queues);
148 uint8_t stride, uint8_t nb_queues,
153 void perf_opt_dump(struct evt_options *opt, uint8_t nb_queues);
test_pipeline_queue.c
316 int nb_queues; in pipeline_queue_eventdev_setup() local
328 nb_queues = rte_eth_dev_count_avail() * (nb_stages); in pipeline_queue_eventdev_setup()
331 nb_queues += rte_eth_dev_count_avail(); in pipeline_queue_eventdev_setup()
337 ret = evt_configure_eventdev(opt, nb_queues, nb_ports); in pipeline_queue_eventdev_setup()
349 for (queue = 0; queue < nb_queues; queue++) { in pipeline_queue_eventdev_setup()
390 ret = pipeline_event_port_setup(test, opt, NULL, nb_queues, in pipeline_queue_eventdev_setup()
test_pipeline_common.h
154 int pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues);
163 uint8_t *queue_arr, uint8_t nb_queues,
167 void pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues);
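These perf/pipeline tests all share one shape: derive nb_queues (from rte_eth_dev_count_avail() or the option parser), configure the eventdev, then set each queue up in a loop. A minimal sketch of that loop (dev_id assumed already configured for nb_queues queues):

    struct rte_event_queue_conf q_conf;
    uint8_t queue;

    rte_event_queue_default_conf_get(dev_id, 0, &q_conf);
    for (queue = 0; queue < nb_queues; queue++) {
        if (rte_event_queue_setup(dev_id, queue, &q_conf) < 0)
            return -1;  /* queue setup rejected */
    }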
/f-stack/dpdk/drivers/regex/octeontx2/
otx2_regexdev_mbox.c
12 uint16_t *nb_queues) in otx2_ree_available_queues_get() argument
28 *nb_queues = rsp->ree0; in otx2_ree_available_queues_get()
30 *nb_queues = rsp->ree1; in otx2_ree_available_queues_get()
35 otx2_ree_queues_attach(const struct rte_regexdev *dev, uint8_t nb_queues) in otx2_ree_queues_attach() argument
47 req->reelfs = nb_queues; in otx2_ree_queues_attach()
54 vf->nb_queues = nb_queues; in otx2_ree_queues_attach()
75 vf->nb_queues = 0; in otx2_ree_queues_detach()
97 for (i = 0; i < vf->nb_queues; i++) { in otx2_ree_msix_offsets_get()
otx2_regexdev_mbox.h
11 uint16_t *nb_queues);
13 int otx2_ree_queues_attach(const struct rte_regexdev *dev, uint8_t nb_queues);
otx2_regexdev_hw_access.c
51 for (i = 0; i < vf->nb_queues; i++) { in otx2_ree_err_intr_unregister()
90 for (i = 0; i < vf->nb_queues; i++) { in otx2_ree_err_intr_register()
98 for (i = 0; i < vf->nb_queues; i++) { in otx2_ree_err_intr_register()
/f-stack/dpdk/examples/l2fwd-event/
l2fwd_event_generic.c
82 evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues; in l2fwd_event_device_setup_generic()
147 evt_rsrc->evq.nb_queues - 1); in l2fwd_event_port_setup_generic()
148 if (ret != (evt_rsrc->evq.nb_queues - 1)) in l2fwd_event_port_setup_generic()
177 evt_rsrc->evq.nb_queues); in l2fwd_event_queue_setup_generic()
188 for (event_q_id = 0; event_q_id < (evt_rsrc->evq.nb_queues - 1); in l2fwd_event_queue_setup_generic()
246 if (i < evt_rsrc->evq.nb_queues) in l2fwd_rx_tx_adapter_setup_generic()
303 evt_rsrc->evq.nb_queues - 1], in l2fwd_rx_tx_adapter_setup_generic()
l2fwd_event_internal_port.c
81 evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues; in l2fwd_event_device_setup_internal_port()
185 evt_rsrc->evq.nb_queues); in l2fwd_event_queue_setup_internal_port()
189 for (event_q_id = 0; event_q_id < evt_rsrc->evq.nb_queues; in l2fwd_event_queue_setup_internal_port()
253 if (q_id < evt_rsrc->evq.nb_queues) in l2fwd_rx_tx_adapter_setup_internal_port()
/f-stack/dpdk/examples/l3fwd/
l3fwd_event_generic.c
71 evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues; in l3fwd_event_device_setup_generic()
136 evt_rsrc->evq.nb_queues - 1); in l3fwd_event_port_setup_generic()
137 if (ret != (evt_rsrc->evq.nb_queues - 1)) in l3fwd_event_port_setup_generic()
165 evt_rsrc->evq.nb_queues); in l3fwd_event_queue_setup_generic()
176 for (event_q_id = 0; event_q_id < (evt_rsrc->evq.nb_queues - 1); in l3fwd_event_queue_setup_generic()
234 if (i < evt_rsrc->evq.nb_queues) in l3fwd_rx_tx_adapter_setup_generic()
291 evt_rsrc->evq.nb_queues - 1], in l3fwd_rx_tx_adapter_setup_generic()
l3fwd_event_internal_port.c
71 evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues; in l3fwd_event_device_setup_internal_port()
174 evt_rsrc->evq.nb_queues); in l3fwd_event_queue_setup_internal_port()
178 for (event_q_id = 0; event_q_id < evt_rsrc->evq.nb_queues; in l3fwd_event_queue_setup_internal_port()
242 if (q_id < evt_rsrc->evq.nb_queues) in l3fwd_rx_tx_adapter_setup_internal_port()
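The generic-mode l2fwd/l3fwd examples above reserve the last event queue for the TX adapter, which is why worker ports link to nb_queues - 1 queues and the code checks ret != nb_queues - 1. A minimal sketch of that link step (event_d_id, port_id and MAX_EVENT_QUEUES are placeholders):

    uint8_t ev_qid[MAX_EVENT_QUEUES];
    uint8_t i;
    int ret;

    for (i = 0; i < evt_rsrc->evq.nb_queues - 1; i++)
        ev_qid[i] = i;
    /* NULL priorities gives every link RTE_EVENT_DEV_PRIORITY_NORMAL */
    ret = rte_event_port_link(event_d_id, port_id, ev_qid, NULL,
                              evt_rsrc->evq.nb_queues - 1);
    if (ret != (int)(evt_rsrc->evq.nb_queues - 1))
        return -1;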
/f-stack/lib/
ff_dpdk_if.c
410 int nb_queues = pconf->nb_lcores; in init_dispatch_ring() local
578 uint16_t nb_queues = pconf->nb_lcores; in init_port_start() local
603 nb_queues, in init_port_start()
609 nb_queues, in init_port_start()
721 ret = rte_eth_dev_configure(port_id, nb_queues, nb_queues, &port_conf); in init_port_start()
734 for (q = 0; q < nb_queues; q++) { in init_port_start()
794 if (nb_queues > 1) { in init_port_start()
899 int nb_queues = pconf->nb_lcores; in create_tcp_flow() local
902 for (i = 0, j = 0; i < nb_queues; ++i) in create_tcp_flow()
1338 for(j = 0; j < nb_queues; ++j) {
[all …]
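f-stack sizes its data path off the lcore count: init_port_start() above configures one RX/TX queue pair per worker lcore and, when nb_queues > 1, adds RSS plus the rte_flow rules from create_tcp_flow() so each lcore only sees its own connections. A minimal sketch of the configure-then-setup sequence (port_conf, mbuf_pool, RX_DESC and TX_DESC stand in for f-stack's own values):

    #include <rte_ethdev.h>

    uint16_t q, nb_queues = pconf->nb_lcores;  /* one queue pair per lcore */
    int ret;

    ret = rte_eth_dev_configure(port_id, nb_queues, nb_queues, &port_conf);
    if (ret < 0)
        return ret;

    for (q = 0; q < nb_queues; q++) {
        ret = rte_eth_rx_queue_setup(port_id, q, RX_DESC,
                rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
        if (ret < 0)
            return ret;
        ret = rte_eth_tx_queue_setup(port_id, q, TX_DESC,
                rte_eth_dev_socket_id(port_id), NULL);
        if (ret < 0)
            return ret;
    }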
