Lines matching refs:opt (references to opt) in DPDK's app/test-eventdev/test_perf_queue.c, grouped by function; elided source lines are marked with "...".
perf_queue_nb_event_queues():

    perf_queue_nb_event_queues(struct evt_options *opt)
    {
        uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
            rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
        return nb_prod * opt->nb_stages;
    }
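The queue count is producers times stages: each producer owns a chain of one queue per pipeline stage. A worked example with hypothetical values:

    /* 3 producer lcores active in opt->plcores, opt->nb_stages == 2:
     * nb_queues = 3 * 2 = 6.
     * With EVT_PROD_TYPE_ETH_RX_ADPTR and 4 ethdevs present,
     * rte_eth_dev_count_avail() == 4: nb_queues = 4 * 2 = 8.
     */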
worker_wrapper() (fragment; locals read from opt):

    struct evt_options *opt = w->t->opt;
    ...
    const int fwd_latency = opt->fwd_latency;
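In the full source these locals feed a one-time dispatch that picks a specialized worker loop, so each loop body compiles without per-event branches. A sketch of that pattern, assuming worker variants perf_queue_worker()/perf_queue_worker_burst() and an evt_has_burst_mode() helper (names assumed from the harness, not present in this listing):

    const bool burst = evt_has_burst_mode(w->dev_id);  /* assumed helper */

    if (!burst && !fwd_latency)
        return perf_queue_worker(arg, 0);
    else if (!burst && fwd_latency)
        return perf_queue_worker(arg, 1);
    else if (burst && !fwd_latency)
        return perf_queue_worker_burst(arg, 0);
    else
        return perf_queue_worker_burst(arg, 1);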
perf_queue_launch_lcores():

    perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
    {
        return perf_launch_lcores(test, opt, worker_wrapper);
    }
perf_queue_eventdev_setup():

    perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
    {
        ...
        int nb_stages = opt->nb_stages;
        ...
        nb_ports = evt_nr_active_lcores(opt->wlcores);
        nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
            opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 0 :
            evt_nr_active_lcores(opt->plcores);

        nb_queues = perf_queue_nb_event_queues(opt);
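Ports are one per worker lcore, plus one per producer lcore, except when an Rx or timer adapter produces the events (adapter-driven producers need no application-owned event port). Continuing the hypothetical values from above:

    /* 4 worker lcores, 3 producer lcores, synthetic producers
     * (EVT_PROD_TYPE_SYNT): nb_ports = 4 + 3 = 7.
     * With the Rx or timer adapter producing: nb_ports = 4 + 0 = 4.
     */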
    ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
    if (ret) {
        evt_err("failed to get eventdev info %d", opt->dev_id);
        ...
    }
    ...
    ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
    if (ret) {
        evt_err("failed to configure eventdev %d", opt->dev_id);
        ...
    }
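evt_configure_eventdev() is harness glue around rte_event_dev_configure(). A minimal sketch of the equivalent direct call, sizing everything from the dev_info limits just queried (the field choices are illustrative, not the harness's exact values):

    struct rte_event_dev_config config = {
        .nb_event_queues = nb_queues,
        .nb_event_ports = nb_ports,
        .nb_events_limit = dev_info.max_num_events,
        .nb_event_queue_flows = dev_info.max_event_queue_flows,
        .nb_event_port_dequeue_depth =
            dev_info.max_event_port_dequeue_depth,
        .nb_event_port_enqueue_depth =
            dev_info.max_event_port_enqueue_depth,
        .dequeue_timeout_ns = dev_info.min_dequeue_timeout_ns,
    };
    ret = rte_event_dev_configure(opt->dev_id, &config);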
    struct rte_event_queue_conf q_conf = {
        .nb_atomic_flows = opt->nb_flows,
        .nb_atomic_order_sequences = opt->nb_flows,
    };
    ...
    q_conf.schedule_type =
        (opt->sched_type_list[queue % nb_stages]);

    if (opt->q_priority) {
        ...
    }
    ...
    ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
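queue % nb_stages maps each queue to its pipeline stage, so every stage keeps the schedule type given on the command line (opt->sched_type_list). With queue priorities enabled (opt->q_priority), the harness gives later-stage queues higher priority so in-flight events drain ahead of new injections. A sketch of such a loop; the priority step shown is illustrative, not the harness's exact formula:

    for (queue = 0; queue < nb_queues; queue++) {
        q_conf.schedule_type = opt->sched_type_list[queue % nb_stages];
        if (opt->q_priority)
            /* RTE_EVENT_DEV_PRIORITY_HIGHEST is 0, LOWEST is 255:
             * subtracting raises priority for later stages */
            q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
                (queue % nb_stages) *
                (RTE_EVENT_DEV_PRIORITY_LOWEST / nb_stages);
        ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
        if (ret)
            return ret;
    }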
    if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
        opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
    ...
        .dequeue_depth = opt->wkr_deq_dep,
    ...
    ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
            ...);
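perf_event_dev_port_setup() is shared harness code: it sets up one port per worker and then the producer ports, and the stride argument spaces the producers' injection queues one stage-chain apart so each producer feeds the first queue of its own chain. A sketch of the worker half against the raw API, using the clamped dequeue depth (loop shape and the p_conf values are assumptions):

    struct rte_event_port_conf p_conf = {
        .new_event_threshold = dev_info.max_num_events,
        .dequeue_depth = opt->wkr_deq_dep,
        .enqueue_depth = dev_info.max_event_port_enqueue_depth,
    };
    for (port = 0; port < nb_workers; port++) {
        ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
        if (ret)
            return ret;
        /* NULL queue list with 0 count links the port to all queues */
        ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
        if (ret < 0)
            return ret;
    }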
    if (!evt_has_distributed_sched(opt->dev_id)) {
        uint32_t service_id;
        rte_event_dev_service_id_get(opt->dev_id, &service_id);
        ...
    }
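Event devices without RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (e.g. the software eventdev) run their scheduler as a service function that needs a service lcore. A minimal sketch of wiring that up by hand (the lcore choice is hypothetical):

    uint32_t service_id;
    const uint32_t slcore = 1;  /* hypothetical spare lcore */

    rte_event_dev_service_id_get(opt->dev_id, &service_id);
    rte_service_lcore_add(slcore);                    /* make it a service lcore */
    rte_service_map_lcore_set(service_id, slcore, 1); /* map the service to it */
    rte_service_runstate_set(service_id, 1);          /* allow the service to run */
    rte_service_lcore_start(slcore);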
    ret = rte_event_dev_start(opt->dev_id);
    if (ret) {
        evt_err("failed to start eventdev %d", opt->dev_id);
        ...
    }

    if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
        ...
    } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
        for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
            ...
        }
    } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
        ...
    }
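Once the event device is running, the producer side is started per type: the ethdevs plus their Rx adapters, each timer adapter, or the crypto adapter. For the Rx-adapter case the start sequence looks roughly like this (a sketch; it assumes one adapter per ethdev, created earlier during setup, with adapter ids mirroring port ids):

    uint16_t port_id;

    RTE_ETH_FOREACH_DEV(port_id) {
        ret = rte_eth_dev_start(port_id);  /* start the NIC */
        if (ret)
            return ret;
        ret = rte_event_eth_rx_adapter_start(port_id);
        if (ret)
            return ret;
    }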
perf_queue_opt_dump():

    perf_queue_opt_dump(struct evt_options *opt)
    {
        evt_dump_fwd_latency(opt);
        perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
    }
perf_queue_opt_check():

    perf_queue_opt_check(struct evt_options *opt)
    {
        return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
    }
perf_queue_capability_check():

    perf_queue_capability_check(struct evt_options *opt)
    {
        ...
        rte_event_dev_info_get(opt->dev_id, &dev_info);
        if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
            dev_info.max_event_ports < perf_nb_event_ports(opt)) {
            ...
                perf_queue_nb_event_queues(opt),
                ...
                perf_nb_event_ports(opt), dev_info.max_event_ports);
            ...
        }
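The elided call logs required vs. available counts; the check fails when the device cannot supply the computed layout, and the test is not run. Continuing the hypothetical numbers from above:

    /* 6 queues and 7 ports required: a device advertising
     * max_event_queues == 4 (or max_event_ports == 4) fails the
     * capability check, so the perf_queue test is skipped.
     */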