Lines matching refs:opt (each entry: source line number, code fragment, enclosing function, and whether opt is a function argument or a local)

10 atq_nb_event_queues(struct evt_options *opt)  in atq_nb_event_queues()  argument
13 return opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? in atq_nb_event_queues()
14 rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores); in atq_nb_event_queues()
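
atq_nb_event_queues() sizes the queue count from the producer type: one all-types queue per ethdev port when the Rx adapter produces, otherwise one per producer lcore. A minimal reconstruction from the fragments above; struct evt_options, EVT_PROD_TYPE_ETH_RX_ADPTR and evt_nr_active_lcores() belong to the test-eventdev harness and are assumed here, while rte_eth_dev_count_avail() is the public ethdev call.

#include <rte_ethdev.h>

static inline int
atq_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers (ethdev ports or producer lcores) */
	return opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
		rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
}
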
129 struct evt_options *opt = w->t->opt; in worker_wrapper() local
132 const int fwd_latency = opt->fwd_latency; in worker_wrapper()
148 perf_atq_launch_lcores(struct evt_test *test, struct evt_options *opt) in perf_atq_launch_lcores() argument
150 return perf_launch_lcores(test, opt, worker_wrapper); in perf_atq_launch_lcores()
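
worker_wrapper() is the lcore entry point handed to perf_launch_lcores(); it reads the options through w->t->opt and uses fwd_latency to pick the datapath loop. A sketch of that selection under stated assumptions: only fwd_latency is visible in the listing, the worker function names are hypothetical, and the real wrapper may also key off other options such as burst mode.

static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;            /* harness per-worker context (assumed layout) */
	struct evt_options *opt = w->t->opt;
	const int fwd_latency = opt->fwd_latency;

	/* hypothetical worker loops; names are illustrative only */
	return fwd_latency ? atq_worker_fwd_latency(arg) : atq_worker(arg);
}

static int
perf_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}
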
154 perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt) in perf_atq_eventdev_setup() argument
164 nb_ports = evt_nr_active_lcores(opt->wlcores); in perf_atq_eventdev_setup()
165 nb_ports += (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR || in perf_atq_eventdev_setup()
166 opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) ? 0 : in perf_atq_eventdev_setup()
167 evt_nr_active_lcores(opt->plcores); in perf_atq_eventdev_setup()
169 nb_queues = atq_nb_event_queues(opt); in perf_atq_eventdev_setup()
172 ret = rte_event_dev_info_get(opt->dev_id, &dev_info); in perf_atq_eventdev_setup()
174 evt_err("failed to get eventdev info %d", opt->dev_id); in perf_atq_eventdev_setup()
178 ret = evt_configure_eventdev(opt, nb_queues, nb_ports); in perf_atq_eventdev_setup()
180 evt_err("failed to configure eventdev %d", opt->dev_id); in perf_atq_eventdev_setup()
187 .nb_atomic_flows = opt->nb_flows, in perf_atq_eventdev_setup()
188 .nb_atomic_order_sequences = opt->nb_flows, in perf_atq_eventdev_setup()
192 ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf); in perf_atq_eventdev_setup()
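
The setup path then configures every queue as an all-types (ATQ) queue whose atomic/ordered resources are sized by opt->nb_flows. A sketch of the configuration implied by the fragments above; the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag and the priority value are assumptions not shown in the listing.

#include <rte_eventdev.h>

	struct rte_event_queue_conf q_conf = {
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,        /* assumed */
		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES, /* assumed: ATQ == all-types queue */
		.nb_atomic_flows = opt->nb_flows,
		.nb_atomic_order_sequences = opt->nb_flows,
	};
	uint8_t queue;
	int ret;

	for (queue = 0; queue < nb_queues; queue++) {
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}
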
199 if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth) in perf_atq_eventdev_setup()
200 opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth; in perf_atq_eventdev_setup()
204 .dequeue_depth = opt->wkr_deq_dep, in perf_atq_eventdev_setup()
209 ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues, in perf_atq_eventdev_setup()
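
Worker ports are set up with a dequeue depth clamped to what the device reports. A sketch of this step; only the clamp, dequeue_depth and the stride-1 call to perf_event_dev_port_setup() are visible above, so the enqueue depth, new-event threshold and the final &p_conf argument are plausible assumptions.

	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	const struct rte_event_port_conf p_conf = {
		.dequeue_depth = opt->wkr_deq_dep,
		.enqueue_depth = dev_info.max_event_port_enqueue_depth, /* assumed */
		.new_event_threshold = dev_info.max_num_events,          /* assumed */
	};

	ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues, &p_conf);
	if (ret)
		return ret;
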
214 if (!evt_has_distributed_sched(opt->dev_id)) { in perf_atq_eventdev_setup()
216 rte_event_dev_service_id_get(opt->dev_id, &service_id); in perf_atq_eventdev_setup()
224 ret = rte_event_dev_start(opt->dev_id); in perf_atq_eventdev_setup()
226 evt_err("failed to start eventdev %d", opt->dev_id); in perf_atq_eventdev_setup()
230 if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) { in perf_atq_eventdev_setup()
247 } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) { in perf_atq_eventdev_setup()
248 for (prod = 0; prod < opt->nb_timer_adptrs; prod++) { in perf_atq_eventdev_setup()
263 perf_atq_opt_dump(struct evt_options *opt) in perf_atq_opt_dump() argument
265 perf_opt_dump(opt, atq_nb_event_queues(opt)); in perf_atq_opt_dump()
269 perf_atq_opt_check(struct evt_options *opt) in perf_atq_opt_check() argument
271 return perf_opt_check(opt, atq_nb_event_queues(opt)); in perf_atq_opt_check()
275 perf_atq_capability_check(struct evt_options *opt) in perf_atq_capability_check() argument
279 rte_event_dev_info_get(opt->dev_id, &dev_info); in perf_atq_capability_check()
280 if (dev_info.max_event_queues < atq_nb_event_queues(opt) || in perf_atq_capability_check()
281 dev_info.max_event_ports < perf_nb_event_ports(opt)) { in perf_atq_capability_check()
283 atq_nb_event_queues(opt), dev_info.max_event_queues, in perf_atq_capability_check()
284 perf_nb_event_ports(opt), dev_info.max_event_ports); in perf_atq_capability_check()
286 if (!evt_has_all_types_queue(opt->dev_id)) in perf_atq_capability_check()
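
perf_atq_capability_check() verifies that the device offers enough queues and ports for this test and that it supports all-types queues (presumably the RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES capability behind evt_has_all_types_queue()). A sketch assembled from the fragments above; the error text and the return convention beyond the listed arguments are assumptions.

static int
perf_atq_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < atq_nb_event_queues(opt) ||
	    dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			atq_nb_event_queues(opt), dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return -1;
	}

	if (!evt_has_all_types_queue(opt->dev_id)) {
		evt_err("device does not support all-types queues");
		return -1;
	}

	return 0;
}
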