Lines matching refs:opt, grouped by function:

perf_queue_nb_event_queues() (opt as argument):
    perf_queue_nb_event_queues(struct evt_options *opt)
    uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
            rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
    return nb_prod * opt->nb_stages;
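
The queue count scales as producers times stages: each producer feeds its own chain of nb_stages queues. A minimal standalone sketch of that arithmetic, using hypothetical values (two producer lcores, three stages) rather than anything taken from the test:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t nb_prod = 2;   /* hypothetical: two producer lcores */
            uint8_t nb_stages = 3; /* hypothetical: three pipeline stages */
            /* One chain of nb_stages queues per producer. */
            printf("nb_queues = %u\n", (unsigned)(nb_prod * nb_stages)); /* prints 6 */
            return 0;
    }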

worker_wrapper() (opt as local):
    struct evt_options *opt = w->t->opt;
    const int fwd_latency = opt->fwd_latency;

perf_queue_launch_lcores() (opt as argument):
    perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
    return perf_launch_lcores(test, opt, worker_wrapper);

perf_queue_eventdev_setup() (opt as argument):
    perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
    int nb_stages = opt->nb_stages;
    nb_ports = evt_nr_active_lcores(opt->wlcores);
    nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
            opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 0 :
            evt_nr_active_lcores(opt->plcores);
    nb_queues = perf_queue_nb_event_queues(opt);
    ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
    evt_err("failed to get eventdev info %d", opt->dev_id);
    ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
    evt_err("failed to configure eventdev %d", opt->dev_id);
    .nb_atomic_flows = opt->nb_flows,
    .nb_atomic_order_sequences = opt->nb_flows,
    (opt->sched_type_list[queue % nb_stages]);
    if (opt->q_priority) {
    ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
    if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
            opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
    .dequeue_depth = opt->wkr_deq_dep,
    ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
    if (!evt_has_distributed_sched(opt->dev_id)) {
    rte_event_dev_service_id_get(opt->dev_id, &service_id);
    ret = rte_event_dev_start(opt->dev_id);
    evt_err("failed to start eventdev %d", opt->dev_id);
    if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
    } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
    for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
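
The setup fragments above follow a common eventdev pattern: query device limits, configure the device, then create one queue per pipeline stage with that stage's schedule type. A condensed sketch of the queue-setup loop, assuming the public rte_eventdev API; the helper name setup_stage_queues and its parameters are illustrative, not the test's own:

    #include <rte_eventdev.h>

    /* Hypothetical helper: one queue per stage of every producer chain;
     * stage i shares a schedule type across chains (queue % nb_stages). */
    static int
    setup_stage_queues(uint8_t dev_id, uint8_t nb_queues, uint8_t nb_stages,
                       const uint8_t *sched_type_list, uint32_t nb_flows)
    {
            struct rte_event_queue_conf q_conf = {
                    .nb_atomic_flows = nb_flows,
                    .nb_atomic_order_sequences = nb_flows,
                    .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
            };
            uint8_t queue;
            int ret;

            for (queue = 0; queue < nb_queues; queue++) {
                    q_conf.schedule_type = sched_type_list[queue % nb_stages];
                    ret = rte_event_queue_setup(dev_id, queue, &q_conf);
                    if (ret)
                            return ret;
            }
            return 0;
    }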

perf_queue_opt_dump() (opt as argument):
    perf_queue_opt_dump(struct evt_options *opt)
    evt_dump_fwd_latency(opt);
    perf_opt_dump(opt, perf_queue_nb_event_queues(opt));

perf_queue_opt_check() (opt as argument):
    perf_queue_opt_check(struct evt_options *opt)
    return perf_opt_check(opt, perf_queue_nb_event_queues(opt));

perf_queue_capability_check() (opt as argument):
    perf_queue_capability_check(struct evt_options *opt)
    rte_event_dev_info_get(opt->dev_id, &dev_info);
    if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
        dev_info.max_event_ports < perf_nb_event_ports(opt)) {
    perf_queue_nb_event_queues(opt),
    perf_nb_event_ports(opt), dev_info.max_event_ports);
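
The capability check compares what the test needs against what the device advertises. A sketch of that gate under the same rte_eventdev API; the name check_queue_port_limits and the need_* parameters are hypothetical:

    #include <errno.h>
    #include <rte_eventdev.h>

    /* Hypothetical helper mirroring the check above: fail early if the
     * device cannot supply enough event queues or ports. */
    static int
    check_queue_port_limits(uint8_t dev_id, uint32_t need_queues,
                            uint32_t need_ports)
    {
            struct rte_event_dev_info dev_info;
            int ret = rte_event_dev_info_get(dev_id, &dev_info);

            if (ret)
                    return ret;
            if (dev_info.max_event_queues < need_queues ||
                dev_info.max_event_ports < need_ports)
                    return -ENOTSUP;
            return 0;
    }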