Lines Matching refs:info
Cross-reference hits for the symbol "info": one hit per source line, each shown with its file line number, the matched code, and the enclosing function ("local" marks a local variable declaration, "argument" a function parameter). The code appears to be the DPDK eventdev unit test (app/test/test_eventdev.c).

72 struct rte_event_dev_info info; in test_eventdev_info_get() local
75 ret = rte_event_dev_info_get(TEST_DEV_ID, &info); in test_eventdev_info_get()
77 TEST_ASSERT(info.max_event_ports > 0, in test_eventdev_info_get()
78 "Not enough event ports %d", info.max_event_ports); in test_eventdev_info_get()
79 TEST_ASSERT(info.max_event_queues > 0, in test_eventdev_info_get()
80 "Not enough event queues %d", info.max_event_queues); in test_eventdev_info_get()
86 struct rte_event_dev_info *info) in devconf_set_default_sane_values() argument
89 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns; in devconf_set_default_sane_values()
90 dev_conf->nb_event_ports = info->max_event_ports; in devconf_set_default_sane_values()
91 dev_conf->nb_event_queues = info->max_event_queues; in devconf_set_default_sane_values()
92 dev_conf->nb_event_queue_flows = info->max_event_queue_flows; in devconf_set_default_sane_values()
94 info->max_event_port_dequeue_depth; in devconf_set_default_sane_values()
96 info->max_event_port_enqueue_depth; in devconf_set_default_sane_values()
98 info->max_event_port_enqueue_depth; in devconf_set_default_sane_values()
100 info->max_num_events; in devconf_set_default_sane_values()
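File lines 94, 96, 98 and 100 are continuation lines: they are the right-hand sides of multi-line assignments whose left-hand sides do not mention info and therefore do not appear as hits. Reconstructing the helper from the hits gives roughly the following sketch; the memset and the left-hand-side field names on the continuation lines are inferred from struct rte_event_dev_config, not shown in the listing. Note that the enqueue-depth assignment apparently appears twice, at file lines 96 and 98.

    #include <string.h>
    #include <rte_eventdev.h>

    /* Fill an rte_event_dev_config with the most permissive values the
     * device advertises: a "sane default" starting point for the tests. */
    static void
    devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
                                    struct rte_event_dev_info *info)
    {
            memset(dev_conf, 0, sizeof(*dev_conf));          /* assumed, not a hit */
            dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
            dev_conf->nb_event_ports = info->max_event_ports;
            dev_conf->nb_event_queues = info->max_event_queues;
            dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
            dev_conf->nb_event_port_dequeue_depth =
                            info->max_event_port_dequeue_depth;   /* file line 94 */
            dev_conf->nb_event_port_enqueue_depth =
                            info->max_event_port_enqueue_depth;   /* file lines 96, 98 */
            dev_conf->nb_events_limit = info->max_num_events;      /* file line 100 */
    }
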
105 struct rte_event_dev_info *info, in test_ethdev_config_run() argument
107 struct rte_event_dev_info *info)) in test_ethdev_config_run()
109 devconf_set_default_sane_values(dev_conf, info); in test_ethdev_config_run()
110 fn(dev_conf, info); in test_ethdev_config_run()
116 struct rte_event_dev_info *info) in max_dequeue_limit() argument
118 dev_conf->dequeue_timeout_ns = info->max_dequeue_timeout_ns + 1; in max_dequeue_limit()
123 struct rte_event_dev_info *info) in max_events_limit() argument
125 dev_conf->nb_events_limit = info->max_num_events + 1; in max_events_limit()
130 struct rte_event_dev_info *info) in max_event_ports() argument
132 dev_conf->nb_event_ports = info->max_event_ports + 1; in max_event_ports()
137 struct rte_event_dev_info *info) in max_event_queues() argument
139 dev_conf->nb_event_queues = info->max_event_queues + 1; in max_event_queues()
144 struct rte_event_dev_info *info) in max_event_queue_flows() argument
146 dev_conf->nb_event_queue_flows = info->max_event_queue_flows + 1; in max_event_queue_flows()
151 struct rte_event_dev_info *info) in max_event_port_dequeue_depth() argument
154 info->max_event_port_dequeue_depth + 1; in max_event_port_dequeue_depth()
159 struct rte_event_dev_info *info) in max_event_port_enqueue_depth() argument
162 info->max_event_port_enqueue_depth + 1; in max_event_port_enqueue_depth()
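File lines 105-162 are the negative-configuration machinery: test_ethdev_config_run() builds the sane default configuration and then hands it to a callback that pushes exactly one field one past the limit reported in rte_event_dev_info; max_dequeue_limit, max_events_limit, max_event_ports, max_event_queues, max_event_queue_flows and the two port-depth callbacks each break a different field. A sketch of that pattern, reusing the reconstructed helper above; the rte_event_dev_configure() call that is expected to fail happens in the caller and is an assumption here, since it does not mention info and is not among the hits.

    /* Callback type: corrupt one field of an otherwise sane configuration. */
    typedef void (*limit_breaker_t)(struct rte_event_dev_config *dev_conf,
                                    struct rte_event_dev_info *info);

    /* One of the seven breakers shown in the listing (file line 125). */
    static void
    max_events_limit(struct rte_event_dev_config *dev_conf,
                     struct rte_event_dev_info *info)
    {
            dev_conf->nb_events_limit = info->max_num_events + 1;
    }

    /* Build the sane config, let the callback break one limit, then expect
     * rte_event_dev_configure() to reject the result. */
    static int
    run_limit_case(uint8_t dev_id, struct rte_event_dev_info *info,
                   limit_breaker_t fn)
    {
            struct rte_event_dev_config dev_conf;

            devconf_set_default_sane_values(&dev_conf, info);   /* file line 109 */
            fn(&dev_conf, info);                                 /* file line 110 */
            return rte_event_dev_configure(dev_id, &dev_conf) == 0 ? -1 : 0;
    }
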
171 struct rte_event_dev_info info; in test_eventdev_configure() local
175 ret = rte_event_dev_info_get(TEST_DEV_ID, &info); in test_eventdev_configure()
180 test_ethdev_config_run(&dev_conf, &info, max_dequeue_limit), in test_eventdev_configure()
183 test_ethdev_config_run(&dev_conf, &info, max_events_limit), in test_eventdev_configure()
186 test_ethdev_config_run(&dev_conf, &info, max_event_ports), in test_eventdev_configure()
189 test_ethdev_config_run(&dev_conf, &info, max_event_queues), in test_eventdev_configure()
192 test_ethdev_config_run(&dev_conf, &info, max_event_queue_flows), in test_eventdev_configure()
195 if (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) { in test_eventdev_configure()
197 test_ethdev_config_run(&dev_conf, &info, in test_eventdev_configure()
201 test_ethdev_config_run(&dev_conf, &info, in test_eventdev_configure()
207 devconf_set_default_sane_values(&dev_conf, &info); in test_eventdev_configure()
212 devconf_set_default_sane_values(&dev_conf, &info); in test_eventdev_configure()
213 dev_conf.nb_event_ports = RTE_MAX(info.max_event_ports/2, 1); in test_eventdev_configure()
214 dev_conf.nb_event_queues = RTE_MAX(info.max_event_queues/2, 1); in test_eventdev_configure()
219 devconf_set_default_sane_values(&dev_conf, &info); in test_eventdev_configure()
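File lines 171-219 are the positive side of test_eventdev_configure(): after the over-limit callbacks above (the two port-depth ones run only when the device reports RTE_EVENT_DEV_CAP_BURST_MODE, file line 195), the device is configured with the full sane defaults, then reconfigured with half the ports and queues but at least one of each (file lines 213-214), and then configured with the defaults once more. A sketch of those positive cases, under the same assumptions as above:

    #include <rte_common.h>     /* RTE_MAX */

    /* Configure with the sane defaults, shrink to half the ports/queues
     * (at least one of each), then restore the defaults. */
    static int
    configure_positive_cases(uint8_t dev_id)
    {
            struct rte_event_dev_info info;
            struct rte_event_dev_config dev_conf;

            if (rte_event_dev_info_get(dev_id, &info) != 0)      /* file line 175 */
                    return -1;

            devconf_set_default_sane_values(&dev_conf, &info);   /* file line 207 */
            if (rte_event_dev_configure(dev_id, &dev_conf) != 0)
                    return -1;

            dev_conf.nb_event_ports = RTE_MAX(info.max_event_ports / 2, 1);
            dev_conf.nb_event_queues = RTE_MAX(info.max_event_queues / 2, 1);
            if (rte_event_dev_configure(dev_id, &dev_conf) != 0) /* lines 213-214 */
                    return -1;

            devconf_set_default_sane_values(&dev_conf, &info);   /* file line 219 */
            return rte_event_dev_configure(dev_id, &dev_conf);
    }
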
232 struct rte_event_dev_info info; in eventdev_configure_setup() local
234 ret = rte_event_dev_info_get(TEST_DEV_ID, &info); in eventdev_configure_setup()
236 devconf_set_default_sane_values(&dev_conf, &info); in eventdev_configure_setup()
270 struct rte_event_dev_info info; in test_eventdev_queue_setup() local
273 ret = rte_event_dev_info_get(TEST_DEV_ID, &info); in test_eventdev_queue_setup()
280 qconf.nb_atomic_flows = info.max_event_queue_flows + 1; in test_eventdev_queue_setup()
284 qconf.nb_atomic_flows = info.max_event_queue_flows; in test_eventdev_queue_setup()
286 qconf.nb_atomic_order_sequences = info.max_event_queue_flows + 1; in test_eventdev_queue_setup()
290 ret = rte_event_queue_setup(TEST_DEV_ID, info.max_event_queues, in test_eventdev_queue_setup()
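File lines 270-290 are the queue-setup bounds checks; they assume the device has already been configured with the sane defaults, which is what eventdev_configure_setup() (file lines 232-236) does. The default queue configuration presumably comes from rte_event_queue_default_conf_get(), which does not mention info and so is not among the hits; its use below is an assumption.

    /* Over-limit flow counts and an out-of-range queue id must be rejected. */
    static int
    queue_setup_bounds(uint8_t dev_id, const struct rte_event_dev_info *info)
    {
            struct rte_event_queue_conf qconf;

            if (rte_event_queue_default_conf_get(dev_id, 0, &qconf) != 0)
                    return -1;

            qconf.nb_atomic_flows = info->max_event_queue_flows + 1;  /* line 280 */
            if (rte_event_queue_setup(dev_id, 0, &qconf) == 0)
                    return -1;                          /* should have failed */

            qconf.nb_atomic_flows = info->max_event_queue_flows;      /* line 284 */
            qconf.nb_atomic_order_sequences =
                            info->max_event_queue_flows + 1;          /* line 286 */
            if (rte_event_queue_setup(dev_id, 0, &qconf) == 0)
                    return -1;                          /* should have failed */

            /* Valid queue ids are 0..max_event_queues-1. */
            qconf.nb_atomic_order_sequences = info->max_event_queue_flows;
            if (rte_event_queue_setup(dev_id, info->max_event_queues,  /* line 290 */
                                      &qconf) == 0)
                    return -1;                          /* should have failed */
            return 0;
    }
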
317 struct rte_event_dev_info info; in test_eventdev_queue_count() local
319 ret = rte_event_dev_info_get(TEST_DEV_ID, &info); in test_eventdev_queue_count()
326 TEST_ASSERT_EQUAL(queue_count, info.max_event_queues, in test_eventdev_queue_count()
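File line 326 compares a queried queue count against info.max_event_queues after the device has been configured with nb_event_queues = max_event_queues. The query itself does not mention info and is not shown; it is presumably the device attribute API, sketched here:

    /* After configuring with nb_event_queues = max_event_queues, the device
     * should report exactly that many queues. */
    static int
    queue_count_check(uint8_t dev_id, const struct rte_event_dev_info *info)
    {
            uint32_t queue_count;

            if (rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
                                       &queue_count) != 0)
                    return -1;
            return queue_count == info->max_event_queues ? 0 : -1;  /* line 326 */
    }
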
336 struct rte_event_dev_info info; in test_eventdev_queue_attr_priority() local
340 ret = rte_event_dev_info_get(TEST_DEV_ID, &info); in test_eventdev_queue_attr_priority()
364 if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS) in test_eventdev_queue_attr_priority()
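File line 364 gates the priority read-back on RTE_EVENT_DEV_CAP_QUEUE_QOS: only a device with per-queue QoS is required to return the exact priority that was set at queue-setup time. A sketch of that check; the attribute getter and its id are an assumption, since they do not appear among the hits.

    /* Read back a queue's priority and insist on an exact match only when
     * the device advertises per-queue QoS. */
    static int
    queue_priority_check(uint8_t dev_id, uint8_t queue_id, uint8_t expected,
                         const struct rte_event_dev_info *info)
    {
            uint32_t prio;

            if (rte_event_queue_attr_get(dev_id, queue_id,
                                         RTE_EVENT_QUEUE_ATTR_PRIORITY,
                                         &prio) != 0)
                    return -1;
            if (info->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)  /* line 364 */
                    return prio == expected ? 0 : -1;
            return 0;   /* without QUEUE_QOS the read-back value is best effort */
    }
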
381 struct rte_event_dev_info info; in test_eventdev_queue_attr_nb_atomic_flows() local
385 ret = rte_event_dev_info_get(TEST_DEV_ID, &info); in test_eventdev_queue_attr_nb_atomic_flows()
424 struct rte_event_dev_info info; in test_eventdev_queue_attr_nb_atomic_order_sequences() local
428 ret = rte_event_dev_info_get(TEST_DEV_ID, &info); in test_eventdev_queue_attr_nb_atomic_order_sequences()
469 struct rte_event_dev_info info; in test_eventdev_queue_attr_event_queue_cfg() local
473 ret = rte_event_dev_info_get(TEST_DEV_ID, &info); in test_eventdev_queue_attr_event_queue_cfg()
536 struct rte_event_dev_info info; in test_eventdev_port_setup() local
539 ret = rte_event_dev_info_get(TEST_DEV_ID, &info); in test_eventdev_port_setup()
545 pconf.new_event_threshold = info.max_num_events + 1; in test_eventdev_port_setup()
549 pconf.new_event_threshold = info.max_num_events; in test_eventdev_port_setup()
550 pconf.dequeue_depth = info.max_event_port_dequeue_depth + 1; in test_eventdev_port_setup()
554 pconf.dequeue_depth = info.max_event_port_dequeue_depth; in test_eventdev_port_setup()
555 pconf.enqueue_depth = info.max_event_port_enqueue_depth + 1; in test_eventdev_port_setup()
559 if (!(info.event_dev_cap & in test_eventdev_port_setup()
561 pconf.enqueue_depth = info.max_event_port_enqueue_depth; in test_eventdev_port_setup()
568 ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports, in test_eventdev_port_setup()
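File lines 536-568 mirror the queue test for ports: new_event_threshold, dequeue_depth and enqueue_depth are each pushed one past the advertised maximum (file lines 545, 550, 555) and each setup attempt must fail, followed by an out-of-range port id (file line 568). The capability test that starts at file line 559 is cut off by the match, so it is left out of the sketch; the default-conf call is again an assumption.

    /* Over-limit port configuration fields and an out-of-range port id
     * must all be rejected by rte_event_port_setup(). */
    static int
    port_setup_bounds(uint8_t dev_id, const struct rte_event_dev_info *info)
    {
            struct rte_event_port_conf pconf;

            if (rte_event_port_default_conf_get(dev_id, 0, &pconf) != 0)
                    return -1;

            pconf.new_event_threshold = info->max_num_events + 1;     /* line 545 */
            if (rte_event_port_setup(dev_id, 0, &pconf) == 0)
                    return -1;                          /* should have failed */

            pconf.new_event_threshold = info->max_num_events;         /* line 549 */
            pconf.dequeue_depth = info->max_event_port_dequeue_depth + 1; /* 550 */
            if (rte_event_port_setup(dev_id, 0, &pconf) == 0)
                    return -1;                          /* should have failed */

            pconf.dequeue_depth = info->max_event_port_dequeue_depth; /* line 554 */
            pconf.enqueue_depth = info->max_event_port_enqueue_depth + 1; /* 555 */
            if (rte_event_port_setup(dev_id, 0, &pconf) == 0)
                    return -1;                          /* should have failed */

            /* Valid port ids are 0..max_event_ports-1. */
            pconf.enqueue_depth = info->max_event_port_enqueue_depth; /* line 561 */
            if (rte_event_port_setup(dev_id, info->max_event_ports,   /* line 568 */
                                     &pconf) == 0)
                    return -1;                          /* should have failed */
            return 0;
    }
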
595 struct rte_event_dev_info info; in test_eventdev_port_attr_dequeue_depth() local
598 ret = rte_event_dev_info_get(TEST_DEV_ID, &info); in test_eventdev_port_attr_dequeue_depth()
620 struct rte_event_dev_info info; in test_eventdev_port_attr_enqueue_depth() local
623 ret = rte_event_dev_info_get(TEST_DEV_ID, &info); in test_eventdev_port_attr_enqueue_depth()
645 struct rte_event_dev_info info; in test_eventdev_port_attr_new_event_threshold() local
648 ret = rte_event_dev_info_get(TEST_DEV_ID, &info); in test_eventdev_port_attr_new_event_threshold()
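File lines 595-648 cover the three port-attribute read-back tests (dequeue depth, enqueue depth, new-event threshold). Each configures a port with a known value and reads the attribute back; the getters themselves do not mention info, so only the local declarations and info-get calls show up as hits. One of the three is sketched below; the attribute id name (RTE_EVENT_PORT_ATTR_DEQ_DEPTH) is quoted from memory of the rte_eventdev attribute API and is an assumption here.

    /* Read back the dequeue depth of a configured port and compare it with
     * the value requested at rte_event_port_setup() time. */
    static int
    port_dequeue_depth_check(uint8_t dev_id, uint8_t port_id,
                             const struct rte_event_port_conf *pconf)
    {
            uint32_t depth;

            if (rte_event_port_attr_get(dev_id, port_id,
                                        RTE_EVENT_PORT_ATTR_DEQ_DEPTH,
                                        &depth) != 0)
                    return -1;
            return depth == pconf->dequeue_depth ? 0 : -1;
    }
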
670 struct rte_event_dev_info info; in test_eventdev_port_count() local
672 ret = rte_event_dev_info_get(TEST_DEV_ID, &info); in test_eventdev_port_count()
679 TEST_ASSERT_EQUAL(port_count, info.max_event_ports, "Wrong port count"); in test_eventdev_port_count()
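File line 679 is the port-count counterpart of the queue-count check at file line 326: after configuring with nb_event_ports = max_event_ports, the reported port count must match. Again the query is presumably the device attribute API; a sketch under that assumption:

    /* After configuring with nb_event_ports = max_event_ports, the device
     * should report exactly that many ports. */
    static int
    port_count_check(uint8_t dev_id, const struct rte_event_dev_info *info)
    {
            uint32_t port_count;

            if (rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_PORT_COUNT,
                                       &port_count) != 0)
                    return -1;
            return port_count == info->max_event_ports ? 0 : -1;    /* line 679 */
    }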