xref: /dpdk/app/test-eventdev/test_perf_queue.c (revision f0b68c0b)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

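/*
 * Example invocation (illustrative only; lcore ids and option values are
 * arbitrary, see the guide above for the full option list):
 *   dpdk-test-eventdev -l 0-4 -s 0x2 --vdev=event_sw0 -- --test=perf_queue \
 *       --plcores=2 --wlcores=3,4 --stlist=a,o --nb_flows=64
 */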
static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers * number of stages */
	uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
		rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
	return nb_prod * opt->nb_stages;
}

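/*
 * If the event sits in the first queue of its pipeline (queue_id is a
 * multiple of nb_stages), stamp the perf_elt with the current TSC so the
 * last stage can compute forward latency.
 */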
static __rte_always_inline void
mark_fwd_latency(struct rte_event *const ev,
		const uint8_t nb_stages)
{
	if (unlikely((ev->queue_id % nb_stages) == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}

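/*
 * Advance the event to the next queue in the pipeline, using the scheduling
 * type configured for that stage.
 */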
static __rte_always_inline void
fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	ev->queue_id++;
	ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

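/*
 * Non-burst worker: dequeue a single event at a time, forward it through
 * nb_stages queues and account it at the last stage (optionally with
 * forward latency measurement).
 */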
static int
perf_queue_worker(void *arg, const int enable_fwd_latency)
{
	uint16_t enq = 0, deq = 0;
	struct rte_event ev;
	PERF_WORKER_INIT;

	while (t->done == false) {
		deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!deq) {
			rte_pause();
			continue;
		}

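		/*
		 * Crypto producer: the event carries a completed crypto op.
		 * Replace the event payload with the op's source (or dest)
		 * mbuf, free the op, and drop the event if the op failed.
		 */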
		if (prod_crypto_type &&
		    (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
			struct rte_crypto_op *op = ev.event_ptr;

			if (op->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
				if (op->sym->m_dst == NULL)
					ev.event_ptr = op->sym->m_src;
				else
					ev.event_ptr = op->sym->m_dst;
				rte_crypto_op_free(op);
			} else {
				rte_crypto_op_free(op);
				continue;
			}
		}

		/* first q in pipeline, mark timestamp to compute fwd latency */
		if (enable_fwd_latency && !prod_timer_type)
			mark_fwd_latency(&ev, nb_stages);

		/* last stage in pipeline */
		if (unlikely((ev.queue_id % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool,
					&ev, w, bufs, sz, cnt);
		} else {
			fwd_event(&ev, sched_type_list, nb_stages);
			do {
				enq = rte_event_enqueue_burst(dev, port,
							      &ev, 1);
			} while (!enq && !t->done);
		}
	}

	perf_worker_cleanup(pool, dev, port, &ev, enq, deq);

	return 0;
}

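/*
 * Burst worker: same pipeline as perf_queue_worker(), but dequeues and
 * enqueues up to BURST_SIZE events per call to amortize per-event overhead.
 */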
static int
perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
{
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];
	uint16_t enq = 0, nb_rx = 0;
	PERF_WORKER_INIT;
	uint16_t i;

	while (t->done == false) {
		nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (prod_crypto_type &&
			    (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
				struct rte_crypto_op *op = ev[i].event_ptr;

				if (op->status ==
				    RTE_CRYPTO_OP_STATUS_SUCCESS) {
					if (op->sym->m_dst == NULL)
						ev[i].event_ptr =
							op->sym->m_src;
					else
						ev[i].event_ptr =
							op->sym->m_dst;
					rte_crypto_op_free(op);
				} else {
					rte_crypto_op_free(op);
					continue;
				}
			}

			if (enable_fwd_latency && !prod_timer_type) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first queue in pipeline.
				 * mark time stamp to compute fwd latency
				 */
				mark_fwd_latency(&ev[i], nb_stages);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].queue_id % nb_stages) ==
						 laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				fwd_event(&ev[i], sched_type_list, nb_stages);
			}
		}

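		/* Enqueue the processed burst back; retry until the whole
		 * burst is accepted or the test is marked done.
		 */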
		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx && !t->done) {
			enq += rte_event_enqueue_burst(dev, port,
							ev + enq, nb_rx - enq);
		}
	}

	perf_worker_cleanup(pool, dev, port, ev, enq, nb_rx);

	return 0;
}

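/*
 * Resolve the worker variant once at launch so that each fast path is
 * compiled with constant burst/latency predicates.
 */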
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_queue_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_queue_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_queue_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_queue_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

static int
perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

static int
perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t queue;
	int nb_stages = opt->nb_stages;
	int ret;
	int nb_ports;
	int nb_queues;
	uint16_t prod;
	struct rte_event_dev_info dev_info;
	struct test_perf *t = evt_test_priv(test);

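	/* Workers get one port each; producer lcores need ports only when
	 * events are injected from the CPU, i.e. not for the Ethernet Rx
	 * and timer adapter producer types.
	 */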
	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
		opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 0 :
		evt_nr_active_lcores(opt->plcores);

	nb_queues = perf_queue_nb_event_queues(opt);

	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		q_conf.schedule_type =
			(opt->sched_type_list[queue % nb_stages]);

		if (opt->q_priority) {
			uint8_t stage_pos = queue % nb_stages;
			/* Configure event queues (stage 0 to stage n) with
			 * RTE_EVENT_DEV_PRIORITY_LOWEST to
			 * RTE_EVENT_DEV_PRIORITY_HIGHEST.
			 */
			uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST /
					(nb_stages - 1);
			/* Higher prio for the queues closer to last stage */
			q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
					(step * stage_pos);
		}
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

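	/* Clamp the worker dequeue depth to what the device supports. */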
	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
			.new_event_threshold = dev_info.max_num_events,
	};

	ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
					nb_queues, &p_conf);
	if (ret)
		return ret;

	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

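	/* Start the producers configured for this run: ethdev ports with
	 * their Rx adapters, timer adapters, or cryptodevs.
	 */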
	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(prod) {
			ret = rte_eth_dev_start(prod);
			if (ret) {
				evt_err("Ethernet dev [%d] failed to start",
						prod);
				return ret;
			}

			ret = rte_event_eth_rx_adapter_start(prod);
			if (ret) {
				evt_err("Rx adapter[%d] start failed", prod);
				return ret;
			}
			printf("%s: Port[%d] using Rx adapter[%d] started\n",
					__func__, prod, prod);
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
			ret = rte_event_timer_adapter_start(
					t->timer_adptr[prod]);
			if (ret) {
				evt_err("failed to start event timer adapter %d",
						prod);
				return ret;
			}
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		uint8_t cdev_id, cdev_count;

		cdev_count = rte_cryptodev_count();
		for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
			ret = rte_cryptodev_start(cdev_id);
			if (ret) {
				evt_err("Failed to start cryptodev %u",
					cdev_id);
				return ret;
			}
		}
	}

	return 0;
}

static void
perf_queue_opt_dump(struct evt_options *opt)
{
	evt_dump_fwd_latency(opt);
	perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
}

static int
perf_queue_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
}

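/*
 * Check that the device exposes enough event queues and ports for the
 * requested producer/worker/stage combination.
 */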
static bool
perf_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			perf_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}

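/* Per-test callbacks registered with the test-eventdev framework. */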
static const struct evt_test_ops perf_queue = {
	.cap_check          = perf_queue_capability_check,
	.opt_check          = perf_queue_opt_check,
	.opt_dump           = perf_queue_opt_dump,
	.test_setup         = perf_test_setup,
	.mempool_setup      = perf_mempool_setup,
	.ethdev_setup       = perf_ethdev_setup,
	.cryptodev_setup    = perf_cryptodev_setup,
	.ethdev_rx_stop     = perf_ethdev_rx_stop,
	.eventdev_setup     = perf_queue_eventdev_setup,
	.launch_lcores      = perf_queue_launch_lcores,
	.eventdev_destroy   = perf_eventdev_destroy,
	.mempool_destroy    = perf_mempool_destroy,
	.ethdev_destroy     = perf_ethdev_destroy,
	.cryptodev_destroy  = perf_cryptodev_destroy,
	.test_result        = perf_test_result,
	.test_destroy       = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_queue);