/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers * number of stages */
	uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
		rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
	return nb_prod * opt->nb_stages;
}

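/* Record the current timer cycle count in the event's perf_elt when the
 * event sits in a stage-0 queue; the last stage reads it back to compute
 * the forwarding latency.
 */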
static inline __attribute__((always_inline)) void
mark_fwd_latency(struct rte_event *const ev,
		const uint8_t nb_stages)
{
	if (unlikely((ev->queue_id % nb_stages) == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}

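/* Move the event to the next stage: bump the queue id, pick the scheduling
 * type configured for that stage and mark the event as a FORWARD op.
 */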
static inline __attribute__((always_inline)) void
fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	ev->queue_id++;
	ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

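/* Non-burst worker: dequeue one event at a time, process it at the last
 * stage or forward it to the next queue, retrying the enqueue until the
 * PMD accepts it.
 */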
static int
perf_queue_worker(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	struct rte_event ev;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}
		/* first q in pipeline, mark timestamp to compute fwd latency */
		if (enable_fwd_latency && !prod_timer_type)
			mark_fwd_latency(&ev, nb_stages);

		/* last stage in pipeline */
		if (unlikely((ev.queue_id % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool,
					&ev, w, bufs, sz, cnt);
		} else {
			fwd_event(&ev, sched_type_list, nb_stages);
			while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
				rte_pause();
		}
	}
	return 0;
}

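/* Burst worker: same pipeline as perf_queue_worker() but dequeues and
 * enqueues up to BURST_SIZE events per call. Events that finished the last
 * stage are turned into RELEASE ops so the whole burst can be enqueued back
 * in one shot.
 */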
static int
perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	uint16_t i;
	/* +1 so the ev[i + 1] prefetch never reads past the array */
	struct rte_event ev[BURST_SIZE + 1];

	while (t->done == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (enable_fwd_latency && !prod_timer_type) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first queue in pipeline,
				 * mark timestamp to compute fwd latency
				 */
				mark_fwd_latency(&ev[i], nb_stages);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].queue_id % nb_stages) ==
						 laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				fwd_event(&ev[i], sched_type_list, nb_stages);
			}
		}

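		/* Enqueue the processed burst back, retrying until the
		 * PMD accepts every event.
		 */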
		uint16_t enq;

		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev, port,
							ev + enq, nb_rx - enq);
		}
	}
	return 0;
}

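/* Pick the worker variant once at launch so the burst/latency branches are
 * compile-time constants inside the hot loop.
 */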
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_queue_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_queue_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_queue_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_queue_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

static int
perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

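/* Set up the event device for the queue-based pipeline: one event port per
 * worker (plus one per producer lcore unless an Rx/timer adapter injects the
 * events) and nb_stages event queues per producer, so an event walks
 * queue_id, queue_id + 1, ... until it reaches the last stage.
 */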
static int
perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t queue;
	int nb_stages = opt->nb_stages;
	int ret;
	int nb_ports;
	int nb_queues;
	struct rte_event_dev_info dev_info;

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
		opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 0 :
		evt_nr_active_lcores(opt->plcores);

	nb_queues = perf_queue_nb_event_queues(opt);

	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	const struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_events_limit = dev_info.max_num_events,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth,
			.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth,
	};

	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		q_conf.schedule_type =
			(opt->sched_type_list[queue % nb_stages]);

		if (opt->q_priority) {
			uint8_t stage_pos = queue % nb_stages;
			/* Configure event queues (stage 0 to stage n) with
			 * RTE_EVENT_DEV_PRIORITY_LOWEST to
			 * RTE_EVENT_DEV_PRIORITY_HIGHEST.
			 */
			uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST /
					(nb_stages - 1);
			/* Higher prio for the queues closer to last stage */
			q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
					(step * stage_pos);
		}
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

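	/* Do not ask for a deeper dequeue than the device can provide. */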
	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
			.new_event_threshold = dev_info.max_num_events,
	};

	ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
					nb_queues, &p_conf);
	if (ret)
		return ret;

	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}

static void
perf_queue_opt_dump(struct evt_options *opt)
{
	evt_dump_fwd_latency(opt);
	perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
}

static int
perf_queue_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
}

static bool
perf_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			perf_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}

static const struct evt_test_ops perf_queue = {
	.cap_check          = perf_queue_capability_check,
	.opt_check          = perf_queue_opt_check,
	.opt_dump           = perf_queue_opt_dump,
	.test_setup         = perf_test_setup,
	.mempool_setup      = perf_mempool_setup,
	.ethdev_setup       = perf_ethdev_setup,
	.eventdev_setup     = perf_queue_eventdev_setup,
	.launch_lcores      = perf_queue_launch_lcores,
	.eventdev_destroy   = perf_eventdev_destroy,
	.mempool_destroy    = perf_mempool_destroy,
	.ethdev_destroy     = perf_ethdev_destroy,
	.test_result        = perf_test_result,
	.test_destroy       = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_queue);