/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdio.h>
#include <unistd.h>

#include "test_order_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

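/*
 * Stage 0: an event dequeued from the ordered queue (q0) is forwarded to
 * the atomic queue (q1). The atomic stage serializes each flow so that
 * per-flow sequence numbers can be checked in order in stage 1.
 */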
static __rte_always_inline void
order_queue_process_stage_0(struct rte_event *const ev)
{
	ev->queue_id = 1; /* q1 atomic queue */
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

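/*
 * Non-burst worker: dequeue one event at a time and run it through the
 * two-stage pipeline until all outstanding packets are accounted for or
 * the test flags an error.
 */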
static int
order_queue_worker(void *arg)
{
	ORDER_WORKER_INIT;
	struct rte_event ev;

	while (t->err == false) {
		uint16_t event = rte_event_dequeue_burst(dev_id, port,
					&ev, 1, 0);
		if (!event) {
			if (rte_atomic64_read(outstand_pkts) <= 0)
				break;
			rte_pause();
			continue;
		}

		if (ev.queue_id == 0) { /* from ordered queue */
			order_queue_process_stage_0(&ev);
			while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
					!= 1)
				rte_pause();
		} else if (ev.queue_id == 1) { /* from atomic queue */
			order_process_stage_1(t, &ev, nb_flows,
					expected_flow_seq, outstand_pkts);
		} else {
			order_process_stage_invalid(t, &ev);
		}
	}
	return 0;
}

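/*
 * Burst worker: same pipeline as above, but dequeues and enqueues up to
 * BURST_SIZE events at a time. Events finishing stage 1 are marked
 * RTE_EVENT_OP_RELEASE so the enqueue releases their scheduling contexts
 * instead of forwarding them.
 */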
static int
order_queue_worker_burst(void *arg)
{
	ORDER_WORKER_INIT;
	struct rte_event ev[BURST_SIZE];
	uint16_t i;

	while (t->err == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev,
				BURST_SIZE, 0);

		if (nb_rx == 0) {
			if (rte_atomic64_read(outstand_pkts) <= 0)
				break;
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (ev[i].queue_id == 0) { /* from ordered queue */
				order_queue_process_stage_0(&ev[i]);
			} else if (ev[i].queue_id == 1) { /* from atomic queue */
				order_process_stage_1(t, &ev[i], nb_flows,
					expected_flow_seq, outstand_pkts);
				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				order_process_stage_invalid(t, &ev[i]);
			}
		}

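		/*
		 * Enqueue the processed burst back to the device, retrying
		 * until every event has been accepted.
		 */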
		uint16_t enq;

		enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev_id, port,
							ev + enq, nb_rx - enq);
		}
	}
	return 0;
}

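/* Pick the burst or non-burst worker based on the device capability. */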
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	const bool burst = evt_has_burst_mode(w->dev_id);

	if (burst)
		return order_queue_worker_burst(arg);
	else
		return order_queue_worker(arg);
}

static int
order_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return order_launch_lcores(test, opt, worker_wrapper);
}

#define NB_QUEUES 2
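/*
 * Configure the event device with two queues (q0 ordered, q1 atomic) and
 * one port per worker plus one for the producer, then start it.
 */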
static int
order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;

	const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
	/* number of active worker cores + 1 producer */
	const uint8_t nb_ports = nb_workers + 1;

	const struct rte_event_dev_config config = {
			.nb_event_queues = NB_QUEUES, /* q0 ordered, q1 atomic */
			.nb_event_ports = nb_ports,
			.nb_events_limit = 4096,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};

	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	/* q0 (ordered queue) configuration */
	struct rte_event_queue_conf q0_ordered_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.schedule_type = RTE_SCHED_TYPE_ORDERED,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	ret = rte_event_queue_setup(opt->dev_id, 0, &q0_ordered_conf);
	if (ret) {
		evt_err("failed to setup queue0 eventdev %d", opt->dev_id);
		return ret;
	}

	/* q1 (atomic queue) configuration */
	struct rte_event_queue_conf q1_atomic_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	ret = rte_event_queue_setup(opt->dev_id, 1, &q1_atomic_conf);
	if (ret) {
		evt_err("failed to setup queue1 eventdev %d", opt->dev_id);
		return ret;
	}

	/* setup one port per worker, linking to all queues */
	ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
	if (ret)
		return ret;

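	/*
	 * Schedulers without distributed scheduling need a service core to
	 * run the scheduling loop.
	 */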
	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}

static void
order_queue_opt_dump(struct evt_options *opt)
{
	order_opt_dump(opt);
	evt_dump("nb_evdev_queues", "%d", NB_QUEUES);
}

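/* Check that the device has enough queues and ports for this test. */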
static bool
order_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < NB_QUEUES || dev_info.max_event_ports <
			order_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			NB_QUEUES, dev_info.max_event_queues,
			order_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}

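/* Test ops for the order_queue test; registered with the framework below. */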
static const struct evt_test_ops order_queue = {
	.cap_check          = order_queue_capability_check,
	.opt_check          = order_opt_check,
	.opt_dump           = order_queue_opt_dump,
	.test_setup         = order_test_setup,
	.mempool_setup      = order_mempool_setup,
	.eventdev_setup     = order_queue_eventdev_setup,
	.launch_lcores      = order_queue_launch_lcores,
	.eventdev_destroy   = order_eventdev_destroy,
	.mempool_destroy    = order_mempool_destroy,
	.test_result        = order_test_result,
	.test_destroy       = order_test_destroy,
};

EVT_TEST_REGISTER(order_queue);