/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium, Inc 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <unistd.h>

#include "test_order_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */

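/*
 * Test topology, as implemented below:
 *   producer port -> q0 (ORDERED) -> stage 0 (forward to q1)
 *   -> q1 (ATOMIC) -> stage 1 (per-flow sequence check) -> release
 *
 * Illustrative invocation (the lcore numbers and vdev are arbitrary
 * examples; see the guide above for the complete option list):
 *   dpdk-test-eventdev --vdev=event_sw0 -- --test=order_queue \
 *           --plcores 1 --wlcores 2,3
 */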
static inline __attribute__((always_inline)) void
order_queue_process_stage_0(struct rte_event *const ev)
{
	ev->queue_id = 1; /* q1 atomic queue */
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

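/*
 * Single-event worker: dequeue one event at a time, run the stage that
 * matches its source queue, and exit once the producer's outstanding
 * packet count drains to zero or the test flags an error.
 */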
static int
order_queue_worker(void *arg)
{
	ORDER_WORKER_INIT;
	struct rte_event ev;

	while (t->err == false) {
		uint16_t event = rte_event_dequeue_burst(dev_id, port,
					&ev, 1, 0);
		if (!event) {
			if (rte_atomic64_read(outstand_pkts) <= 0)
				break;
			rte_pause();
			continue;
		}

		if (ev.queue_id == 0) { /* from ordered queue */
			order_queue_process_stage_0(&ev);
			while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
					!= 1)
				rte_pause();
		} else if (ev.queue_id == 1) { /* from atomic queue */
			order_process_stage_1(t, &ev, nb_flows,
					expected_flow_seq, outstand_pkts);
		} else {
			order_process_stage_invalid(t, &ev);
		}
	}
	return 0;
}

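/*
 * Burst worker: same two-stage logic, but dequeues up to BURST_SIZE
 * events, patches each in place, and re-enqueues the whole batch.
 * Stage-1 events are marked RTE_EVENT_OP_RELEASE so that the enqueue
 * releases their atomic flow contexts.
 */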
static int
order_queue_worker_burst(void *arg)
{
	ORDER_WORKER_INIT;
	struct rte_event ev[BURST_SIZE];
	uint16_t i;

	while (t->err == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev,
				BURST_SIZE, 0);

		if (nb_rx == 0) {
			if (rte_atomic64_read(outstand_pkts) <= 0)
				break;
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (ev[i].queue_id == 0) { /* from ordered queue */
				order_queue_process_stage_0(&ev[i]);
			} else if (ev[i].queue_id == 1) {/* from atomic queue */
				order_process_stage_1(t, &ev[i], nb_flows,
					expected_flow_seq, outstand_pkts);
				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				order_process_stage_invalid(t, &ev[i]);
			}
		}

		uint16_t enq;

		enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev_id, port,
							ev + enq, nb_rx - enq);
		}
	}
	return 0;
}

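/*
 * Pick the burst or single-event worker loop based on whether the
 * underlying event device reports burst-mode capability.
 */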
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	const bool burst = evt_has_burst_mode(w->dev_id);

	if (burst)
		return order_queue_worker_burst(arg);
	else
		return order_queue_worker(arg);
}

static int
order_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return order_launch_lcores(test, opt, worker_wrapper);
}

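/*
 * Configure the event device with two queues (q0 ordered, q1 atomic) and
 * one port per worker plus one for the producer, link the ports, set up
 * the scheduling service if needed, and start the device.
 */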
#define NB_QUEUES 2
static int
order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;

	const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
	/* number of active worker cores + 1 producer */
	const uint8_t nb_ports = nb_workers + 1;

	const struct rte_event_dev_config config = {
			.nb_event_queues = NB_QUEUES,/* q0 ordered, q1 atomic */
			.nb_event_ports = nb_ports,
			.nb_events_limit  = 4096,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};

	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	/* q0 (ordered queue) configuration */
	struct rte_event_queue_conf q0_ordered_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.schedule_type = RTE_SCHED_TYPE_ORDERED,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	ret = rte_event_queue_setup(opt->dev_id, 0, &q0_ordered_conf);
	if (ret) {
		evt_err("failed to setup queue0 eventdev %d", opt->dev_id);
		return ret;
	}

	/* q1 (atomic queue) configuration */
	struct rte_event_queue_conf q1_atomic_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	ret = rte_event_queue_setup(opt->dev_id, 1, &q1_atomic_conf);
	if (ret) {
		evt_err("failed to setup queue1 eventdev %d", opt->dev_id);
		return ret;
	}

	/* setup one port per worker, linking to all queues */
	ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
	if (ret)
		return ret;

	ret = evt_service_setup(opt->dev_id);
	if (ret) {
		evt_err("No service lcore found to run event dev.");
		return ret;
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}

static void
order_queue_opt_dump(struct evt_options *opt)
{
	order_opt_dump(opt);
	evt_dump("nb_evdev_queues", "%d", NB_QUEUES);
}

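/* Verify the device exposes enough event queues and ports for this test. */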
static bool
order_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < NB_QUEUES || dev_info.max_event_ports <
			order_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			NB_QUEUES, dev_info.max_event_queues,
			order_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}

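/* Operation hooks consumed by the test framework via EVT_TEST_REGISTER(). */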
static const struct evt_test_ops order_queue = {
	.cap_check          = order_queue_capability_check,
	.opt_check          = order_opt_check,
	.opt_dump           = order_queue_opt_dump,
	.test_setup         = order_test_setup,
	.mempool_setup      = order_mempool_setup,
	.eventdev_setup     = order_queue_eventdev_setup,
	.launch_lcores      = order_queue_launch_lcores,
	.eventdev_destroy   = order_eventdev_destroy,
	.mempool_destroy    = order_mempool_destroy,
	.test_result        = order_test_result,
	.test_destroy       = order_test_destroy,
};

EVT_TEST_REGISTER(order_queue);