/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_order_common.h"

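/* Report the overall test status stored in the per-test private data. */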
int
order_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	return t->result;
}

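/*
 * Producer loop: allocate an mbuf, derive a flow id from its address,
 * stamp a per-flow sequence number and enqueue it as a NEW, ORDERED
 * event on stage 0, until nb_pkts events have been injected or an
 * error is flagged.
 */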
static inline int
order_producer(void *arg)
{
	struct prod_data *p = arg;
	struct test_order *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	uint32_t *producer_flow_seq = t->producer_flow_seq;
	const uint32_t nb_flows = t->nb_flows;
	uint64_t count = 0;
	struct rte_mbuf *m;
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
			 __func__, rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = RTE_SCHED_TYPE_ORDERED;
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

	while (count < nb_pkts && t->err == false) {
		m = rte_pktmbuf_alloc(pool);
		if (m == NULL)
			continue;

		const flow_id_t flow = (uintptr_t)m % nb_flows;
		/* Maintain seq number per flow */
		m->seqn = producer_flow_seq[flow]++;
		order_flow_id_save(t, flow, m, &ev);

		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
			if (t->err)
				break;
			rte_pause();
		}

		count++;
	}
	return 0;
}

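/*
 * Validate the command-line options: the test supports only the
 * synthetic producer and requires at least three lcores (one producer,
 * one or more workers, plus the main lcore), with no overlap between
 * the producer, worker and main lcores.
 */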
int
order_opt_check(struct evt_options *opt)
{
	if (opt->prod_type != EVT_PROD_TYPE_SYNT) {
		evt_err("invalid producer type '%s', valid producer type is '%s'",
			evt_prod_id_to_name(opt->prod_type),
			evt_prod_id_to_name(EVT_PROD_TYPE_SYNT));
		return -1;
	}

	/* 1 producer + N workers + main */
	if (rte_lcore_count() < 3) {
		evt_err("test needs a minimum of 3 lcores");
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlap with the main lcore");
		return -1;
	}

	if (evt_nr_active_lcores(opt->plcores) == 0) {
		evt_err("missing the producer lcore");
		return -1;
	}

	if (evt_nr_active_lcores(opt->plcores) != 1) {
		evt_err("exactly one producer lcore must be selected");
		return -1;
	}

	int plcore = evt_get_first_active_lcore(opt->plcores);

	if (plcore < 0) {
		evt_err("failed to find an active producer lcore");
		return plcore;
	}

	if (evt_lcores_has_overlap(opt->wlcores, plcore)) {
		evt_err("worker lcores overlap with the producer lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("at least one worker lcore is required");
		return -1;
	}

	/* Validate producer lcore */
	if (plcore == (int)rte_get_main_lcore()) {
		evt_err("producer lcore and main lcore must be different");
		return -1;
	}
	if (!rte_lcore_is_enabled(plcore)) {
		evt_err("producer lcore is not enabled");
		return -1;
	}

	/* Fixups */
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX;

	return 0;
}

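/*
 * Allocate the per-test private data, register the mbuf dynamic field
 * that carries the flow id, and allocate the per-flow sequence arrays
 * used by the producer and the result checker.
 */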
int
order_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_order;
	struct test_order *t;
	static const struct rte_mbuf_dynfield flow_id_dynfield_desc = {
		.name = "test_event_dynfield_flow_id",
		.size = sizeof(flow_id_t),
		.align = __alignof__(flow_id_t),
	};

	test_order = rte_zmalloc_socket(test->name, sizeof(struct test_order),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_order == NULL) {
		evt_err("failed to allocate test_order memory");
		goto nomem;
	}
	test->test_priv = test_order;
	t = evt_test_priv(test);

	t->flow_id_dynfield_offset =
		rte_mbuf_dynfield_register(&flow_id_dynfield_desc);
	if (t->flow_id_dynfield_offset < 0) {
		/* Save rte_errno, then free the private data to avoid
		 * leaking it on failure.
		 */
		int err = -rte_errno;

		evt_err("failed to register mbuf field");
		rte_free(test->test_priv);
		return err;
	}

	t->producer_flow_seq = rte_zmalloc_socket("test_producer_flow_seq",
				 sizeof(*t->producer_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);

	if (t->producer_flow_seq == NULL) {
		evt_err("failed to allocate t->producer_flow_seq memory");
		goto prod_nomem;
	}

	t->expected_flow_seq = rte_zmalloc_socket("test_expected_flow_seq",
				 sizeof(*t->expected_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);

	if (t->expected_flow_seq == NULL) {
		evt_err("failed to allocate t->expected_flow_seq memory");
		goto exp_nomem;
	}
	rte_atomic64_set(&t->outstand_pkts, opt->nb_pkts);
	t->err = false;
	t->nb_pkts = opt->nb_pkts;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	return 0;

exp_nomem:
	rte_free(t->producer_flow_seq);
prod_nomem:
	rte_free(test->test_priv);
nomem:
	return -ENOMEM;
}

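/* Release the per-flow sequence arrays and the private data. */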
void
order_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	rte_free(t->expected_flow_seq);
	rte_free(t->producer_flow_seq);
	rte_free(test->test_priv);
}

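/* Create the mbuf pool used by the synthetic producer. */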
int
order_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_order *t = evt_test_priv(test);

	t->pool = rte_pktmbuf_pool_create(test->name, opt->pool_sz,
					256 /* cache size */,
					0 /* priv size */,
					512 /* small data room; the test never
					     * touches the packet payload
					     */,
					opt->socket_id);
	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

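/* Free the mbuf pool created in order_mempool_setup(). */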
void
order_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}

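/* Stop and close the event device under test. */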
void
order_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

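/* Dump the option values relevant to the order tests. */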
void
order_opt_dump(struct evt_options *opt)
{
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
}

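/*
 * Launch the given worker function on every selected worker lcore and
 * the producer on the producer lcore, then poll the outstanding-packet
 * counter from the main lcore. The test passes once the counter reaches
 * zero; if it stalls for a second, the run is declared deadlocked.
 */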
int
order_launch_lcores(struct evt_test *test, struct evt_options *opt,
			int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_order *t = evt_test_priv(test);

	int wkr_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx],
					 lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		wkr_idx++;
	}

	/* launch producer */
	int plcore = evt_get_first_active_lcore(opt->plcores);

	ret = rte_eal_remote_launch(order_producer, &t->prod, plcore);
	if (ret) {
		evt_err("failed to launch order_producer %d", plcore);
		return ret;
	}

	uint64_t cycles = rte_get_timer_cycles();
	int64_t old_remaining = -1;

	while (t->err == false) {
		uint64_t new_cycles = rte_get_timer_cycles();
		int64_t remaining = rte_atomic64_read(&t->outstand_pkts);

		if (remaining <= 0) {
			t->result = EVT_TEST_SUCCESS;
			break;
		}

		/* Check progress once per second */
		if (new_cycles - cycles > rte_get_timer_hz()) {
			printf(CLGRN"\r%"PRId64""CLNRM, remaining);
			fflush(stdout);
			if (old_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("no schedules for a second, deadlock");
				t->err = true;
				rte_smp_wmb();
				break;
			}
			old_remaining = remaining;
			cycles = new_cycles;
		}
	}
	printf("\r");

	return 0;
}

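/*
 * Configure one event port per worker, each linked to all queues, plus
 * one extra port for the producer (left unlinked: the producer only
 * enqueues).
 */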
int
order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t nb_workers, uint8_t nb_queues)
{
	int ret;
	uint8_t port;
	struct test_order *t = evt_test_priv(test);
	struct rte_event_dev_info dev_info;

	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = dev_info.max_event_port_enqueue_depth,
			.new_event_threshold = dev_info.max_num_events,
	};

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < nb_workers; port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;

		ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}
	struct prod_data *p = &t->prod;

	p->dev_id = opt->dev_id;
	p->port_id = port; /* last port */
	p->queue_id = 0;
	p->t = t;

	ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
	if (ret) {
		evt_err("failed to setup producer port %d", port);
		return ret;
	}

	return ret;
}