/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers * number of stages */
	uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
		rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
	return nb_prod * opt->nb_stages;
}

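/* Record the current timer cycles in the event's perf element when the event
 * sits on the first queue of a pipeline (queue_id % nb_stages == 0), so the
 * last stage can compute the forwarding latency.
 */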
static __rte_always_inline void
mark_fwd_latency(struct rte_event *const ev, const uint8_t nb_stages)
{
	if (unlikely((ev->queue_id % nb_stages) == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}

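/* Move an event to the next queue of the pipeline: bump the queue id, pick
 * the schedule type configured for that stage and tag it as a FORWARD op.
 */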
static __rte_always_inline void
fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	ev->queue_id++;
	ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

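/* Single-event worker: dequeue one event at a time, forward it through the
 * configured stages and account it at the last stage.
 */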
static int
perf_queue_worker(void *arg, const int enable_fwd_latency)
{
	uint16_t enq = 0, deq = 0;
	struct rte_event ev;
	PERF_WORKER_INIT;

	while (t->done == false) {
		deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!deq) {
			rte_pause();
			continue;
		}

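		/* Crypto adapter mode: recover the mbuf from the completed
		 * crypto op and free the op; drop the event if the op failed.
		 */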
		if (prod_crypto_type &&
		    (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
			struct rte_crypto_op *op = ev.event_ptr;

			if (op->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
				if (op->sym->m_dst == NULL)
					ev.event_ptr = op->sym->m_src;
				else
					ev.event_ptr = op->sym->m_dst;
				rte_crypto_op_free(op);
			} else {
				rte_crypto_op_free(op);
				continue;
			}
		}

		if (enable_fwd_latency && !prod_timer_type)
			/* first q in pipeline, mark timestamp to compute fwd latency */
			mark_fwd_latency(&ev, nb_stages);

		/* last stage in pipeline */
		if (unlikely((ev.queue_id % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool,
					&ev, w, bufs, sz, cnt);
		} else {
			fwd_event(&ev, sched_type_list, nb_stages);
			do {
				enq = rte_event_enqueue_burst(dev, port, &ev,
						1);
			} while (!enq && !t->done);
		}
	}

	perf_worker_cleanup(pool, dev, port, &ev, enq, deq);

	return 0;
}

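/* Burst worker: dequeue up to BURST_SIZE events, process each one for its
 * current stage and enqueue the whole burst back in one shot.
 */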
static int
perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
{
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];
	uint16_t enq = 0, nb_rx = 0;
	PERF_WORKER_INIT;
	uint16_t i;

	while (t->done == false) {
		nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (prod_crypto_type &&
			    (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
				struct rte_crypto_op *op = ev[i].event_ptr;

				if (op->status ==
				    RTE_CRYPTO_OP_STATUS_SUCCESS) {
					if (op->sym->m_dst == NULL)
						ev[i].event_ptr =
							op->sym->m_src;
					else
						ev[i].event_ptr =
							op->sym->m_dst;
					rte_crypto_op_free(op);
				} else {
					rte_crypto_op_free(op);
					continue;
				}
			}

			if (enable_fwd_latency && !prod_timer_type) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first queue in pipeline.
				 * mark time stamp to compute fwd latency
				 */
				mark_fwd_latency(&ev[i], nb_stages);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].queue_id % nb_stages) ==
						laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

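				/* Consumed at the last stage; mark it RELEASE
				 * so the burst enqueue below releases it
				 * instead of forwarding it.
				 */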
				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				fwd_event(&ev[i], sched_type_list, nb_stages);
			}
		}

		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx && !t->done) {
			enq += rte_event_enqueue_burst(dev, port,
							ev + enq, nb_rx - enq);
		}
	}

	perf_worker_cleanup(pool, dev, port, ev, enq, nb_rx);

	return 0;
}

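/* Pick the worker variant once at launch time (burst vs. single dequeue,
 * with or without forward-latency accounting) so the per-event loop does not
 * re-evaluate these options.
 */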
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_queue_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_queue_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_queue_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_queue_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

static int
perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

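/* Configure the event device: one queue per producer per stage and one port
 * per worker (plus producer ports when producing from lcores), then start the
 * device and any producer-side adapters.
 */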
static int
perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t queue;
	int nb_stages = opt->nb_stages;
	int ret;
	int nb_ports;
	int nb_queues;
	uint16_t prod;
	struct rte_event_dev_info dev_info;
	struct test_perf *t = evt_test_priv(test);

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
		opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 0 :
		evt_nr_active_lcores(opt->plcores);

	nb_queues = perf_queue_nb_event_queues(opt);

	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		q_conf.schedule_type =
			(opt->sched_type_list[queue % nb_stages]);

		if (opt->q_priority) {
			uint8_t stage_pos = queue % nb_stages;
			/* Configure event queues(stage 0 to stage n) with
			 * RTE_EVENT_DEV_PRIORITY_LOWEST to
			 * RTE_EVENT_DEV_PRIORITY_HIGHEST.
			 */
			uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST /
					(nb_stages - 1);
			/* Higher prio for the queues closer to last stage */
			q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
					(step * stage_pos);
		}
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
			.new_event_threshold = dev_info.max_num_events,
	};

	ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
			nb_queues, &p_conf);
	if (ret)
		return ret;

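	/* Non-distributed schedulers (e.g. the SW eventdev) need a service
	 * core to run the scheduling service.
	 */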
	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(prod) {
			ret = rte_eth_dev_start(prod);
			if (ret) {
				evt_err("Ethernet dev [%d] failed to start. Using synthetic producer",
						prod);
				return ret;
			}

			ret = rte_event_eth_rx_adapter_start(prod);
			if (ret) {
				evt_err("Rx adapter[%d] start failed", prod);
				return ret;
			}
			printf("%s: Port[%d] using Rx adapter[%d] started\n",
					__func__, prod, prod);
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
			ret = rte_event_timer_adapter_start(
					t->timer_adptr[prod]);
			if (ret) {
				evt_err("failed to start event timer adapter %d",
						prod);
				return ret;
			}
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		uint8_t cdev_id, cdev_count;

		cdev_count = rte_cryptodev_count();
		for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
			ret = rte_cryptodev_start(cdev_id);
			if (ret) {
				evt_err("Failed to start cryptodev %u",
						cdev_id);
				return ret;
			}
		}
	}

	return 0;
}

static void
perf_queue_opt_dump(struct evt_options *opt)
{
	evt_dump_fwd_latency(opt);
	perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
}

static int
perf_queue_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
}

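/* Verify the device exposes enough event queues and ports for the requested
 * producers, stages and workers.
 */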
static bool
perf_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			perf_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}

static const struct evt_test_ops perf_queue = {
	.cap_check = perf_queue_capability_check,
	.opt_check = perf_queue_opt_check,
	.opt_dump = perf_queue_opt_dump,
	.test_setup = perf_test_setup,
	.mempool_setup = perf_mempool_setup,
	.ethdev_setup = perf_ethdev_setup,
	.cryptodev_setup = perf_cryptodev_setup,
	.ethdev_rx_stop = perf_ethdev_rx_stop,
	.eventdev_setup = perf_queue_eventdev_setup,
	.launch_lcores = perf_queue_launch_lcores,
	.eventdev_destroy = perf_eventdev_destroy,
	.mempool_destroy = perf_mempool_destroy,
	.ethdev_destroy = perf_ethdev_destroy,
	.cryptodev_destroy = perf_cryptodev_destroy,
	.test_result = perf_test_result,
	.test_destroy = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_queue);