/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#include "test_pipeline_common.h"

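/* Print the packet distribution across the worker cores and return the
 * overall result recorded by the test.
 */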
int
pipeline_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	int i;
	uint64_t total = 0;
	struct test_pipeline *t = evt_test_priv(test);

	evt_info("Packet distribution across worker cores:");
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;
	for (i = 0; i < t->nb_workers; i++)
		evt_info("Worker %d packets: "CLGRN"%"PRIu64""CLNRM" percentage:"
				CLGRN" %3.2f"CLNRM, i,
				t->worker[i].processed_pkts,
				(((double)t->worker[i].processed_pkts)/total)
				* 100);
	return t->result;
}

void
pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", pipeline_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
	evt_dump("nb_eth_rx_queues", "%d", opt->eth_queues);
	evt_dump("event_vector", "%d", opt->ena_vector);
	if (opt->ena_vector) {
		evt_dump("vector_size", "%d", opt->vector_size);
		evt_dump("vector_tmo_ns", "%" PRIu64 "", opt->vector_tmo_nsec);
	}
}

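/* Sum of packets processed so far across all worker cores. */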
static inline uint64_t
processed_pkts(struct test_pipeline *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}

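/* Launch the given worker function on every lcore selected in
 * opt->wlcores, then let the main lcore sample the aggregate packet
 * count about once per second (one rte_get_timer_hz() worth of cycles)
 * and print the current and running-average throughput in Mpps until
 * the test sets t->done. Note that total_mpps and samples are static,
 * so the average spans the lifetime of the process.
 */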
int
pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_pipeline *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				&t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	uint64_t perf_cycles = rte_get_timer_cycles();
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	uint64_t prev_pkts = 0;

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t curr_pkts = processed_pkts(t);

			float mpps = (float)(curr_pkts - prev_pkts)/1000000;

			prev_pkts = curr_pkts;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			fflush(stdout);
		}
	}
	printf("\n");
	return 0;
}

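/* Validate the test options: the pipeline tests support only the ethdev
 * Rx adapter producer, need at least one ethernet device and two lcores
 * (one worker plus the main lcore), and must fit within the configured
 * queue and port limits.
 */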
int
pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* 1 worker + main */
	lcores = 2;

	if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR) {
		evt_err("Invalid producer type '%s', valid producer: '%s'",
			evt_prod_id_to_name(opt->prod_type),
			evt_prod_id_to_name(EVT_PROD_TYPE_ETH_RX_ADPTR));
		return -1;
	}

	if (!rte_eth_dev_count_avail()) {
		evt_err("test needs a minimum of 1 ethernet dev");
		return -1;
	}

	if (rte_lcore_count() < lcores) {
		evt_err("test needs a minimum of %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlap with the main lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("minimum one worker is required");
		return -1;
	}

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (pipeline_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	return 0;
}

#define NB_RX_DESC 128
#define NB_TX_DESC 512
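/* Configure every available ethernet device: query the Rx/Tx adapter
 * capabilities to decide whether all devices provide an internal event
 * port, enable RSS (trimmed to what the hardware supports), set up
 * opt->eth_queues Rx queues and one Tx queue per device, and enable
 * promiscuous mode.
 */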
int
pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i, j;
	int ret;
	uint8_t nb_queues = 1;
	struct test_pipeline *t = evt_test_priv(test);
	struct rte_eth_rxconf rx_conf;
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = RTE_ETH_RSS_IP,
			},
		},
	};

	if (!rte_eth_dev_count_avail()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}

	if (opt->max_pkt_sz < RTE_ETHER_MIN_LEN) {
		evt_err("max_pkt_sz cannot be less than %d",
			RTE_ETHER_MIN_LEN);
		return -EINVAL;
	}

	port_conf.rxmode.mtu = opt->max_pkt_sz - RTE_ETHER_HDR_LEN -
				RTE_ETHER_CRC_LEN;

	t->internal_port = 1;
	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;
		uint32_t caps = 0;

		ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id, i, &caps);
		if (ret != 0) {
			evt_err("failed to get event tx adapter[%d] caps", i);
			return ret;
		}

		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
			t->internal_port = 0;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id, i, &caps);
		if (ret != 0) {
			evt_err("failed to get event rx adapter[%d] caps", i);
			return ret;
		}

		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
			local_port_conf.rxmode.offloads |=
				RTE_ETH_RX_OFFLOAD_RSS_HASH;

		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error getting device (port %u) info: %s\n",
				i, strerror(-ret));
			return ret;
		}

		/* Enable mbuf fast free if PMD has the capability. */
		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

		rx_conf = dev_info.default_rxconf;
		rx_conf.offloads = local_port_conf.rxmode.offloads;

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			evt_info("Port %u modified RSS hash function based on hardware support, "
				"requested:%#"PRIx64" configured:%#"PRIx64"",
				i,
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		if (rte_eth_dev_configure(i, opt->eth_queues, nb_queues,
				&local_port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		for (j = 0; j < opt->eth_queues; j++) {
			if (rte_eth_rx_queue_setup(
				    i, j, NB_RX_DESC, rte_socket_id(), &rx_conf,
				    opt->per_port_pool ? t->pool[i] :
							 t->pool[0]) < 0) {
				evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, j);
				return -EINVAL;
			}
		}

		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
					rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
				i, 0);
			return -EINVAL;
		}

		ret = rte_eth_promiscuous_enable(i);
		if (ret != 0) {
			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
				i, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

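/* Set up one event port per active worker lcore and link each port to
 * all the event queues in queue_arr. A typical call from a test's
 * eventdev setup might look like (sketch; the queue list and count are
 * assumptions):
 *
 *	uint8_t queues[] = {0, 1, 2};
 *	ret = pipeline_event_port_setup(test, opt, queues, 3, port_conf);
 */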
int
pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t *queue_arr, uint8_t nb_queues,
		const struct rte_event_port_conf p_conf)
{
	int ret;
	uint8_t port;
	struct test_pipeline *t = evt_test_priv(test);

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores); port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;

		ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		if (rte_event_port_link(opt->dev_id, port, queue_arr, NULL,
				nb_queues) != nb_queues)
			goto link_fail;
	}

	return 0;

link_fail:
	evt_err("failed to link queues to port %d", port);
	return -EINVAL;
}

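/* Create one Rx adapter per ethernet device (adapter id == ethdev id)
 * and add all of the device's Rx queues to it, steering their packets
 * to event queue (ethdev id * stride). When event vectorization is
 * enabled, the requested vector size and timeout are validated against
 * the adapter limits first. Adapters without an internal event port get
 * a service core to run their enqueue logic.
 */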
int
pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf)
{
	int ret = 0;
	uint16_t prod;
	struct rte_mempool *vector_pool = NULL;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	memset(&queue_conf, 0,
			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
	queue_conf.ev.sched_type = opt->sched_type_list[0];
	if (opt->ena_vector) {
		unsigned int nb_elem = (opt->pool_sz / opt->vector_size) << 1;

		nb_elem = nb_elem ? nb_elem : 1;
		vector_pool = rte_event_vector_pool_create(
			"vector_pool", nb_elem, 0, opt->vector_size,
			opt->socket_id);
		if (vector_pool == NULL) {
			evt_err("failed to create event vector pool");
			return -ENOMEM;
		}
	}
	RTE_ETH_FOREACH_DEV(prod) {
		struct rte_event_eth_rx_adapter_vector_limits limits;
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
				prod, &cap);
		if (ret) {
			evt_err("failed to get event rx adapter[%d] capabilities",
				prod);
			return ret;
		}

		if (opt->ena_vector) {
			memset(&limits, 0, sizeof(limits));
			ret = rte_event_eth_rx_adapter_vector_limits_get(
				opt->dev_id, prod, &limits);
			if (ret) {
				evt_err("failed to get vector limits");
				return ret;
			}

			if (opt->vector_size < limits.min_sz ||
			    opt->vector_size > limits.max_sz) {
				evt_err("Vector size [%d] not within limits min[%d] max[%d]",
					opt->vector_size, limits.min_sz,
					limits.max_sz);
				return -EINVAL;
			}

			if (limits.log2_sz &&
			    !rte_is_power_of_2(opt->vector_size)) {
				evt_err("Vector size [%d] not power of 2",
					opt->vector_size);
				return -EINVAL;
			}

			if (opt->vector_tmo_nsec > limits.max_timeout_ns ||
			    opt->vector_tmo_nsec < limits.min_timeout_ns) {
				evt_err("Vector timeout [%" PRIu64
					"] not within limits max[%" PRIu64
					"] min[%" PRIu64 "]",
					opt->vector_tmo_nsec,
					limits.max_timeout_ns,
					limits.min_timeout_ns);
				return -EINVAL;
			}

			if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
				queue_conf.vector_sz = opt->vector_size;
				queue_conf.vector_timeout_ns =
					opt->vector_tmo_nsec;
				queue_conf.rx_queue_flags |=
				RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
				queue_conf.vector_mp = vector_pool;
			} else {
				evt_err("Rx adapter doesn't support event vector");
				return -EINVAL;
			}
		}
		queue_conf.ev.queue_id = prod * stride;
		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
				&prod_conf);
		if (ret) {
			evt_err("failed to create rx adapter[%d]", prod);
			return ret;
		}
		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
				&queue_conf);
		if (ret) {
			evt_err("failed to add rx queues to adapter[%d]", prod);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			rte_event_eth_rx_adapter_service_id_get(prod,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for Rx adapter");
				return ret;
			}
		}

		evt_info("Rx adapter[%d] configured for eth port[%d]",
			 prod, prod);
	}

	return ret;
}

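/* Create one Tx adapter per ethernet device (adapter id == ethdev id)
 * and add all of the device's Tx queues to it. Adapters without an
 * internal event port get a service core to run their dequeue logic.
 */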
int
pipeline_event_tx_adapter_setup(struct evt_options *opt,
		struct rte_event_port_conf port_conf)
{
	int ret = 0;
	uint16_t consm;

	RTE_ETH_FOREACH_DEV(consm) {
		uint32_t cap;

		ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id,
				consm, &cap);
		if (ret) {
			evt_err("failed to get event tx adapter[%d] caps",
					consm);
			return ret;
		}

		if (opt->ena_vector) {
			if (!(cap &
			      RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR)) {
				evt_err("Tx adapter doesn't support event vector");
				return -EINVAL;
			}
		}

		ret = rte_event_eth_tx_adapter_create(consm, opt->dev_id,
				&port_conf);
		if (ret) {
			evt_err("failed to create tx adapter[%d]", consm);
			return ret;
		}

		ret = rte_event_eth_tx_adapter_queue_add(consm, consm, -1);
		if (ret) {
			evt_err("failed to add tx queues to adapter[%d]",
					consm);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			ret = rte_event_eth_tx_adapter_service_id_get(consm,
					&service_id);
			if (ret != -ESRCH && ret != 0) {
				evt_err("Failed to get Tx adapter service ID");
				return ret;
			}
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for Tx adapter");
				return ret;
			}
		}

		evt_info("Tx adapter[%d] configured for eth port[%d]",
			 consm, consm);
	}

	return ret;
}

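/* Free every mbuf carried in each event vector and return the vector
 * object itself to the mempool it was allocated from.
 */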
static void
pipeline_vector_array_free(struct rte_event events[], uint16_t num)
{
	uint16_t i;

	for (i = 0; i < num; i++) {
		rte_pktmbuf_free_bulk(events[i].vec->mbufs,
				events[i].vec->nb_elem);
		rte_mempool_put(rte_mempool_from_obj(events[i].vec),
				events[i].vec);
	}
}

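/* Flush callback for rte_event_port_quiesce(): drop an event still held
 * by the port by freeing its vector or its single mbuf.
 */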
static void
pipeline_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
			  void *args __rte_unused)
{
	if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
		pipeline_vector_array_free(&ev, 1);
	else
		rte_pktmbuf_free(ev.mbuf);
}

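/* Drain a worker port on shutdown: if some dequeued events were never
 * enqueued back, free the payloads of entries enq..deq-1, mark all deq
 * dequeued events RTE_EVENT_OP_RELEASE to give their scheduler contexts
 * back, and quiesce the port to flush anything it still holds.
 */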
void
pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
			uint16_t enq, uint16_t deq)
{
	int i;

	if (!(deq - enq))
		return;

	if (deq) {
		for (i = enq; i < deq; i++) {
			if (ev[i].op == RTE_EVENT_OP_RELEASE)
				continue;
			if (ev[i].event_type & RTE_EVENT_TYPE_VECTOR)
				pipeline_vector_array_free(&ev[i], 1);
			else
				rte_pktmbuf_free(ev[i].mbuf);
		}

		for (i = 0; i < deq; i++)
			ev[i].op = RTE_EVENT_OP_RELEASE;

		rte_event_enqueue_burst(dev, port, ev, deq);
	}

	rte_event_port_quiesce(dev, port, pipeline_event_port_flush, NULL);
}

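/* Stop the Rx path: halt each Rx adapter, remove its queues and stop
 * every ethdev Rx queue used by the test.
 */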
void
pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i, j;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(i) {
			rte_event_eth_rx_adapter_stop(i);
			rte_event_eth_rx_adapter_queue_del(i, i, -1);
			for (j = 0; j < opt->eth_queues; j++)
				rte_eth_dev_rx_queue_stop(i, j);
		}
	}
}

void
pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);
	RTE_SET_USED(opt);

	RTE_ETH_FOREACH_DEV(i) {
		rte_event_eth_tx_adapter_stop(i);
		rte_event_eth_tx_adapter_queue_del(i, i, -1);
		rte_eth_dev_tx_queue_stop(i, 0);
		rte_eth_dev_stop(i);
	}
}

void
pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

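/* Create the packet mbuf pool(s). The mbuf data room is enlarged when a
 * device limits the number of segments per MTU-sized packet, so that
 * max_pkt_sz always fits; with per-port pools enabled one pool is
 * created per ethernet device, otherwise a single pool is shared by all
 * devices.
 */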
int
pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_pipeline *t = evt_test_priv(test);
	int i, ret;

	if (!opt->mbuf_sz)
		opt->mbuf_sz = RTE_MBUF_DEFAULT_BUF_SIZE;

	if (!opt->max_pkt_sz)
		opt->max_pkt_sz = RTE_ETHER_MAX_LEN;

	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		uint16_t data_size = 0;

		memset(&dev_info, 0, sizeof(dev_info));
		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error getting device (port %u) info: %s\n",
				i, strerror(-ret));
			return ret;
		}

		if (dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
		    dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = opt->max_pkt_sz /
				dev_info.rx_desc_lim.nb_mtu_seg_max;
			data_size += RTE_PKTMBUF_HEADROOM;

			if (data_size > opt->mbuf_sz)
				opt->mbuf_sz = data_size;
		}
		if (opt->per_port_pool) {
			char name[RTE_MEMPOOL_NAMESIZE];

			snprintf(name, RTE_MEMPOOL_NAMESIZE, "%s-%d",
					test->name, i);
			t->pool[i] = rte_pktmbuf_pool_create(
					name,           /* mempool name */
					opt->pool_sz,   /* number of elements */
					0,              /* cache size */
					0,              /* private data size */
					opt->mbuf_sz,   /* data room size */
					opt->socket_id);

			if (t->pool[i] == NULL) {
				evt_err("failed to create mempool %s", name);
				return -ENOMEM;
			}
		}
	}

	if (!opt->per_port_pool) {
		t->pool[0] = rte_pktmbuf_pool_create(
				test->name,     /* mempool name */
				opt->pool_sz,   /* number of elements */
				0,              /* cache size */
				0,              /* private data size */
				opt->mbuf_sz,   /* data room size */
				opt->socket_id);

		if (t->pool[0] == NULL) {
			evt_err("failed to create mempool");
			return -ENOMEM;
		}
	}

	return 0;
}

void
pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	struct test_pipeline *t = evt_test_priv(test);
	int i;

	if (opt->per_port_pool) {
		RTE_ETH_FOREACH_DEV(i)
			rte_mempool_free(t->pool[i]);
	} else {
		rte_mempool_free(t->pool[0]);
	}
}

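/* Allocate and initialize the per-test private state. The pipeline
 * tests always use the ethdev Rx adapter as producer, so the producer
 * type is forced here regardless of the command line.
 */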
int
pipeline_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_pipeline;

	test_pipeline = rte_zmalloc_socket(test->name,
			sizeof(struct test_pipeline), RTE_CACHE_LINE_SIZE,
			opt->socket_id);
	if (test_pipeline == NULL) {
		evt_err("failed to allocate test_pipeline memory");
		goto nomem;
	}
	test->test_priv = test_pipeline;

	struct test_pipeline *t = evt_test_priv(test);

	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	opt->prod_type = EVT_PROD_TYPE_ETH_RX_ADPTR;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));
	return 0;
nomem:
	return -ENOMEM;
}

void
pipeline_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}