/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "test_perf_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */

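/*
 * Overview (added commentary, not in the original source): the perf_queue
 * test builds one chain of nb_stages event queues per producer, so the
 * device is configured with nb_producers * nb_stages queues. Ports are
 * split between producers and workers (see perf_nb_event_ports()); workers
 * forward each event through the stage queues of its chain.
 */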
static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers * number of stages */
	return evt_nr_active_lcores(opt->plcores) * opt->nb_stages;
}
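
/*
 * Illustration (added commentary): with two producer lcores and
 * --stlist=ao (two stages, atomic then ordered), the test requests
 * 2 * 2 = 4 event queues; producer 0 uses queues 0-1 and producer 1
 * uses queues 2-3.
 */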

static int
perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t queue;
	int nb_stages = opt->nb_stages;
	int ret;

	const struct rte_event_dev_config config = {
			.nb_event_queues = perf_queue_nb_event_queues(opt),
			.nb_event_ports = perf_nb_event_ports(opt),
			.nb_events_limit  = 4096,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};

	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
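	/*
	 * Note (added commentary): per the eventdev API, nb_atomic_flows is
	 * consulted only for atomic queues and nb_atomic_order_sequences
	 * only for ordered queues, so setting both lets this one template
	 * serve every stage regardless of its schedule type.
	 */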
	/* queue configurations */
	for (queue = 0; queue < perf_queue_nb_event_queues(opt); queue++) {
		q_conf.event_queue_cfg = evt_sched_type2queue_cfg(
				opt->sched_type_list[queue % nb_stages]);

		if (opt->q_priority) {
			uint8_t stage_pos = queue % nb_stages;
			/* Configure event queues (stage 0 to stage n) with
			 * RTE_EVENT_DEV_PRIORITY_LOWEST to
			 * RTE_EVENT_DEV_PRIORITY_HIGHEST.
			 */
			/* nb_stages == 1 would divide by zero below */
			uint8_t step = nb_stages > 1 ?
					RTE_EVENT_DEV_PRIORITY_LOWEST /
					(nb_stages - 1) : 0;
			/* Higher prio for the queues closer to last stage */
			q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
					(step * stage_pos);
		}
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

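	/*
	 * Note (added commentary): with a stride of nb_stages, producer p is
	 * pointed at queue p * nb_stages, the first queue of its own stage
	 * chain, while worker ports are linked to all queues.
	 */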
	ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
					perf_queue_nb_event_queues(opt));
	if (ret)
		return ret;

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}
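
/*
 * Illustrative invocation (added commentary; flags per the testeventdev
 * guide, the event device and lcore numbers are examples only):
 *
 *   sudo ./build/app/dpdk-test-eventdev --vdev=event_sw0 -- \
 *        --test=perf_queue --plcores=2 --wlcores=3 --stlist=a --nb_flows=64
 */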

static void
perf_queue_opt_dump(struct evt_options *opt)
{
	evt_dump_fwd_latency(opt);
	perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
}

static int
perf_queue_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
}

static bool
perf_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			perf_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		/* Fail the capability check instead of only logging */
		return false;
	}

	return true;
}

static const struct evt_test_ops perf_queue = {
	.cap_check          = perf_queue_capability_check,
	.opt_check          = perf_queue_opt_check,
	.opt_dump           = perf_queue_opt_dump,
	.test_setup         = perf_test_setup,
	.mempool_setup      = perf_mempool_setup,
	.eventdev_setup     = perf_queue_eventdev_setup,
	.eventdev_destroy   = perf_eventdev_destroy,
	.mempool_destroy    = perf_mempool_destroy,
	.test_result        = perf_test_result,
	.test_destroy       = perf_test_destroy,
};
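
/*
 * Note (added commentary): EVT_TEST_REGISTER adds this ops table to the
 * application's test list at startup, making the case selectable on the
 * command line with --test=perf_queue.
 */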

EVT_TEST_REGISTER(perf_queue);