/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _EVT_COMMON_
#define _EVT_COMMON_

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_event_crypto_adapter.h>
#include <rte_eventdev.h>
#include <rte_service.h>

/* ANSI escape codes used to colorize the log macros below. */
#define CLNRM "\x1b[0m"
#define CLRED "\x1b[31m"
#define CLGRN "\x1b[32m"
#define CLYEL "\x1b[33m"

#define evt_err(fmt, args...) \
	fprintf(stderr, CLRED"error: %s() "fmt CLNRM "\n", __func__, ## args)

#define evt_info(fmt, args...) \
	fprintf(stdout, CLYEL""fmt CLNRM "\n", ## args)

/* Field width used to align the "name : value" dump output. */
#define EVT_STR_FMT 20

#define evt_dump(str, fmt, val...) \
	printf("\t%-*s : "fmt"\n", EVT_STR_FMT, str, ## val)

#define evt_dump_begin(str) printf("\t%-*s : {", EVT_STR_FMT, str)

#define evt_dump_end printf("\b}\n")

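/*
 * Illustrative use of the dump macros (sketch only; "nb_flows" and the
 * lcore values below are hypothetical):
 *
 *	evt_dump("nb_flows", "%u", 1024);  // "nb_flows             : 1024"
 *	evt_dump_begin("worker lcores");   // open the brace, print items,
 *	printf("%d ", 2);                  // then close with evt_dump_end,
 *	printf("%d ", 3);                  // which backspaces over the
 *	evt_dump_end;                      // trailing space: "{2 3}"
 */
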
#define EVT_MAX_STAGES 64
#define EVT_MAX_PORTS 256
#define EVT_MAX_QUEUES 256

enum evt_prod_type {
	EVT_PROD_TYPE_NONE,
	EVT_PROD_TYPE_SYNT,               /* Producer type Synthetic i.e. CPU. */
	EVT_PROD_TYPE_ETH_RX_ADPTR,       /* Producer type Eth Rx Adapter. */
	EVT_PROD_TYPE_EVENT_TIMER_ADPTR,  /* Producer type Timer Adapter. */
	EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR, /* Producer type Crypto Adapter. */
	EVT_PROD_TYPE_MAX,
};

struct evt_options {
#define EVT_TEST_NAME_MAX_LEN 32
	char test_name[EVT_TEST_NAME_MAX_LEN];
	bool plcores[RTE_MAX_LCORE];
	bool wlcores[RTE_MAX_LCORE];
	int pool_sz;
	int socket_id;
	int nb_stages;
	int verbose_level;
	uint8_t dev_id;
	uint8_t timdev_cnt;
	uint8_t nb_timer_adptrs;
	uint8_t timdev_use_burst;
	uint8_t per_port_pool;
	uint8_t sched_type_list[EVT_MAX_STAGES];
	uint16_t mbuf_sz;
	uint16_t wkr_deq_dep;
	uint16_t vector_size;
	uint16_t eth_queues;
	uint32_t nb_flows;
	uint32_t tx_first;
	uint32_t max_pkt_sz;
	uint32_t prod_enq_burst_sz;
	uint32_t deq_tmo_nsec;
	uint32_t q_priority:1;
	uint32_t fwd_latency:1;
	uint32_t ena_vector:1;
	uint64_t nb_pkts;
	uint64_t nb_timers;
	uint64_t expiry_nsec;
	uint64_t max_tmo_nsec;
	uint64_t vector_tmo_nsec;
	uint64_t timer_tick_nsec;
	uint64_t optm_timer_tick_nsec;
	enum evt_prod_type prod_type;
	enum rte_event_crypto_adapter_mode crypto_adptr_mode;
};
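
/*
 * A minimal sketch of seeding evt_options before command-line parsing.
 * The default values below are illustrative, not mandated by this header:
 *
 *	struct evt_options opt;
 *
 *	memset(&opt, 0, sizeof(opt));
 *	opt.socket_id = SOCKET_ID_ANY;
 *	opt.pool_sz = 16 * 1024;
 *	opt.nb_flows = 1024;
 *	opt.nb_stages = 1;
 *	opt.prod_type = EVT_PROD_TYPE_SYNT;
 */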

/* Return true if the device can schedule events from multiple lcores in
 * parallel (RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED).
 */
static inline bool
evt_has_distributed_sched(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) ?
			true : false;
}

/* Return true if the device supports burst enqueue/dequeue. */
static inline bool
evt_has_burst_mode(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
			true : false;
}

/* Return true if a single queue can hold events of all schedule types. */
static inline bool
evt_has_all_types_queue(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES) ?
			true : false;
}

/* Return true if the device carries the flow ID through to dequeued events. */
static inline bool
evt_has_flow_id(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID) ?
			true : false;
}
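
/*
 * Illustrative use of the capability helpers above: a worker can size its
 * dequeue burst based on RTE_EVENT_DEV_CAP_BURST_MODE. Sketch only; the
 * burst size of 16 and the port/timeout values are arbitrary:
 *
 *	struct rte_event ev[16];
 *	uint16_t depth = evt_has_burst_mode(dev_id) ? 16 : 1;
 *	uint16_t nb_deq;
 *
 *	nb_deq = rte_event_dequeue_burst(dev_id, port_id, ev, depth, 0);
 */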

/* Map the given service to the service lcore that currently runs the fewest
 * services, so that mandatory driver services spread evenly across lcores.
 */
static inline int
evt_service_setup(uint32_t service_id)
{
	int32_t core_cnt;
	unsigned int lcore = 0;
	uint32_t core_array[RTE_MAX_LCORE];
	uint8_t cnt;
	uint8_t min_cnt = UINT8_MAX;

	if (!rte_service_lcore_count())
		return -ENOENT;

	core_cnt = rte_service_lcore_list(core_array,
			RTE_MAX_LCORE);
	if (core_cnt < 0)
		return -ENOENT;
	/* Pick the core that has the fewest services running. */
	while (core_cnt--) {
		/* Reset default mapping */
		rte_service_map_lcore_set(service_id,
				core_array[core_cnt], 0);
		cnt = rte_service_lcore_count_services(
				core_array[core_cnt]);
		if (cnt < min_cnt) {
			lcore = core_array[core_cnt];
			min_cnt = cnt;
		}
	}
	if (rte_service_map_lcore_set(service_id, lcore, 1))
		return -ENOENT;

	return 0;
}
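
/*
 * Typical caller flow (sketch; assumes the underlying event device needs a
 * software scheduling service, which rte_event_dev_service_id_get() reports):
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		if (evt_service_setup(service_id))
 *			return -ENOENT;
 *		rte_service_runstate_set(service_id, 1);
 *	}
 */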

/* Configure the event device, taking resource limits from its advertised
 * capabilities and the dequeue timeout from the options.
 */
static inline int
evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
		uint8_t nb_ports)
{
	struct rte_event_dev_info info;
	int ret;

	memset(&info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	/* Clamp the requested dequeue timeout to the device's valid range. */
	if (opt->deq_tmo_nsec) {
		if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
			opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
			evt_info("dequeue_timeout_ns too low, using %u",
					opt->deq_tmo_nsec);
		}
		if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
			opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
			evt_info("dequeue_timeout_ns too high, using %u",
					opt->deq_tmo_nsec);
		}
	}

	const struct rte_event_dev_config config = {
			.dequeue_timeout_ns = opt->deq_tmo_nsec,
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_single_link_event_port_queues = 0,
			.nb_events_limit  = info.max_num_events,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth =
				info.max_event_port_dequeue_depth,
			.nb_event_port_enqueue_depth =
				info.max_event_port_enqueue_depth,
	};

	return rte_event_dev_configure(opt->dev_id, &config);
}
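
/*
 * After evt_configure_eventdev() succeeds, the caller still has to set up
 * queues and ports and start the device. A minimal sketch (NULL selects the
 * default queue/port configuration; error handling elided):
 *
 *	uint8_t i;
 *
 *	if (evt_configure_eventdev(opt, nb_queues, nb_ports))
 *		return -EINVAL;
 *	for (i = 0; i < nb_queues; i++)
 *		rte_event_queue_setup(opt->dev_id, i, NULL);
 *	for (i = 0; i < nb_ports; i++)
 *		rte_event_port_setup(opt->dev_id, i, NULL);
 *	rte_event_dev_start(opt->dev_id);
 */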

#endif /* _EVT_COMMON_ */