/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <inttypes.h>
#include <stdbool.h>
#include <sys/queue.h>

#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_timer.h>
#include <rte_service_component.h>
#include <rte_cycles.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
#include "rte_eventdev_trace.h"
#include "rte_event_timer_adapter.h"
#include "rte_event_timer_adapter_pmd.h"

#define DATA_MZ_NAME_MAX_LEN 64
#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"

RTE_LOG_REGISTER(evtim_logtype, lib.eventdev.adapter.timer, NOTICE);
RTE_LOG_REGISTER(evtim_buffer_logtype, lib.eventdev.adapter.timer, NOTICE);
RTE_LOG_REGISTER(evtim_svc_logtype, lib.eventdev.adapter.timer.svc, NOTICE);

static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];

static const struct rte_event_timer_adapter_ops swtim_ops;

#define EVTIM_LOG(level, logtype, ...) \
	rte_log(RTE_LOG_ ## level, logtype, \
		RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
			"\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
#define EVTIM_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
#define EVTIM_BUF_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
#define EVTIM_SVC_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
#else
#define EVTIM_LOG_DBG(...) (void)0
#define EVTIM_BUF_LOG_DBG(...) (void)0
#define EVTIM_SVC_LOG_DBG(...) (void)0
#endif

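/* Default port configuration callback: stop the event device if it is
 * running, grow its port count by one and reconfigure it, set up the new
 * port (with the caller-supplied port config if given, otherwise the device
 * default), and restart the device if it had been started.
 */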
static int
default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
		     void *conf_arg)
{
	struct rte_event_timer_adapter *adapter;
	struct rte_eventdev *dev;
	struct rte_event_dev_config dev_conf;
	struct rte_event_port_conf *port_conf, def_port_conf = {0};
	int started;
	uint8_t port_id;
	uint8_t dev_id;
	int ret;

	RTE_SET_USED(event_dev_id);

	adapter = &adapters[id];
	dev = &rte_eventdevs[adapter->data->event_dev_id];
	dev_id = dev->data->dev_id;
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);

	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to configure event dev %u\n", dev_id);
		if (started)
			if (rte_event_dev_start(dev_id))
				return -EIO;

		return ret;
	}

	if (conf_arg != NULL)
		port_conf = conf_arg;
	else {
		port_conf = &def_port_conf;
		ret = rte_event_port_default_conf_get(dev_id, port_id,
						      port_conf);
		if (ret < 0)
			return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to setup event port %u on event dev %u\n",
			      port_id, dev_id);
		return ret;
	}

	*event_port_id = port_id;

	if (started)
		ret = rte_event_dev_start(dev_id);

	return ret;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
{
	return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
						  NULL);
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_create_ext(
		const struct rte_event_timer_adapter_conf *conf,
		rte_event_timer_adapter_port_conf_cb_t conf_cb,
		void *conf_arg)
{
	uint16_t adapter_id;
	struct rte_event_timer_adapter *adapter;
	const struct rte_memzone *mz;
	char mz_name[DATA_MZ_NAME_MAX_LEN];
	int n, ret;
	struct rte_eventdev *dev;

	if (conf == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check eventdev ID */
	if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
		rte_errno = EINVAL;
		return NULL;
	}
	dev = &rte_eventdevs[conf->event_dev_id];

	adapter_id = conf->timer_adapter_id;

	/* Check that adapter_id is in range */
	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check adapter ID not already allocated */
	adapter = &adapters[adapter_id];
	if (adapter->allocated) {
		rte_errno = EEXIST;
		return NULL;
	}

	/* Create shared data area. */
	n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
	if (n >= (int)sizeof(mz_name)) {
		rte_errno = EINVAL;
		return NULL;
	}
	mz = rte_memzone_reserve(mz_name,
				 sizeof(struct rte_event_timer_adapter_data),
				 conf->socket_id, 0);
	if (mz == NULL)
		/* rte_errno set by rte_memzone_reserve */
		return NULL;

	adapter->data = mz->addr;
	memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));

	adapter->data->mz = mz;
	adapter->data->event_dev_id = conf->event_dev_id;
	adapter->data->id = adapter_id;
	adapter->data->socket_id = conf->socket_id;
	adapter->data->conf = *conf;  /* copy conf structure */

	/* Query eventdev PMD for timer adapter capabilities and ops */
	ret = dev->dev_ops->timer_adapter_caps_get(dev,
						   adapter->data->conf.flags,
						   &adapter->data->caps,
						   &adapter->ops);
	if (ret < 0) {
		rte_errno = -ret;
		goto free_memzone;
	}

	if (!(adapter->data->caps &
	      RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
		FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
		ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
			      &adapter->data->event_port_id, conf_arg);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Allow driver to do some setup */
	FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
	ret = adapter->ops->init(adapter);
	if (ret < 0) {
		rte_errno = -ret;
		goto free_memzone;
	}

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	rte_eventdev_trace_timer_adapter_create(adapter_id, adapter, conf,
		conf_cb);
	return adapter;

free_memzone:
	rte_memzone_free(adapter->data->mz);
	return NULL;
}

int
rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter->ops->get_info)
		/* let driver set values it knows */
		adapter->ops->get_info(adapter, adapter_info);

	/* Set common values */
	adapter_info->conf = adapter->data->conf;
	adapter_info->event_dev_port_id = adapter->data->event_port_id;
	adapter_info->caps = adapter->data->caps;

	return 0;
}

int
rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);

	if (adapter->data->started) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
			      adapter->data->id);
		return -EALREADY;
	}

	ret = adapter->ops->start(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 1;
	rte_eventdev_trace_timer_adapter_start(adapter);
	return 0;
}

int
rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);

	if (adapter->data->started == 0) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
			      adapter->data->id);
		return 0;
	}

	ret = adapter->ops->stop(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 0;
	rte_eventdev_trace_timer_adapter_stop(adapter);
	return 0;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_lookup(uint16_t adapter_id)
{
	char name[DATA_MZ_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	struct rte_event_timer_adapter_data *data;
	struct rte_event_timer_adapter *adapter;
	int ret;
	struct rte_eventdev *dev;

	if (adapters[adapter_id].allocated)
		return &adapters[adapter_id]; /* Adapter is already loaded */

	snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	data = mz->addr;

	adapter = &adapters[data->id];
	adapter->data = data;

	dev = &rte_eventdevs[adapter->data->event_dev_id];

	/* Query eventdev PMD for timer adapter capabilities and ops */
	ret = dev->dev_ops->timer_adapter_caps_get(dev,
						   adapter->data->conf.flags,
						   &adapter->data->caps,
						   &adapter->ops);
	if (ret < 0) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	return adapter;
}

int
rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);

	if (adapter->data->started == 1) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
			      "before freeing", adapter->data->id);
		return -EBUSY;
	}

	/* free impl priv data */
	ret = adapter->ops->uninit(adapter);
	if (ret < 0)
		return ret;

	/* free shared data area */
	ret = rte_memzone_free(adapter->data->mz);
	if (ret < 0)
		return ret;

	adapter->data = NULL;
	adapter->allocated = 0;

	rte_eventdev_trace_timer_adapter_free(adapter);
	return 0;
}

int
rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
				       uint32_t *service_id)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter->data->service_inited && service_id != NULL)
		*service_id = adapter->data->service_id;

	return adapter->data->service_inited ? 0 : -ESRCH;
}

int
rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
				  struct rte_event_timer_adapter_stats *stats)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
	if (stats == NULL)
		return -EINVAL;

	return adapter->ops->stats_get(adapter, stats);
}

int
rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
	return adapter->ops->stats_reset(adapter);
}

/*
 * Software event timer adapter buffer helper functions
 */

#define NSECPERSEC 1E9

/* Optimizations used to index into the buffer require that the buffer size
 * be a power of 2.
 */
#define EVENT_BUFFER_SZ 4096
#define EVENT_BUFFER_BATCHSZ 32
#define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)

#define EXP_TIM_BUF_SZ 128

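/* Ring buffer used by the software adapter's service function to batch timer
 * expiry events before enqueuing them to the event device.  head and tail
 * are free-running counters; indices into events[] are obtained by masking
 * with EVENT_BUFFER_MASK, which is why EVENT_BUFFER_SZ must be a power of 2.
 */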
struct event_buffer {
	size_t head;
	size_t tail;
	struct rte_event events[EVENT_BUFFER_SZ];
} __rte_cache_aligned;

static inline bool
event_buffer_full(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
}

static inline bool
event_buffer_batch_ready(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
}

static void
event_buffer_init(struct event_buffer *bufp)
{
	bufp->head = bufp->tail = 0;
	memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
}

static int
event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
{
	size_t head_idx;
	struct rte_event *buf_eventp;

	if (event_buffer_full(bufp))
		return -1;

	/* Instead of modulus, bitwise AND with mask to get head_idx. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	buf_eventp = &bufp->events[head_idx];
	rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));

	/* Wrap automatically when overflow occurs. */
	bufp->head++;

	return 0;
}

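/* Enqueue up to one batch of buffered events to the given event device port.
 * Only the largest contiguous run starting at the tail is attempted, so a
 * flush near the wrap point may enqueue fewer than EVENT_BUFFER_BATCHSZ
 * events.  An event rejected as invalid (EINVAL) is dropped and counted in
 * *nb_events_inv so the tail can advance past it.
 */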
static void
event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
		   uint16_t *nb_events_flushed,
		   uint16_t *nb_events_inv)
{
	struct rte_event *events = bufp->events;
	size_t head_idx, tail_idx;
	uint16_t n = 0;

	/* Instead of modulus, bitwise AND with mask to get index. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	tail_idx = bufp->tail & EVENT_BUFFER_MASK;

	RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);

	/* Determine the largest contiguous run we can attempt to enqueue to
	 * the event device.
	 */
	if (head_idx > tail_idx)
		n = head_idx - tail_idx;
	else if (head_idx < tail_idx)
		n = EVENT_BUFFER_SZ - tail_idx;
	else if (event_buffer_full(bufp))
		n = EVENT_BUFFER_SZ - tail_idx;
	else {
		*nb_events_flushed = 0;
		return;
	}

	n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
	*nb_events_inv = 0;

	*nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
						     &events[tail_idx], n);
	if (*nb_events_flushed != n) {
		if (rte_errno == EINVAL) {
			EVTIM_LOG_ERR("failed to enqueue invalid event - "
				      "dropping it");
			(*nb_events_inv)++;
		} else if (rte_errno == ENOSPC)
			rte_pause();
	}

	if (*nb_events_flushed > 0)
		EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
				  "device", *nb_events_flushed);

	bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
}

/*
 * Software event timer adapter implementation
 */
struct swtim {
	/* Identifier of service executing timer management logic. */
	uint32_t service_id;
	/* The cycle count at which the adapter should next tick */
	uint64_t next_tick_cycles;
	/* The tick resolution used by adapter instance. May have been
	 * adjusted from what user requested
	 */
	uint64_t timer_tick_ns;
	/* Maximum timeout in nanoseconds allowed by adapter instance. */
	uint64_t max_tmo_ns;
	/* Buffered timer expiry events to be enqueued to an event device. */
	struct event_buffer buffer;
	/* Statistics */
	struct rte_event_timer_adapter_stats stats;
	/* Mempool of timer objects */
	struct rte_mempool *tim_pool;
	/* Back pointer for convenience */
	struct rte_event_timer_adapter *adapter;
	/* Identifier of timer data instance */
	uint32_t timer_data_id;
	/* Track which cores have actually armed a timer */
	struct {
		uint16_t v;
	} __rte_cache_aligned in_use[RTE_MAX_LCORE];
	/* Track which cores' timer lists should be polled */
	unsigned int poll_lcores[RTE_MAX_LCORE];
	/* The number of lists that should be polled */
	int n_poll_lcores;
	/* Timers which have expired and can be returned to a mempool */
	struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
	/* The number of timers that can be returned to a mempool */
	size_t n_expired_timers;
};

static inline struct swtim *
swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
{
	return adapter->data->adapter_priv;
}

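/* Per-timer expiry callback invoked from rte_timer_alt_manage() in the
 * service function.  Buffers the expiry event; if the buffer is full, the
 * timer is re-armed with an immediate expiry so it is retried on the next
 * adapter tick.  Expired timer objects are collected and returned to the
 * mempool in bulk.
 */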
static void
swtim_callback(struct rte_timer *tim)
{
	struct rte_event_timer *evtim = tim->arg;
	struct rte_event_timer_adapter *adapter;
	unsigned int lcore = rte_lcore_id();
	struct swtim *sw;
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	uint64_t opaque;
	int ret;
	int n_lcores;

	opaque = evtim->impl_opaque[1];
	adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
	sw = swtim_pmd_priv(adapter);

	ret = event_buffer_add(&sw->buffer, &evtim->ev);
	if (ret < 0) {
		/* If event buffer is full, put timer back in list with
		 * immediate expiry value, so that we process it again on the
		 * next iteration.
		 */
		ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0, SINGLE,
					  lcore, NULL, evtim);
		if (ret < 0) {
			EVTIM_LOG_DBG("event buffer full, failed to reset "
				      "timer with immediate expiry value");
		} else {
			sw->stats.evtim_retry_count++;
			EVTIM_LOG_DBG("event buffer full, resetting rte_timer "
				      "with immediate expiry value");
		}

		if (unlikely(sw->in_use[lcore].v == 0)) {
			sw->in_use[lcore].v = 1;
			n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
						     __ATOMIC_RELAXED);
			__atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
					__ATOMIC_RELAXED);
		}
	} else {
		EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");

		/* Empty the buffer here, if necessary, to free older expired
		 * timers only
		 */
		if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
			rte_mempool_put_bulk(sw->tim_pool,
					     (void **)sw->expired_timers,
					     sw->n_expired_timers);
			sw->n_expired_timers = 0;
		}

		sw->expired_timers[sw->n_expired_timers++] = tim;
		sw->stats.evtim_exp_count++;

		__atomic_store_n(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
				__ATOMIC_RELEASE);
	}

	if (event_buffer_batch_ready(&sw->buffer)) {
		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
	}
}

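/* Convert an event timer's timeout, expressed in adapter ticks, to TSC
 * cycles for use with the rte_timer API.
 */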
static __rte_always_inline uint64_t
get_timeout_cycles(struct rte_event_timer *evtim,
		   const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
	return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
}

/* This function returns true if one or more (adapter) ticks have occurred
 * since the last time it was called.
 */
static inline bool
swtim_did_tick(struct swtim *sw)
{
	uint64_t cycles_per_adapter_tick, start_cycles;
	uint64_t *next_tick_cyclesp;

	next_tick_cyclesp = &sw->next_tick_cycles;
	cycles_per_adapter_tick = sw->timer_tick_ns *
			(rte_get_timer_hz() / NSECPERSEC);
	start_cycles = rte_get_timer_cycles();

	/* Note: initially, *next_tick_cyclesp == 0, so the clause below will
	 * execute, and set things going.
	 */

	if (start_cycles >= *next_tick_cyclesp) {
		/* Snap the current cycle count to the preceding adapter tick
		 * boundary.
		 */
		start_cycles -= start_cycles % cycles_per_adapter_tick;
		*next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;

		return true;
	}

	return false;
}

/* Check that event timer timeout value is in range */
static __rte_always_inline int
check_timeout(struct rte_event_timer *evtim,
	      const struct rte_event_timer_adapter *adapter)
{
	uint64_t tmo_nsec;
	struct swtim *sw = swtim_pmd_priv(adapter);

	tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
	if (tmo_nsec > sw->max_tmo_ns)
		return -1;
	if (tmo_nsec < sw->timer_tick_ns)
		return -2;

	return 0;
}

/* Check that event timer event queue sched type matches destination event
 * queue sched type
 */
static __rte_always_inline int
check_destination_event_queue(struct rte_event_timer *evtim,
			      const struct rte_event_timer_adapter *adapter)
{
	int ret;
	uint32_t sched_type;

	ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
				       evtim->ev.queue_id,
				       RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
				       &sched_type);

	if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
	    ret == -EOVERFLOW)
		return 0;

	return -1;
}

static int
swtim_service_func(void *arg)
{
	struct rte_event_timer_adapter *adapter = arg;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;

	if (swtim_did_tick(sw)) {
		rte_timer_alt_manage(sw->timer_data_id,
				     sw->poll_lcores,
				     sw->n_poll_lcores,
				     swtim_callback);

		/* Return expired timer objects back to mempool */
		rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
				     sw->n_expired_timers);
		sw->n_expired_timers = 0;

		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
		sw->stats.adapter_tick_count++;
	}

	return 0;
}

/* The adapter initialization function rounds the mempool size up to the next
 * power of 2, so we can take the difference between that value and what the
 * user requested, and use the space for caches.  This avoids a scenario where
 * a user can't arm the number of timers the adapter was configured with
 * because mempool objects have been lost to caches.
 *
 * nb_actual should always be a power of 2, so we can iterate over the powers
 * of 2 to see what the largest cache size we can use is.
 */
static int
compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
{
	int i;
	int size;
	int cache_size = 0;

	for (i = 0;; i++) {
		size = 1 << i;

		if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
		    size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
		    size <= nb_actual / 1.5)
			cache_size = size;
		else
			break;
	}

	return cache_size;
}

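/* Set up the software adapter: allocate the private swtim struct, create a
 * mempool of rte_timer objects, initialize the rte_timer subsystem and an
 * alternate timer data instance, and register a (non-MT-safe) service that
 * runs the adapter tick logic.
 */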
static int
swtim_init(struct rte_event_timer_adapter *adapter)
{
	int i, ret;
	struct swtim *sw;
	unsigned int flags;
	struct rte_service_spec service;

	/* Allocate storage for private data area */
#define SWTIM_NAMESIZE 32
	char swtim_name[SWTIM_NAMESIZE];
	snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
			adapter->data->id);
	sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
			adapter->data->socket_id);
	if (sw == NULL) {
		EVTIM_LOG_ERR("failed to allocate space for private data");
		rte_errno = ENOMEM;
		return -1;
	}

	/* Connect storage to adapter instance */
	adapter->data->adapter_priv = sw;
	sw->adapter = adapter;

	sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
	sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;

	/* Create a timer pool */
	char pool_name[SWTIM_NAMESIZE];
	snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
		 adapter->data->id);
	/* Optimal mempool size is a power of 2 minus one */
	uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
	int pool_size = nb_timers - 1;
	int cache_size = compute_msg_mempool_cache_size(
				adapter->data->conf.nb_timers, nb_timers);
	flags = 0; /* pool is multi-producer, multi-consumer */
	sw->tim_pool = rte_mempool_create(pool_name, pool_size,
			sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
			NULL, NULL, adapter->data->socket_id, flags);
	if (sw->tim_pool == NULL) {
		EVTIM_LOG_ERR("failed to create timer object mempool");
		rte_errno = ENOMEM;
		goto free_alloc;
	}

	/* Initialize the variables that track in-use timer lists */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		sw->in_use[i].v = 0;

	/* Initialize the timer subsystem and allocate timer data instance */
	ret = rte_timer_subsystem_init();
	if (ret < 0) {
		if (ret != -EALREADY) {
			EVTIM_LOG_ERR("failed to initialize timer subsystem");
			rte_errno = -ret;
			goto free_mempool;
		}
	}

	ret = rte_timer_data_alloc(&sw->timer_data_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to allocate timer data instance");
		rte_errno = -ret;
		goto free_mempool;
	}

	/* Initialize timer event buffer */
	event_buffer_init(&sw->buffer);

	sw->adapter = adapter;

	/* Register a service component to run adapter logic */
	memset(&service, 0, sizeof(service));
	snprintf(service.name, RTE_SERVICE_NAME_MAX,
		 "swtim_svc_%"PRIu8, adapter->data->id);
	service.socket_id = adapter->data->socket_id;
	service.callback = swtim_service_func;
	service.callback_userdata = adapter;
	service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
	ret = rte_service_component_register(&service, &sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
			      ": err = %d", service.name, sw->service_id,
			      ret);

		rte_errno = ENOSPC;
		goto free_mempool;
	}

	EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
		      sw->service_id);

	adapter->data->service_id = sw->service_id;
	adapter->data->service_inited = 1;

	return 0;
free_mempool:
	rte_mempool_free(sw->tim_pool);
free_alloc:
	rte_free(sw);
	return -1;
}

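/* Callback used by rte_timer_stop_all() during uninit: return a stopped
 * timer object to the adapter's mempool.
 */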
static void
swtim_free_tim(struct rte_timer *tim, void *arg)
{
	struct swtim *sw = arg;

	rte_mempool_put(sw->tim_pool, tim);
}

/* Traverse the list of outstanding timers and put them back in the mempool
 * before freeing the adapter to avoid leaking the memory.
 */
static int
swtim_uninit(struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Free outstanding timers */
	rte_timer_stop_all(sw->timer_data_id,
			   sw->poll_lcores,
			   sw->n_poll_lcores,
			   swtim_free_tim,
			   sw);

	ret = rte_service_component_unregister(sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to unregister service component");
		return ret;
	}

	rte_mempool_free(sw->tim_pool);
	rte_free(sw);
	adapter->data->adapter_priv = NULL;

	return 0;
}

static inline int32_t
get_mapped_count_for_service(uint32_t service_id)
{
	int32_t core_count, i, mapped_count = 0;
	uint32_t lcore_arr[RTE_MAX_LCORE];

	core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);

	for (i = 0; i < core_count; i++)
		if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
			mapped_count++;

	return mapped_count;
}

static int
swtim_start(const struct rte_event_timer_adapter *adapter)
{
	int mapped_count;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Mapping the service to more than one service core can introduce
	 * delays while one thread is waiting to acquire a lock, so only allow
	 * one core to be mapped to the service.
	 *
	 * Note: the service could be modified such that it spreads cores to
	 * poll over multiple service instances.
	 */
	mapped_count = get_mapped_count_for_service(sw->service_id);

	if (mapped_count != 1)
		return mapped_count < 1 ? -ENOENT : -ENOTSUP;

	return rte_service_component_runstate_set(sw->service_id, 1);
}

static int
swtim_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	ret = rte_service_component_runstate_set(sw->service_id, 0);
	if (ret < 0)
		return ret;

	/* Wait for the service to complete its final iteration */
	while (rte_service_may_be_active(sw->service_id))
		rte_pause();

	return 0;
}

static void
swtim_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	adapter_info->min_resolution_ns = sw->timer_tick_ns;
	adapter_info->max_tmo_ns = sw->max_tmo_ns;
}

static int
swtim_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	*stats = sw->stats; /* structure copy */
	return 0;
}

static int
swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	memset(&sw->stats, 0, sizeof(sw->stats));
	return 0;
}

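/* Common arm path shared by swtim_arm_burst() and swtim_arm_tmo_tick_burst():
 * take rte_timer objects from the mempool in bulk, validate each event timer,
 * and arm its timer on this lcore's timer list.  Returns the number of event
 * timers successfully armed; on partial failure rte_errno indicates the
 * cause and unused timer objects are returned to the mempool.
 */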
static uint16_t
__swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	int i, ret;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint32_t lcore_id = rte_lcore_id();
	struct rte_timer *tim, *tims[nb_evtims];
	uint64_t cycles;
	int n_lcores;
	/* Timer list for this lcore is not in use. */
	uint16_t exp_state = 0;
	enum rte_event_timer_state n_state;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	/* Adjust lcore_id if non-EAL thread. Arbitrarily pick the timer list
	 * of the highest lcore to insert such timers into.
	 */
	if (lcore_id == LCORE_ID_ANY)
		lcore_id = RTE_MAX_LCORE - 1;

	/* If this is the first time we're arming an event timer on this lcore,
	 * mark this lcore as "in use"; this will cause the service
	 * function to process the timer list that corresponds to this lcore.
	 * The atomic compare-and-swap operation prevents a race condition on
	 * the in_use flag between multiple non-EAL threads.
	 */
	if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v,
			&exp_state, 1, 0,
			__ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
		EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
			      lcore_id);
		n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
					     __ATOMIC_RELAXED);
		__atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id,
				__ATOMIC_RELAXED);
	}

	ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
				   nb_evtims);
	if (ret < 0) {
		rte_errno = ENOSPC;
		return 0;
	}

	for (i = 0; i < nb_evtims; i++) {
		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
		if (n_state == RTE_EVENT_TIMER_ARMED) {
			rte_errno = EALREADY;
			break;
		} else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED ||
			     n_state == RTE_EVENT_TIMER_CANCELED)) {
			rte_errno = EINVAL;
			break;
		}

		ret = check_timeout(evtims[i], adapter);
		if (unlikely(ret == -1)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR_TOOLATE,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		} else if (unlikely(ret == -2)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR_TOOEARLY,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		}

		if (unlikely(check_destination_event_queue(evtims[i],
							   adapter) < 0)) {
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR,
					__ATOMIC_RELAXED);
			rte_errno = EINVAL;
			break;
		}

		tim = tims[i];
		rte_timer_init(tim);

		evtims[i]->impl_opaque[0] = (uintptr_t)tim;
		evtims[i]->impl_opaque[1] = (uintptr_t)adapter;

		cycles = get_timeout_cycles(evtims[i], adapter);
		ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
					  SINGLE, lcore_id, NULL, evtims[i]);
		if (ret < 0) {
			/* tim was in RUNNING or CONFIG state */
			__atomic_store_n(&evtims[i]->state,
					RTE_EVENT_TIMER_ERROR,
					__ATOMIC_RELEASE);
			break;
		}

		EVTIM_LOG_DBG("armed an event timer");
		/* RELEASE ordering guarantees the adapter-specific value
		 * changes are observed before the update of state.
		 */
		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
				__ATOMIC_RELEASE);
	}

	if (i < nb_evtims)
		rte_mempool_put_bulk(sw->tim_pool,
				     (void **)&tims[i], nb_evtims - i);

	return i;
}

static uint16_t
swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

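/* Cancel a burst of armed event timers: stop each underlying rte_timer,
 * return it to the mempool, and move the event timer to the CANCELED state.
 * Returns the number of event timers successfully canceled.
 */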
static uint16_t
swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
		   struct rte_event_timer **evtims,
		   uint16_t nb_evtims)
{
	int i, ret;
	struct rte_timer *timp;
	uint64_t opaque;
	struct swtim *sw = swtim_pmd_priv(adapter);
	enum rte_event_timer_state n_state;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	for (i = 0; i < nb_evtims; i++) {
		/* Don't modify the event timer state in these cases */
		/* ACQUIRE ordering guarantees the access of implementation
		 * specific opaque data under the correct state.
		 */
		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
		if (n_state == RTE_EVENT_TIMER_CANCELED) {
			rte_errno = EALREADY;
			break;
		} else if (n_state != RTE_EVENT_TIMER_ARMED) {
			rte_errno = EINVAL;
			break;
		}

		opaque = evtims[i]->impl_opaque[0];
		timp = (struct rte_timer *)(uintptr_t)opaque;
		RTE_ASSERT(timp != NULL);

		ret = rte_timer_alt_stop(sw->timer_data_id, timp);
		if (ret < 0) {
			/* Timer is running or being configured */
			rte_errno = EAGAIN;
			break;
		}

		rte_mempool_put(sw->tim_pool, (void **)timp);

		/* The RELEASE ordering here pairs with the ACQUIRE loads of
		 * the state to make sure the state update is observed between
		 * threads.
		 */
		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
				__ATOMIC_RELEASE);
	}

	return i;
}

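/* Arm a burst of event timers that all share the same timeout, expressed in
 * adapter ticks, then hand off to the common arm path.
 */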
static uint16_t
swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
			 struct rte_event_timer **evtims,
			 uint64_t timeout_ticks,
			 uint16_t nb_evtims)
{
	int i;

	for (i = 0; i < nb_evtims; i++)
		evtims[i]->timeout_ticks = timeout_ticks;

	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static const struct rte_event_timer_adapter_ops swtim_ops = {
	.init			= swtim_init,
	.uninit			= swtim_uninit,
	.start			= swtim_start,
	.stop			= swtim_stop,
	.get_info		= swtim_get_info,
	.stats_get		= swtim_stats_get,
	.stats_reset		= swtim_stats_reset,
	.arm_burst		= swtim_arm_burst,
	.arm_tmo_tick_burst	= swtim_arm_tmo_tick_burst,
	.cancel_burst		= swtim_cancel_burst,
};