/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <inttypes.h>
#include <stdbool.h>
#include <sys/queue.h>

#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_timer.h>
#include <rte_service_component.h>
#include <rte_cycles.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
#include "rte_event_timer_adapter.h"
#include "rte_event_timer_adapter_pmd.h"

#define DATA_MZ_NAME_MAX_LEN 64
#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"

static int evtim_logtype;
static int evtim_svc_logtype;
static int evtim_buffer_logtype;

static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];

static const struct rte_event_timer_adapter_ops swtim_ops;

#define EVTIM_LOG(level, logtype, ...) \
	rte_log(RTE_LOG_ ## level, logtype, \
		RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
			"\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
#define EVTIM_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
#define EVTIM_BUF_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
#define EVTIM_SVC_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
#else
#define EVTIM_LOG_DBG(...) (void)0
#define EVTIM_BUF_LOG_DBG(...) (void)0
#define EVTIM_SVC_LOG_DBG(...) (void)0
#endif

static int
default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
		     void *conf_arg)
{
	struct rte_event_timer_adapter *adapter;
	struct rte_eventdev *dev;
	struct rte_event_dev_config dev_conf;
	struct rte_event_port_conf *port_conf, def_port_conf = {0};
	int started;
	uint8_t port_id;
	uint8_t dev_id;
	int ret;

	RTE_SET_USED(event_dev_id);

	adapter = &adapters[id];
	dev = &rte_eventdevs[adapter->data->event_dev_id];
	dev_id = dev->data->dev_id;
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);

	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
		if (started)
			if (rte_event_dev_start(dev_id))
				return -EIO;

		return ret;
	}

	if (conf_arg != NULL)
		port_conf = conf_arg;
	else {
		port_conf = &def_port_conf;
		ret = rte_event_port_default_conf_get(dev_id, port_id,
						      port_conf);
		if (ret < 0) {
			if (started)
				if (rte_event_dev_start(dev_id))
					return -EIO;

			return ret;
		}
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
			      port_id, dev_id);
		if (started)
			if (rte_event_dev_start(dev_id))
				return -EIO;

		return ret;
	}

	*event_port_id = port_id;

	if (started)
		ret = rte_event_dev_start(dev_id);

	return ret;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
{
	return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
						  NULL);
}
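
/* Illustrative usage sketch (editor's addition, not part of this library):
 * one way an application might create an adapter via the default port
 * configuration callback.  The device ID, tick period, timeout, and timer
 * count below are placeholder assumptions.
 *
 *	const struct rte_event_timer_adapter_conf conf = {
 *		.event_dev_id = 0,
 *		.timer_adapter_id = 0,
 *		.socket_id = rte_socket_id(),
 *		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 *		.timer_tick_ns = 10 * 1000 * 1000,	// 10 ms tick
 *		.max_tmo_ns = 180000000000ULL,		// 3 min max timeout
 *		.nb_timers = 40000,
 *		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
 *	};
 *	struct rte_event_timer_adapter *adptr =
 *		rte_event_timer_adapter_create(&conf);
 *	if (adptr == NULL)
 *		rte_exit(EXIT_FAILURE, "create failed: %d\n", rte_errno);
 */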

struct rte_event_timer_adapter *
rte_event_timer_adapter_create_ext(
		const struct rte_event_timer_adapter_conf *conf,
		rte_event_timer_adapter_port_conf_cb_t conf_cb,
		void *conf_arg)
{
	uint16_t adapter_id;
	struct rte_event_timer_adapter *adapter;
	const struct rte_memzone *mz;
	char mz_name[DATA_MZ_NAME_MAX_LEN];
	int n, ret;
	struct rte_eventdev *dev;

	if (conf == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check eventdev ID */
	if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
		rte_errno = EINVAL;
		return NULL;
	}
	dev = &rte_eventdevs[conf->event_dev_id];

	adapter_id = conf->timer_adapter_id;

	/* Check that adapter_id is in range */
	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check adapter ID not already allocated */
	adapter = &adapters[adapter_id];
	if (adapter->allocated) {
		rte_errno = EEXIST;
		return NULL;
	}

	/* Create shared data area. */
	n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
	if (n >= (int)sizeof(mz_name)) {
		rte_errno = EINVAL;
		return NULL;
	}
	mz = rte_memzone_reserve(mz_name,
				 sizeof(struct rte_event_timer_adapter_data),
				 conf->socket_id, 0);
	if (mz == NULL)
		/* rte_errno set by rte_memzone_reserve */
		return NULL;

	adapter->data = mz->addr;
	memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));

	adapter->data->mz = mz;
	adapter->data->event_dev_id = conf->event_dev_id;
	adapter->data->id = adapter_id;
	adapter->data->socket_id = conf->socket_id;
	adapter->data->conf = *conf;  /* copy conf structure */

	/* Query eventdev PMD for timer adapter capabilities and ops */
	ret = dev->dev_ops->timer_adapter_caps_get(dev,
						   adapter->data->conf.flags,
						   &adapter->data->caps,
						   &adapter->ops);
	if (ret < 0) {
		rte_errno = -ret;
		goto free_memzone;
	}

	if (!(adapter->data->caps &
	      RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
		FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
		ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
			      &adapter->data->event_port_id, conf_arg);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Allow driver to do some setup */
	FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
	ret = adapter->ops->init(adapter);
	if (ret < 0) {
		rte_errno = -ret;
		goto free_memzone;
	}

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	return adapter;

free_memzone:
	rte_memzone_free(adapter->data->mz);
	return NULL;
}

int
rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter_info == NULL)
		return -EINVAL;

	if (adapter->ops->get_info)
		/* let driver set values it knows */
		adapter->ops->get_info(adapter, adapter_info);

	/* Set common values */
	adapter_info->conf = adapter->data->conf;
	adapter_info->event_dev_port_id = adapter->data->event_port_id;
	adapter_info->caps = adapter->data->caps;

	return 0;
}

int
rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);

	if (adapter->data->started) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
			      adapter->data->id);
		return -EALREADY;
	}

	ret = adapter->ops->start(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 1;

	return 0;
}

int
rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);

	if (adapter->data->started == 0) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
			      adapter->data->id);
		return 0;
	}

	ret = adapter->ops->stop(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 0;

	return 0;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_lookup(uint16_t adapter_id)
{
	char name[DATA_MZ_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	struct rte_event_timer_adapter_data *data;
	struct rte_event_timer_adapter *adapter;
	int ret;
	struct rte_eventdev *dev;

	/* Check that adapter_id is in range before indexing adapters[] */
	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}

	if (adapters[adapter_id].allocated)
		return &adapters[adapter_id]; /* Adapter is already loaded */

	snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	data = mz->addr;

	adapter = &adapters[data->id];
	adapter->data = data;

	dev = &rte_eventdevs[adapter->data->event_dev_id];

	/* Query eventdev PMD for timer adapter capabilities and ops */
	ret = dev->dev_ops->timer_adapter_caps_get(dev,
						   adapter->data->conf.flags,
						   &adapter->data->caps,
						   &adapter->ops);
	if (ret < 0) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &swtim_ops;

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	return adapter;
}

int
rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);

	if (adapter->data->started == 1) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
			      "before freeing", adapter->data->id);
		return -EBUSY;
	}

	/* free impl priv data */
	ret = adapter->ops->uninit(adapter);
	if (ret < 0)
		return ret;

	/* free shared data area */
	ret = rte_memzone_free(adapter->data->mz);
	if (ret < 0)
		return ret;

	adapter->data = NULL;
	adapter->allocated = 0;

	return 0;
}

int
rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
				       uint32_t *service_id)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter->data->service_inited && service_id != NULL)
		*service_id = adapter->data->service_id;

	return adapter->data->service_inited ? 0 : -ESRCH;
}

int
rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
				  struct rte_event_timer_adapter_stats *stats)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
	if (stats == NULL)
		return -EINVAL;

	return adapter->ops->stats_get(adapter, stats);
}

int
rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
	return adapter->ops->stats_reset(adapter);
}

/*
 * Software event timer adapter buffer helper functions
 */

#define NSECPERSEC 1E9

/* Optimizations used to index into the buffer require that the buffer size
 * be a power of 2.
 */
#define EVENT_BUFFER_SZ 4096
#define EVENT_BUFFER_BATCHSZ 32
#define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)
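
/* Illustrative note (editor's example): because EVENT_BUFFER_SZ is a power
 * of 2, head and tail can grow monotonically and be masked only on access;
 * e.g. head == 4099 selects slot 4099 & EVENT_BUFFER_MASK == 3, while
 * (head - tail) still gives the fill level after the counters wrap.
 */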

#define EXP_TIM_BUF_SZ 128

struct event_buffer {
	size_t head;
	size_t tail;
	struct rte_event events[EVENT_BUFFER_SZ];
} __rte_cache_aligned;

static inline bool
event_buffer_full(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
}

static inline bool
event_buffer_batch_ready(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
}

static void
event_buffer_init(struct event_buffer *bufp)
{
	bufp->head = bufp->tail = 0;
	memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
}

static int
event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
{
	size_t head_idx;
	struct rte_event *buf_eventp;

	if (event_buffer_full(bufp))
		return -1;

	/* Instead of modulus, bitwise AND with mask to get head_idx. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	buf_eventp = &bufp->events[head_idx];
	rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));

	/* Wrap automatically when overflow occurs. */
	bufp->head++;

	return 0;
}

static void
event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
		   uint16_t *nb_events_flushed,
		   uint16_t *nb_events_inv)
{
	struct rte_event *events = bufp->events;
	size_t head_idx, tail_idx;
	uint16_t n = 0;

	/* Instead of modulus, bitwise AND with mask to get index. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	tail_idx = bufp->tail & EVENT_BUFFER_MASK;

	RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);

	/* Determine the largest contiguous run we can attempt to enqueue to
	 * the event device.
	 */
	if (head_idx > tail_idx)
		n = head_idx - tail_idx;
	else if (head_idx < tail_idx)
		n = EVENT_BUFFER_SZ - tail_idx;
	else if (event_buffer_full(bufp))
		n = EVENT_BUFFER_SZ - tail_idx;
	else {
		*nb_events_flushed = 0;
		return;
	}

	n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
	*nb_events_inv = 0;

	*nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
						     &events[tail_idx], n);
	if (*nb_events_flushed != n) {
		if (rte_errno == EINVAL) {
			EVTIM_LOG_ERR("failed to enqueue invalid event - "
				      "dropping it");
			(*nb_events_inv)++;
		} else if (rte_errno == ENOSPC)
			rte_pause();
	}

	if (*nb_events_flushed > 0)
		EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
				  "device", *nb_events_flushed);

	bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
}
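
/* Worked example (editor's addition): with EVENT_BUFFER_SZ == 4096, suppose
 * tail == 4094 and head == 4100, i.e. six buffered events.  Then
 * tail_idx == 4094 and head_idx == 4, so head_idx < tail_idx and the
 * contiguous run is EVENT_BUFFER_SZ - tail_idx == 2 events; the four
 * wrapped events in slots 0..3 are left for a subsequent flush call.
 */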

/*
 * Software event timer adapter implementation
 */
struct swtim {
	/* Identifier of service executing timer management logic. */
	uint32_t service_id;
	/* The cycle count at which the adapter should next tick */
	uint64_t next_tick_cycles;
	/* The tick resolution used by the adapter instance. May have been
	 * adjusted from what the user requested.
	 */
	uint64_t timer_tick_ns;
	/* Maximum timeout in nanoseconds allowed by adapter instance. */
	uint64_t max_tmo_ns;
	/* Buffered timer expiry events to be enqueued to an event device. */
	struct event_buffer buffer;
	/* Statistics */
	struct rte_event_timer_adapter_stats stats;
	/* Mempool of timer objects */
	struct rte_mempool *tim_pool;
	/* Back pointer for convenience */
	struct rte_event_timer_adapter *adapter;
	/* Identifier of timer data instance */
	uint32_t timer_data_id;
	/* Track which cores have actually armed a timer */
	struct {
		rte_atomic16_t v;
	} __rte_cache_aligned in_use[RTE_MAX_LCORE];
	/* Track which cores' timer lists should be polled */
	unsigned int poll_lcores[RTE_MAX_LCORE];
	/* The number of lists that should be polled */
	int n_poll_lcores;
	/* Timers which have expired and can be returned to a mempool */
	struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
	/* The number of timers that can be returned to a mempool */
	size_t n_expired_timers;
};

static inline struct swtim *
swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
{
	return adapter->data->adapter_priv;
}

static void
swtim_callback(struct rte_timer *tim)
{
	struct rte_event_timer *evtim = tim->arg;
	struct rte_event_timer_adapter *adapter;
	unsigned int lcore = rte_lcore_id();
	struct swtim *sw;
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	uint64_t opaque;
	int ret;

	opaque = evtim->impl_opaque[1];
	adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
	sw = swtim_pmd_priv(adapter);

	ret = event_buffer_add(&sw->buffer, &evtim->ev);
	if (ret < 0) {
		/* If event buffer is full, put timer back in list with
		 * immediate expiry value, so that we process it again on the
		 * next iteration.
		 */
		ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0, SINGLE,
					  lcore, NULL, evtim);
		if (ret < 0) {
			EVTIM_LOG_DBG("event buffer full, failed to reset "
				      "timer with immediate expiry value");
		} else {
			sw->stats.evtim_retry_count++;
			EVTIM_LOG_DBG("event buffer full, resetting rte_timer "
				      "with immediate expiry value");
		}

		if (unlikely(rte_atomic16_test_and_set(&sw->in_use[lcore].v)))
			sw->poll_lcores[sw->n_poll_lcores++] = lcore;
	} else {
		EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");

		/* Empty the buffer here, if necessary, to free older expired
		 * timers only.
		 */
		if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
			rte_mempool_put_bulk(sw->tim_pool,
					     (void **)sw->expired_timers,
					     sw->n_expired_timers);
			sw->n_expired_timers = 0;
		}

		sw->expired_timers[sw->n_expired_timers++] = tim;
		sw->stats.evtim_exp_count++;

		evtim->state = RTE_EVENT_TIMER_NOT_ARMED;
	}

	if (event_buffer_batch_ready(&sw->buffer)) {
		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
	}
}

static __rte_always_inline uint64_t
get_timeout_cycles(struct rte_event_timer *evtim,
		   const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
	return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
}

/* This function returns true if one or more (adapter) ticks have occurred
 * since the last time it was called.
 */
static inline bool
swtim_did_tick(struct swtim *sw)
{
	uint64_t cycles_per_adapter_tick, start_cycles;
	uint64_t *next_tick_cyclesp;

	next_tick_cyclesp = &sw->next_tick_cycles;
	cycles_per_adapter_tick = sw->timer_tick_ns *
			(rte_get_timer_hz() / NSECPERSEC);
	start_cycles = rte_get_timer_cycles();

	/* Note: initially, *next_tick_cyclesp == 0, so the clause below will
	 * execute, and set things going.
	 */

	if (start_cycles >= *next_tick_cyclesp) {
		/* Snap the current cycle count to the preceding adapter tick
		 * boundary.
		 */
		start_cycles -= start_cycles % cycles_per_adapter_tick;
		*next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;

		return true;
	}

	return false;
}
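
/* Worked example (editor's addition): assuming a 2 GHz timer clock and
 * timer_tick_ns == 10 ms, cycles_per_adapter_tick == 20,000,000.  If the
 * first call observes start_cycles == 50,123,456, it snaps down to
 * 40,000,000 and sets next_tick_cycles to 60,000,000; later calls return
 * false until the cycle counter reaches that boundary.
 */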

/* Check that event timer timeout value is in range */
static __rte_always_inline int
check_timeout(struct rte_event_timer *evtim,
	      const struct rte_event_timer_adapter *adapter)
{
	uint64_t tmo_nsec;
	struct swtim *sw = swtim_pmd_priv(adapter);

	tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
	if (tmo_nsec > sw->max_tmo_ns)
		return -1;
	if (tmo_nsec < sw->timer_tick_ns)
		return -2;

	return 0;
}

/* Check that the event timer's sched type matches the sched type of its
 * destination event queue.
 */
static __rte_always_inline int
check_destination_event_queue(struct rte_event_timer *evtim,
			      const struct rte_event_timer_adapter *adapter)
{
	int ret;
	uint32_t sched_type;

	ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
				       evtim->ev.queue_id,
				       RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
				       &sched_type);

	if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
	    ret == -EOVERFLOW)
		return 0;

	return -1;
}

static int
swtim_service_func(void *arg)
{
	struct rte_event_timer_adapter *adapter = arg;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;

	if (swtim_did_tick(sw)) {
		rte_timer_alt_manage(sw->timer_data_id,
				     sw->poll_lcores,
				     sw->n_poll_lcores,
				     swtim_callback);

		/* Return expired timer objects back to mempool */
		rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
				     sw->n_expired_timers);
		sw->n_expired_timers = 0;

		event_buffer_flush(&sw->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw->stats.ev_enq_count += nb_evs_flushed;
		sw->stats.ev_inv_count += nb_evs_invalid;
		sw->stats.adapter_tick_count++;
	}

	return 0;
}

/* The adapter initialization function rounds the mempool size up to the next
 * power of 2, so we can take the difference between that value and what the
 * user requested, and use the space for caches.  This avoids a scenario where
 * a user can't arm the number of timers the adapter was configured with
 * because mempool objects have been lost to caches.
 *
 * nb_actual should always be a power of 2, so we can iterate over the powers
 * of 2 to find the largest cache size we can use.
 */
static int
compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
{
	int i;
	int size;
	int cache_size = 0;

	for (i = 0;; i++) {
		size = 1 << i;

		if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
		    size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
		    size <= nb_actual / 1.5)
			cache_size = size;
		else
			break;
	}

	return cache_size;
}
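
/* Worked example (editor's addition): assuming RTE_MAX_LCORE == 128,
 * nb_requested == 3000 and nb_actual == 4096 leave 1096 objects of slack.
 * Sizes 1, 2, 4, and 8 satisfy 128 * size < 1096, but 16 does not
 * (128 * 16 == 2048), so the computed per-lcore cache size is 8.
 */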

static int
swtim_init(struct rte_event_timer_adapter *adapter)
{
	int i, ret;
	struct swtim *sw;
	unsigned int flags;
	struct rte_service_spec service;

	/* Allocate storage for private data area */
#define SWTIM_NAMESIZE 32
	char swtim_name[SWTIM_NAMESIZE];
	snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
			adapter->data->id);
	sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
			adapter->data->socket_id);
	if (sw == NULL) {
		EVTIM_LOG_ERR("failed to allocate space for private data");
		rte_errno = ENOMEM;
		return -1;
	}

	/* Connect storage to adapter instance */
	adapter->data->adapter_priv = sw;
	sw->adapter = adapter;

	sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
	sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;

	/* Create a timer pool */
	char pool_name[SWTIM_NAMESIZE];
	snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
		 adapter->data->id);
	/* Optimal mempool size is a power of 2 minus one */
	uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
	int pool_size = nb_timers - 1;
	int cache_size = compute_msg_mempool_cache_size(
				adapter->data->conf.nb_timers, nb_timers);
	flags = 0; /* pool is multi-producer, multi-consumer */
	sw->tim_pool = rte_mempool_create(pool_name, pool_size,
			sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
			NULL, NULL, adapter->data->socket_id, flags);
	if (sw->tim_pool == NULL) {
		EVTIM_LOG_ERR("failed to create timer object mempool");
		rte_errno = ENOMEM;
		goto free_alloc;
	}

	/* Initialize the variables that track in-use timer lists */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		rte_atomic16_init(&sw->in_use[i].v);

	/* Initialize the timer subsystem and allocate timer data instance */
	ret = rte_timer_subsystem_init();
	if (ret < 0) {
		if (ret != -EALREADY) {
			EVTIM_LOG_ERR("failed to initialize timer subsystem");
			rte_errno = -ret;
			goto free_mempool;
		}
	}

	ret = rte_timer_data_alloc(&sw->timer_data_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to allocate timer data instance");
		rte_errno = -ret;
		goto free_mempool;
	}

	/* Initialize timer event buffer */
	event_buffer_init(&sw->buffer);

	/* Register a service component to run adapter logic */
	memset(&service, 0, sizeof(service));
	snprintf(service.name, RTE_SERVICE_NAME_MAX,
		 "swtim_svc_%"PRIu8, adapter->data->id);
	service.socket_id = adapter->data->socket_id;
	service.callback = swtim_service_func;
	service.callback_userdata = adapter;
	service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
	ret = rte_service_component_register(&service, &sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
			      ": err = %d", service.name, sw->service_id,
			      ret);

		rte_errno = ENOSPC;
		goto free_mempool;
	}

	EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
		      sw->service_id);

	adapter->data->service_id = sw->service_id;
	adapter->data->service_inited = 1;

	return 0;
free_mempool:
	rte_mempool_free(sw->tim_pool);
free_alloc:
	rte_free(sw);
	return -1;
}

static void
swtim_free_tim(struct rte_timer *tim, void *arg)
{
	struct swtim *sw = arg;

	rte_mempool_put(sw->tim_pool, tim);
}

/* Traverse the list of outstanding timers and put them back in the mempool
 * before freeing the adapter to avoid leaking the memory.
 */
static int
swtim_uninit(struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Free outstanding timers */
	rte_timer_stop_all(sw->timer_data_id,
			   sw->poll_lcores,
			   sw->n_poll_lcores,
			   swtim_free_tim,
			   sw);

	ret = rte_service_component_unregister(sw->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to unregister service component");
		return ret;
	}

	rte_mempool_free(sw->tim_pool);
	rte_free(sw);
	adapter->data->adapter_priv = NULL;

	return 0;
}

static inline int32_t
get_mapped_count_for_service(uint32_t service_id)
{
	int32_t core_count, i, mapped_count = 0;
	uint32_t lcore_arr[RTE_MAX_LCORE];

	core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);

	for (i = 0; i < core_count; i++)
		if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
			mapped_count++;

	return mapped_count;
}

static int
swtim_start(const struct rte_event_timer_adapter *adapter)
{
	int mapped_count;
	struct swtim *sw = swtim_pmd_priv(adapter);

	/* Mapping the service to more than one service core can introduce
	 * delays while one thread is waiting to acquire a lock, so only allow
	 * one core to be mapped to the service.
	 *
	 * Note: the service could be modified such that it spreads cores to
	 * poll over multiple service instances.
	 */
	mapped_count = get_mapped_count_for_service(sw->service_id);

	if (mapped_count != 1)
		return mapped_count < 1 ? -ENOENT : -ENOTSUP;

	return rte_service_component_runstate_set(sw->service_id, 1);
}
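
/* Illustrative setup sketch (editor's addition, application side): since
 * swtim_start() requires exactly one service lcore mapped to the adapter's
 * service, an application might map one as follows before starting the
 * adapter.  Lcore 1 and the adptr handle are placeholder assumptions.
 *
 *	uint32_t service_id;
 *	if (rte_event_timer_adapter_service_id_get(adptr, &service_id) == 0) {
 *		rte_service_lcore_add(1);
 *		rte_service_map_lcore_set(service_id, 1, 1);
 *		rte_service_lcore_start(1);
 *	}
 */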

static int
swtim_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct swtim *sw = swtim_pmd_priv(adapter);

	ret = rte_service_component_runstate_set(sw->service_id, 0);
	if (ret < 0)
		return ret;

	/* Wait for the service to complete its final iteration */
	while (rte_service_may_be_active(sw->service_id))
		rte_pause();

	return 0;
}

static void
swtim_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	adapter_info->min_resolution_ns = sw->timer_tick_ns;
	adapter_info->max_tmo_ns = sw->max_tmo_ns;
}

static int
swtim_stats_get(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_stats *stats)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	*stats = sw->stats; /* structure copy */
	return 0;
}

static int
swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
{
	struct swtim *sw = swtim_pmd_priv(adapter);
	memset(&sw->stats, 0, sizeof(sw->stats));
	return 0;
}
static uint16_t
__swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	int i, ret;
	struct swtim *sw = swtim_pmd_priv(adapter);
	uint32_t lcore_id = rte_lcore_id();
	struct rte_timer *tim, *tims[nb_evtims];
	uint64_t cycles;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	/* Adjust lcore_id if this is a non-EAL thread; arbitrarily pick the
	 * timer list of the highest lcore to insert such timers into.
	 */
	if (lcore_id == LCORE_ID_ANY)
		lcore_id = RTE_MAX_LCORE - 1;

	/* If this is the first time we're arming an event timer on this lcore,
	 * mark this lcore as "in use"; this will cause the service
	 * function to process the timer list that corresponds to this lcore.
	 */
	if (unlikely(rte_atomic16_test_and_set(&sw->in_use[lcore_id].v))) {
		EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
			      lcore_id);
		sw->poll_lcores[sw->n_poll_lcores] = lcore_id;
		++sw->n_poll_lcores;
	}

	ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
				   nb_evtims);
	if (ret < 0) {
		rte_errno = ENOSPC;
		return 0;
	}

	for (i = 0; i < nb_evtims; i++) {
		/* Don't modify the event timer state in these cases */
		if (evtims[i]->state == RTE_EVENT_TIMER_ARMED) {
			rte_errno = EALREADY;
			break;
		} else if (!(evtims[i]->state == RTE_EVENT_TIMER_NOT_ARMED ||
			     evtims[i]->state == RTE_EVENT_TIMER_CANCELED)) {
			rte_errno = EINVAL;
			break;
		}

		ret = check_timeout(evtims[i], adapter);
		if (unlikely(ret == -1)) {
			evtims[i]->state = RTE_EVENT_TIMER_ERROR_TOOLATE;
			rte_errno = EINVAL;
			break;
		} else if (unlikely(ret == -2)) {
			evtims[i]->state = RTE_EVENT_TIMER_ERROR_TOOEARLY;
			rte_errno = EINVAL;
			break;
		}

		if (unlikely(check_destination_event_queue(evtims[i],
							   adapter) < 0)) {
			evtims[i]->state = RTE_EVENT_TIMER_ERROR;
			rte_errno = EINVAL;
			break;
		}

		tim = tims[i];
		rte_timer_init(tim);

		evtims[i]->impl_opaque[0] = (uintptr_t)tim;
		evtims[i]->impl_opaque[1] = (uintptr_t)adapter;

		cycles = get_timeout_cycles(evtims[i], adapter);
		ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
					  SINGLE, lcore_id, NULL, evtims[i]);
		if (ret < 0) {
			/* tim was in RUNNING or CONFIG state */
			evtims[i]->state = RTE_EVENT_TIMER_ERROR;
			break;
		}

		rte_smp_wmb();
		EVTIM_LOG_DBG("armed an event timer");
		evtims[i]->state = RTE_EVENT_TIMER_ARMED;
	}

	if (i < nb_evtims)
		rte_mempool_put_bulk(sw->tim_pool,
				     (void **)&tims[i], nb_evtims - i);

	return i;
}

static uint16_t
swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer **evtims,
		uint16_t nb_evtims)
{
	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static uint16_t
swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
		   struct rte_event_timer **evtims,
		   uint16_t nb_evtims)
{
	int i, ret;
	struct rte_timer *timp;
	uint64_t opaque;
	struct swtim *sw = swtim_pmd_priv(adapter);

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	for (i = 0; i < nb_evtims; i++) {
		/* Don't modify the event timer state in these cases */
		if (evtims[i]->state == RTE_EVENT_TIMER_CANCELED) {
			rte_errno = EALREADY;
			break;
		} else if (evtims[i]->state != RTE_EVENT_TIMER_ARMED) {
			rte_errno = EINVAL;
			break;
		}

		rte_smp_rmb();

		opaque = evtims[i]->impl_opaque[0];
		timp = (struct rte_timer *)(uintptr_t)opaque;
		RTE_ASSERT(timp != NULL);

		ret = rte_timer_alt_stop(sw->timer_data_id, timp);
		if (ret < 0) {
			/* Timer is running or being configured */
			rte_errno = EAGAIN;
			break;
		}

		rte_mempool_put(sw->tim_pool, (void *)timp);

		evtims[i]->state = RTE_EVENT_TIMER_CANCELED;
		evtims[i]->impl_opaque[0] = 0;
		evtims[i]->impl_opaque[1] = 0;

		rte_smp_wmb();
	}

	return i;
}

static uint16_t
swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
			 struct rte_event_timer **evtims,
			 uint64_t timeout_ticks,
			 uint16_t nb_evtims)
{
	int i;

	for (i = 0; i < nb_evtims; i++)
		evtims[i]->timeout_ticks = timeout_ticks;

	return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static const struct rte_event_timer_adapter_ops swtim_ops = {
	.init			= swtim_init,
	.uninit			= swtim_uninit,
	.start			= swtim_start,
	.stop			= swtim_stop,
	.get_info		= swtim_get_info,
	.stats_get		= swtim_stats_get,
	.stats_reset		= swtim_stats_reset,
	.arm_burst		= swtim_arm_burst,
	.arm_tmo_tick_burst	= swtim_arm_tmo_tick_burst,
	.cancel_burst		= swtim_cancel_burst,
};
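
/* Illustrative application-side sketch (editor's addition): arming a single
 * timer through the public API, which dispatches to the ops above for the
 * software adapter.  Queue ID, sched type, and tick count are placeholder
 * assumptions.
 *
 *	struct rte_event_timer evtim = {
 *		.ev.op = RTE_EVENT_OP_NEW,
 *		.ev.queue_id = 0,
 *		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		.ev.event_type = RTE_EVENT_TYPE_TIMER,
 *		.state = RTE_EVENT_TIMER_NOT_ARMED,
 *		.timeout_ticks = 100,	// expire 100 adapter ticks from now
 *	};
 *	struct rte_event_timer *evtims[] = { &evtim };
 *	if (rte_event_timer_arm_burst(adptr, evtims, 1) != 1)
 *		printf("arm failed: %d\n", rte_errno);
 */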

RTE_INIT(event_timer_adapter_init_log)
{
	evtim_logtype = rte_log_register("lib.eventdev.adapter.timer");
	if (evtim_logtype >= 0)
		rte_log_set_level(evtim_logtype, RTE_LOG_NOTICE);

	evtim_buffer_logtype = rte_log_register("lib.eventdev.adapter.timer."
						"buffer");
	if (evtim_buffer_logtype >= 0)
		rte_log_set_level(evtim_buffer_logtype, RTE_LOG_NOTICE);

	evtim_svc_logtype = rte_log_register("lib.eventdev.adapter.timer.svc");
	if (evtim_svc_logtype >= 0)
		rte_log_set_level(evtim_svc_logtype, RTE_LOG_NOTICE);
}