1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017-2018 Intel Corporation.
3  * All rights reserved.
4  */
5 
6 #include <string.h>
7 #include <inttypes.h>
8 #include <stdbool.h>
9 #include <sys/queue.h>
10 
11 #include <rte_memzone.h>
12 #include <rte_memory.h>
13 #include <rte_dev.h>
14 #include <rte_errno.h>
15 #include <rte_malloc.h>
16 #include <rte_ring.h>
17 #include <rte_mempool.h>
18 #include <rte_common.h>
19 #include <rte_timer.h>
20 #include <rte_service_component.h>
21 #include <rte_cycles.h>
22 
23 #include "rte_eventdev.h"
24 #include "rte_eventdev_pmd.h"
25 #include "rte_event_timer_adapter.h"
26 #include "rte_event_timer_adapter_pmd.h"
27 
28 #define DATA_MZ_NAME_MAX_LEN 64
29 #define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
30 
31 static int evtim_logtype;
32 static int evtim_svc_logtype;
33 static int evtim_buffer_logtype;
34 
35 static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
36 
37 static const struct rte_event_timer_adapter_ops swtim_ops;
38 
39 #define EVTIM_LOG(level, logtype, ...) \
40 	rte_log(RTE_LOG_ ## level, logtype, \
41 		RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
42 			"\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
43 
44 #define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)
45 
46 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
47 #define EVTIM_LOG_DBG(...) \
48 	EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
49 #define EVTIM_BUF_LOG_DBG(...) \
50 	EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
51 #define EVTIM_SVC_LOG_DBG(...) \
52 	EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
53 #else
54 #define EVTIM_LOG_DBG(...) (void)0
55 #define EVTIM_BUF_LOG_DBG(...) (void)0
56 #define EVTIM_SVC_LOG_DBG(...) (void)0
57 #endif
58 
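/* Default callback for allocating an event port for the adapter to enqueue
 * expiry events with.  Stops the event device if it was running, reconfigures
 * it with one additional event port, sets up that port, restarts the device
 * if needed, and returns the new port id through *event_port_id.
 */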
59 static int
60 default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
61 		     void *conf_arg)
62 {
63 	struct rte_event_timer_adapter *adapter;
64 	struct rte_eventdev *dev;
65 	struct rte_event_dev_config dev_conf;
66 	struct rte_event_port_conf *port_conf, def_port_conf = {0};
67 	int started;
68 	uint8_t port_id;
69 	uint8_t dev_id;
70 	int ret;
71 
72 	RTE_SET_USED(event_dev_id);
73 
74 	adapter = &adapters[id];
75 	dev = &rte_eventdevs[adapter->data->event_dev_id];
76 	dev_id = dev->data->dev_id;
77 	dev_conf = dev->data->dev_conf;
78 
79 	started = dev->data->dev_started;
80 	if (started)
81 		rte_event_dev_stop(dev_id);
82 
83 	port_id = dev_conf.nb_event_ports;
84 	dev_conf.nb_event_ports += 1;
85 	ret = rte_event_dev_configure(dev_id, &dev_conf);
86 	if (ret < 0) {
		EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
88 		if (started)
89 			if (rte_event_dev_start(dev_id))
90 				return -EIO;
91 
92 		return ret;
93 	}
94 
95 	if (conf_arg != NULL)
96 		port_conf = conf_arg;
97 	else {
98 		port_conf = &def_port_conf;
99 		ret = rte_event_port_default_conf_get(dev_id, port_id,
100 						      port_conf);
101 		if (ret < 0)
102 			return ret;
103 	}
104 
105 	ret = rte_event_port_setup(dev_id, port_id, port_conf);
106 	if (ret < 0) {
		EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
108 			      port_id, dev_id);
109 		return ret;
110 	}
111 
112 	*event_port_id = port_id;
113 
114 	if (started)
115 		ret = rte_event_dev_start(dev_id);
116 
117 	return ret;
118 }
119 
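/* Illustrative usage sketch (not part of the library); the event device id,
 * tick resolution, maximum timeout, and timer count are arbitrary example
 * values:
 *
 *	struct rte_event_timer_adapter_conf conf = {
 *		.event_dev_id = 0,
 *		.timer_adapter_id = 0,
 *		.socket_id = rte_socket_id(),
 *		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 *		.timer_tick_ns = 10 * 1000 * 1000,
 *		.max_tmo_ns = 10ULL * 1000 * 1000 * 1000,
 *		.nb_timers = 1000,
 *		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
 *	};
 *	struct rte_event_timer_adapter *adap =
 *		rte_event_timer_adapter_create(&conf);
 */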
120 struct rte_event_timer_adapter *
121 rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
122 {
123 	return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
124 						  NULL);
125 }
126 
127 struct rte_event_timer_adapter *
128 rte_event_timer_adapter_create_ext(
129 		const struct rte_event_timer_adapter_conf *conf,
130 		rte_event_timer_adapter_port_conf_cb_t conf_cb,
131 		void *conf_arg)
132 {
133 	uint16_t adapter_id;
134 	struct rte_event_timer_adapter *adapter;
135 	const struct rte_memzone *mz;
136 	char mz_name[DATA_MZ_NAME_MAX_LEN];
137 	int n, ret;
138 	struct rte_eventdev *dev;
139 
140 	if (conf == NULL) {
141 		rte_errno = EINVAL;
142 		return NULL;
143 	}
144 
145 	/* Check eventdev ID */
146 	if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
147 		rte_errno = EINVAL;
148 		return NULL;
149 	}
150 	dev = &rte_eventdevs[conf->event_dev_id];
151 
152 	adapter_id = conf->timer_adapter_id;
153 
154 	/* Check that adapter_id is in range */
155 	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
156 		rte_errno = EINVAL;
157 		return NULL;
158 	}
159 
160 	/* Check adapter ID not already allocated */
161 	adapter = &adapters[adapter_id];
162 	if (adapter->allocated) {
163 		rte_errno = EEXIST;
164 		return NULL;
165 	}
166 
167 	/* Create shared data area. */
168 	n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
169 	if (n >= (int)sizeof(mz_name)) {
170 		rte_errno = EINVAL;
171 		return NULL;
172 	}
173 	mz = rte_memzone_reserve(mz_name,
174 				 sizeof(struct rte_event_timer_adapter_data),
175 				 conf->socket_id, 0);
176 	if (mz == NULL)
177 		/* rte_errno set by rte_memzone_reserve */
178 		return NULL;
179 
180 	adapter->data = mz->addr;
181 	memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));
182 
183 	adapter->data->mz = mz;
184 	adapter->data->event_dev_id = conf->event_dev_id;
185 	adapter->data->id = adapter_id;
186 	adapter->data->socket_id = conf->socket_id;
187 	adapter->data->conf = *conf;  /* copy conf structure */
188 
189 	/* Query eventdev PMD for timer adapter capabilities and ops */
190 	ret = dev->dev_ops->timer_adapter_caps_get(dev,
191 						   adapter->data->conf.flags,
192 						   &adapter->data->caps,
193 						   &adapter->ops);
194 	if (ret < 0) {
195 		rte_errno = -ret;
196 		goto free_memzone;
197 	}
198 
199 	if (!(adapter->data->caps &
200 	      RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
201 		FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
202 		ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
203 			      &adapter->data->event_port_id, conf_arg);
204 		if (ret < 0) {
205 			rte_errno = -ret;
206 			goto free_memzone;
207 		}
208 	}
209 
210 	/* If eventdev PMD did not provide ops, use default software
211 	 * implementation.
212 	 */
213 	if (adapter->ops == NULL)
214 		adapter->ops = &swtim_ops;
215 
216 	/* Allow driver to do some setup */
217 	FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
218 	ret = adapter->ops->init(adapter);
219 	if (ret < 0) {
220 		rte_errno = -ret;
221 		goto free_memzone;
222 	}
223 
224 	/* Set fast-path function pointers */
225 	adapter->arm_burst = adapter->ops->arm_burst;
226 	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
227 	adapter->cancel_burst = adapter->ops->cancel_burst;
228 
229 	adapter->allocated = 1;
230 
231 	return adapter;
232 
233 free_memzone:
234 	rte_memzone_free(adapter->data->mz);
235 	return NULL;
236 }
237 
238 int
239 rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
240 		struct rte_event_timer_adapter_info *adapter_info)
241 {
242 	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
243 
244 	if (adapter->ops->get_info)
245 		/* let driver set values it knows */
246 		adapter->ops->get_info(adapter, adapter_info);
247 
248 	/* Set common values */
249 	adapter_info->conf = adapter->data->conf;
250 	adapter_info->event_dev_port_id = adapter->data->event_port_id;
251 	adapter_info->caps = adapter->data->caps;
252 
253 	return 0;
254 }
255 
256 int
257 rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
258 {
259 	int ret;
260 
261 	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
262 	FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);
263 
264 	if (adapter->data->started) {
265 		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
266 			      adapter->data->id);
267 		return -EALREADY;
268 	}
269 
270 	ret = adapter->ops->start(adapter);
271 	if (ret < 0)
272 		return ret;
273 
274 	adapter->data->started = 1;
275 
276 	return 0;
277 }
278 
279 int
280 rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
281 {
282 	int ret;
283 
284 	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
285 	FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);
286 
287 	if (adapter->data->started == 0) {
288 		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
289 			      adapter->data->id);
290 		return 0;
291 	}
292 
293 	ret = adapter->ops->stop(adapter);
294 	if (ret < 0)
295 		return ret;
296 
297 	adapter->data->started = 0;
298 
299 	return 0;
300 }
301 
302 struct rte_event_timer_adapter *
303 rte_event_timer_adapter_lookup(uint16_t adapter_id)
304 {
305 	char name[DATA_MZ_NAME_MAX_LEN];
306 	const struct rte_memzone *mz;
307 	struct rte_event_timer_adapter_data *data;
308 	struct rte_event_timer_adapter *adapter;
309 	int ret;
310 	struct rte_eventdev *dev;
311 
312 	if (adapters[adapter_id].allocated)
313 		return &adapters[adapter_id]; /* Adapter is already loaded */
314 
315 	snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
316 	mz = rte_memzone_lookup(name);
317 	if (mz == NULL) {
318 		rte_errno = ENOENT;
319 		return NULL;
320 	}
321 
322 	data = mz->addr;
323 
324 	adapter = &adapters[data->id];
325 	adapter->data = data;
326 
327 	dev = &rte_eventdevs[adapter->data->event_dev_id];
328 
329 	/* Query eventdev PMD for timer adapter capabilities and ops */
330 	ret = dev->dev_ops->timer_adapter_caps_get(dev,
331 						   adapter->data->conf.flags,
332 						   &adapter->data->caps,
333 						   &adapter->ops);
334 	if (ret < 0) {
335 		rte_errno = EINVAL;
336 		return NULL;
337 	}
338 
339 	/* If eventdev PMD did not provide ops, use default software
340 	 * implementation.
341 	 */
342 	if (adapter->ops == NULL)
343 		adapter->ops = &swtim_ops;
344 
345 	/* Set fast-path function pointers */
346 	adapter->arm_burst = adapter->ops->arm_burst;
347 	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
348 	adapter->cancel_burst = adapter->ops->cancel_burst;
349 
350 	adapter->allocated = 1;
351 
352 	return adapter;
353 }
354 
355 int
356 rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
357 {
358 	int ret;
359 
360 	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
361 	FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);
362 
363 	if (adapter->data->started == 1) {
364 		EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
365 			      "before freeing", adapter->data->id);
366 		return -EBUSY;
367 	}
368 
369 	/* free impl priv data */
370 	ret = adapter->ops->uninit(adapter);
371 	if (ret < 0)
372 		return ret;
373 
374 	/* free shared data area */
375 	ret = rte_memzone_free(adapter->data->mz);
376 	if (ret < 0)
377 		return ret;
378 
379 	adapter->data = NULL;
380 	adapter->allocated = 0;
381 
382 	return 0;
383 }
384 
385 int
386 rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
387 				       uint32_t *service_id)
388 {
389 	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
390 
391 	if (adapter->data->service_inited && service_id != NULL)
392 		*service_id = adapter->data->service_id;
393 
394 	return adapter->data->service_inited ? 0 : -ESRCH;
395 }
396 
397 int
398 rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
399 				  struct rte_event_timer_adapter_stats *stats)
400 {
401 	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
402 	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
403 	if (stats == NULL)
404 		return -EINVAL;
405 
406 	return adapter->ops->stats_get(adapter, stats);
407 }
408 
409 int
410 rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
411 {
412 	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
413 	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
414 	return adapter->ops->stats_reset(adapter);
415 }
416 
417 /*
418  * Software event timer adapter buffer helper functions
419  */
420 
421 #define NSECPERSEC 1E9
422 
423 /* Optimizations used to index into the buffer require that the buffer size
424  * be a power of 2.
425  */
426 #define EVENT_BUFFER_SZ 4096
427 #define EVENT_BUFFER_BATCHSZ 32
428 #define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)
429 
430 #define EXP_TIM_BUF_SZ 128
431 
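/* Ring buffer of events awaiting enqueue to an event device.  head and tail
 * are free-running counters: their difference gives the fill level, and the
 * masked low bits give the index into the events array.
 */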
432 struct event_buffer {
433 	size_t head;
434 	size_t tail;
435 	struct rte_event events[EVENT_BUFFER_SZ];
436 } __rte_cache_aligned;
437 
438 static inline bool
439 event_buffer_full(struct event_buffer *bufp)
440 {
441 	return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
442 }
443 
444 static inline bool
445 event_buffer_batch_ready(struct event_buffer *bufp)
446 {
447 	return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
448 }
449 
450 static void
451 event_buffer_init(struct event_buffer *bufp)
452 {
453 	bufp->head = bufp->tail = 0;
454 	memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
455 }
456 
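/* Copy an event into the slot at the current head and advance the head;
 * fails if the buffer is full.
 */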
457 static int
458 event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
459 {
460 	size_t head_idx;
461 	struct rte_event *buf_eventp;
462 
463 	if (event_buffer_full(bufp))
464 		return -1;
465 
466 	/* Instead of modulus, bitwise AND with mask to get head_idx. */
467 	head_idx = bufp->head & EVENT_BUFFER_MASK;
468 	buf_eventp = &bufp->events[head_idx];
469 	rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));
470 
471 	/* Wrap automatically when overflow occurs. */
472 	bufp->head++;
473 
474 	return 0;
475 }
476 
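/* Enqueue up to one batch of buffered events, starting at the tail, to the
 * given event device port.  The number of events enqueued and the number of
 * invalid events dropped are returned through the output parameters; the
 * tail is advanced past both.
 */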
477 static void
478 event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
479 		   uint16_t *nb_events_flushed,
480 		   uint16_t *nb_events_inv)
481 {
482 	struct rte_event *events = bufp->events;
483 	size_t head_idx, tail_idx;
484 	uint16_t n = 0;
485 
486 	/* Instead of modulus, bitwise AND with mask to get index. */
487 	head_idx = bufp->head & EVENT_BUFFER_MASK;
488 	tail_idx = bufp->tail & EVENT_BUFFER_MASK;
489 
490 	RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);
491 
	/* Determine the largest contiguous run we can attempt to enqueue to the
493 	 * event device.
494 	 */
495 	if (head_idx > tail_idx)
496 		n = head_idx - tail_idx;
497 	else if (head_idx < tail_idx)
498 		n = EVENT_BUFFER_SZ - tail_idx;
499 	else if (event_buffer_full(bufp))
500 		n = EVENT_BUFFER_SZ - tail_idx;
501 	else {
502 		*nb_events_flushed = 0;
503 		return;
504 	}
505 
506 	n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
507 	*nb_events_inv = 0;
508 
509 	*nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
510 						     &events[tail_idx], n);
511 	if (*nb_events_flushed != n) {
512 		if (rte_errno == EINVAL) {
513 			EVTIM_LOG_ERR("failed to enqueue invalid event - "
514 				      "dropping it");
515 			(*nb_events_inv)++;
516 		} else if (rte_errno == ENOSPC)
517 			rte_pause();
518 	}
519 
520 	if (*nb_events_flushed > 0)
521 		EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
522 				  "device", *nb_events_flushed);
523 
524 	bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
525 }
526 
527 /*
528  * Software event timer adapter implementation
529  */
530 struct swtim {
531 	/* Identifier of service executing timer management logic. */
532 	uint32_t service_id;
533 	/* The cycle count at which the adapter should next tick */
534 	uint64_t next_tick_cycles;
	/* The tick resolution used by this adapter instance. It may have been
	 * adjusted from what the user requested.
	 */
538 	uint64_t timer_tick_ns;
539 	/* Maximum timeout in nanoseconds allowed by adapter instance. */
540 	uint64_t max_tmo_ns;
541 	/* Buffered timer expiry events to be enqueued to an event device. */
542 	struct event_buffer buffer;
543 	/* Statistics */
544 	struct rte_event_timer_adapter_stats stats;
545 	/* Mempool of timer objects */
546 	struct rte_mempool *tim_pool;
547 	/* Back pointer for convenience */
548 	struct rte_event_timer_adapter *adapter;
549 	/* Identifier of timer data instance */
550 	uint32_t timer_data_id;
551 	/* Track which cores have actually armed a timer */
552 	struct {
553 		uint16_t v;
554 	} __rte_cache_aligned in_use[RTE_MAX_LCORE];
555 	/* Track which cores' timer lists should be polled */
556 	unsigned int poll_lcores[RTE_MAX_LCORE];
557 	/* The number of lists that should be polled */
558 	int n_poll_lcores;
559 	/* Timers which have expired and can be returned to a mempool */
560 	struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
561 	/* The number of timers that can be returned to a mempool */
562 	size_t n_expired_timers;
563 };
564 
565 static inline struct swtim *
566 swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
567 {
568 	return adapter->data->adapter_priv;
569 }
570 
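/* Per-timer callback invoked via rte_timer_alt_manage() when a timer expires:
 * buffer the expiry event (or re-arm the timer with an immediate expiry value
 * if the event buffer is full), collect the expired timer for bulk return to
 * the mempool, and flush the buffer once a batch of events is ready.
 */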
571 static void
572 swtim_callback(struct rte_timer *tim)
573 {
574 	struct rte_event_timer *evtim = tim->arg;
575 	struct rte_event_timer_adapter *adapter;
576 	unsigned int lcore = rte_lcore_id();
577 	struct swtim *sw;
578 	uint16_t nb_evs_flushed = 0;
579 	uint16_t nb_evs_invalid = 0;
580 	uint64_t opaque;
581 	int ret;
582 	int n_lcores;
583 
584 	opaque = evtim->impl_opaque[1];
585 	adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
586 	sw = swtim_pmd_priv(adapter);
587 
588 	ret = event_buffer_add(&sw->buffer, &evtim->ev);
589 	if (ret < 0) {
590 		/* If event buffer is full, put timer back in list with
591 		 * immediate expiry value, so that we process it again on the
592 		 * next iteration.
593 		 */
594 		ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0, SINGLE,
595 					  lcore, NULL, evtim);
596 		if (ret < 0) {
597 			EVTIM_LOG_DBG("event buffer full, failed to reset "
598 				      "timer with immediate expiry value");
599 		} else {
600 			sw->stats.evtim_retry_count++;
601 			EVTIM_LOG_DBG("event buffer full, resetting rte_timer "
602 				      "with immediate expiry value");
603 		}
604 
605 		if (unlikely(sw->in_use[lcore].v == 0)) {
606 			sw->in_use[lcore].v = 1;
607 			n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
608 						     __ATOMIC_RELAXED);
609 			__atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
610 					__ATOMIC_RELAXED);
611 		}
612 	} else {
613 		EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");
614 
		/* If necessary, empty the expired-timer buffer here so that
		 * only timers that expired earlier are returned to the
		 * mempool.
		 */
618 		if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
619 			rte_mempool_put_bulk(sw->tim_pool,
620 					     (void **)sw->expired_timers,
621 					     sw->n_expired_timers);
622 			sw->n_expired_timers = 0;
623 		}
624 
625 		sw->expired_timers[sw->n_expired_timers++] = tim;
626 		sw->stats.evtim_exp_count++;
627 
628 		__atomic_store_n(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
629 				__ATOMIC_RELEASE);
630 	}
631 
632 	if (event_buffer_batch_ready(&sw->buffer)) {
633 		event_buffer_flush(&sw->buffer,
634 				   adapter->data->event_dev_id,
635 				   adapter->data->event_port_id,
636 				   &nb_evs_flushed,
637 				   &nb_evs_invalid);
638 
639 		sw->stats.ev_enq_count += nb_evs_flushed;
640 		sw->stats.ev_inv_count += nb_evs_invalid;
641 	}
642 }
643 
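/* Convert an event timer's timeout from adapter ticks to timer cycles. */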
644 static __rte_always_inline uint64_t
645 get_timeout_cycles(struct rte_event_timer *evtim,
646 		   const struct rte_event_timer_adapter *adapter)
647 {
648 	struct swtim *sw = swtim_pmd_priv(adapter);
649 	uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
650 	return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
651 }
652 
653 /* This function returns true if one or more (adapter) ticks have occurred since
654  * the last time it was called.
655  */
656 static inline bool
657 swtim_did_tick(struct swtim *sw)
658 {
659 	uint64_t cycles_per_adapter_tick, start_cycles;
660 	uint64_t *next_tick_cyclesp;
661 
662 	next_tick_cyclesp = &sw->next_tick_cycles;
663 	cycles_per_adapter_tick = sw->timer_tick_ns *
664 			(rte_get_timer_hz() / NSECPERSEC);
665 	start_cycles = rte_get_timer_cycles();
666 
667 	/* Note: initially, *next_tick_cyclesp == 0, so the clause below will
668 	 * execute, and set things going.
669 	 */
670 
671 	if (start_cycles >= *next_tick_cyclesp) {
672 		/* Snap the current cycle count to the preceding adapter tick
673 		 * boundary.
674 		 */
675 		start_cycles -= start_cycles % cycles_per_adapter_tick;
676 		*next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;
677 
678 		return true;
679 	}
680 
681 	return false;
682 }
683 
684 /* Check that event timer timeout value is in range */
685 static __rte_always_inline int
686 check_timeout(struct rte_event_timer *evtim,
687 	      const struct rte_event_timer_adapter *adapter)
688 {
689 	uint64_t tmo_nsec;
690 	struct swtim *sw = swtim_pmd_priv(adapter);
691 
692 	tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
693 	if (tmo_nsec > sw->max_tmo_ns)
694 		return -1;
695 	if (tmo_nsec < sw->timer_tick_ns)
696 		return -2;
697 
698 	return 0;
699 }
700 
/* Check that the event timer's sched type matches the sched type of its
 * destination event queue.
 */
704 static __rte_always_inline int
705 check_destination_event_queue(struct rte_event_timer *evtim,
706 			      const struct rte_event_timer_adapter *adapter)
707 {
708 	int ret;
709 	uint32_t sched_type;
710 
711 	ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
712 				       evtim->ev.queue_id,
713 				       RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
714 				       &sched_type);
715 
716 	if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
717 	    ret == -EOVERFLOW)
718 		return 0;
719 
720 	return -1;
721 }
722 
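/* Service function executed on a service lcore: once per adapter tick,
 * manage the timer lists of all lcores that have armed timers, return
 * expired timer objects to the mempool, and flush any buffered expiry
 * events to the event device.
 */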
723 static int
724 swtim_service_func(void *arg)
725 {
726 	struct rte_event_timer_adapter *adapter = arg;
727 	struct swtim *sw = swtim_pmd_priv(adapter);
728 	uint16_t nb_evs_flushed = 0;
729 	uint16_t nb_evs_invalid = 0;
730 
731 	if (swtim_did_tick(sw)) {
732 		rte_timer_alt_manage(sw->timer_data_id,
733 				     sw->poll_lcores,
734 				     sw->n_poll_lcores,
735 				     swtim_callback);
736 
737 		/* Return expired timer objects back to mempool */
738 		rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
739 				     sw->n_expired_timers);
740 		sw->n_expired_timers = 0;
741 
742 		event_buffer_flush(&sw->buffer,
743 				   adapter->data->event_dev_id,
744 				   adapter->data->event_port_id,
745 				   &nb_evs_flushed,
746 				   &nb_evs_invalid);
747 
748 		sw->stats.ev_enq_count += nb_evs_flushed;
749 		sw->stats.ev_inv_count += nb_evs_invalid;
750 		sw->stats.adapter_tick_count++;
751 	}
752 
753 	return 0;
754 }
755 
756 /* The adapter initialization function rounds the mempool size up to the next
757  * power of 2, so we can take the difference between that value and what the
758  * user requested, and use the space for caches.  This avoids a scenario where a
759  * user can't arm the number of timers the adapter was configured with because
760  * mempool objects have been lost to caches.
761  *
762  * nb_actual should always be a power of 2, so we can iterate over the powers
763  * of 2 to see what the largest cache size we can use is.
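 *
 * For example, assuming RTE_MAX_LCORE is 128, nb_requested = 10000 and
 * nb_actual = 16384 leave 6384 spare objects, and the loop below settles on a
 * cache size of 32, the largest power of 2 with 128 * size < 6384.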
764  */
765 static int
766 compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
767 {
768 	int i;
769 	int size;
770 	int cache_size = 0;
771 
772 	for (i = 0;; i++) {
773 		size = 1 << i;
774 
775 		if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
776 		    size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
777 		    size <= nb_actual / 1.5)
778 			cache_size = size;
779 		else
780 			break;
781 	}
782 
783 	return cache_size;
784 }
785 
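/* Software adapter init op: allocate the private data area, create the timer
 * object mempool, initialize the timer subsystem and a timer data instance,
 * and register the service that drives the adapter.
 */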
786 static int
787 swtim_init(struct rte_event_timer_adapter *adapter)
788 {
789 	int i, ret;
790 	struct swtim *sw;
791 	unsigned int flags;
792 	struct rte_service_spec service;
793 
794 	/* Allocate storage for private data area */
795 #define SWTIM_NAMESIZE 32
796 	char swtim_name[SWTIM_NAMESIZE];
797 	snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
798 			adapter->data->id);
799 	sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
800 			adapter->data->socket_id);
801 	if (sw == NULL) {
802 		EVTIM_LOG_ERR("failed to allocate space for private data");
803 		rte_errno = ENOMEM;
804 		return -1;
805 	}
806 
807 	/* Connect storage to adapter instance */
808 	adapter->data->adapter_priv = sw;
809 	sw->adapter = adapter;
810 
811 	sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
812 	sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;
813 
814 	/* Create a timer pool */
815 	char pool_name[SWTIM_NAMESIZE];
816 	snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
817 		 adapter->data->id);
818 	/* Optimal mempool size is a power of 2 minus one */
819 	uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
820 	int pool_size = nb_timers - 1;
821 	int cache_size = compute_msg_mempool_cache_size(
822 				adapter->data->conf.nb_timers, nb_timers);
823 	flags = 0; /* pool is multi-producer, multi-consumer */
824 	sw->tim_pool = rte_mempool_create(pool_name, pool_size,
825 			sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
826 			NULL, NULL, adapter->data->socket_id, flags);
827 	if (sw->tim_pool == NULL) {
828 		EVTIM_LOG_ERR("failed to create timer object mempool");
829 		rte_errno = ENOMEM;
830 		goto free_alloc;
831 	}
832 
833 	/* Initialize the variables that track in-use timer lists */
834 	for (i = 0; i < RTE_MAX_LCORE; i++)
835 		sw->in_use[i].v = 0;
836 
837 	/* Initialize the timer subsystem and allocate timer data instance */
838 	ret = rte_timer_subsystem_init();
839 	if (ret < 0) {
840 		if (ret != -EALREADY) {
841 			EVTIM_LOG_ERR("failed to initialize timer subsystem");
842 			rte_errno = -ret;
843 			goto free_mempool;
844 		}
845 	}
846 
847 	ret = rte_timer_data_alloc(&sw->timer_data_id);
848 	if (ret < 0) {
849 		EVTIM_LOG_ERR("failed to allocate timer data instance");
850 		rte_errno = -ret;
851 		goto free_mempool;
852 	}
853 
854 	/* Initialize timer event buffer */
855 	event_buffer_init(&sw->buffer);
856 
857 	sw->adapter = adapter;
858 
859 	/* Register a service component to run adapter logic */
860 	memset(&service, 0, sizeof(service));
861 	snprintf(service.name, RTE_SERVICE_NAME_MAX,
862 		 "swtim_svc_%"PRIu8, adapter->data->id);
863 	service.socket_id = adapter->data->socket_id;
864 	service.callback = swtim_service_func;
865 	service.callback_userdata = adapter;
866 	service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
867 	ret = rte_service_component_register(&service, &sw->service_id);
868 	if (ret < 0) {
869 		EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
870 			      ": err = %d", service.name, sw->service_id,
871 			      ret);
872 
873 		rte_errno = ENOSPC;
874 		goto free_mempool;
875 	}
876 
877 	EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
878 		      sw->service_id);
879 
880 	adapter->data->service_id = sw->service_id;
881 	adapter->data->service_inited = 1;
882 
883 	return 0;
884 free_mempool:
885 	rte_mempool_free(sw->tim_pool);
886 free_alloc:
887 	rte_free(sw);
888 	return -1;
889 }
890 
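/* Callback for rte_timer_stop_all(): return a stopped timer object to the
 * mempool.
 */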
891 static void
892 swtim_free_tim(struct rte_timer *tim, void *arg)
893 {
894 	struct swtim *sw = arg;
895 
896 	rte_mempool_put(sw->tim_pool, tim);
897 }
898 
899 /* Traverse the list of outstanding timers and put them back in the mempool
900  * before freeing the adapter to avoid leaking the memory.
901  */
902 static int
903 swtim_uninit(struct rte_event_timer_adapter *adapter)
904 {
905 	int ret;
906 	struct swtim *sw = swtim_pmd_priv(adapter);
907 
908 	/* Free outstanding timers */
909 	rte_timer_stop_all(sw->timer_data_id,
910 			   sw->poll_lcores,
911 			   sw->n_poll_lcores,
912 			   swtim_free_tim,
913 			   sw);
914 
915 	ret = rte_service_component_unregister(sw->service_id);
916 	if (ret < 0) {
917 		EVTIM_LOG_ERR("failed to unregister service component");
918 		return ret;
919 	}
920 
921 	rte_mempool_free(sw->tim_pool);
922 	rte_free(sw);
923 	adapter->data->adapter_priv = NULL;
924 
925 	return 0;
926 }
927 
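/* Count the number of service lcores the given service is mapped to. */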
928 static inline int32_t
929 get_mapped_count_for_service(uint32_t service_id)
930 {
931 	int32_t core_count, i, mapped_count = 0;
932 	uint32_t lcore_arr[RTE_MAX_LCORE];
933 
934 	core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);
935 
936 	for (i = 0; i < core_count; i++)
937 		if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
938 			mapped_count++;
939 
940 	return mapped_count;
941 }
942 
943 static int
944 swtim_start(const struct rte_event_timer_adapter *adapter)
945 {
946 	int mapped_count;
947 	struct swtim *sw = swtim_pmd_priv(adapter);
948 
949 	/* Mapping the service to more than one service core can introduce
950 	 * delays while one thread is waiting to acquire a lock, so only allow
951 	 * one core to be mapped to the service.
952 	 *
953 	 * Note: the service could be modified such that it spreads cores to
954 	 * poll over multiple service instances.
955 	 */
956 	mapped_count = get_mapped_count_for_service(sw->service_id);
957 
958 	if (mapped_count != 1)
959 		return mapped_count < 1 ? -ENOENT : -ENOTSUP;
960 
961 	return rte_service_component_runstate_set(sw->service_id, 1);
962 }
963 
964 static int
965 swtim_stop(const struct rte_event_timer_adapter *adapter)
966 {
967 	int ret;
968 	struct swtim *sw = swtim_pmd_priv(adapter);
969 
970 	ret = rte_service_component_runstate_set(sw->service_id, 0);
971 	if (ret < 0)
972 		return ret;
973 
974 	/* Wait for the service to complete its final iteration */
975 	while (rte_service_may_be_active(sw->service_id))
976 		rte_pause();
977 
978 	return 0;
979 }
980 
981 static void
982 swtim_get_info(const struct rte_event_timer_adapter *adapter,
983 		struct rte_event_timer_adapter_info *adapter_info)
984 {
985 	struct swtim *sw = swtim_pmd_priv(adapter);
986 	adapter_info->min_resolution_ns = sw->timer_tick_ns;
987 	adapter_info->max_tmo_ns = sw->max_tmo_ns;
988 }
989 
990 static int
991 swtim_stats_get(const struct rte_event_timer_adapter *adapter,
992 		struct rte_event_timer_adapter_stats *stats)
993 {
994 	struct swtim *sw = swtim_pmd_priv(adapter);
995 	*stats = sw->stats; /* structure copy */
996 	return 0;
997 }
998 
999 static int
1000 swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
1001 {
1002 	struct swtim *sw = swtim_pmd_priv(adapter);
1003 	memset(&sw->stats, 0, sizeof(sw->stats));
1004 	return 0;
1005 }
1006 
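/* Validate each event timer, back it with a timer object from the mempool,
 * and arm it on the calling lcore's timer list.  Returns the number of event
 * timers successfully armed; unused timer objects are returned to the
 * mempool.
 */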
1007 static uint16_t
1008 __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
1009 		struct rte_event_timer **evtims,
1010 		uint16_t nb_evtims)
1011 {
1012 	int i, ret;
1013 	struct swtim *sw = swtim_pmd_priv(adapter);
1014 	uint32_t lcore_id = rte_lcore_id();
1015 	struct rte_timer *tim, *tims[nb_evtims];
1016 	uint64_t cycles;
1017 	int n_lcores;
	/* Expected value: the timer list for this lcore is not yet in use. */
1019 	uint16_t exp_state = 0;
1020 	enum rte_event_timer_state n_state;
1021 
1022 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1023 	/* Check that the service is running. */
1024 	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
1025 		rte_errno = EINVAL;
1026 		return 0;
1027 	}
1028 #endif
1029 
	/* Adjust lcore_id if this is a non-EAL thread; arbitrarily pick the
	 * timer list of the highest lcore to insert such timers into.
	 */
1033 	if (lcore_id == LCORE_ID_ANY)
1034 		lcore_id = RTE_MAX_LCORE - 1;
1035 
	/* If this is the first time we're arming an event timer on this lcore,
	 * mark this lcore as "in use"; this will cause the service
	 * function to process the timer list that corresponds to this lcore.
	 * The atomic compare-and-swap prevents a race on the in_use flag
	 * between multiple non-EAL threads.
	 */
1042 	if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v,
1043 			&exp_state, 1, 0,
1044 			__ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
1045 		EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
1046 			      lcore_id);
1047 		n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
1048 					     __ATOMIC_RELAXED);
1049 		__atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id,
1050 				__ATOMIC_RELAXED);
1051 	}
1052 
1053 	ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
1054 				   nb_evtims);
1055 	if (ret < 0) {
1056 		rte_errno = ENOSPC;
1057 		return 0;
1058 	}
1059 
1060 	for (i = 0; i < nb_evtims; i++) {
1061 		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
1062 		if (n_state == RTE_EVENT_TIMER_ARMED) {
1063 			rte_errno = EALREADY;
1064 			break;
1065 		} else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED ||
1066 			     n_state == RTE_EVENT_TIMER_CANCELED)) {
1067 			rte_errno = EINVAL;
1068 			break;
1069 		}
1070 
1071 		ret = check_timeout(evtims[i], adapter);
1072 		if (unlikely(ret == -1)) {
1073 			__atomic_store_n(&evtims[i]->state,
1074 					RTE_EVENT_TIMER_ERROR_TOOLATE,
1075 					__ATOMIC_RELAXED);
1076 			rte_errno = EINVAL;
1077 			break;
1078 		} else if (unlikely(ret == -2)) {
1079 			__atomic_store_n(&evtims[i]->state,
1080 					RTE_EVENT_TIMER_ERROR_TOOEARLY,
1081 					__ATOMIC_RELAXED);
1082 			rte_errno = EINVAL;
1083 			break;
1084 		}
1085 
1086 		if (unlikely(check_destination_event_queue(evtims[i],
1087 							   adapter) < 0)) {
1088 			__atomic_store_n(&evtims[i]->state,
1089 					RTE_EVENT_TIMER_ERROR,
1090 					__ATOMIC_RELAXED);
1091 			rte_errno = EINVAL;
1092 			break;
1093 		}
1094 
1095 		tim = tims[i];
1096 		rte_timer_init(tim);
1097 
1098 		evtims[i]->impl_opaque[0] = (uintptr_t)tim;
1099 		evtims[i]->impl_opaque[1] = (uintptr_t)adapter;
1100 
1101 		cycles = get_timeout_cycles(evtims[i], adapter);
1102 		ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
1103 					  SINGLE, lcore_id, NULL, evtims[i]);
1104 		if (ret < 0) {
1105 			/* tim was in RUNNING or CONFIG state */
1106 			__atomic_store_n(&evtims[i]->state,
1107 					RTE_EVENT_TIMER_ERROR,
1108 					__ATOMIC_RELEASE);
1109 			break;
1110 		}
1111 
1112 		EVTIM_LOG_DBG("armed an event timer");
		/* RELEASE ordering guarantees that the adapter-specific value
		 * changes are observed before the state update.
		 */
1116 		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
1117 				__ATOMIC_RELEASE);
1118 	}
1119 
1120 	if (i < nb_evtims)
1121 		rte_mempool_put_bulk(sw->tim_pool,
1122 				     (void **)&tims[i], nb_evtims - i);
1123 
1124 	return i;
1125 }
1126 
1127 static uint16_t
1128 swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
1129 		struct rte_event_timer **evtims,
1130 		uint16_t nb_evtims)
1131 {
1132 	return __swtim_arm_burst(adapter, evtims, nb_evtims);
1133 }
1134 
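/* Stop each armed event timer and return its timer object to the mempool.
 * Returns the number of event timers successfully canceled.
 */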
1135 static uint16_t
1136 swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
1137 		   struct rte_event_timer **evtims,
1138 		   uint16_t nb_evtims)
1139 {
1140 	int i, ret;
1141 	struct rte_timer *timp;
1142 	uint64_t opaque;
1143 	struct swtim *sw = swtim_pmd_priv(adapter);
1144 	enum rte_event_timer_state n_state;
1145 
1146 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
1147 	/* Check that the service is running. */
1148 	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
1149 		rte_errno = EINVAL;
1150 		return 0;
1151 	}
1152 #endif
1153 
1154 	for (i = 0; i < nb_evtims; i++) {
		/* Don't modify the event timer state in these cases.
		 * ACQUIRE ordering guarantees that the implementation-specific
		 * opaque data is accessed only under the correct state.
		 */
1159 		n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
1160 		if (n_state == RTE_EVENT_TIMER_CANCELED) {
1161 			rte_errno = EALREADY;
1162 			break;
1163 		} else if (n_state != RTE_EVENT_TIMER_ARMED) {
1164 			rte_errno = EINVAL;
1165 			break;
1166 		}
1167 
1168 		opaque = evtims[i]->impl_opaque[0];
1169 		timp = (struct rte_timer *)(uintptr_t)opaque;
1170 		RTE_ASSERT(timp != NULL);
1171 
1172 		ret = rte_timer_alt_stop(sw->timer_data_id, timp);
1173 		if (ret < 0) {
1174 			/* Timer is running or being configured */
1175 			rte_errno = EAGAIN;
1176 			break;
1177 		}
1178 
		rte_mempool_put(sw->tim_pool, (void *)timp);
1180 
		/* The RELEASE ordering here pairs with the ACQUIRE ordering
		 * above to make sure the state update is observed across
		 * threads.
		 */
1185 		__atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
1186 				__ATOMIC_RELEASE);
1187 	}
1188 
1189 	return i;
1190 }
1191 
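/* Arm a burst of event timers that all share the same timeout tick value. */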
1192 static uint16_t
1193 swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
1194 			 struct rte_event_timer **evtims,
1195 			 uint64_t timeout_ticks,
1196 			 uint16_t nb_evtims)
1197 {
1198 	int i;
1199 
1200 	for (i = 0; i < nb_evtims; i++)
1201 		evtims[i]->timeout_ticks = timeout_ticks;
1202 
1203 	return __swtim_arm_burst(adapter, evtims, nb_evtims);
1204 }
1205 
1206 static const struct rte_event_timer_adapter_ops swtim_ops = {
1207 	.init			= swtim_init,
1208 	.uninit			= swtim_uninit,
1209 	.start			= swtim_start,
1210 	.stop			= swtim_stop,
1211 	.get_info		= swtim_get_info,
1212 	.stats_get		= swtim_stats_get,
1213 	.stats_reset		= swtim_stats_reset,
1214 	.arm_burst		= swtim_arm_burst,
1215 	.arm_tmo_tick_burst	= swtim_arm_tmo_tick_burst,
1216 	.cancel_burst		= swtim_cancel_burst,
1217 };
1218 
1219 RTE_INIT(event_timer_adapter_init_log)
1220 {
1221 	evtim_logtype = rte_log_register("lib.eventdev.adapter.timer");
1222 	if (evtim_logtype >= 0)
1223 		rte_log_set_level(evtim_logtype, RTE_LOG_NOTICE);
1224 
1225 	evtim_buffer_logtype = rte_log_register("lib.eventdev.adapter.timer."
1226 						"buffer");
1227 	if (evtim_buffer_logtype >= 0)
1228 		rte_log_set_level(evtim_buffer_logtype, RTE_LOG_NOTICE);
1229 
1230 	evtim_svc_logtype = rte_log_register("lib.eventdev.adapter.timer.svc");
1231 	if (evtim_svc_logtype >= 0)
1232 		rte_log_set_level(evtim_svc_logtype, RTE_LOG_NOTICE);
1233 }
1234