/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <inttypes.h>
#include <stdbool.h>
#include <sys/queue.h>

#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_timer.h>
#include <rte_service_component.h>
#include <rte_cycles.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
#include "rte_event_timer_adapter.h"
#include "rte_event_timer_adapter_pmd.h"

#define DATA_MZ_NAME_MAX_LEN 64
#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"

static int evtim_logtype;
static int evtim_svc_logtype;
static int evtim_buffer_logtype;

static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];

static const struct rte_event_timer_adapter_ops sw_event_adapter_timer_ops;

#define EVTIM_LOG(level, logtype, ...) \
	rte_log(RTE_LOG_ ## level, logtype, \
		RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
			"\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
#define EVTIM_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
#define EVTIM_BUF_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
#define EVTIM_SVC_LOG_DBG(...) \
	EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
#else
#define EVTIM_LOG_DBG(...) (void)0
#define EVTIM_BUF_LOG_DBG(...) (void)0
#define EVTIM_SVC_LOG_DBG(...) (void)0
#endif
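
/* Validation helper macros used throughout this file.  Their definitions
 * were not included in this excerpt; the versions below are a minimal
 * reconstruction consistent with the call sites (note that rte_errno is
 * set to a positive errno value).
 */
#define ADAPTER_VALID_OR_ERR_RET(adapter, retval) do { \
	if ((adapter) == NULL || !(adapter)->allocated) \
		return retval; \
} while (0)

#define FUNC_PTR_OR_ERR_RET(func, errval) do { \
	if ((func) == NULL) \
		return errval; \
} while (0)

#define FUNC_PTR_OR_NULL_RET_WITH_ERRNO(func, errval) do { \
	if ((func) == NULL) { \
		rte_errno = (errval); \
		return NULL; \
	} \
} while (0)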

static int
default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
		     void *conf_arg)
{
	struct rte_event_timer_adapter *adapter;
	struct rte_eventdev *dev;
	struct rte_event_dev_config dev_conf;
	struct rte_event_port_conf *port_conf, def_port_conf = {0};
	int started;
	uint8_t port_id;
	uint8_t dev_id;
	int ret;

	RTE_SET_USED(event_dev_id);

	adapter = &adapters[id];
	dev = &rte_eventdevs[adapter->data->event_dev_id];
	dev_id = dev->data->dev_id;
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);

	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
		if (started)
			if (rte_event_dev_start(dev_id))
				return -EIO;

		return ret;
	}

	if (conf_arg != NULL)
		port_conf = conf_arg;
	else {
		port_conf = &def_port_conf;
		ret = rte_event_port_default_conf_get(dev_id, port_id,
						      port_conf);
		if (ret < 0)
			return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
			      port_id, dev_id);
		return ret;
	}

	*event_port_id = port_id;

	if (started)
		ret = rte_event_dev_start(dev_id);

	return ret;
}

struct rte_event_timer_adapter * __rte_experimental
rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
{
	return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
						  NULL);
}
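
/* Illustrative usage (not part of the original file; field values are
 * hypothetical):
 *
 *	const struct rte_event_timer_adapter_conf conf = {
 *		.event_dev_id = event_dev_id,
 *		.timer_adapter_id = 0,
 *		.socket_id = rte_socket_id(),
 *		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 *		.timer_tick_ns = NSECPERSEC / 10,  // 100 ms resolution
 *		.max_tmo_ns = 120 * NSECPERSEC,    // timeouts up to 2 minutes
 *		.nb_timers = 40000,
 *		.flags = 0,
 *	};
 *
 *	struct rte_event_timer_adapter *adapter =
 *		rte_event_timer_adapter_create(&conf);
 *	if (adapter == NULL)
 *		rte_exit(EXIT_FAILURE, "timer adapter creation failed");
 */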

struct rte_event_timer_adapter * __rte_experimental
rte_event_timer_adapter_create_ext(
		const struct rte_event_timer_adapter_conf *conf,
		rte_event_timer_adapter_port_conf_cb_t conf_cb,
		void *conf_arg)
{
	uint16_t adapter_id;
	struct rte_event_timer_adapter *adapter;
	const struct rte_memzone *mz;
	char mz_name[DATA_MZ_NAME_MAX_LEN];
	int n, ret;
	struct rte_eventdev *dev;

	if (conf == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check eventdev ID */
	if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
		rte_errno = EINVAL;
		return NULL;
	}
	dev = &rte_eventdevs[conf->event_dev_id];

	adapter_id = conf->timer_adapter_id;

	/* Check that adapter_id is in range */
	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* Check adapter ID not already allocated */
	adapter = &adapters[adapter_id];
	if (adapter->allocated) {
		rte_errno = EEXIST;
		return NULL;
	}

	/* Create shared data area. */
	n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
	if (n >= (int)sizeof(mz_name)) {
		rte_errno = EINVAL;
		return NULL;
	}
	mz = rte_memzone_reserve(mz_name,
				 sizeof(struct rte_event_timer_adapter_data),
				 conf->socket_id, 0);
	if (mz == NULL)
		/* rte_errno set by rte_memzone_reserve */
		return NULL;

	adapter->data = mz->addr;
	memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));

	adapter->data->mz = mz;
	adapter->data->event_dev_id = conf->event_dev_id;
	adapter->data->id = adapter_id;
	adapter->data->socket_id = conf->socket_id;
	adapter->data->conf = *conf;  /* copy conf structure */

	/* Query eventdev PMD for timer adapter capabilities and ops */
	ret = dev->dev_ops->timer_adapter_caps_get(dev,
						   adapter->data->conf.flags,
						   &adapter->data->caps,
						   &adapter->ops);
	if (ret < 0) {
		rte_errno = -ret;  /* rte_errno holds positive errno values */
		goto free_memzone;
	}

	if (!(adapter->data->caps &
	      RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
		FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
		ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
			      &adapter->data->event_port_id, conf_arg);
		if (ret < 0) {
			rte_errno = -ret;
			goto free_memzone;
		}
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &sw_event_adapter_timer_ops;

	/* Allow driver to do some setup */
	FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
	ret = adapter->ops->init(adapter);
	if (ret < 0) {
		rte_errno = -ret;
		goto free_memzone;
	}

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	return adapter;

free_memzone:
	rte_memzone_free(adapter->data->mz);
	return NULL;
}

int __rte_experimental
rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter->ops->get_info)
		/* let driver set values it knows */
		adapter->ops->get_info(adapter, adapter_info);

	/* Set common values */
	adapter_info->conf = adapter->data->conf;
	adapter_info->event_dev_port_id = adapter->data->event_port_id;
	adapter_info->caps = adapter->data->caps;

	return 0;
}

int __rte_experimental
rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);

	ret = adapter->ops->start(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 1;

	return 0;
}

int __rte_experimental
rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);

	if (adapter->data->started == 0) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
			      adapter->data->id);
		return 0;
	}

	ret = adapter->ops->stop(adapter);
	if (ret < 0)
		return ret;

	adapter->data->started = 0;

	return 0;
}

struct rte_event_timer_adapter * __rte_experimental
rte_event_timer_adapter_lookup(uint16_t adapter_id)
{
	char name[DATA_MZ_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	struct rte_event_timer_adapter_data *data;
	struct rte_event_timer_adapter *adapter;
	int ret;
	struct rte_eventdev *dev;

	/* Check that adapter_id is in range */
	if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
		rte_errno = EINVAL;
		return NULL;
	}

	if (adapters[adapter_id].allocated)
		return &adapters[adapter_id]; /* Adapter is already loaded */

	snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	data = mz->addr;

	adapter = &adapters[data->id];
	adapter->data = data;

	dev = &rte_eventdevs[adapter->data->event_dev_id];

	/* Query eventdev PMD for timer adapter capabilities and ops */
	ret = dev->dev_ops->timer_adapter_caps_get(dev,
						   adapter->data->conf.flags,
						   &adapter->data->caps,
						   &adapter->ops);
	if (ret < 0) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* If eventdev PMD did not provide ops, use default software
	 * implementation.
	 */
	if (adapter->ops == NULL)
		adapter->ops = &sw_event_adapter_timer_ops;

	/* Set fast-path function pointers */
	adapter->arm_burst = adapter->ops->arm_burst;
	adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
	adapter->cancel_burst = adapter->ops->cancel_burst;

	adapter->allocated = 1;

	return adapter;
}

int __rte_experimental
rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
{
	int ret;

	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);

	if (adapter->data->started == 1) {
		EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
			      "before freeing", adapter->data->id);
		return -EBUSY;
	}

	/* free impl priv data */
	ret = adapter->ops->uninit(adapter);
	if (ret < 0)
		return ret;

	/* free shared data area */
	ret = rte_memzone_free(adapter->data->mz);
	if (ret < 0)
		return ret;

	adapter->data = NULL;
	adapter->allocated = 0;

	return 0;
}

int __rte_experimental
rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
				       uint32_t *service_id)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

	if (adapter->data->service_inited && service_id != NULL)
		*service_id = adapter->data->service_id;

	return adapter->data->service_inited ? 0 : -ESRCH;
}
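
/* Illustrative usage (not part of the original file): the service id
 * returned above must be mapped to a service lcore before the software
 * adapter will make progress, e.g.:
 *
 *	uint32_t service_id;
 *	if (rte_event_timer_adapter_service_id_get(adapter,
 *						   &service_id) == 0)
 *		rte_service_map_lcore_set(service_id, service_lcore_id, 1);
 */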

int __rte_experimental
rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
				  struct rte_event_timer_adapter_stats *stats)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
	if (stats == NULL)
		return -EINVAL;

	return adapter->ops->stats_get(adapter, stats);
}

int __rte_experimental
rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
{
	ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
	FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
	return adapter->ops->stats_reset(adapter);
}

/*
 * Software event timer adapter buffer helper functions
 */

#define NSECPERSEC 1E9

/* Optimizations used to index into the buffer require that the buffer size
 * be a power of 2.
 */
#define EVENT_BUFFER_SZ 4096
#define EVENT_BUFFER_BATCHSZ 32
#define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)
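
/* Worked example: with EVENT_BUFFER_SZ == 4096, a head counter of 4097
 * maps to index 4097 & 4095 == 1, so the uint16_t head/tail counters can
 * be incremented without bounds checks and wrap around naturally.
 */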

struct event_buffer {
	uint16_t head;
	uint16_t tail;
	struct rte_event events[EVENT_BUFFER_SZ];
} __rte_cache_aligned;

static inline bool
event_buffer_full(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
}

static inline bool
event_buffer_batch_ready(struct event_buffer *bufp)
{
	return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
}

static void
event_buffer_init(struct event_buffer *bufp)
{
	bufp->head = bufp->tail = 0;
	memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
}

static int
event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
{
	uint16_t head_idx;
	struct rte_event *buf_eventp;

	if (event_buffer_full(bufp))
		return -1;

	/* Instead of modulus, bitwise AND with mask to get head_idx. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	buf_eventp = &bufp->events[head_idx];
	rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));

	/* Wrap automatically when overflow occurs. */
	bufp->head++;

	return 0;
}

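/* Enqueue a contiguous run of buffered events to the event device.  If
 * the valid region wraps past the end of the array, only the tail-to-end
 * run is flushed here; the wrapped remainder is enqueued by a subsequent
 * call.
 */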
static void
event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
		   uint16_t *nb_events_flushed,
		   uint16_t *nb_events_inv)
{
	uint16_t head_idx, tail_idx, n = 0;
	struct rte_event *events = bufp->events;

	/* Instead of modulus, bitwise AND with mask to get index. */
	head_idx = bufp->head & EVENT_BUFFER_MASK;
	tail_idx = bufp->tail & EVENT_BUFFER_MASK;

	/* Determine the largest contiguous run we can attempt to enqueue to
	 * the event device.
	 */
	if (head_idx > tail_idx)
		n = head_idx - tail_idx;
	else if (head_idx < tail_idx)
		n = EVENT_BUFFER_SZ - tail_idx;
	else {
		*nb_events_flushed = 0;
		return;
	}

	*nb_events_inv = 0;
	*nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
						     &events[tail_idx], n);
	if (*nb_events_flushed != n && rte_errno == -EINVAL) {
		EVTIM_LOG_ERR("failed to enqueue invalid event - dropping it");
		(*nb_events_inv)++;
	}

	bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
}

/*
 * Software event timer adapter implementation
 */

struct rte_event_timer_adapter_sw_data {
	/* List of messages for outstanding timers */
	TAILQ_HEAD(, msg) msgs_tailq_head;
	/* Lock to guard tailq and armed count */
	rte_spinlock_t msgs_tailq_sl;
	/* Identifier of service executing timer management logic. */
	uint32_t service_id;
	/* The cycle count at which the adapter should next tick */
	uint64_t next_tick_cycles;
	/* Incremented as the service moves through phases of an iteration */
	volatile int service_phase;
	/* The tick resolution used by the adapter instance. May have been
	 * adjusted from what the user requested.
	 */
	uint64_t timer_tick_ns;
	/* Maximum timeout in nanoseconds allowed by adapter instance. */
	uint64_t max_tmo_ns;
	/* Ring containing messages to arm or cancel event timers */
	struct rte_ring *msg_ring;
	/* Mempool containing msg objects */
	struct rte_mempool *msg_pool;
	/* Buffered timer expiry events to be enqueued to an event device. */
	struct event_buffer buffer;
	/* Statistics */
	struct rte_event_timer_adapter_stats stats;
	/* The number of threads currently adding to the message ring */
	rte_atomic16_t message_producer_count;
};

enum msg_type {MSG_TYPE_ARM, MSG_TYPE_CANCEL};

struct msg {
	enum msg_type type;
	struct rte_event_timer *evtim;
	struct rte_timer tim;
	TAILQ_ENTRY(msg) msgs;
};

static void
sw_event_timer_cb(struct rte_timer *tim, void *arg)
{
	int ret;
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	uint64_t opaque;
	struct rte_event_timer *evtim;
	struct rte_event_timer_adapter *adapter;
	struct rte_event_timer_adapter_sw_data *sw_data;

	evtim = arg;
	opaque = evtim->impl_opaque[1];
	adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
	sw_data = adapter->data->adapter_priv;

	ret = event_buffer_add(&sw_data->buffer, &evtim->ev);
	if (ret < 0) {
		/* If event buffer is full, put timer back in list with
		 * immediate expiry value, so that we process it again on the
		 * next iteration.
		 */
		rte_timer_reset_sync(tim, 0, SINGLE, rte_lcore_id(),
				     sw_event_timer_cb, evtim);

		sw_data->stats.evtim_retry_count++;
		EVTIM_LOG_DBG("event buffer full, resetting rte_timer with "
			      "immediate expiry value");
	} else {
		struct msg *m = container_of(tim, struct msg, tim);
		TAILQ_REMOVE(&sw_data->msgs_tailq_head, m, msgs);
		EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");
		evtim->state = RTE_EVENT_TIMER_NOT_ARMED;

		/* Free the msg object containing the rte_timer now that
		 * we've buffered its event successfully.
		 */
		rte_mempool_put(sw_data->msg_pool, m);

		/* Bump the count when we successfully add an expiry event to
		 * the buffer.
		 */
		sw_data->stats.evtim_exp_count++;
	}

	if (event_buffer_batch_ready(&sw_data->buffer)) {
		event_buffer_flush(&sw_data->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed,
				   &nb_evs_invalid);

		sw_data->stats.ev_enq_count += nb_evs_flushed;
		sw_data->stats.ev_inv_count += nb_evs_invalid;
	}
}

static __rte_always_inline uint64_t
get_timeout_cycles(struct rte_event_timer *evtim,
		   struct rte_event_timer_adapter *adapter)
{
	uint64_t timeout_ns;
	struct rte_event_timer_adapter_sw_data *sw_data;

	sw_data = adapter->data->adapter_priv;
	timeout_ns = evtim->timeout_ticks * sw_data->timer_tick_ns;
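	/* Convert nanoseconds to timer cycles.  Worked example with
	 * hypothetical values: timer_tick_ns = 1 ms, timeout_ticks = 100,
	 * and a 2 GHz timer clock give 1e8 * 2e9 / 1e9 = 2e8 cycles
	 * (i.e., 100 ms at 2 GHz).
	 */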
	return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
}

/* This function returns true if one or more (adapter) ticks have occurred since
 * the last time it was called.
 */
static inline bool
adapter_did_tick(struct rte_event_timer_adapter *adapter)
{
	uint64_t cycles_per_adapter_tick, start_cycles;
	uint64_t *next_tick_cyclesp;
	struct rte_event_timer_adapter_sw_data *sw_data;

	sw_data = adapter->data->adapter_priv;
	next_tick_cyclesp = &sw_data->next_tick_cycles;

	cycles_per_adapter_tick = sw_data->timer_tick_ns *
			(rte_get_timer_hz() / NSECPERSEC);

	start_cycles = rte_get_timer_cycles();

	/* Note: initially, *next_tick_cyclesp == 0, so the clause below will
	 * execute, and set things going.
	 */

	if (start_cycles >= *next_tick_cyclesp) {
		/* Snap the current cycle count to the preceding adapter tick
		 * boundary.
		 */
		start_cycles -= start_cycles % cycles_per_adapter_tick;

		*next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;

		return true;
	}

	return false;
}

/* Check that event timer timeout value is in range */
static __rte_always_inline int
check_timeout(struct rte_event_timer *evtim,
	      const struct rte_event_timer_adapter *adapter)
{
	uint64_t tmo_nsec;
	struct rte_event_timer_adapter_sw_data *sw_data;

	sw_data = adapter->data->adapter_priv;
	tmo_nsec = evtim->timeout_ticks * sw_data->timer_tick_ns;

	if (tmo_nsec > sw_data->max_tmo_ns)
		return -1;

	if (tmo_nsec < sw_data->timer_tick_ns)
		return -2;

	return 0;
}
/* Check that the event timer's sched type matches the sched type of its
 * destination event queue
 */
static __rte_always_inline int
check_destination_event_queue(struct rte_event_timer *evtim,
			      const struct rte_event_timer_adapter *adapter)
{
	int ret;
	uint32_t sched_type;

	ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
				       evtim->ev.queue_id,
				       RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
				       &sched_type);

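	/* rte_event_queue_attr_get() returns -EOVERFLOW for queues configured
	 * with RTE_EVENT_QUEUE_CFG_ALL_TYPES; such queues accept events of
	 * any sched type, and sched_type is left unmodified in that case.
	 */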
	if (ret < 0 && ret != -EOVERFLOW)
		return -1;

	if (ret == 0 && evtim->ev.sched_type != sched_type)
		return -1;

	return 0;
}

#define NB_OBJS 32
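
/* Service phases: in phase 1 the service drains the message ring while
 * producers may still be enqueueing; in phase 2 it runs rte_timer_manage()
 * and flushes expiry events; phase 0 means the service is idle.  Arm and
 * cancel callers spin while the phase is 2 so that event timer state is
 * not modified concurrently (see __sw_event_timer_arm_burst()).
 */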
static int
sw_event_timer_adapter_service_func(void *arg)
{
	int i, num_msgs;
	uint64_t cycles, opaque;
	uint16_t nb_evs_flushed = 0;
	uint16_t nb_evs_invalid = 0;
	struct rte_event_timer_adapter *adapter;
	struct rte_event_timer_adapter_sw_data *sw_data;
	struct rte_event_timer *evtim = NULL;
	struct rte_timer *tim = NULL;
	struct msg *msg, *msgs[NB_OBJS];

	adapter = arg;
	sw_data = adapter->data->adapter_priv;

	sw_data->service_phase = 1;
	rte_smp_wmb();

	while (rte_atomic16_read(&sw_data->message_producer_count) > 0 ||
	       !rte_ring_empty(sw_data->msg_ring)) {

		num_msgs = rte_ring_dequeue_burst(sw_data->msg_ring,
						  (void **)msgs, NB_OBJS, NULL);

		for (i = 0; i < num_msgs; i++) {
			int ret = 0;

			RTE_SET_USED(ret);

			msg = msgs[i];
			evtim = msg->evtim;

			switch (msg->type) {
			case MSG_TYPE_ARM:
				EVTIM_SVC_LOG_DBG("dequeued ARM message from "
						  "ring");
				tim = &msg->tim;
				rte_timer_init(tim);
				cycles = get_timeout_cycles(evtim,
							    adapter);
				ret = rte_timer_reset(tim, cycles, SINGLE,
						      rte_lcore_id(),
						      sw_event_timer_cb,
						      evtim);
				RTE_ASSERT(ret == 0);

				evtim->impl_opaque[0] = (uintptr_t)tim;
				evtim->impl_opaque[1] = (uintptr_t)adapter;

				TAILQ_INSERT_TAIL(&sw_data->msgs_tailq_head,
						  msg,
						  msgs);
				break;
			case MSG_TYPE_CANCEL:
				EVTIM_SVC_LOG_DBG("dequeued CANCEL message "
						  "from ring");
				opaque = evtim->impl_opaque[0];
				tim = (struct rte_timer *)(uintptr_t)opaque;
				RTE_ASSERT(tim != NULL);

				ret = rte_timer_stop(tim);
				RTE_ASSERT(ret == 0);

				/* Free the msg object for the original arm
				 * request.
				 */
				struct msg *m;
				m = container_of(tim, struct msg, tim);
				TAILQ_REMOVE(&sw_data->msgs_tailq_head, m,
					     msgs);
				rte_mempool_put(sw_data->msg_pool, m);

				/* Free the msg object for the current msg */
				rte_mempool_put(sw_data->msg_pool, msg);

				evtim->impl_opaque[0] = 0;
				evtim->impl_opaque[1] = 0;

				break;
			}
		}
	}

	sw_data->service_phase = 2;
	rte_smp_wmb();

	if (adapter_did_tick(adapter)) {
		rte_timer_manage();

		event_buffer_flush(&sw_data->buffer,
				   adapter->data->event_dev_id,
				   adapter->data->event_port_id,
				   &nb_evs_flushed, &nb_evs_invalid);

		sw_data->stats.ev_enq_count += nb_evs_flushed;
		sw_data->stats.ev_inv_count += nb_evs_invalid;
		sw_data->stats.adapter_tick_count++;
	}

	sw_data->service_phase = 0;
	rte_smp_wmb();

	return 0;
}

/* The adapter initialization function rounds the mempool size up to the next
 * power of 2, so we can take the difference between that value and what the
 * user requested, and use the space for caches.  This avoids a scenario where a
 * user can't arm the number of timers the adapter was configured with because
 * mempool objects have been lost to caches.
 *
 * nb_actual should always be a power of 2, so we can iterate over the powers
 * of 2 to see what the largest cache size we can use is.
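 *
 * Worked example (illustrative, assuming RTE_MAX_LCORE == 128): for
 * nb_requested = 5000 and nb_actual = 8192 the surplus is 3192 objects;
 * 16 is the largest power of 2 for which 128 * size < 3192, so a cache
 * size of 16 is chosen.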
 */
static int
compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
{
	int i;
	int size;
	int cache_size = 0;

	for (i = 0; ; i++) {
		size = 1 << i;

		if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
		    size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
		    size <= nb_actual / 1.5)
			cache_size = size;
		else
			break;
	}

	return cache_size;
}

#define SW_MIN_INTERVAL 1E5

static int
sw_event_timer_adapter_init(struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct rte_event_timer_adapter_sw_data *sw_data;
	uint64_t nb_timers;
	unsigned int flags;
	struct rte_service_spec service;
	static bool timer_subsystem_inited; /* static initialized to false */

	/* Validate the requested tick interval before allocating anything,
	 * so that failure here cannot leak the private data area.
	 */
	if (adapter->data->conf.timer_tick_ns < SW_MIN_INTERVAL) {
		EVTIM_LOG_ERR("failed to create adapter with requested tick "
			      "interval");
		rte_errno = EINVAL;
		return -1;
	}

	/* Allocate storage for SW implementation data */
	char priv_data_name[RTE_RING_NAMESIZE];
	snprintf(priv_data_name, RTE_RING_NAMESIZE, "sw_evtim_adap_priv_%"PRIu8,
		 adapter->data->id);
	adapter->data->adapter_priv = rte_zmalloc_socket(
				priv_data_name,
				sizeof(struct rte_event_timer_adapter_sw_data),
				RTE_CACHE_LINE_SIZE,
				adapter->data->socket_id);
	if (adapter->data->adapter_priv == NULL) {
		EVTIM_LOG_ERR("failed to allocate space for private data");
		rte_errno = ENOMEM;
		return -1;
	}

	sw_data = adapter->data->adapter_priv;

	sw_data->timer_tick_ns = adapter->data->conf.timer_tick_ns;
	sw_data->max_tmo_ns = adapter->data->conf.max_tmo_ns;

	TAILQ_INIT(&sw_data->msgs_tailq_head);
	rte_spinlock_init(&sw_data->msgs_tailq_sl);
	rte_atomic16_init(&sw_data->message_producer_count);

	/* Rings require power of 2, so round up to next such value */
	nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);

	char msg_ring_name[RTE_RING_NAMESIZE];
	snprintf(msg_ring_name, RTE_RING_NAMESIZE,
		 "sw_evtim_adap_msg_ring_%"PRIu8, adapter->data->id);
	flags = adapter->data->conf.flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT ?
		RING_F_SP_ENQ | RING_F_SC_DEQ :
		RING_F_SC_DEQ;
	sw_data->msg_ring = rte_ring_create(msg_ring_name, nb_timers,
					    adapter->data->socket_id, flags);
	if (sw_data->msg_ring == NULL) {
		EVTIM_LOG_ERR("failed to create message ring");
		rte_errno = ENOMEM;
		goto free_priv_data;
	}

	char pool_name[RTE_RING_NAMESIZE];
	snprintf(pool_name, RTE_RING_NAMESIZE, "sw_evtim_adap_msg_pool_%"PRIu8,
		 adapter->data->id);

	/* Both the arming/canceling thread and the service thread will do puts
	 * to the mempool, but if the SP_PUT flag is enabled, we can specify
	 * single-consumer get for the mempool.
	 */
	flags = adapter->data->conf.flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT ?
		MEMPOOL_F_SC_GET : 0;

	/* The usable size of a ring is count - 1, so subtract one here to
	 * make the counts agree.
	 */
	int pool_size = nb_timers - 1;
	int cache_size = compute_msg_mempool_cache_size(
				adapter->data->conf.nb_timers, nb_timers);
	sw_data->msg_pool = rte_mempool_create(pool_name, pool_size,
					       sizeof(struct msg), cache_size,
					       0, NULL, NULL, NULL, NULL,
					       adapter->data->socket_id, flags);
	if (sw_data->msg_pool == NULL) {
		EVTIM_LOG_ERR("failed to create message object mempool");
		rte_errno = ENOMEM;
		goto free_msg_ring;
	}

	event_buffer_init(&sw_data->buffer);

	/* Register a service component to run adapter logic */
	memset(&service, 0, sizeof(service));
	snprintf(service.name, RTE_SERVICE_NAME_MAX,
		 "sw_evtim_adap_svc_%"PRIu8, adapter->data->id);
	service.socket_id = adapter->data->socket_id;
	service.callback = sw_event_timer_adapter_service_func;
	service.callback_userdata = adapter;
	service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
	ret = rte_service_component_register(&service, &sw_data->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
			      ": err = %d", service.name, sw_data->service_id,
			      ret);

		rte_errno = ENOSPC;
		goto free_msg_pool;
	}

	EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
		      sw_data->service_id);

	adapter->data->service_id = sw_data->service_id;
	adapter->data->service_inited = 1;

	if (!timer_subsystem_inited) {
		rte_timer_subsystem_init();
		timer_subsystem_inited = true;
	}

	return 0;

free_msg_pool:
	rte_mempool_free(sw_data->msg_pool);
free_msg_ring:
	rte_ring_free(sw_data->msg_ring);
free_priv_data:
	rte_free(sw_data);
	return -1;
}

static int
sw_event_timer_adapter_uninit(struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct msg *m1, *m2;
	struct rte_event_timer_adapter_sw_data *sw_data =
						adapter->data->adapter_priv;

	rte_spinlock_lock(&sw_data->msgs_tailq_sl);

	/* Cancel outstanding rte_timers and free msg objects */
	m1 = TAILQ_FIRST(&sw_data->msgs_tailq_head);
	while (m1 != NULL) {
		EVTIM_LOG_DBG("freeing outstanding timer");
		m2 = TAILQ_NEXT(m1, msgs);

		rte_timer_stop_sync(&m1->tim);
		rte_mempool_put(sw_data->msg_pool, m1);

		m1 = m2;
	}

	rte_spinlock_unlock(&sw_data->msgs_tailq_sl);

	ret = rte_service_component_unregister(sw_data->service_id);
	if (ret < 0) {
		EVTIM_LOG_ERR("failed to unregister service component");
		return ret;
	}

	rte_ring_free(sw_data->msg_ring);
	rte_mempool_free(sw_data->msg_pool);
	rte_free(adapter->data->adapter_priv);

	return 0;
}

static inline int32_t
get_mapped_count_for_service(uint32_t service_id)
{
	int32_t core_count, i, mapped_count = 0;
	uint32_t lcore_arr[RTE_MAX_LCORE];

	core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);

	for (i = 0; i < core_count; i++)
		if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
			mapped_count++;

	return mapped_count;
}

static int
sw_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
{
	int mapped_count;
	struct rte_event_timer_adapter_sw_data *sw_data;

	sw_data = adapter->data->adapter_priv;

	/* Mapping the service to more than one service core can introduce
	 * delays while one thread is waiting to acquire a lock, so only allow
	 * one core to be mapped to the service.
	 */
	mapped_count = get_mapped_count_for_service(sw_data->service_id);

	if (mapped_count == 1)
		return rte_service_component_runstate_set(sw_data->service_id,
							  1);

	return mapped_count < 1 ? -ENOENT : -ENOTSUP;
}

static int
sw_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
{
	int ret;
	struct rte_event_timer_adapter_sw_data *sw_data =
						adapter->data->adapter_priv;

	ret = rte_service_component_runstate_set(sw_data->service_id, 0);
	if (ret < 0)
		return ret;

	/* Wait for the service to complete its final iteration before
	 * stopping.
	 */
	while (sw_data->service_phase != 0)
		rte_pause();

	rte_smp_rmb();

	return 0;
}

static void
sw_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
		struct rte_event_timer_adapter_info *adapter_info)
{
	struct rte_event_timer_adapter_sw_data *sw_data;
	sw_data = adapter->data->adapter_priv;

	adapter_info->min_resolution_ns = sw_data->timer_tick_ns;
	adapter_info->max_tmo_ns = sw_data->max_tmo_ns;
}

static int
sw_event_timer_adapter_stats_get(const struct rte_event_timer_adapter *adapter,
				 struct rte_event_timer_adapter_stats *stats)
{
	struct rte_event_timer_adapter_sw_data *sw_data;
	sw_data = adapter->data->adapter_priv;
	*stats = sw_data->stats;
	return 0;
}

static int
sw_event_timer_adapter_stats_reset(
				const struct rte_event_timer_adapter *adapter)
{
	struct rte_event_timer_adapter_sw_data *sw_data;
	sw_data = adapter->data->adapter_priv;
	memset(&sw_data->stats, 0, sizeof(sw_data->stats));
	return 0;
}

static __rte_always_inline uint16_t
__sw_event_timer_arm_burst(const struct rte_event_timer_adapter *adapter,
			  struct rte_event_timer **evtims,
			  uint16_t nb_evtims)
{
	uint16_t i;
	int ret;
	struct rte_event_timer_adapter_sw_data *sw_data;
	struct msg *msgs[nb_evtims];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	sw_data = adapter->data->adapter_priv;

	ret = rte_mempool_get_bulk(sw_data->msg_pool, (void **)msgs, nb_evtims);
	if (ret < 0) {
		rte_errno = ENOSPC;
		return 0;
	}

	/* Let the service know we're producing messages for it to process */
	rte_atomic16_inc(&sw_data->message_producer_count);

	/* If the service is managing timers, wait for it to finish */
	while (sw_data->service_phase == 2)
		rte_pause();

	rte_smp_rmb();

	for (i = 0; i < nb_evtims; i++) {
		/* Don't modify the event timer state in these cases */
		if (evtims[i]->state == RTE_EVENT_TIMER_ARMED) {
			rte_errno = EALREADY;
			break;
		} else if (!(evtims[i]->state == RTE_EVENT_TIMER_NOT_ARMED ||
		    evtims[i]->state == RTE_EVENT_TIMER_CANCELED)) {
			rte_errno = EINVAL;
			break;
		}

		ret = check_timeout(evtims[i], adapter);
		if (ret == -1) {
			evtims[i]->state = RTE_EVENT_TIMER_ERROR_TOOLATE;
			rte_errno = EINVAL;
			break;
		}
		if (ret == -2) {
			evtims[i]->state = RTE_EVENT_TIMER_ERROR_TOOEARLY;
			rte_errno = EINVAL;
			break;
		}

		if (check_destination_event_queue(evtims[i], adapter) < 0) {
			evtims[i]->state = RTE_EVENT_TIMER_ERROR;
			rte_errno = EINVAL;
			break;
		}

		/* Checks passed, set up a message to enqueue */
		msgs[i]->type = MSG_TYPE_ARM;
		msgs[i]->evtim = evtims[i];

		/* Set the payload pointer if not set. */
		if (evtims[i]->ev.event_ptr == NULL)
			evtims[i]->ev.event_ptr = evtims[i];

		/* msg objects that get enqueued successfully will be freed
		 * either by a future cancel operation or by the timer
		 * expiration callback.
		 */
		if (rte_ring_enqueue(sw_data->msg_ring, msgs[i]) < 0) {
			rte_errno = ENOSPC;
			break;
		}

		EVTIM_LOG_DBG("enqueued ARM message to ring");

		evtims[i]->state = RTE_EVENT_TIMER_ARMED;
	}

	/* Let the service know we're done producing messages */
	rte_atomic16_dec(&sw_data->message_producer_count);

	if (i < nb_evtims)
		rte_mempool_put_bulk(sw_data->msg_pool, (void **)&msgs[i],
				     nb_evtims - i);

	return i;
}

static uint16_t
sw_event_timer_arm_burst(const struct rte_event_timer_adapter *adapter,
			 struct rte_event_timer **evtims,
			 uint16_t nb_evtims)
{
	return __sw_event_timer_arm_burst(adapter, evtims, nb_evtims);
}

static uint16_t
sw_event_timer_cancel_burst(const struct rte_event_timer_adapter *adapter,
			    struct rte_event_timer **evtims,
			    uint16_t nb_evtims)
{
	uint16_t i;
	int ret;
	struct rte_event_timer_adapter_sw_data *sw_data;
	struct msg *msgs[nb_evtims];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	/* Check that the service is running. */
	if (rte_service_runstate_get(adapter->data->service_id) != 1) {
		rte_errno = EINVAL;
		return 0;
	}
#endif

	sw_data = adapter->data->adapter_priv;

	ret = rte_mempool_get_bulk(sw_data->msg_pool, (void **)msgs, nb_evtims);
	if (ret < 0) {
		rte_errno = ENOSPC;
		return 0;
	}

	/* Let the service know we're producing messages for it to process */
	rte_atomic16_inc(&sw_data->message_producer_count);

	/* If the service could be modifying event timer states, wait */
	while (sw_data->service_phase == 2)
		rte_pause();

	rte_smp_rmb();

	for (i = 0; i < nb_evtims; i++) {
		/* Don't modify the event timer state in these cases */
		if (evtims[i]->state == RTE_EVENT_TIMER_CANCELED) {
			rte_errno = EALREADY;
			break;
		} else if (evtims[i]->state != RTE_EVENT_TIMER_ARMED) {
			rte_errno = EINVAL;
			break;
		}

		msgs[i]->type = MSG_TYPE_CANCEL;
		msgs[i]->evtim = evtims[i];

		if (rte_ring_enqueue(sw_data->msg_ring, msgs[i]) < 0) {
			rte_errno = ENOSPC;
			break;
		}

		EVTIM_LOG_DBG("enqueued CANCEL message to ring");

		evtims[i]->state = RTE_EVENT_TIMER_CANCELED;
	}

	/* Let the service know we're done producing messages */
	rte_atomic16_dec(&sw_data->message_producer_count);

	if (i < nb_evtims)
		rte_mempool_put_bulk(sw_data->msg_pool, (void **)&msgs[i],
				     nb_evtims - i);

	return i;
}

static uint16_t
sw_event_timer_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
				  struct rte_event_timer **evtims,
				  uint64_t timeout_ticks,
				  uint16_t nb_evtims)
{
	int i;

	for (i = 0; i < nb_evtims; i++)
		evtims[i]->timeout_ticks = timeout_ticks;

	return __sw_event_timer_arm_burst(adapter, evtims, nb_evtims);
}

static const struct rte_event_timer_adapter_ops sw_event_adapter_timer_ops = {
	.init = sw_event_timer_adapter_init,
	.uninit = sw_event_timer_adapter_uninit,
	.start = sw_event_timer_adapter_start,
	.stop = sw_event_timer_adapter_stop,
	.get_info = sw_event_timer_adapter_get_info,
	.stats_get = sw_event_timer_adapter_stats_get,
	.stats_reset = sw_event_timer_adapter_stats_reset,
	.arm_burst = sw_event_timer_arm_burst,
	.arm_tmo_tick_burst = sw_event_timer_arm_tmo_tick_burst,
	.cancel_burst = sw_event_timer_cancel_burst,
};
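
/* Eventdev PMDs without hardware timer support may return NULL ops from
 * their timer_adapter_caps_get() callback; the create and lookup paths
 * above then fall back to this software implementation.
 */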

RTE_INIT(event_timer_adapter_init_log)
{
	evtim_logtype = rte_log_register("lib.eventdev.adapter.timer");
	if (evtim_logtype >= 0)
		rte_log_set_level(evtim_logtype, RTE_LOG_NOTICE);

	evtim_buffer_logtype = rte_log_register("lib.eventdev.adapter.timer."
						"buffer");
	if (evtim_buffer_logtype >= 0)
		rte_log_set_level(evtim_buffer_logtype, RTE_LOG_NOTICE);

	evtim_svc_logtype = rte_log_register("lib.eventdev.adapter.timer.svc");
	if (evtim_svc_logtype >= 0)
		rte_log_set_level(evtim_svc_logtype, RTE_LOG_NOTICE);
}