xref: /f-stack/dpdk/lib/librte_timer/rte_timer.c (revision 2d9fd380)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>
#include <assert.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_spinlock.h>
#include <rte_random.h>
#include <rte_pause.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_errno.h>

#include "rte_timer.h"

/**
 * Per-lcore info for timers.
 */
struct priv_timer {
	struct rte_timer pending_head;  /**< dummy timer instance to head up list */
	rte_spinlock_t list_lock;       /**< lock to protect list access */

	/** per-core variable that is true if a timer was updated on this
	 *  core since the last reset of the variable */
	int updated;

	/** track the current depth of the skiplist */
	unsigned curr_skiplist_depth;

	unsigned prev_lcore;              /**< used for lcore round robin */

	/** timer currently running on this lcore */
	struct rte_timer *running_tim;

#ifdef RTE_LIBRTE_TIMER_DEBUG
	/** per-lcore statistics */
	struct rte_timer_debug_stats stats;
#endif
} __rte_cache_aligned;

#define FL_ALLOCATED	(1 << 0)
struct rte_timer_data {
	struct priv_timer priv_timer[RTE_MAX_LCORE];
	uint8_t internal_flags;
};

#define RTE_MAX_DATA_ELS 64
static const struct rte_memzone *rte_timer_data_mz;
static int *volatile rte_timer_mz_refcnt;
static struct rte_timer_data *rte_timer_data_arr;
static const uint32_t default_data_id;
static uint32_t rte_timer_subsystem_initialized;

/* when debug is enabled, store some statistics */
#ifdef RTE_LIBRTE_TIMER_DEBUG
#define __TIMER_STAT_ADD(priv_timer, name, n) do {			\
		unsigned __lcore_id = rte_lcore_id();			\
		if (__lcore_id < RTE_MAX_LCORE)				\
			priv_timer[__lcore_id].stats.name += (n);	\
	} while (0)
#else
#define __TIMER_STAT_ADD(priv_timer, name, n) do {} while (0)
#endif

static inline int
timer_data_valid(uint32_t id)
{
	return rte_timer_data_arr &&
		(rte_timer_data_arr[id].internal_flags & FL_ALLOCATED);
}

/* validate ID and retrieve timer data pointer, or return error value */
#define TIMER_DATA_VALID_GET_OR_ERR_RET(id, timer_data, retval) do {	\
	if (id >= RTE_MAX_DATA_ELS || !timer_data_valid(id))		\
		return retval;						\
	timer_data = &rte_timer_data_arr[id];				\
} while (0)

int
rte_timer_data_alloc(uint32_t *id_ptr)
{
	int i;
	struct rte_timer_data *data;

	if (!rte_timer_subsystem_initialized)
		return -ENOMEM;

	for (i = 0; i < RTE_MAX_DATA_ELS; i++) {
		data = &rte_timer_data_arr[i];
		if (!(data->internal_flags & FL_ALLOCATED)) {
			data->internal_flags |= FL_ALLOCATED;

			if (id_ptr)
				*id_ptr = i;

			return 0;
		}
	}

	return -ENOSPC;
}

int
rte_timer_data_dealloc(uint32_t id)
{
	struct rte_timer_data *timer_data;
	TIMER_DATA_VALID_GET_OR_ERR_RET(id, timer_data, -EINVAL);

	timer_data->internal_flags &= ~(FL_ALLOCATED);

	return 0;
}
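
/*
 * Illustrative usage sketch (not part of this file): the alternate API
 * above lets a user allocate a private timer-data instance instead of
 * relying on the default one.  Error handling is abbreviated and the
 * variable name is hypothetical.
 *
 *	uint32_t my_data_id;
 *
 *	if (rte_timer_data_alloc(&my_data_id) < 0)
 *		return; // no free element, or subsystem not initialized
 *	// ... use my_data_id with rte_timer_alt_reset()/rte_timer_alt_stop()
 *	rte_timer_data_dealloc(my_data_id);
 */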

/* Init the timer library. Allocate an array of timer data structs in shared
 * memory, and allocate the zeroth entry for use with original timer
 * APIs. Since the intersection of the sets of lcore ids in primary and
 * secondary processes should be empty, the zeroth entry can be shared by
 * multiple processes.
 */
int
rte_timer_subsystem_init(void)
{
	const struct rte_memzone *mz;
	struct rte_timer_data *data;
	int i, lcore_id;
	static const char *mz_name = "rte_timer_mz";
	const size_t data_arr_size =
			RTE_MAX_DATA_ELS * sizeof(*rte_timer_data_arr);
	const size_t mem_size = data_arr_size + sizeof(*rte_timer_mz_refcnt);
	bool do_full_init = true;

	rte_mcfg_timer_lock();

	if (rte_timer_subsystem_initialized) {
		rte_mcfg_timer_unlock();
		return -EALREADY;
	}

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(mz_name, mem_size,
				SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			rte_mcfg_timer_unlock();
			return -ENOMEM;
		}
		do_full_init = true;
	} else
		do_full_init = false;

	rte_timer_data_mz = mz;
	rte_timer_data_arr = mz->addr;
	rte_timer_mz_refcnt = (void *)((char *)mz->addr + data_arr_size);

	if (do_full_init) {
		for (i = 0; i < RTE_MAX_DATA_ELS; i++) {
			data = &rte_timer_data_arr[i];

			for (lcore_id = 0; lcore_id < RTE_MAX_LCORE;
			     lcore_id++) {
				rte_spinlock_init(
					&data->priv_timer[lcore_id].list_lock);
				data->priv_timer[lcore_id].prev_lcore =
					lcore_id;
			}
		}
	}

	rte_timer_data_arr[default_data_id].internal_flags |= FL_ALLOCATED;
	(*rte_timer_mz_refcnt)++;

	rte_timer_subsystem_initialized = 1;

	rte_mcfg_timer_unlock();

	return 0;
}

void
rte_timer_subsystem_finalize(void)
{
	rte_mcfg_timer_lock();

	if (!rte_timer_subsystem_initialized) {
		rte_mcfg_timer_unlock();
		return;
	}

	if (--(*rte_timer_mz_refcnt) == 0)
		rte_memzone_free(rte_timer_data_mz);

	rte_timer_subsystem_initialized = 0;

	rte_mcfg_timer_unlock();
}
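
/*
 * Illustrative lifecycle sketch (not part of this file): the subsystem
 * is initialized once per process before any timer is armed and
 * finalized at teardown; the memzone refcount lets primary and
 * secondary processes share the zeroth timer-data element.
 *
 *	int ret = rte_timer_subsystem_init();
 *	if (ret < 0 && ret != -EALREADY)
 *		rte_exit(EXIT_FAILURE, "cannot init timer subsystem\n");
 *	// ... arm timers, run rte_timer_manage() on worker lcores ...
 *	rte_timer_subsystem_finalize();
 */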

/* Initialize the timer handle tim for use */
void
rte_timer_init(struct rte_timer *tim)
{
	union rte_timer_status status;

	status.state = RTE_TIMER_STOP;
	status.owner = RTE_TIMER_NO_OWNER;
	__atomic_store_n(&tim->status.u32, status.u32, __ATOMIC_RELAXED);
}

/*
 * if timer is pending or stopped (or running on the same core as
 * us), mark timer as configuring, and on success return the previous
 * status of the timer
 */
static int
timer_set_config_state(struct rte_timer *tim,
		       union rte_timer_status *ret_prev_status,
		       struct priv_timer *priv_timer)
{
	union rte_timer_status prev_status, status;
	int success = 0;
	unsigned lcore_id;

	lcore_id = rte_lcore_id();

	/* wait until the timer is in a correct status before updating,
	 * and mark it as being configured */
	prev_status.u32 = __atomic_load_n(&tim->status.u32, __ATOMIC_RELAXED);

	while (success == 0) {
		/* timer is running on another core
		 * or ready to run on local core, exit
		 */
		if (prev_status.state == RTE_TIMER_RUNNING &&
		    (prev_status.owner != (uint16_t)lcore_id ||
		     tim != priv_timer[lcore_id].running_tim))
			return -1;

		/* timer is being configured on another core */
		if (prev_status.state == RTE_TIMER_CONFIG)
			return -1;

		/* here, we know that timer is stopped or pending,
		 * mark it atomically as being configured */
		status.state = RTE_TIMER_CONFIG;
		status.owner = (int16_t)lcore_id;
		/* CONFIG states are acting as locked states. If the
		 * timer is in CONFIG state, the state cannot be changed
		 * by other threads. So, we should use ACQUIRE here.
		 */
		success = __atomic_compare_exchange_n(&tim->status.u32,
					      &prev_status.u32,
					      status.u32, 0,
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED);
	}

	ret_prev_status->u32 = prev_status.u32;
	return 0;
}

/*
 * if timer is pending, mark timer as running
 */
static int
timer_set_running_state(struct rte_timer *tim)
{
	union rte_timer_status prev_status, status;
	unsigned lcore_id = rte_lcore_id();
	int success = 0;

	/* wait until the timer is in a correct status before updating,
	 * and mark it as running */
	prev_status.u32 = __atomic_load_n(&tim->status.u32, __ATOMIC_RELAXED);

	while (success == 0) {
		/* timer is not pending anymore */
		if (prev_status.state != RTE_TIMER_PENDING)
			return -1;

		/* we know that the timer will be pending at this point;
		 * mark it atomically as being running
		 */
		status.state = RTE_TIMER_RUNNING;
		status.owner = (int16_t)lcore_id;
		/* RUNNING states are acting as locked states. If the
		 * timer is in RUNNING state, the state cannot be changed
		 * by other threads. So, we should use ACQUIRE here.
		 */
		success = __atomic_compare_exchange_n(&tim->status.u32,
					      &prev_status.u32,
					      status.u32, 0,
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED);
	}

	return 0;
}
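
/*
 * Summary of the status state machine implemented by the two helpers
 * above (derived from the code in this file):
 *
 *	STOP/PENDING --(timer_set_config_state)---> CONFIG
 *	CONFIG       --(reset/stop completes)-----> PENDING or STOP
 *	PENDING      --(timer_set_running_state)--> RUNNING
 *	RUNNING      --(manage loop, after cb)----> PENDING or STOP
 *
 * RUNNING may also transition to CONFIG, but only on the owning lcore
 * when the callback itself resets or stops its own timer.  CONFIG and
 * RUNNING behave as lock bits: while a timer is in one of these states,
 * only the owning lcore may change its status word.
 */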

/*
 * Return a skiplist level for a new entry.
 * This probabilistically gives levels such that, with p=1/4, an entry
 * at level n will also appear at level n+1.
 */
static uint32_t
timer_get_skiplist_level(unsigned curr_depth)
{
#ifdef RTE_LIBRTE_TIMER_DEBUG
	static uint32_t i, count = 0;
	static uint32_t levels[MAX_SKIPLIST_DEPTH] = {0};
#endif

	/* probability value is 1/4, i.e. all at level 0, 1 in 4 is at level 1,
	 * 1 in 16 at level 2, 1 in 64 at level 3, etc. Calculated using lowest
	 * bit position of a (pseudo)random number.
	 */
	uint32_t rand = rte_rand() & (UINT32_MAX - 1);
	uint32_t level = rand == 0 ? MAX_SKIPLIST_DEPTH : (rte_bsf32(rand)-1) / 2;

	/* limit the levels used to one above our current level, so we don't,
	 * for instance, have a level 0 and a level 7 without anything between
	 */
	if (level > curr_depth)
		level = curr_depth;
	if (level >= MAX_SKIPLIST_DEPTH)
		level = MAX_SKIPLIST_DEPTH-1;
#ifdef RTE_LIBRTE_TIMER_DEBUG
	count++;
	levels[level]++;
	if (count % 10000 == 0)
		for (i = 0; i < MAX_SKIPLIST_DEPTH; i++)
			printf("Level %u: %u\n", (unsigned)i, (unsigned)levels[i]);
#endif
	return level;
}
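
/*
 * Worked example for the level computation above (a sketch derived from
 * the code): bit 0 of rand is masked off, so the lowest set bit b is
 * >= 1 and occurs with probability 2^-b.  level = (b-1)/2 then maps
 * b in {1,2} to level 0, b in {3,4} to level 1, and so on, giving
 * P(level >= n) = 2^-2n = 4^-n, i.e. the 1-in-4 promotion rate the
 * comment describes.  The rand == 0 case (probability 2^-31) asks for
 * MAX_SKIPLIST_DEPTH and is clamped by the checks that follow.
 */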

/*
 * For a given time value, get the entries at each level which
 * are <= that time value.
 */
static void
timer_get_prev_entries(uint64_t time_val, unsigned tim_lcore,
		       struct rte_timer **prev, struct priv_timer *priv_timer)
{
	unsigned lvl = priv_timer[tim_lcore].curr_skiplist_depth;
	prev[lvl] = &priv_timer[tim_lcore].pending_head;
	while (lvl != 0) {
		lvl--;
		prev[lvl] = prev[lvl+1];
		while (prev[lvl]->sl_next[lvl] &&
				prev[lvl]->sl_next[lvl]->expire <= time_val)
			prev[lvl] = prev[lvl]->sl_next[lvl];
	}
}

/*
 * Given a timer node in the skiplist, find the previous entries for it at
 * all skiplist levels.
 */
static void
timer_get_prev_entries_for_node(struct rte_timer *tim, unsigned tim_lcore,
				struct rte_timer **prev,
				struct priv_timer *priv_timer)
{
	int i;

	/* to get a specific entry in the list, look for entries just below
	 * the time value, and then increment on each level individually if
	 * necessary
	 */
	timer_get_prev_entries(tim->expire - 1, tim_lcore, prev, priv_timer);
	for (i = priv_timer[tim_lcore].curr_skiplist_depth - 1; i >= 0; i--) {
		while (prev[i]->sl_next[i] != NULL &&
				prev[i]->sl_next[i] != tim &&
				prev[i]->sl_next[i]->expire <= tim->expire)
			prev[i] = prev[i]->sl_next[i];
	}
}

/* call with lock held as necessary
 * add in list
 * timer must be in config state
 * timer must not be in a list
 */
static void
timer_add(struct rte_timer *tim, unsigned int tim_lcore,
	  struct priv_timer *priv_timer)
{
	unsigned lvl;
	struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];

	/* find where exactly this element goes in the list of elements
	 * for each depth. */
	timer_get_prev_entries(tim->expire, tim_lcore, prev, priv_timer);

	/* now assign it a new level and add at that level */
	const unsigned tim_level = timer_get_skiplist_level(
			priv_timer[tim_lcore].curr_skiplist_depth);
	if (tim_level == priv_timer[tim_lcore].curr_skiplist_depth)
		priv_timer[tim_lcore].curr_skiplist_depth++;

	lvl = tim_level;
	while (lvl > 0) {
		tim->sl_next[lvl] = prev[lvl]->sl_next[lvl];
		prev[lvl]->sl_next[lvl] = tim;
		lvl--;
	}
	tim->sl_next[0] = prev[0]->sl_next[0];
	prev[0]->sl_next[0] = tim;

	/* save the lowest list entry into the expire field of the dummy hdr
	 * NOTE: this is not atomic on 32-bit */
	priv_timer[tim_lcore].pending_head.expire =
			priv_timer[tim_lcore].pending_head.sl_next[0]->expire;
}
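
/*
 * Illustrative picture of the insertion above (a sketch, not upstream
 * documentation): with curr_skiplist_depth == 3 and a new node N drawn
 * at level 1, prev[] holds the last node at each level whose expire is
 * <= N's, and N is linked in at levels 0..1 only:
 *
 *	level 2: head ------------------> C
 *	level 1: head ----> A ---> [N] -> C
 *	level 0: head -> A -> B -> [N] -> C
 */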

/*
 * del from list, lock if needed
 * timer must be in config state
 * timer must be in a list
 */
static void
timer_del(struct rte_timer *tim, union rte_timer_status prev_status,
	  int local_is_locked, struct priv_timer *priv_timer)
{
	unsigned lcore_id = rte_lcore_id();
	unsigned prev_owner = prev_status.owner;
	int i;
	struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];

	/* if the timer is pending on another core, we need to lock the
	 * list; if it is on the local core, we need to lock unless we are
	 * called from rte_timer_manage() */
	if (prev_owner != lcore_id || !local_is_locked)
		rte_spinlock_lock(&priv_timer[prev_owner].list_lock);

	/* save the lowest list entry into the expire field of the dummy hdr.
	 * NOTE: this is not atomic on 32-bit */
	if (tim == priv_timer[prev_owner].pending_head.sl_next[0])
		priv_timer[prev_owner].pending_head.expire =
				((tim->sl_next[0] == NULL) ? 0 : tim->sl_next[0]->expire);

	/* adjust pointers from previous entries to point past this */
	timer_get_prev_entries_for_node(tim, prev_owner, prev, priv_timer);
	for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--) {
		if (prev[i]->sl_next[i] == tim)
			prev[i]->sl_next[i] = tim->sl_next[i];
	}

	/* in case we deleted the last entry at a level, adjust down max level */
	for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--)
		if (priv_timer[prev_owner].pending_head.sl_next[i] == NULL)
			priv_timer[prev_owner].curr_skiplist_depth--;
		else
			break;

	if (prev_owner != lcore_id || !local_is_locked)
		rte_spinlock_unlock(&priv_timer[prev_owner].list_lock);
}

/* Reset and start the timer associated with the timer handle (private func) */
static int
__rte_timer_reset(struct rte_timer *tim, uint64_t expire,
		  uint64_t period, unsigned tim_lcore,
		  rte_timer_cb_t fct, void *arg,
		  int local_is_locked,
		  struct rte_timer_data *timer_data)
{
	union rte_timer_status prev_status, status;
	int ret;
	unsigned lcore_id = rte_lcore_id();
	struct priv_timer *priv_timer = timer_data->priv_timer;

	/* round robin for tim_lcore */
	if (tim_lcore == (unsigned)LCORE_ID_ANY) {
		if (lcore_id < RTE_MAX_LCORE) {
			/* EAL thread with valid lcore_id */
			tim_lcore = rte_get_next_lcore(
				priv_timer[lcore_id].prev_lcore,
				0, 1);
			priv_timer[lcore_id].prev_lcore = tim_lcore;
		} else
			/* non-EAL threads do not run rte_timer_manage(),
			 * so schedule the timer on the first enabled lcore. */
			tim_lcore = rte_get_next_lcore(LCORE_ID_ANY, 0, 1);
	}

	/* wait until the timer is in a correct status before updating,
	 * and mark it as being configured */
	ret = timer_set_config_state(tim, &prev_status, priv_timer);
	if (ret < 0)
		return -1;

	__TIMER_STAT_ADD(priv_timer, reset, 1);
	if (prev_status.state == RTE_TIMER_RUNNING &&
	    lcore_id < RTE_MAX_LCORE) {
		priv_timer[lcore_id].updated = 1;
	}

	/* remove it from list */
	if (prev_status.state == RTE_TIMER_PENDING) {
		timer_del(tim, prev_status, local_is_locked, priv_timer);
		__TIMER_STAT_ADD(priv_timer, pending, -1);
	}

	tim->period = period;
	tim->expire = expire;
	tim->f = fct;
	tim->arg = arg;

	/* if the timer needs to be scheduled on another core, we need to
	 * lock the destination list; if it is on the local core, we need to
	 * lock unless we are called from rte_timer_manage()
	 */
	if (tim_lcore != lcore_id || !local_is_locked)
		rte_spinlock_lock(&priv_timer[tim_lcore].list_lock);

	__TIMER_STAT_ADD(priv_timer, pending, 1);
	timer_add(tim, tim_lcore, priv_timer);

	/* update state: as we are in CONFIG state, only we can modify
	 * the state, so we don't need to use cmpset() here */
	status.state = RTE_TIMER_PENDING;
	status.owner = (int16_t)tim_lcore;
	/* The "RELEASE" ordering guarantees the memory operations above
	 * the status update are observed before the update by all threads
	 */
	__atomic_store_n(&tim->status.u32, status.u32, __ATOMIC_RELEASE);

	if (tim_lcore != lcore_id || !local_is_locked)
		rte_spinlock_unlock(&priv_timer[tim_lcore].list_lock);

	return 0;
}

/* Reset and start the timer associated with the timer handle tim */
int
rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
		      enum rte_timer_type type, unsigned int tim_lcore,
		      rte_timer_cb_t fct, void *arg)
{
	return rte_timer_alt_reset(default_data_id, tim, ticks, type,
				   tim_lcore, fct, arg);
}

int
rte_timer_alt_reset(uint32_t timer_data_id, struct rte_timer *tim,
		    uint64_t ticks, enum rte_timer_type type,
		    unsigned int tim_lcore, rte_timer_cb_t fct, void *arg)
{
	uint64_t cur_time = rte_get_timer_cycles();
	uint64_t period;
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);

	if (type == PERIODICAL)
		period = ticks;
	else
		period = 0;

	return __rte_timer_reset(tim, cur_time + ticks, period, tim_lcore,
				 fct, arg, 0, timer_data);
}

/* loop until rte_timer_reset() succeeds */
void
rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks,
		     enum rte_timer_type type, unsigned tim_lcore,
		     rte_timer_cb_t fct, void *arg)
{
	while (rte_timer_reset(tim, ticks, type, tim_lcore,
			       fct, arg) != 0)
		rte_pause();
}
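
/*
 * Illustrative usage sketch (not part of this file): arm a periodic
 * timer that fires once per second on the calling lcore.  The callback
 * name is hypothetical; rte_get_timer_hz() gives the tick rate used by
 * rte_get_timer_cycles().
 *
 *	static void hello_cb(struct rte_timer *t, void *arg)
 *	{
 *		printf("tick on lcore %u\n", rte_lcore_id());
 *	}
 *
 *	struct rte_timer tim;
 *	rte_timer_init(&tim);
 *	rte_timer_reset_sync(&tim, rte_get_timer_hz(), PERIODICAL,
 *			     rte_lcore_id(), hello_cb, NULL);
 */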

static int
__rte_timer_stop(struct rte_timer *tim, int local_is_locked,
		 struct rte_timer_data *timer_data)
{
	union rte_timer_status prev_status, status;
	unsigned lcore_id = rte_lcore_id();
	int ret;
	struct priv_timer *priv_timer = timer_data->priv_timer;

	/* wait until the timer is in a correct status before updating,
	 * and mark it as being configured */
	ret = timer_set_config_state(tim, &prev_status, priv_timer);
	if (ret < 0)
		return -1;

	__TIMER_STAT_ADD(priv_timer, stop, 1);
	if (prev_status.state == RTE_TIMER_RUNNING &&
	    lcore_id < RTE_MAX_LCORE) {
		priv_timer[lcore_id].updated = 1;
	}

	/* remove it from list */
	if (prev_status.state == RTE_TIMER_PENDING) {
		timer_del(tim, prev_status, local_is_locked, priv_timer);
		__TIMER_STAT_ADD(priv_timer, pending, -1);
	}

	/* mark timer as stopped */
	status.state = RTE_TIMER_STOP;
	status.owner = RTE_TIMER_NO_OWNER;
	/* The "RELEASE" ordering guarantees the memory operations above
	 * the status update are observed before the update by all threads
	 */
	__atomic_store_n(&tim->status.u32, status.u32, __ATOMIC_RELEASE);

	return 0;
}

/* Stop the timer associated with the timer handle tim */
int
rte_timer_stop(struct rte_timer *tim)
{
	return rte_timer_alt_stop(default_data_id, tim);
}

int
rte_timer_alt_stop(uint32_t timer_data_id, struct rte_timer *tim)
{
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);

	return __rte_timer_stop(tim, 0, timer_data);
}

/* loop until rte_timer_stop() succeeds */
void
rte_timer_stop_sync(struct rte_timer *tim)
{
	while (rte_timer_stop(tim) != 0)
		rte_pause();
}

/* Test the PENDING status of the timer handle tim */
int
rte_timer_pending(struct rte_timer *tim)
{
	return __atomic_load_n(&tim->status.state,
				__ATOMIC_RELAXED) == RTE_TIMER_PENDING;
}

/* must be called periodically; runs all timers that have expired */
static void
__rte_timer_manage(struct rte_timer_data *timer_data)
{
	union rte_timer_status status;
	struct rte_timer *tim, *next_tim;
	struct rte_timer *run_first_tim, **pprev;
	unsigned lcore_id = rte_lcore_id();
	struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
	uint64_t cur_time;
	int i, ret;
	struct priv_timer *priv_timer = timer_data->priv_timer;

	/* timer manager only runs on EAL thread with valid lcore_id */
	assert(lcore_id < RTE_MAX_LCORE);

	__TIMER_STAT_ADD(priv_timer, manage, 1);
	/* optimize for the case where per-cpu list is empty */
	if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL)
		return;
	cur_time = rte_get_timer_cycles();

#ifdef RTE_ARCH_64
	/* on 64-bit the value cached in pending_head.expire will be
	 * updated atomically, so we can consult that for a quick check here
	 * outside the lock */
	if (likely(priv_timer[lcore_id].pending_head.expire > cur_time))
		return;
#endif

	/* browse ordered list, add expired timers in 'expired' list */
	rte_spinlock_lock(&priv_timer[lcore_id].list_lock);

	/* if nothing to do just unlock and return */
	if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL ||
	    priv_timer[lcore_id].pending_head.sl_next[0]->expire > cur_time) {
		rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
		return;
	}

	/* save start of list of expired timers */
	tim = priv_timer[lcore_id].pending_head.sl_next[0];

	/* break the existing list at current time point */
	timer_get_prev_entries(cur_time, lcore_id, prev, priv_timer);
	for (i = priv_timer[lcore_id].curr_skiplist_depth - 1; i >= 0; i--) {
		if (prev[i] == &priv_timer[lcore_id].pending_head)
			continue;
		priv_timer[lcore_id].pending_head.sl_next[i] =
		    prev[i]->sl_next[i];
		if (prev[i]->sl_next[i] == NULL)
			priv_timer[lcore_id].curr_skiplist_depth--;
		prev[i]->sl_next[i] = NULL;
	}

	/* transition run-list from PENDING to RUNNING */
	run_first_tim = tim;
	pprev = &run_first_tim;

	for ( ; tim != NULL; tim = next_tim) {
		next_tim = tim->sl_next[0];

		ret = timer_set_running_state(tim);
		if (likely(ret == 0)) {
			pprev = &tim->sl_next[0];
		} else {
			/* another core is trying to re-config this one,
			 * remove it from local expired list
			 */
			*pprev = next_tim;
		}
	}

	/* update the next to expire timer value */
	priv_timer[lcore_id].pending_head.expire =
	    (priv_timer[lcore_id].pending_head.sl_next[0] == NULL) ? 0 :
		priv_timer[lcore_id].pending_head.sl_next[0]->expire;

	rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);

	/* now scan expired list and call callbacks */
	for (tim = run_first_tim; tim != NULL; tim = next_tim) {
		next_tim = tim->sl_next[0];
		priv_timer[lcore_id].updated = 0;
		priv_timer[lcore_id].running_tim = tim;

		/* execute callback function with list unlocked */
		tim->f(tim, tim->arg);

		__TIMER_STAT_ADD(priv_timer, pending, -1);
		/* the timer was stopped or reloaded by the callback
		 * function, we have nothing to do here */
		if (priv_timer[lcore_id].updated == 1)
			continue;

		if (tim->period == 0) {
			/* remove from done list and mark timer as stopped */
			status.state = RTE_TIMER_STOP;
			status.owner = RTE_TIMER_NO_OWNER;
			/* The "RELEASE" ordering guarantees the memory
			 * operations above the status update are observed
			 * before the update by all threads
			 */
			__atomic_store_n(&tim->status.u32, status.u32,
				__ATOMIC_RELEASE);
		} else {
			/* keep it in list and mark timer as pending */
			rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
			status.state = RTE_TIMER_PENDING;
			__TIMER_STAT_ADD(priv_timer, pending, 1);
			status.owner = (int16_t)lcore_id;
			/* The "RELEASE" ordering guarantees the memory
			 * operations above the status update are observed
			 * before the update by all threads
			 */
			__atomic_store_n(&tim->status.u32, status.u32,
				__ATOMIC_RELEASE);
			__rte_timer_reset(tim, tim->expire + tim->period,
				tim->period, lcore_id, tim->f, tim->arg, 1,
				timer_data);
			rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
		}
	}
	priv_timer[lcore_id].running_tim = NULL;
}

int
rte_timer_manage(void)
{
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(default_data_id, timer_data, -EINVAL);

	__rte_timer_manage(timer_data);

	return 0;
}
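
/*
 * Illustrative usage sketch (not part of this file): each EAL worker
 * lcore services its own pending list by calling rte_timer_manage()
 * from its main loop.  The loop body and the force_quit flag are
 * hypothetical.
 *
 *	static int worker_main(void *arg)
 *	{
 *		while (!force_quit) {
 *			// ... do packet processing ...
 *			rte_timer_manage();
 *		}
 *		return 0;
 *	}
 */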

int
rte_timer_alt_manage(uint32_t timer_data_id,
		     unsigned int *poll_lcores,
		     int nb_poll_lcores,
		     rte_timer_alt_manage_cb_t f)
{
	unsigned int default_poll_lcores[] = {rte_lcore_id()};
	union rte_timer_status status;
	struct rte_timer *tim, *next_tim, **pprev;
	struct rte_timer *run_first_tims[RTE_MAX_LCORE];
	unsigned int this_lcore = rte_lcore_id();
	struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
	uint64_t cur_time;
	int i, j, ret;
	int nb_runlists = 0;
	struct rte_timer_data *data;
	struct priv_timer *privp;
	uint32_t poll_lcore;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, data, -EINVAL);

	/* timer manager only runs on EAL thread with valid lcore_id */
	assert(this_lcore < RTE_MAX_LCORE);

	__TIMER_STAT_ADD(data->priv_timer, manage, 1);

	if (poll_lcores == NULL) {
		poll_lcores = default_poll_lcores;
		nb_poll_lcores = RTE_DIM(default_poll_lcores);
	}

	for (i = 0; i < nb_poll_lcores; i++) {
		poll_lcore = poll_lcores[i];
		privp = &data->priv_timer[poll_lcore];

		/* optimize for the case where per-cpu list is empty */
		if (privp->pending_head.sl_next[0] == NULL)
			continue;
		cur_time = rte_get_timer_cycles();

#ifdef RTE_ARCH_64
		/* on 64-bit the value cached in pending_head.expire will
		 * be updated atomically, so we can consult that for a quick
		 * check here outside the lock
		 */
		if (likely(privp->pending_head.expire > cur_time))
			continue;
#endif

		/* browse ordered list, add expired timers in 'expired' list */
		rte_spinlock_lock(&privp->list_lock);

		/* if nothing to do just unlock and continue */
		if (privp->pending_head.sl_next[0] == NULL ||
		    privp->pending_head.sl_next[0]->expire > cur_time) {
			rte_spinlock_unlock(&privp->list_lock);
			continue;
		}

		/* save start of list of expired timers */
		tim = privp->pending_head.sl_next[0];

		/* break the existing list at current time point */
		timer_get_prev_entries(cur_time, poll_lcore, prev,
				       data->priv_timer);
		for (j = privp->curr_skiplist_depth - 1; j >= 0; j--) {
			if (prev[j] == &privp->pending_head)
				continue;
			privp->pending_head.sl_next[j] =
				prev[j]->sl_next[j];
			if (prev[j]->sl_next[j] == NULL)
				privp->curr_skiplist_depth--;

			prev[j]->sl_next[j] = NULL;
		}

		/* transition run-list from PENDING to RUNNING */
		run_first_tims[nb_runlists] = tim;
		pprev = &run_first_tims[nb_runlists];
		nb_runlists++;

		for ( ; tim != NULL; tim = next_tim) {
			next_tim = tim->sl_next[0];

			ret = timer_set_running_state(tim);
			if (likely(ret == 0)) {
				pprev = &tim->sl_next[0];
			} else {
				/* another core is trying to re-config this one,
				 * remove it from local expired list
				 */
				*pprev = next_tim;
			}
		}

		/* update the next to expire timer value */
		privp->pending_head.expire =
		    (privp->pending_head.sl_next[0] == NULL) ? 0 :
			privp->pending_head.sl_next[0]->expire;

		rte_spinlock_unlock(&privp->list_lock);
	}

	/* Now process the run lists */
	while (1) {
		bool done = true;
		uint64_t min_expire = UINT64_MAX;
		int min_idx = 0;

		/* Find the next oldest timer to process */
		for (i = 0; i < nb_runlists; i++) {
			tim = run_first_tims[i];

			if (tim != NULL && tim->expire < min_expire) {
				min_expire = tim->expire;
				min_idx = i;
				done = false;
			}
		}

		if (done)
			break;

		tim = run_first_tims[min_idx];

		/* Move down the runlist from which we picked a timer to
		 * execute
		 */
		run_first_tims[min_idx] = run_first_tims[min_idx]->sl_next[0];

		data->priv_timer[this_lcore].updated = 0;
		data->priv_timer[this_lcore].running_tim = tim;

		/* Call the provided callback function */
		f(tim);

		__TIMER_STAT_ADD(data->priv_timer, pending, -1);

		/* the timer was stopped or reloaded by the callback
		 * function, we have nothing to do here
		 */
		if (data->priv_timer[this_lcore].updated == 1)
			continue;

		if (tim->period == 0) {
			/* remove from done list and mark timer as stopped */
			status.state = RTE_TIMER_STOP;
			status.owner = RTE_TIMER_NO_OWNER;
			/* The "RELEASE" ordering guarantees the memory
			 * operations above the status update are observed
			 * before the update by all threads
			 */
			__atomic_store_n(&tim->status.u32, status.u32,
				__ATOMIC_RELEASE);
		} else {
			/* keep it in list and mark timer as pending */
			rte_spinlock_lock(
				&data->priv_timer[this_lcore].list_lock);
			status.state = RTE_TIMER_PENDING;
			__TIMER_STAT_ADD(data->priv_timer, pending, 1);
			status.owner = (int16_t)this_lcore;
			/* The "RELEASE" ordering guarantees the memory
			 * operations above the status update are observed
			 * before the update by all threads
			 */
			__atomic_store_n(&tim->status.u32, status.u32,
				__ATOMIC_RELEASE);
			__rte_timer_reset(tim, tim->expire + tim->period,
				tim->period, this_lcore, tim->f, tim->arg, 1,
				data);
			rte_spinlock_unlock(
				&data->priv_timer[this_lcore].list_lock);
		}

		data->priv_timer[this_lcore].running_tim = NULL;
	}

	return 0;
}

/* Walk pending lists, stopping timers and calling user-specified function */
int
rte_timer_stop_all(uint32_t timer_data_id, unsigned int *walk_lcores,
		   int nb_walk_lcores,
		   rte_timer_stop_all_cb_t f, void *f_arg)
{
	int i;
	struct priv_timer *priv_timer;
	uint32_t walk_lcore;
	struct rte_timer *tim, *next_tim;
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);

	for (i = 0; i < nb_walk_lcores; i++) {
		walk_lcore = walk_lcores[i];
		priv_timer = &timer_data->priv_timer[walk_lcore];

		rte_spinlock_lock(&priv_timer->list_lock);

		for (tim = priv_timer->pending_head.sl_next[0];
		     tim != NULL;
		     tim = next_tim) {
			next_tim = tim->sl_next[0];

			/* Call timer_stop with lock held */
			__rte_timer_stop(tim, 1, timer_data);

			if (f)
				f(tim, f_arg);
		}

		rte_spinlock_unlock(&priv_timer->list_lock);
	}

	return 0;
}

int64_t
rte_timer_next_ticks(void)
{
	unsigned int lcore_id = rte_lcore_id();
	struct rte_timer_data *timer_data;
	struct priv_timer *priv_timer;
	const struct rte_timer *tm;
	uint64_t cur_time;
	int64_t left = -ENOENT;

	TIMER_DATA_VALID_GET_OR_ERR_RET(default_data_id, timer_data, -EINVAL);

	priv_timer = timer_data->priv_timer;
	cur_time = rte_get_timer_cycles();

	rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
	tm = priv_timer[lcore_id].pending_head.sl_next[0];
	if (tm) {
		left = tm->expire - cur_time;
		if (left < 0)
			left = 0;
	}
	rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);

	return left;
}
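
/*
 * Illustrative usage sketch (not part of this file): a power-conscious
 * poll loop can use rte_timer_next_ticks() to decide how long it may
 * sleep before the next expiry on this lcore.  The conversion below is
 * approximate and the loop is hypothetical.
 *
 *	int64_t ticks = rte_timer_next_ticks();
 *	if (ticks > 0) {
 *		uint64_t us = ticks * 1000000 / rte_get_timer_hz();
 *		rte_delay_us_sleep(us);
 *	}
 *	rte_timer_manage();
 */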

/* dump statistics about timers */
static void
__rte_timer_dump_stats(struct rte_timer_data *timer_data __rte_unused, FILE *f)
{
#ifdef RTE_LIBRTE_TIMER_DEBUG
	struct rte_timer_debug_stats sum;
	unsigned lcore_id;
	struct priv_timer *priv_timer = timer_data->priv_timer;

	memset(&sum, 0, sizeof(sum));
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		sum.reset += priv_timer[lcore_id].stats.reset;
		sum.stop += priv_timer[lcore_id].stats.stop;
		sum.manage += priv_timer[lcore_id].stats.manage;
		sum.pending += priv_timer[lcore_id].stats.pending;
	}
	fprintf(f, "Timer statistics:\n");
	fprintf(f, "  reset = %"PRIu64"\n", sum.reset);
	fprintf(f, "  stop = %"PRIu64"\n", sum.stop);
	fprintf(f, "  manage = %"PRIu64"\n", sum.manage);
	fprintf(f, "  pending = %"PRIu64"\n", sum.pending);
#else
	fprintf(f, "No timer statistics, RTE_LIBRTE_TIMER_DEBUG is disabled\n");
#endif
}

int
rte_timer_dump_stats(FILE *f)
{
	return rte_timer_alt_dump_stats(default_data_id, f);
}

int
rte_timer_alt_dump_stats(uint32_t timer_data_id __rte_unused, FILE *f)
{
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);

	__rte_timer_dump_stats(timer_data, f);

	return 0;
}