xref: /f-stack/dpdk/lib/librte_timer/rte_timer.c (revision fe450452)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <string.h>
35 #include <stdio.h>
36 #include <stdint.h>
37 #include <inttypes.h>
38 #include <assert.h>
39 #include <sys/queue.h>
40 
41 #include <rte_atomic.h>
42 #include <rte_common.h>
43 #include <rte_cycles.h>
44 #include <rte_per_lcore.h>
45 #include <rte_memory.h>
46 #include <rte_launch.h>
47 #include <rte_eal.h>
48 #include <rte_lcore.h>
49 #include <rte_branch_prediction.h>
50 #include <rte_spinlock.h>
51 #include <rte_random.h>
52 #include <rte_pause.h>
53 
54 #include "rte_timer.h"
55 
56 LIST_HEAD(rte_timer_list, rte_timer);
57 
58 struct priv_timer {
59 	struct rte_timer pending_head;  /**< dummy timer instance to head up list */
60 	rte_spinlock_t list_lock;       /**< lock to protect list access */
61 
62 	/** per-core variable that is true if a timer was updated on this
63 	 *  core since the last reset of the variable */
64 	int updated;
65 
66 	/** track the current depth of the skiplist */
67 	unsigned curr_skiplist_depth;
68 
69 	unsigned prev_lcore;              /**< used for lcore round robin */
70 
71 	/** running timer on this lcore now */
72 	struct rte_timer *running_tim;
73 
74 #ifdef RTE_LIBRTE_TIMER_DEBUG
75 	/** per-lcore statistics */
76 	struct rte_timer_debug_stats stats;
77 #endif
78 } __rte_cache_aligned;
79 
80 /** per-lcore private info for timers */
81 static struct priv_timer priv_timer[RTE_MAX_LCORE];
82 
83 /* when debug is enabled, store some statistics */
84 #ifdef RTE_LIBRTE_TIMER_DEBUG
85 #define __TIMER_STAT_ADD(name, n) do {					\
86 		unsigned __lcore_id = rte_lcore_id();			\
87 		if (__lcore_id < RTE_MAX_LCORE)				\
88 			priv_timer[__lcore_id].stats.name += (n);	\
89 	} while(0)
90 #else
91 #define __TIMER_STAT_ADD(name, n) do {} while(0)
92 #endif
93 
94 /* Init the timer library. */
95 void
96 rte_timer_subsystem_init(void)
97 {
98 	unsigned lcore_id;
99 
100 	/* since priv_timer is static, it's zeroed by default, so only init some
101 	 * fields.
102 	 */
103 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++) {
104 		rte_spinlock_init(&priv_timer[lcore_id].list_lock);
105 		priv_timer[lcore_id].prev_lcore = lcore_id;
106 	}
107 }
108 
109 /* Initialize the timer handle tim for use */
110 void
111 rte_timer_init(struct rte_timer *tim)
112 {
113 	union rte_timer_status status;
114 
115 	status.state = RTE_TIMER_STOP;
116 	status.owner = RTE_TIMER_NO_OWNER;
117 	tim->status.u32 = status.u32;
118 }
119 
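/*
 * Illustrative set-up sketch (not part of the library): what an application
 * typically does before arming any timer; see the further sketches after
 * rte_timer_reset() and rte_timer_manage() below. The handle name is
 * hypothetical.
 *
 *	rte_timer_subsystem_init();        // once, at start-up
 *
 *	static struct rte_timer my_tim;    // hypothetical timer handle
 *	rte_timer_init(&my_tim);           // once per timer handle
 */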
120 /*
121  * if the timer is pending or stopped (or running on the same core as
122  * us), mark it as being configured, and on success return the previous
123  * status of the timer
124  */
125 static int
126 timer_set_config_state(struct rte_timer *tim,
127 		       union rte_timer_status *ret_prev_status)
128 {
129 	union rte_timer_status prev_status, status;
130 	int success = 0;
131 	unsigned lcore_id;
132 
133 	lcore_id = rte_lcore_id();
134 
135 	/* wait until the timer is in a correct state before updating,
136 	 * and mark it as being configured */
137 	while (success == 0) {
138 		prev_status.u32 = tim->status.u32;
139 
140 		/* timer is running on another core
141 		 * or ready to run on local core, exit
142 		 */
143 		if (prev_status.state == RTE_TIMER_RUNNING &&
144 		    (prev_status.owner != (uint16_t)lcore_id ||
145 		     tim != priv_timer[lcore_id].running_tim))
146 			return -1;
147 
148 		/* timer is being configured on another core */
149 		if (prev_status.state == RTE_TIMER_CONFIG)
150 			return -1;
151 
152 		/* here, we know that the timer is stopped or pending;
153 		 * mark it atomically as being configured */
154 		status.state = RTE_TIMER_CONFIG;
155 		status.owner = (int16_t)lcore_id;
156 		success = rte_atomic32_cmpset(&tim->status.u32,
157 					      prev_status.u32,
158 					      status.u32);
159 	}
160 
161 	ret_prev_status->u32 = prev_status.u32;
162 	return 0;
163 }
164 
165 /*
166  * if timer is pending, mark timer as running
167  */
168 static int
169 timer_set_running_state(struct rte_timer *tim)
170 {
171 	union rte_timer_status prev_status, status;
172 	unsigned lcore_id = rte_lcore_id();
173 	int success = 0;
174 
175 	/* wait until the timer is in a correct state before updating,
176 	 * and mark it as running */
177 	while (success == 0) {
178 		prev_status.u32 = tim->status.u32;
179 
180 		/* timer is not pending anymore */
181 		if (prev_status.state != RTE_TIMER_PENDING)
182 			return -1;
183 
184 		/* here, we know that the timer is pending;
185 		 * mark it atomically as running */
186 		status.state = RTE_TIMER_RUNNING;
187 		status.owner = (int16_t)lcore_id;
188 		success = rte_atomic32_cmpset(&tim->status.u32,
189 					      prev_status.u32,
190 					      status.u32);
191 	}
192 
193 	return 0;
194 }
195 
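/*
 * Informal summary of the state machine driven by the two helpers above
 * (derived from the code itself, not from separate documentation):
 *
 *	STOP or PENDING --timer_set_config_state()-->  CONFIG  --> PENDING/STOP
 *	PENDING         --timer_set_running_state()--> RUNNING --> STOP/PENDING
 *
 * Both helpers retry rte_atomic32_cmpset() so that the 32-bit status word
 * (state + owner) is always replaced in a single atomic operation.
 */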
196 /*
197  * Return a skiplist level for a new entry.
198  * This probabilistically gives a level with p=1/4 that an entry at level n
199  * will also appear at level n+1.
200  */
201 static uint32_t
202 timer_get_skiplist_level(unsigned curr_depth)
203 {
204 #ifdef RTE_LIBRTE_TIMER_DEBUG
205 	static uint32_t i, count = 0;
206 	static uint32_t levels[MAX_SKIPLIST_DEPTH] = {0};
207 #endif
208 
209 	/* probability value is 1/4, i.e. all at level 0, 1 in 4 is at level 1,
210 	 * 1 in 16 at level 2, 1 in 64 at level 3, etc. Calculated using lowest
211 	 * bit position of a (pseudo)random number.
212 	 */
213 	uint32_t rand = rte_rand() & (UINT32_MAX - 1);
214 	uint32_t level = rand == 0 ? MAX_SKIPLIST_DEPTH : (rte_bsf32(rand)-1) / 2;
215 
216 	/* limit the levels used to one above our current level, so we don't,
217 	 * for instance, have a level 0 and a level 7 without anything between
218 	 */
219 	if (level > curr_depth)
220 		level = curr_depth;
221 	if (level >= MAX_SKIPLIST_DEPTH)
222 		level = MAX_SKIPLIST_DEPTH-1;
223 #ifdef RTE_LIBRTE_TIMER_DEBUG
224 	count++;
225 	levels[level]++;
226 	if (count % 10000 == 0)
227 		for (i = 0; i < MAX_SKIPLIST_DEPTH; i++)
228 			printf("Level %u: %u\n", (unsigned)i, (unsigned)levels[i]);
229 #endif
230 	return level;
231 }
232 
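/*
 * Worked example for the level computation above (the numbers follow
 * directly from the code): bit 0 of the random value is masked off, so its
 * lowest set bit is at position 1 or higher. Positions 1-2 give level
 * (1-1)/2 = (2-1)/2 = 0 (probability ~3/4), positions 3-4 give level 1
 * (~3/16), positions 5-6 give level 2, and so on: each extra level is
 * reached about a quarter as often as the previous one.
 */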
233 /*
234  * For a given time value, get the last entry at each level whose
235  * expiry time is <= that time value.
236  */
237 static void
238 timer_get_prev_entries(uint64_t time_val, unsigned tim_lcore,
239 		struct rte_timer **prev)
240 {
241 	unsigned lvl = priv_timer[tim_lcore].curr_skiplist_depth;
242 	prev[lvl] = &priv_timer[tim_lcore].pending_head;
243 	while (lvl != 0) {
244 		lvl--;
245 		prev[lvl] = prev[lvl+1];
246 		while (prev[lvl]->sl_next[lvl] &&
247 				prev[lvl]->sl_next[lvl]->expire <= time_val)
248 			prev[lvl] = prev[lvl]->sl_next[lvl];
249 	}
250 }
251 
252 /*
253  * Given a timer node in the skiplist, find the previous entries for it at
254  * all skiplist levels.
255  */
256 static void
257 timer_get_prev_entries_for_node(struct rte_timer *tim, unsigned tim_lcore,
258 		struct rte_timer **prev)
259 {
260 	int i;
261 	/* to get a specific entry in the list, look for the entries just lower
262 	 * than its expiry time, then advance on each level individually if necessary
263 	 */
264 	timer_get_prev_entries(tim->expire - 1, tim_lcore, prev);
265 	for (i = priv_timer[tim_lcore].curr_skiplist_depth - 1; i >= 0; i--) {
266 		while (prev[i]->sl_next[i] != NULL &&
267 				prev[i]->sl_next[i] != tim &&
268 				prev[i]->sl_next[i]->expire <= tim->expire)
269 			prev[i] = prev[i]->sl_next[i];
270 	}
271 }
272 
273 /*
274  * add in list, lock if needed
275  * timer must be in config state
276  * timer must not be in a list
277  */
278 static void
279 timer_add(struct rte_timer *tim, unsigned tim_lcore, int local_is_locked)
280 {
281 	unsigned lcore_id = rte_lcore_id();
282 	unsigned lvl;
283 	struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];
284 
285 	/* if timer needs to be scheduled on another core, we need to
286 	 * lock the list; if it is on local core, we need to lock if
287 	 * we are not called from rte_timer_manage() */
288 	if (tim_lcore != lcore_id || !local_is_locked)
289 		rte_spinlock_lock(&priv_timer[tim_lcore].list_lock);
290 
291 	/* find where exactly this element goes in the list of elements
292 	 * for each depth. */
293 	timer_get_prev_entries(tim->expire, tim_lcore, prev);
294 
295 	/* now assign it a new level and add at that level */
296 	const unsigned tim_level = timer_get_skiplist_level(
297 			priv_timer[tim_lcore].curr_skiplist_depth);
298 	if (tim_level == priv_timer[tim_lcore].curr_skiplist_depth)
299 		priv_timer[tim_lcore].curr_skiplist_depth++;
300 
301 	lvl = tim_level;
302 	while (lvl > 0) {
303 		tim->sl_next[lvl] = prev[lvl]->sl_next[lvl];
304 		prev[lvl]->sl_next[lvl] = tim;
305 		lvl--;
306 	}
307 	tim->sl_next[0] = prev[0]->sl_next[0];
308 	prev[0]->sl_next[0] = tim;
309 
310 	/* save the lowest list entry into the expire field of the dummy hdr
311 	 * NOTE: this is not atomic on 32-bit */
312 	priv_timer[tim_lcore].pending_head.expire = priv_timer[tim_lcore].\
313 			pending_head.sl_next[0]->expire;
314 
315 	if (tim_lcore != lcore_id || !local_is_locked)
316 		rte_spinlock_unlock(&priv_timer[tim_lcore].list_lock);
317 }
318 
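/*
 * Small worked insertion example (illustrative values): suppose the list
 * holds expiries {10, 30} at level 0 and {30} at level 1, and a timer with
 * expire == 20 is added and assigned tim_level == 1. timer_get_prev_entries()
 * then returns prev[0] == the node at 10 and prev[1] == pending_head, and the
 * splice loop above produces 10 -> 20 -> 30 at level 0 and
 * pending_head -> 20 -> 30 at level 1.
 */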
319 /*
320  * del from list, lock if needed
321  * timer must be in config state
322  * timer must be in a list
323  */
324 static void
325 timer_del(struct rte_timer *tim, union rte_timer_status prev_status,
326 		int local_is_locked)
327 {
328 	unsigned lcore_id = rte_lcore_id();
329 	unsigned prev_owner = prev_status.owner;
330 	int i;
331 	struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];
332 
333 	/* if the timer is pending on another core, we need to lock the
334 	 * list; if it is on the local core, we need to lock if we are not
335 	 * called from rte_timer_manage() */
336 	if (prev_owner != lcore_id || !local_is_locked)
337 		rte_spinlock_lock(&priv_timer[prev_owner].list_lock);
338 
339 	/* save the lowest list entry into the expire field of the dummy hdr.
340 	 * NOTE: this is not atomic on 32-bit */
341 	if (tim == priv_timer[prev_owner].pending_head.sl_next[0])
342 		priv_timer[prev_owner].pending_head.expire =
343 				((tim->sl_next[0] == NULL) ? 0 : tim->sl_next[0]->expire);
344 
345 	/* adjust pointers from previous entries to point past this */
346 	timer_get_prev_entries_for_node(tim, prev_owner, prev);
347 	for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--) {
348 		if (prev[i]->sl_next[i] == tim)
349 			prev[i]->sl_next[i] = tim->sl_next[i];
350 	}
351 
352 	/* in case we deleted last entry at a level, adjust down max level */
353 	for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--)
354 		if (priv_timer[prev_owner].pending_head.sl_next[i] == NULL)
355 			priv_timer[prev_owner].curr_skiplist_depth--;
356 		else
357 			break;
358 
359 	if (prev_owner != lcore_id || !local_is_locked)
360 		rte_spinlock_unlock(&priv_timer[prev_owner].list_lock);
361 }
362 
363 /* Reset and start the timer associated with the timer handle (private func) */
364 static int
365 __rte_timer_reset(struct rte_timer *tim, uint64_t expire,
366 		  uint64_t period, unsigned tim_lcore,
367 		  rte_timer_cb_t fct, void *arg,
368 		  int local_is_locked)
369 {
370 	union rte_timer_status prev_status, status;
371 	int ret;
372 	unsigned lcore_id = rte_lcore_id();
373 
374 	/* round robin for tim_lcore */
375 	if (tim_lcore == (unsigned)LCORE_ID_ANY) {
376 		if (lcore_id < RTE_MAX_LCORE) {
377 			/* EAL thread with valid lcore_id */
378 			tim_lcore = rte_get_next_lcore(
379 				priv_timer[lcore_id].prev_lcore,
380 				0, 1);
381 			priv_timer[lcore_id].prev_lcore = tim_lcore;
382 		} else
383 			/* non-EAL threads do not run rte_timer_manage(),
384 			 * so schedule the timer on the first enabled lcore. */
385 			tim_lcore = rte_get_next_lcore(LCORE_ID_ANY, 0, 1);
386 	}
387 
388 	/* wait until the timer is in a correct state before updating,
389 	 * and mark it as being configured */
390 	ret = timer_set_config_state(tim, &prev_status);
391 	if (ret < 0)
392 		return -1;
393 
394 	__TIMER_STAT_ADD(reset, 1);
395 	if (prev_status.state == RTE_TIMER_RUNNING &&
396 	    lcore_id < RTE_MAX_LCORE) {
397 		priv_timer[lcore_id].updated = 1;
398 	}
399 
400 	/* remove it from list */
401 	if (prev_status.state == RTE_TIMER_PENDING) {
402 		timer_del(tim, prev_status, local_is_locked);
403 		__TIMER_STAT_ADD(pending, -1);
404 	}
405 
406 	tim->period = period;
407 	tim->expire = expire;
408 	tim->f = fct;
409 	tim->arg = arg;
410 
411 	__TIMER_STAT_ADD(pending, 1);
412 	timer_add(tim, tim_lcore, local_is_locked);
413 
414 	/* update state: as we are in CONFIG state, only we can modify
415 	 * the state, so we don't need to use cmpset() here */
416 	rte_wmb();
417 	status.state = RTE_TIMER_PENDING;
418 	status.owner = (int16_t)tim_lcore;
419 	tim->status.u32 = status.u32;
420 
421 	return 0;
422 }
423 
424 /* Reset and start the timer associated with the timer handle tim */
425 int
426 rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
427 		enum rte_timer_type type, unsigned tim_lcore,
428 		rte_timer_cb_t fct, void *arg)
429 {
430 	uint64_t cur_time = rte_get_timer_cycles();
431 	uint64_t period;
432 
433 	if (unlikely((tim_lcore != (unsigned)LCORE_ID_ANY) &&
434 			!(rte_lcore_is_enabled(tim_lcore) ||
435 			  rte_lcore_has_role(tim_lcore, ROLE_SERVICE) == 0)))
436 		return -1;
437 
438 	if (type == PERIODICAL)
439 		period = ticks;
440 	else
441 		period = 0;
442 
443 	return __rte_timer_reset(tim,  cur_time + ticks, period, tim_lcore,
444 			  fct, arg, 0);
445 }
446 
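/*
 * Usage sketch (illustrative, with arbitrary 10 ms / 1 s values and
 * hypothetical handles and callbacks): SINGLE timers fire once after 'ticks'
 * cycles, PERIODICAL timers are re-armed automatically by rte_timer_manage()
 * every 'ticks' cycles.
 *
 *	uint64_t hz = rte_get_timer_hz();
 *
 *	rte_timer_reset(&oneshot_tim, hz / 100, SINGLE,
 *			rte_lcore_id(), oneshot_cb, NULL);     // ~10 ms, once
 *	rte_timer_reset(&periodic_tim, hz, PERIODICAL,
 *			rte_lcore_id(), periodic_cb, NULL);    // every ~1 s
 */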
447 /* loop until rte_timer_reset() succeeds */
448 void
449 rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks,
450 		     enum rte_timer_type type, unsigned tim_lcore,
451 		     rte_timer_cb_t fct, void *arg)
452 {
453 	while (rte_timer_reset(tim, ticks, type, tim_lcore,
454 			       fct, arg) != 0)
455 		rte_pause();
456 }
457 
458 /* Stop the timer associated with the timer handle tim */
459 int
460 rte_timer_stop(struct rte_timer *tim)
461 {
462 	union rte_timer_status prev_status, status;
463 	unsigned lcore_id = rte_lcore_id();
464 	int ret;
465 
466 	/* wait until the timer is in a correct state before updating,
467 	 * and mark it as being configured */
468 	ret = timer_set_config_state(tim, &prev_status);
469 	if (ret < 0)
470 		return -1;
471 
472 	__TIMER_STAT_ADD(stop, 1);
473 	if (prev_status.state == RTE_TIMER_RUNNING &&
474 	    lcore_id < RTE_MAX_LCORE) {
475 		priv_timer[lcore_id].updated = 1;
476 	}
477 
478 	/* remove it from list */
479 	if (prev_status.state == RTE_TIMER_PENDING) {
480 		timer_del(tim, prev_status, 0);
481 		__TIMER_STAT_ADD(pending, -1);
482 	}
483 
484 	/* mark timer as stopped */
485 	rte_wmb();
486 	status.state = RTE_TIMER_STOP;
487 	status.owner = RTE_TIMER_NO_OWNER;
488 	tim->status.u32 = status.u32;
489 
490 	return 0;
491 }
492 
493 /* loop until rte_timer_stop() succeeds */
494 void
495 rte_timer_stop_sync(struct rte_timer *tim)
496 {
497 	while (rte_timer_stop(tim) != 0)
498 		rte_pause();
499 }
500 
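/*
 * Teardown sketch (illustrative): rte_timer_stop_sync() keeps retrying while
 * another lcore is still executing the callback, so once it returns the
 * handle can safely be reused or its memory released.
 *
 *	rte_timer_stop_sync(&my_tim);   // my_tim is the hypothetical handle
 *	                                // from the set-up sketch above
 */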
501 /* Test the PENDING status of the timer handle tim */
502 int
503 rte_timer_pending(struct rte_timer *tim)
504 {
505 	return tim->status.state == RTE_TIMER_PENDING;
506 }
507 
508 /* must be called periodically, run all timers that have expired */
509 void rte_timer_manage(void)
510 {
511 	union rte_timer_status status;
512 	struct rte_timer *tim, *next_tim;
513 	struct rte_timer *run_first_tim, **pprev;
514 	unsigned lcore_id = rte_lcore_id();
515 	struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
516 	uint64_t cur_time;
517 	int i, ret;
518 
519 	/* timer manager only runs on EAL thread with valid lcore_id */
520 	assert(lcore_id < RTE_MAX_LCORE);
521 
522 	__TIMER_STAT_ADD(manage, 1);
523 	/* optimize for the case where the per-lcore list is empty */
524 	if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL)
525 		return;
526 	cur_time = rte_get_timer_cycles();
527 
528 #ifdef RTE_ARCH_64
529 	/* on 64-bit the value cached in pending_head.expire will be
530 	 * updated atomically, so we can consult that for a quick check here
531 	 * outside the lock */
532 	if (likely(priv_timer[lcore_id].pending_head.expire > cur_time))
533 		return;
534 #endif
535 
536 	/* browse ordered list, add expired timers in 'expired' list */
537 	rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
538 
539 	/* if nothing to do just unlock and return */
540 	if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL ||
541 	    priv_timer[lcore_id].pending_head.sl_next[0]->expire > cur_time) {
542 		rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
543 		return;
544 	}
545 
546 	/* save start of list of expired timers */
547 	tim = priv_timer[lcore_id].pending_head.sl_next[0];
548 
549 	/* break the existing list at current time point */
550 	timer_get_prev_entries(cur_time, lcore_id, prev);
551 	for (i = priv_timer[lcore_id].curr_skiplist_depth - 1; i >= 0; i--) {
552 		if (prev[i] == &priv_timer[lcore_id].pending_head)
553 			continue;
554 		priv_timer[lcore_id].pending_head.sl_next[i] =
555 		    prev[i]->sl_next[i];
556 		if (prev[i]->sl_next[i] == NULL)
557 			priv_timer[lcore_id].curr_skiplist_depth--;
558 		prev[i]->sl_next[i] = NULL;
559 	}
560 
561 	/* transition run-list from PENDING to RUNNING */
562 	run_first_tim = tim;
563 	pprev = &run_first_tim;
564 
565 	for ( ; tim != NULL; tim = next_tim) {
566 		next_tim = tim->sl_next[0];
567 
568 		ret = timer_set_running_state(tim);
569 		if (likely(ret == 0)) {
570 			pprev = &tim->sl_next[0];
571 		} else {
572 			/* another core is trying to re-config this one,
573 			 * remove it from local expired list
574 			 */
575 			*pprev = next_tim;
576 		}
577 	}
578 
579 	/* update the next to expire timer value */
580 	priv_timer[lcore_id].pending_head.expire =
581 	    (priv_timer[lcore_id].pending_head.sl_next[0] == NULL) ? 0 :
582 		priv_timer[lcore_id].pending_head.sl_next[0]->expire;
583 
584 	rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
585 
586 	/* now scan expired list and call callbacks */
587 	for (tim = run_first_tim; tim != NULL; tim = next_tim) {
588 		next_tim = tim->sl_next[0];
589 		priv_timer[lcore_id].updated = 0;
590 		priv_timer[lcore_id].running_tim = tim;
591 
592 		/* execute callback function with list unlocked */
593 		tim->f(tim, tim->arg);
594 
595 		__TIMER_STAT_ADD(pending, -1);
596 		/* the timer was stopped or reloaded by the callback
597 		 * function; we have nothing to do here */
598 		if (priv_timer[lcore_id].updated == 1)
599 			continue;
600 
601 		if (tim->period == 0) {
602 			/* remove from done list and mark timer as stopped */
603 			status.state = RTE_TIMER_STOP;
604 			status.owner = RTE_TIMER_NO_OWNER;
605 			rte_wmb();
606 			tim->status.u32 = status.u32;
607 		}
608 		else {
609 			/* keep it in list and mark timer as pending */
610 			rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
611 			status.state = RTE_TIMER_PENDING;
612 			__TIMER_STAT_ADD(pending, 1);
613 			status.owner = (int16_t)lcore_id;
614 			rte_wmb();
615 			tim->status.u32 = status.u32;
616 			__rte_timer_reset(tim, tim->expire + tim->period,
617 				tim->period, lcore_id, tim->f, tim->arg, 1);
618 			rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
619 		}
620 	}
621 	priv_timer[lcore_id].running_tim = NULL;
622 }
623 
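/*
 * Main-loop sketch (illustrative): rte_timer_manage() is typically invoked
 * from each EAL worker's loop, often rate-limited with the TSC so that the
 * skiplist is not scanned on every iteration. The ~100 us resolution and the
 * 'quit' flag below are arbitrary, application-side choices.
 *
 *	uint64_t prev_tsc = 0, cur_tsc;
 *	const uint64_t resolution = rte_get_timer_hz() / 10000;   // ~100 us
 *
 *	while (!quit) {
 *		cur_tsc = rte_get_timer_cycles();
 *		if (cur_tsc - prev_tsc > resolution) {
 *			rte_timer_manage();
 *			prev_tsc = cur_tsc;
 *		}
 *		// ... rest of the datapath work ...
 *	}
 */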
624 /* dump statistics about timers */
625 void rte_timer_dump_stats(FILE *f)
626 {
627 #ifdef RTE_LIBRTE_TIMER_DEBUG
628 	struct rte_timer_debug_stats sum;
629 	unsigned lcore_id;
630 
631 	memset(&sum, 0, sizeof(sum));
632 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
633 		sum.reset += priv_timer[lcore_id].stats.reset;
634 		sum.stop += priv_timer[lcore_id].stats.stop;
635 		sum.manage += priv_timer[lcore_id].stats.manage;
636 		sum.pending += priv_timer[lcore_id].stats.pending;
637 	}
638 	fprintf(f, "Timer statistics:\n");
639 	fprintf(f, "  reset = %"PRIu64"\n", sum.reset);
640 	fprintf(f, "  stop = %"PRIu64"\n", sum.stop);
641 	fprintf(f, "  manage = %"PRIu64"\n", sum.manage);
642 	fprintf(f, "  pending = %"PRIu64"\n", sum.pending);
643 #else
644 	fprintf(f, "No timer statistics, RTE_LIBRTE_TIMER_DEBUG is disabled\n");
645 #endif
646 }
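/*
 * Illustrative call (not part of this file): when RTE_LIBRTE_TIMER_DEBUG is
 * enabled, the aggregated per-lcore counters can be printed to any stream.
 *
 *	rte_timer_dump_stats(stdout);
 */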
647