1*d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2*d30ea906Sjfb8856606 * Copyright(c) 2015 Intel Corporation
3a9643ea8Slogwang */
4a9643ea8Slogwang
5a9643ea8Slogwang #include <rte_log.h>
6a9643ea8Slogwang #include <rte_common.h>
7a9643ea8Slogwang
8a9643ea8Slogwang #include "lthread_diag.h"
9a9643ea8Slogwang #include "lthread_queue.h"
10a9643ea8Slogwang #include "lthread_pool.h"
11a9643ea8Slogwang #include "lthread_objcache.h"
12a9643ea8Slogwang #include "lthread_sched.h"
13a9643ea8Slogwang #include "lthread_diag_api.h"
14a9643ea8Slogwang
15a9643ea8Slogwang
16a9643ea8Slogwang /* dummy ref value of default diagnostic callback */
17a9643ea8Slogwang static uint64_t dummy_ref;
18a9643ea8Slogwang
19a9643ea8Slogwang #define DIAG_SCHED_STATS_FORMAT \
20a9643ea8Slogwang "core %d\n%33s %12s %12s %12s %12s\n"
21a9643ea8Slogwang
22a9643ea8Slogwang #define DIAG_CACHE_STATS_FORMAT \
23a9643ea8Slogwang "%20s %12lu %12lu %12lu %12lu %12lu\n"
24a9643ea8Slogwang
25a9643ea8Slogwang #define DIAG_QUEUE_STATS_FORMAT \
26a9643ea8Slogwang "%20s %12lu %12lu %12lu\n"
27a9643ea8Slogwang
28a9643ea8Slogwang
/*
 * texts used in diagnostic events,
 * corresponding diagnostic mask bit positions are given as comment
 *
 * NOTE(review): order must stay in sync with the LT_DIAG_* event
 * numbering (each entry's index is the event's bit position in
 * diag_mask) — confirm against lthread_diag_api.h before reordering.
 * Entries are space-padded to equal width so event logs line up.
 */
const char *diag_event_text[] = {
	"LTHREAD_CREATE     ",	/* 00 */
	"LTHREAD_EXIT       ",	/* 01 */
	"LTHREAD_JOIN       ",	/* 02 */
	"LTHREAD_CANCEL     ",	/* 03 */
	"LTHREAD_DETACH     ",	/* 04 */
	"LTHREAD_FREE       ",	/* 05 */
	"LTHREAD_SUSPENDED  ",	/* 06 */
	"LTHREAD_YIELD      ",	/* 07 */
	"LTHREAD_RESCHEDULED",	/* 08 */
	"LTHREAD_SLEEP      ",	/* 09 */
	"LTHREAD_RESUMED    ",	/* 10 */
	"LTHREAD_AFFINITY   ",	/* 11 */
	"LTHREAD_TMR_START  ",	/* 12 */
	"LTHREAD_TMR_DELETE ",	/* 13 */
	"LTHREAD_TMR_EXPIRED",	/* 14 */
	"COND_CREATE        ",	/* 15 */
	"COND_DESTROY       ",	/* 16 */
	"COND_WAIT          ",	/* 17 */
	"COND_SIGNAL        ",	/* 18 */
	"COND_BROADCAST     ",	/* 19 */
	"MUTEX_CREATE       ",	/* 20 */
	"MUTEX_DESTROY      ",	/* 21 */
	"MUTEX_LOCK         ",	/* 22 */
	"MUTEX_TRYLOCK      ",	/* 23 */
	"MUTEX_BLOCKED      ",	/* 24 */
	"MUTEX_UNLOCKED     ",	/* 25 */
	"SCHED_CREATE       ",	/* 26 */
	"SCHED_SHUTDOWN     "	/* 27 */
};
63a9643ea8Slogwang
64a9643ea8Slogwang
/*
 * set diagnostic mask
 *
 * Selects which diagnostic events are reported; bit positions
 * correspond to the entries of diag_event_text[] above.  When the
 * library is built without LTHREAD_DIAG this is a no-op that only
 * logs a hint on how to enable diagnostics.
 */
void lthread_diagnostic_set_mask(DIAG_USED uint64_t mask)
{
#if LTHREAD_DIAG
	diag_mask = mask;
#else
	RTE_LOG(INFO, LTHREAD,
		"LTHREAD_DIAG is not set, see lthread_diag_api.h\n");
#endif
}
77a9643ea8Slogwang
78a9643ea8Slogwang
/*
 * Check consistency of the scheduler stats.
 *
 * Only sensible to run after the schedulers are stopped.  Sums the
 * qnodes sitting in every scheduler's queues and object caches, plus
 * those still available in the qnode pool; the total must equal the
 * combined capacity of all qnode pools.
 */
void
_sched_stats_consistency_check(void);
void
_sched_stats_consistency_check(void)
{
#if LTHREAD_DIAG
	uint64_t in_use = 0;
	uint64_t total_capacity = 0;
	int core;

	for (core = 0; core < LTHREAD_MAX_LCORES; core++) {
		struct lthread_sched *s = schedcore[core];

		if (s == NULL)
			continue;

		/* each of these queues consumes a stub node */
		in_use += 8;
		in_use += DIAG_COUNT(s->ready, size);
		in_use += DIAG_COUNT(s->pready, size);
		in_use += DIAG_COUNT(s->lthread_cache, available);
		in_use += DIAG_COUNT(s->stack_cache, available);
		in_use += DIAG_COUNT(s->tls_cache, available);
		in_use += DIAG_COUNT(s->per_lthread_cache, available);
		in_use += DIAG_COUNT(s->cond_cache, available);
		in_use += DIAG_COUNT(s->mutex_cache, available);

		/* the node pool does not consume a stub node */
		if (s->qnode_pool->fast_alloc != NULL)
			in_use++;
		in_use += DIAG_COUNT(s->qnode_pool, available);

		total_capacity += DIAG_COUNT(s->qnode_pool, capacity);
	}

	if (in_use == total_capacity) {
		RTE_LOG(INFO, LTHREAD,
			"Scheduler caches are ok\n");
	} else {
		RTE_LOG(CRIT, LTHREAD,
			"Scheduler caches are inconsistent\n");
	}
#endif
}
130a9643ea8Slogwang
131a9643ea8Slogwang
132a9643ea8Slogwang #if LTHREAD_DIAG
133a9643ea8Slogwang /*
134a9643ea8Slogwang * Display node pool stats
135a9643ea8Slogwang */
136a9643ea8Slogwang static inline void
_qnode_pool_display(DIAG_USED struct qnode_pool * p)137a9643ea8Slogwang _qnode_pool_display(DIAG_USED struct qnode_pool *p)
138a9643ea8Slogwang {
139a9643ea8Slogwang
140a9643ea8Slogwang printf(DIAG_CACHE_STATS_FORMAT,
141a9643ea8Slogwang p->name,
142a9643ea8Slogwang DIAG_COUNT(p, rd),
143a9643ea8Slogwang DIAG_COUNT(p, wr),
144a9643ea8Slogwang DIAG_COUNT(p, available),
145a9643ea8Slogwang DIAG_COUNT(p, prealloc),
146a9643ea8Slogwang DIAG_COUNT(p, capacity));
147a9643ea8Slogwang fflush(stdout);
148a9643ea8Slogwang }
149a9643ea8Slogwang #endif
150a9643ea8Slogwang
151a9643ea8Slogwang
152a9643ea8Slogwang #if LTHREAD_DIAG
153a9643ea8Slogwang /*
154a9643ea8Slogwang * Display queue stats
155a9643ea8Slogwang */
156a9643ea8Slogwang static inline void
_lthread_queue_display(DIAG_USED struct lthread_queue * q)157a9643ea8Slogwang _lthread_queue_display(DIAG_USED struct lthread_queue *q)
158a9643ea8Slogwang {
159a9643ea8Slogwang #if DISPLAY_OBJCACHE_QUEUES
160a9643ea8Slogwang printf(DIAG_QUEUE_STATS_FORMAT,
161a9643ea8Slogwang q->name,
162a9643ea8Slogwang DIAG_COUNT(q, rd),
163a9643ea8Slogwang DIAG_COUNT(q, wr),
164a9643ea8Slogwang DIAG_COUNT(q, size));
165a9643ea8Slogwang fflush(stdout);
166a9643ea8Slogwang #else
167a9643ea8Slogwang printf("%s: queue stats disabled\n",
168a9643ea8Slogwang q->name);
169a9643ea8Slogwang
170a9643ea8Slogwang #endif
171a9643ea8Slogwang }
172a9643ea8Slogwang #endif
173a9643ea8Slogwang
174a9643ea8Slogwang #if LTHREAD_DIAG
175a9643ea8Slogwang /*
176a9643ea8Slogwang * Display objcache stats
177a9643ea8Slogwang */
178a9643ea8Slogwang static inline void
_objcache_display(DIAG_USED struct lthread_objcache * c)179a9643ea8Slogwang _objcache_display(DIAG_USED struct lthread_objcache *c)
180a9643ea8Slogwang {
181a9643ea8Slogwang
182a9643ea8Slogwang printf(DIAG_CACHE_STATS_FORMAT,
183a9643ea8Slogwang c->name,
184a9643ea8Slogwang DIAG_COUNT(c, rd),
185a9643ea8Slogwang DIAG_COUNT(c, wr),
186a9643ea8Slogwang DIAG_COUNT(c, available),
187a9643ea8Slogwang DIAG_COUNT(c, prealloc),
188a9643ea8Slogwang DIAG_COUNT(c, capacity));
189a9643ea8Slogwang _lthread_queue_display(c->q);
190a9643ea8Slogwang fflush(stdout);
191a9643ea8Slogwang }
192a9643ea8Slogwang #endif
193a9643ea8Slogwang
194a9643ea8Slogwang /*
195a9643ea8Slogwang * Display sched stats
196a9643ea8Slogwang */
197a9643ea8Slogwang void
lthread_sched_stats_display(void)198a9643ea8Slogwang lthread_sched_stats_display(void)
199a9643ea8Slogwang {
200a9643ea8Slogwang #if LTHREAD_DIAG
201a9643ea8Slogwang int i;
202a9643ea8Slogwang struct lthread_sched *sched;
203a9643ea8Slogwang
204a9643ea8Slogwang for (i = 0; i < LTHREAD_MAX_LCORES; i++) {
205a9643ea8Slogwang sched = schedcore[i];
206a9643ea8Slogwang if (sched != NULL) {
207a9643ea8Slogwang printf(DIAG_SCHED_STATS_FORMAT,
208a9643ea8Slogwang sched->lcore_id,
209a9643ea8Slogwang "rd",
210a9643ea8Slogwang "wr",
211a9643ea8Slogwang "present",
212a9643ea8Slogwang "nb preallocs",
213a9643ea8Slogwang "capacity");
214a9643ea8Slogwang _lthread_queue_display(sched->ready);
215a9643ea8Slogwang _lthread_queue_display(sched->pready);
216a9643ea8Slogwang _qnode_pool_display(sched->qnode_pool);
217a9643ea8Slogwang _objcache_display(sched->lthread_cache);
218a9643ea8Slogwang _objcache_display(sched->stack_cache);
219a9643ea8Slogwang _objcache_display(sched->tls_cache);
220a9643ea8Slogwang _objcache_display(sched->per_lthread_cache);
221a9643ea8Slogwang _objcache_display(sched->cond_cache);
222a9643ea8Slogwang _objcache_display(sched->mutex_cache);
223a9643ea8Slogwang fflush(stdout);
224a9643ea8Slogwang }
225a9643ea8Slogwang }
226a9643ea8Slogwang _sched_stats_consistency_check();
227a9643ea8Slogwang #else
228a9643ea8Slogwang RTE_LOG(INFO, LTHREAD,
229a9643ea8Slogwang "lthread diagnostics disabled\n"
230a9643ea8Slogwang "hint - set LTHREAD_DIAG in lthread_diag_api.h\n");
231a9643ea8Slogwang #endif
232a9643ea8Slogwang }
233a9643ea8Slogwang
234a9643ea8Slogwang /*
235a9643ea8Slogwang * Defafult diagnostic callback
236a9643ea8Slogwang */
237a9643ea8Slogwang static uint64_t
_lthread_diag_default_cb(uint64_t time,struct lthread * lt,int diag_event,uint64_t diag_ref,const char * text,uint64_t p1,uint64_t p2)238a9643ea8Slogwang _lthread_diag_default_cb(uint64_t time, struct lthread *lt, int diag_event,
239a9643ea8Slogwang uint64_t diag_ref, const char *text, uint64_t p1, uint64_t p2)
240a9643ea8Slogwang {
241a9643ea8Slogwang uint64_t _p2;
242a9643ea8Slogwang int lcore = (int) rte_lcore_id();
243a9643ea8Slogwang
244a9643ea8Slogwang switch (diag_event) {
245a9643ea8Slogwang case LT_DIAG_LTHREAD_CREATE:
246a9643ea8Slogwang case LT_DIAG_MUTEX_CREATE:
247a9643ea8Slogwang case LT_DIAG_COND_CREATE:
248a9643ea8Slogwang _p2 = dummy_ref;
249a9643ea8Slogwang break;
250a9643ea8Slogwang default:
251a9643ea8Slogwang _p2 = p2;
252a9643ea8Slogwang break;
253a9643ea8Slogwang }
254a9643ea8Slogwang
255a9643ea8Slogwang printf("%"PRIu64" %d %8.8lx %8.8lx %s %8.8lx %8.8lx\n",
256a9643ea8Slogwang time,
257a9643ea8Slogwang lcore,
258a9643ea8Slogwang (uint64_t) lt,
259a9643ea8Slogwang diag_ref,
260a9643ea8Slogwang text,
261a9643ea8Slogwang p1,
262a9643ea8Slogwang _p2);
263a9643ea8Slogwang
264a9643ea8Slogwang return dummy_ref++;
265a9643ea8Slogwang }
266a9643ea8Slogwang
/*
 * plug in default diag callback with mask off
 *
 * Runs at load time via the RTE_INIT constructor mechanism, so a
 * callback is always installed even if the application never calls
 * lthread_diagnostic_enable(); the zero mask keeps all events muted
 * until explicitly enabled.
 */
RTE_INIT(_lthread_diag_ctor)
{
	diag_cb = _lthread_diag_default_cb;
	diag_mask = 0;
}
275a9643ea8Slogwang
276a9643ea8Slogwang
277a9643ea8Slogwang /*
278a9643ea8Slogwang * enable diagnostics
279a9643ea8Slogwang */
lthread_diagnostic_enable(DIAG_USED diag_callback cb,DIAG_USED uint64_t mask)280a9643ea8Slogwang void lthread_diagnostic_enable(DIAG_USED diag_callback cb,
281a9643ea8Slogwang DIAG_USED uint64_t mask)
282a9643ea8Slogwang {
283a9643ea8Slogwang #if LTHREAD_DIAG
284a9643ea8Slogwang if (cb == NULL)
285a9643ea8Slogwang diag_cb = _lthread_diag_default_cb;
286a9643ea8Slogwang else
287a9643ea8Slogwang diag_cb = cb;
288a9643ea8Slogwang diag_mask = mask;
289a9643ea8Slogwang #else
290a9643ea8Slogwang RTE_LOG(INFO, LTHREAD,
291a9643ea8Slogwang "LTHREAD_DIAG is not set, see lthread_diag_api.h\n");
292a9643ea8Slogwang #endif
293a9643ea8Slogwang }
294