/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>	/* printf(), fflush() */
#include <inttypes.h>	/* PRIu64 */

#include <rte_log.h>
#include <rte_common.h>
#include <rte_lcore.h>	/* rte_lcore_id() */

#include "lthread_diag.h"
#include "lthread_queue.h"
#include "lthread_pool.h"
#include "lthread_objcache.h"
#include "lthread_sched.h"
#include "lthread_diag_api.h"


/* dummy ref value returned by the default diagnostic callback */
static uint64_t dummy_ref;

#define DIAG_SCHED_STATS_FORMAT \
"core %d\n%33s %12s %12s %12s %12s\n"

#define DIAG_CACHE_STATS_FORMAT \
"%20s %12lu %12lu %12lu %12lu %12lu\n"

#define DIAG_QUEUE_STATS_FORMAT \
"%20s %12lu %12lu %12lu\n"


/*
 * Texts used in diagnostic events; the corresponding diagnostic mask
 * bit positions are given in the trailing comments.
 */
const char *diag_event_text[] = {
	"LTHREAD_CREATE     ",	/* 00 */
	"LTHREAD_EXIT       ",	/* 01 */
	"LTHREAD_JOIN       ",	/* 02 */
	"LTHREAD_CANCEL     ",	/* 03 */
	"LTHREAD_DETACH     ",	/* 04 */
	"LTHREAD_FREE       ",	/* 05 */
	"LTHREAD_SUSPENDED  ",	/* 06 */
	"LTHREAD_YIELD      ",	/* 07 */
	"LTHREAD_RESCHEDULED",	/* 08 */
	"LTHREAD_SLEEP      ",	/* 09 */
	"LTHREAD_RESUMED    ",	/* 10 */
	"LTHREAD_AFFINITY   ",	/* 11 */
	"LTHREAD_TMR_START  ",	/* 12 */
	"LTHREAD_TMR_DELETE ",	/* 13 */
	"LTHREAD_TMR_EXPIRED",	/* 14 */
	"COND_CREATE        ",	/* 15 */
	"COND_DESTROY       ",	/* 16 */
	"COND_WAIT          ",	/* 17 */
	"COND_SIGNAL        ",	/* 18 */
	"COND_BROADCAST     ",	/* 19 */
	"MUTEX_CREATE       ",	/* 20 */
	"MUTEX_DESTROY      ",	/* 21 */
	"MUTEX_LOCK         ",	/* 22 */
	"MUTEX_TRYLOCK      ",	/* 23 */
	"MUTEX_BLOCKED      ",	/* 24 */
	"MUTEX_UNLOCKED     ",	/* 25 */
	"SCHED_CREATE       ",	/* 26 */
	"SCHED_SHUTDOWN     "	/* 27 */
};
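
/*
 * Illustrative note (added for clarity, not part of the original sources):
 * diag_event_text is indexed by diagnostic mask bit position, so the mask
 * bit selecting the event at index n is (1ULL << n). For example, assuming
 * LT_DIAG_MUTEX_CREATE from lthread_diag_api.h names bit position 20 above:
 *
 *	uint64_t bit = 1ULL << LT_DIAG_MUTEX_CREATE;
 *	printf("%s\n", diag_event_text[LT_DIAG_MUTEX_CREATE]);
 */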


/*
 * set diagnostic mask
 */
void lthread_diagnostic_set_mask(DIAG_USED uint64_t mask)
{
#if LTHREAD_DIAG
	diag_mask = mask;
#else
	RTE_LOG(INFO, LTHREAD,
		"LTHREAD_DIAG is not set, see lthread_diag_api.h\n");
#endif
}
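
/*
 * Usage sketch (illustrative only): assuming the LT_DIAG_* values declared
 * in lthread_diag_api.h are the bit positions listed in diag_event_text
 * above, an application could restrict tracing to creation events with:
 *
 *	lthread_diagnostic_set_mask((1ULL << LT_DIAG_LTHREAD_CREATE) |
 *				    (1ULL << LT_DIAG_MUTEX_CREATE) |
 *				    (1ULL << LT_DIAG_COND_CREATE));
 */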


/*
 * Check consistency of the scheduler stats.
 * Only sensible to run after the schedulers are stopped.
 * Count the number of objects lying in caches and queues
 * and available in the qnode pool.
 * This should be equal to the total capacity of all
 * qnode pools.
 */
void
_sched_stats_consistency_check(void);
void
_sched_stats_consistency_check(void)
{
#if LTHREAD_DIAG
	int i;
	struct lthread_sched *sched;
	uint64_t count = 0;
	uint64_t capacity = 0;

	for (i = 0; i < LTHREAD_MAX_LCORES; i++) {
		sched = schedcore[i];
		if (sched == NULL)
			continue;

		/* each of these queues consumes a stub node */
		count += 8;
		count += DIAG_COUNT(sched->ready, size);
		count += DIAG_COUNT(sched->pready, size);
		count += DIAG_COUNT(sched->lthread_cache, available);
		count += DIAG_COUNT(sched->stack_cache, available);
		count += DIAG_COUNT(sched->tls_cache, available);
		count += DIAG_COUNT(sched->per_lthread_cache, available);
		count += DIAG_COUNT(sched->cond_cache, available);
		count += DIAG_COUNT(sched->mutex_cache, available);

		/* the node pool does not consume a stub node */
		if (sched->qnode_pool->fast_alloc != NULL)
			count++;
		count += DIAG_COUNT(sched->qnode_pool, available);

		capacity += DIAG_COUNT(sched->qnode_pool, capacity);
	}
	if (count != capacity) {
		RTE_LOG(CRIT, LTHREAD,
			"Scheduler caches are inconsistent\n");
	} else {
		RTE_LOG(INFO, LTHREAD,
			"Scheduler caches are ok\n");
	}
#endif
}
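
/*
 * Accounting sketch (added for clarity, not part of the original sources):
 * per scheduler the check above expects
 *
 *	8 stub nodes (one per queue/objcache listed above)
 *	+ nodes currently queued on ready/pready
 *	+ nodes available in each object cache
 *	+ nodes available in the qnode pool (plus one if fast_alloc holds one)
 *	== qnode pool capacity
 *
 * so any mismatch suggests a qnode has been leaked or freed twice.
 */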


#if LTHREAD_DIAG
/*
 * Display node pool stats
 */
static inline void
_qnode_pool_display(DIAG_USED struct qnode_pool *p)
{
	printf(DIAG_CACHE_STATS_FORMAT,
			p->name,
			DIAG_COUNT(p, rd),
			DIAG_COUNT(p, wr),
			DIAG_COUNT(p, available),
			DIAG_COUNT(p, prealloc),
			DIAG_COUNT(p, capacity));
	fflush(stdout);
}
#endif


#if LTHREAD_DIAG
/*
 * Display queue stats
 */
static inline void
_lthread_queue_display(DIAG_USED struct lthread_queue *q)
{
#if DISPLAY_OBJCACHE_QUEUES
	printf(DIAG_QUEUE_STATS_FORMAT,
			q->name,
			DIAG_COUNT(q, rd),
			DIAG_COUNT(q, wr),
			DIAG_COUNT(q, size));
	fflush(stdout);
#else
	printf("%s: queue stats disabled\n",
			q->name);
#endif
}
#endif

#if LTHREAD_DIAG
/*
 * Display objcache stats
 */
static inline void
_objcache_display(DIAG_USED struct lthread_objcache *c)
{
	printf(DIAG_CACHE_STATS_FORMAT,
			c->name,
			DIAG_COUNT(c, rd),
			DIAG_COUNT(c, wr),
			DIAG_COUNT(c, available),
			DIAG_COUNT(c, prealloc),
			DIAG_COUNT(c, capacity));
	_lthread_queue_display(c->q);
	fflush(stdout);
}
#endif

/*
 * Display sched stats
 */
void
lthread_sched_stats_display(void)
{
#if LTHREAD_DIAG
	int i;
	struct lthread_sched *sched;

	for (i = 0; i < LTHREAD_MAX_LCORES; i++) {
		sched = schedcore[i];
		if (sched != NULL) {
			printf(DIAG_SCHED_STATS_FORMAT,
					sched->lcore_id,
					"rd",
					"wr",
					"present",
					"nb preallocs",
					"capacity");
			_lthread_queue_display(sched->ready);
			_lthread_queue_display(sched->pready);
			_qnode_pool_display(sched->qnode_pool);
			_objcache_display(sched->lthread_cache);
			_objcache_display(sched->stack_cache);
			_objcache_display(sched->tls_cache);
			_objcache_display(sched->per_lthread_cache);
			_objcache_display(sched->cond_cache);
			_objcache_display(sched->mutex_cache);
			fflush(stdout);
		}
	}
	_sched_stats_consistency_check();
#else
	RTE_LOG(INFO, LTHREAD,
		"lthread diagnostics disabled\n"
		"hint - set LTHREAD_DIAG in lthread_diag_api.h\n");
#endif
}
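
/*
 * Usage sketch (illustrative only): the counts are only stable once the
 * schedulers have stopped, so a typical caller shuts them down first,
 * for example (assuming lthread_scheduler_shutdown_all() from lthread_api.h):
 *
 *	lthread_scheduler_shutdown_all();
 *	... wait for the scheduler lcores to return ...
 *	lthread_sched_stats_display();
 */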

/*
 * Default diagnostic callback
 */
static uint64_t
_lthread_diag_default_cb(uint64_t time, struct lthread *lt, int diag_event,
		uint64_t diag_ref, const char *text, uint64_t p1, uint64_t p2)
{
	uint64_t _p2;
	int lcore = (int) rte_lcore_id();

	switch (diag_event) {
	case LT_DIAG_LTHREAD_CREATE:
	case LT_DIAG_MUTEX_CREATE:
	case LT_DIAG_COND_CREATE:
		_p2 = dummy_ref;
		break;
	default:
		_p2 = p2;
		break;
	}

	printf("%"PRIu64" %d %8.8lx %8.8lx %s %8.8lx %8.8lx\n",
		time,
		lcore,
		(uint64_t) lt,
		diag_ref,
		text,
		p1,
		_p2);

	return dummy_ref++;
}
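
/*
 * Note (added for clarity, based on the handling above): for the three
 * create events the default callback prints dummy_ref in place of p2 and
 * returns an incrementing dummy_ref, which the diagnostic macros are
 * expected to store as the new object's diag reference; see lthread_diag.h
 * for the authoritative create-event handling.
 */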

/*
 * plug in default diag callback with mask off
 */
RTE_INIT(_lthread_diag_ctor)
{
	diag_cb = _lthread_diag_default_cb;
	diag_mask = 0;
}


/*
 * enable diagnostics
 */
void lthread_diagnostic_enable(DIAG_USED diag_callback cb,
				DIAG_USED uint64_t mask)
{
#if LTHREAD_DIAG
	if (cb == NULL)
		diag_cb = _lthread_diag_default_cb;
	else
		diag_cb = cb;
	diag_mask = mask;
#else
	RTE_LOG(INFO, LTHREAD,
		"LTHREAD_DIAG is not set, see lthread_diag_api.h\n");
#endif
}
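
/*
 * Usage sketch (illustrative only, not part of the original sources): a
 * custom callback must match the diag_callback signature used by
 * _lthread_diag_default_cb() above; my_diag_cb below is a hypothetical
 * example that simply counts events:
 *
 *	static uint64_t n_events;
 *
 *	static uint64_t
 *	my_diag_cb(uint64_t time, struct lthread *lt, int diag_event,
 *		   uint64_t diag_ref, const char *text,
 *		   uint64_t p1, uint64_t p2)
 *	{
 *		(void)time; (void)lt; (void)diag_event; (void)diag_ref;
 *		(void)text; (void)p1; (void)p2;
 *		return ++n_events;
 *	}
 *
 *	lthread_diagnostic_enable(my_diag_cb, UINT64_MAX);
 */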