xref: /linux-6.15/kernel/trace/trace_stat.c (revision b3dd7ba7)
1 /*
2  * Infrastructure for statistic tracing (histogram output).
3  *
4  * Copyright (C) 2008-2009 Frederic Weisbecker <[email protected]>
5  *
6  * Based on the code from trace_branch.c which is
7  * Copyright (C) 2008 Steven Rostedt <[email protected]>
8  *
9  */
10 
11 
12 #include <linux/list.h>
13 #include <linux/rbtree.h>
14 #include <linux/debugfs.h>
15 #include "trace_stat.h"
16 #include "trace.h"
17 
18 
/*
 * A node of the stat red-black tree built from a tracer's entries.
 * We use such a tree to quickly sort the stat entries coming from
 * the tracer.
 */
struct stat_node {
	struct rb_node		node;	/* rbtree linkage, keyed by the tracer's stat_cmp */
	void			*stat;	/* tracer-private stat entry (opaque here) */
};
28 
/* A stat session is the stats output in one file */
struct stat_session {
	struct list_head	session_list;	/* link in all_stat_sessions */
	struct tracer_stat	*ts;		/* callbacks provided by the tracer */
	struct rb_root		stat_root;	/* sorted snapshot, rebuilt on each open */
	struct mutex		stat_mutex;	/* protects stat_root */
	struct dentry		*file;		/* debugfs file exposing this session */
};
37 
/*
 * All of the sessions currently in use. Each stat file embeds one
 * session; the list is protected by all_stat_sessions_mutex.
 */
static LIST_HEAD(all_stat_sessions);
static DEFINE_MUTEX(all_stat_sessions_mutex);

/* The root directory for all stat files; created lazily on first use */
static struct dentry		*stat_dir;
44 
45 /*
46  * Iterate through the rbtree using a post order traversal path
47  * to release the next node.
48  * It won't necessary release one at each iteration
49  * but it will at least advance closer to the next one
50  * to be released.
51  */
52 static struct rb_node *release_next(struct rb_node *node)
53 {
54 	struct stat_node *snode;
55 	struct rb_node *parent = rb_parent(node);
56 
57 	if (node->rb_left)
58 		return node->rb_left;
59 	else if (node->rb_right)
60 		return node->rb_right;
61 	else {
62 		if (!parent)
63 			return NULL;
64 		if (parent->rb_left == node)
65 			parent->rb_left = NULL;
66 		else
67 			parent->rb_right = NULL;
68 
69 		snode = container_of(node, struct stat_node, node);
70 		kfree(snode);
71 
72 		return parent;
73 	}
74 }
75 
76 static void reset_stat_session(struct stat_session *session)
77 {
78 	struct rb_node *node = session->stat_root.rb_node;
79 
80 	while (node)
81 		node = release_next(node);
82 
83 	session->stat_root = RB_ROOT;
84 }
85 
86 static void destroy_session(struct stat_session *session)
87 {
88 	debugfs_remove(session->file);
89 	reset_stat_session(session);
90 	mutex_destroy(&session->stat_mutex);
91 	kfree(session);
92 }
93 
/* Ordering callback used by insert_stat(); same contract as ts->stat_cmp. */
typedef int (*cmp_stat_t)(void *, void *);
95 
96 static void
97 insert_stat(struct rb_root *root, struct stat_node *data, cmp_stat_t cmp)
98 {
99 	struct rb_node **new = &(root->rb_node), *parent = NULL;
100 
101 	/*
102 	 * Figure out where to put new node
103 	 * This is a descendent sorting
104 	 */
105 	while (*new) {
106 		struct stat_node *this;
107 		int result;
108 
109 		this = container_of(*new, struct stat_node, node);
110 		result = cmp(data->stat, this->stat);
111 
112 		parent = *new;
113 		if (result >= 0)
114 			new = &((*new)->rb_left);
115 		else
116 			new = &((*new)->rb_right);
117 	}
118 
119 	rb_link_node(&data->node, parent, new);
120 	rb_insert_color(&data->node, root);
121 }
122 
/*
 * Fallback comparator for tracers that don't provide a stat_cmp
 * callback. Always reporting "smaller" forces each new entry to the
 * tail (rightmost position) of the tree, i.e. plain insertion order.
 */
static int dummy_cmp(void *p1, void *p2)
{
	return -1;
}
132 
/*
 * Initialize the stat list at each trace_stat file opening.
 * All of these copies and sorting are required on all opening
 * since the stats could have changed between two file sessions.
 *
 * Returns 0 on success (including when the tracer has no entries),
 * -ENOMEM if a node allocation fails; any partially built tree is
 * torn down before returning.
 */
static int stat_seq_init(struct stat_session *session)
{
	struct tracer_stat *ts = session->ts;
	struct stat_node *new_entry;
	struct rb_root *root;
	void *stat;
	int ret = 0;
	int i;

	mutex_lock(&session->stat_mutex);
	/* Drop any tree left over from a previous open of this file. */
	reset_stat_session(session);

	/* Tracers without a comparator get insertion-order sorting. */
	if (!ts->stat_cmp)
		ts->stat_cmp = dummy_cmp;

	stat = ts->stat_start(ts);
	if (!stat)
		goto exit;	/* no entries: success with an empty tree */

	/*
	 * The first entry. Actually this is the second, but the first
	 * one (the stat_list head) is pointless.
	 */
	new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
	if (!new_entry) {
		ret = -ENOMEM;
		goto exit;
	}
	root = &session->stat_root;
	/*
	 * The tree is empty here, so the comparator is never invoked:
	 * dummy_cmp is safe even though new_entry->stat is still NULL.
	 */
	insert_stat(root, new_entry, dummy_cmp);

	new_entry->stat = stat;

	/*
	 * Iterate over the tracer stat entries and store them in a sorted
	 * list.
	 */
	for (i = 1; ; i++) {
		stat = ts->stat_next(stat, i);

		/* End of insertion */
		if (!stat)
			break;

		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
		if (!new_entry) {
			ret = -ENOMEM;
			goto exit_free_list;
		}

		new_entry->stat = stat;

		insert_stat(root, new_entry, ts->stat_cmp);
	}

exit:
	mutex_unlock(&session->stat_mutex);
	return ret;

exit_free_list:
	reset_stat_session(session);
	mutex_unlock(&session->stat_mutex);
	return ret;
}
202 
203 
204 static void *stat_seq_start(struct seq_file *s, loff_t *pos)
205 {
206 	struct stat_session *session = s->private;
207 	struct rb_node *node;
208 	int i;
209 
210 	/* Prevent from tracer switch or stat_list modification */
211 	mutex_lock(&session->stat_mutex);
212 
213 	/* If we are in the beginning of the file, print the headers */
214 	if (!*pos && session->ts->stat_headers) {
215 		(*pos)++;
216 		return SEQ_START_TOKEN;
217 	}
218 
219 	node = rb_first(&session->stat_root);
220 	for (i = 0; node && i < *pos; i++)
221 		node = rb_next(node);
222 
223 	(*pos)++;
224 
225 	return node;
226 }
227 
228 static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
229 {
230 	struct stat_session *session = s->private;
231 	struct rb_node *node = p;
232 
233 	(*pos)++;
234 
235 	if (p == SEQ_START_TOKEN)
236 		return rb_first(&session->stat_root);
237 
238 	return rb_next(node);
239 }
240 
241 static void stat_seq_stop(struct seq_file *s, void *p)
242 {
243 	struct stat_session *session = s->private;
244 	mutex_unlock(&session->stat_mutex);
245 }
246 
247 static int stat_seq_show(struct seq_file *s, void *v)
248 {
249 	struct stat_session *session = s->private;
250 	struct stat_node *l = container_of(v, struct stat_node, node);
251 
252 	if (v == SEQ_START_TOKEN)
253 		return session->ts->stat_headers(s);
254 
255 	return session->ts->stat_show(s, l->stat);
256 }
257 
/* seq_file iterator over one stat session's sorted tree. */
static const struct seq_operations trace_stat_seq_ops = {
	.start		= stat_seq_start,
	.next		= stat_seq_next,
	.stop		= stat_seq_stop,
	.show		= stat_seq_show
};
264 
265 /* The session stat is refilled and resorted at each stat file opening */
266 static int tracing_stat_open(struct inode *inode, struct file *file)
267 {
268 	int ret;
269 
270 	struct stat_session *session = inode->i_private;
271 
272 	ret = seq_open(file, &trace_stat_seq_ops);
273 	if (!ret) {
274 		struct seq_file *m = file->private_data;
275 		m->private = session;
276 		ret = stat_seq_init(session);
277 	}
278 
279 	return ret;
280 }
281 
282 /*
283  * Avoid consuming memory with our now useless list.
284  */
285 static int tracing_stat_release(struct inode *i, struct file *f)
286 {
287 	struct stat_session *session = i->i_private;
288 
289 	mutex_lock(&session->stat_mutex);
290 	reset_stat_session(session);
291 	mutex_unlock(&session->stat_mutex);
292 
293 	return 0;
294 }
295 
/* File operations for a per-tracer trace_stat debugfs file. */
static const struct file_operations tracing_stat_fops = {
	.open		= tracing_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_stat_release
};
302 
303 static int tracing_stat_init(void)
304 {
305 	struct dentry *d_tracing;
306 
307 	d_tracing = tracing_init_dentry();
308 
309 	stat_dir = debugfs_create_dir("trace_stat", d_tracing);
310 	if (!stat_dir)
311 		pr_warning("Could not create debugfs "
312 			   "'trace_stat' entry\n");
313 	return 0;
314 }
315 
316 static int init_stat_file(struct stat_session *session)
317 {
318 	if (!stat_dir && tracing_stat_init())
319 		return -ENODEV;
320 
321 	session->file = debugfs_create_file(session->ts->name, 0644,
322 					    stat_dir,
323 					    session, &tracing_stat_fops);
324 	if (!session->file)
325 		return -ENOMEM;
326 	return 0;
327 }
328 
329 int register_stat_tracer(struct tracer_stat *trace)
330 {
331 	struct stat_session *session, *node, *tmp;
332 	int ret;
333 
334 	if (!trace)
335 		return -EINVAL;
336 
337 	if (!trace->stat_start || !trace->stat_next || !trace->stat_show)
338 		return -EINVAL;
339 
340 	/* Already registered? */
341 	mutex_lock(&all_stat_sessions_mutex);
342 	list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
343 		if (node->ts == trace) {
344 			mutex_unlock(&all_stat_sessions_mutex);
345 			return -EINVAL;
346 		}
347 	}
348 	mutex_unlock(&all_stat_sessions_mutex);
349 
350 	/* Init the session */
351 	session = kzalloc(sizeof(*session), GFP_KERNEL);
352 	if (!session)
353 		return -ENOMEM;
354 
355 	session->ts = trace;
356 	INIT_LIST_HEAD(&session->session_list);
357 	mutex_init(&session->stat_mutex);
358 
359 	ret = init_stat_file(session);
360 	if (ret) {
361 		destroy_session(session);
362 		return ret;
363 	}
364 
365 	/* Register */
366 	mutex_lock(&all_stat_sessions_mutex);
367 	list_add_tail(&session->session_list, &all_stat_sessions);
368 	mutex_unlock(&all_stat_sessions_mutex);
369 
370 	return 0;
371 }
372 
373 void unregister_stat_tracer(struct tracer_stat *trace)
374 {
375 	struct stat_session *node, *tmp;
376 
377 	mutex_lock(&all_stat_sessions_mutex);
378 	list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
379 		if (node->ts == trace) {
380 			list_del(&node->session_list);
381 			destroy_session(node);
382 			break;
383 		}
384 	}
385 	mutex_unlock(&all_stat_sessions_mutex);
386 }
387