xref: /linux-6.15/kernel/trace/trace_stat.c (revision d8ea37d5)
1 /*
2  * Infrastructure for statistic tracing (histogram output).
3  *
4  * Copyright (C) 2008-2009 Frederic Weisbecker <[email protected]>
5  *
6  * Based on the code from trace_branch.c which is
7  * Copyright (C) 2008 Steven Rostedt <[email protected]>
8  *
9  */
10 
11 
12 #include <linux/list.h>
13 #include <linux/rbtree.h>
14 #include <linux/debugfs.h>
15 #include "trace_stat.h"
16 #include "trace.h"
17 
18 
19 /*
20  * List of stat red-black nodes from a tracer
 * We use such a tree to quickly sort the stat
 * entries from the tracer.
23  */
24 struct stat_node {
25 	struct rb_node		node;
26 	void			*stat;
27 };
28 
29 /* A stat session is the stats output in one file */
30 struct stat_session {
31 	struct list_head	session_list;
32 	struct tracer_stat	*ts;
33 	struct rb_root		stat_root;
34 	struct mutex		stat_mutex;
35 	struct dentry		*file;
36 };
37 
/* All of the sessions currently in use. Each stat file embeds one session */
39 static LIST_HEAD(all_stat_sessions);
40 static DEFINE_MUTEX(all_stat_sessions_mutex);
41 
42 /* The root directory for all stat files */
43 static struct dentry		*stat_dir;
44 
45 /*
46  * Iterate through the rbtree using a post order traversal path
47  * to release the next node.
 * It won't necessarily release one at each iteration
49  * but it will at least advance closer to the next one
50  * to be released.
51  */
52 static struct rb_node *release_next(struct tracer_stat *ts,
53 				    struct rb_node *node)
54 {
55 	struct stat_node *snode;
56 	struct rb_node *parent = rb_parent(node);
57 
58 	if (node->rb_left)
59 		return node->rb_left;
60 	else if (node->rb_right)
61 		return node->rb_right;
62 	else {
63 		if (!parent)
64 			;
65 		else if (parent->rb_left == node)
66 			parent->rb_left = NULL;
67 		else
68 			parent->rb_right = NULL;
69 
70 		snode = container_of(node, struct stat_node, node);
71 		if (ts->stat_release)
72 			ts->stat_release(snode->stat);
73 		kfree(snode);
74 
75 		return parent;
76 	}
77 }
78 
79 static void reset_stat_session(struct stat_session *session)
80 {
81 	struct rb_node *node = session->stat_root.rb_node;
82 
83 	while (node)
84 		node = release_next(session->ts, node);
85 
86 	session->stat_root = RB_ROOT;
87 }
88 
89 static void destroy_session(struct stat_session *session)
90 {
91 	debugfs_remove(session->file);
92 	reset_stat_session(session);
93 	mutex_destroy(&session->stat_mutex);
94 	kfree(session);
95 }
96 
97 typedef int (*cmp_stat_t)(void *, void *);
98 
99 static int insert_stat(struct rb_root *root, void *stat, cmp_stat_t cmp)
100 {
101 	struct rb_node **new = &(root->rb_node), *parent = NULL;
102 	struct stat_node *data;
103 
104 	data = kzalloc(sizeof(*data), GFP_KERNEL);
105 	if (!data)
106 		return -ENOMEM;
107 	data->stat = stat;
108 
109 	/*
110 	 * Figure out where to put new node
111 	 * This is a descendent sorting
112 	 */
113 	while (*new) {
114 		struct stat_node *this;
115 		int result;
116 
117 		this = container_of(*new, struct stat_node, node);
118 		result = cmp(data->stat, this->stat);
119 
120 		parent = *new;
121 		if (result >= 0)
122 			new = &((*new)->rb_left);
123 		else
124 			new = &((*new)->rb_right);
125 	}
126 
127 	rb_link_node(&data->node, parent, new);
128 	rb_insert_color(&data->node, root);
129 	return 0;
130 }
131 
132 /*
133  * For tracers that don't provide a stat_cmp callback.
134  * This one will force an insertion as right-most node
135  * in the rbtree.
136  */
137 static int dummy_cmp(void *p1, void *p2)
138 {
139 	return -1;
140 }
141 
142 /*
143  * Initialize the stat rbtree at each trace_stat file opening.
144  * All of these copies and sorting are required on all opening
145  * since the stats could have changed between two file sessions.
146  */
147 static int stat_seq_init(struct stat_session *session)
148 {
149 	struct tracer_stat *ts = session->ts;
150 	struct rb_root *root = &session->stat_root;
151 	void *stat;
152 	int ret = 0;
153 	int i;
154 
155 	mutex_lock(&session->stat_mutex);
156 	reset_stat_session(session);
157 
158 	if (!ts->stat_cmp)
159 		ts->stat_cmp = dummy_cmp;
160 
161 	stat = ts->stat_start(ts);
162 	if (!stat)
163 		goto exit;
164 
165 	ret = insert_stat(root, stat, ts->stat_cmp);
166 	if (ret)
167 		goto exit;
168 
169 	/*
170 	 * Iterate over the tracer stat entries and store them in an rbtree.
171 	 */
172 	for (i = 1; ; i++) {
173 		stat = ts->stat_next(stat, i);
174 
175 		/* End of insertion */
176 		if (!stat)
177 			break;
178 
179 		ret = insert_stat(root, stat, ts->stat_cmp);
180 		if (ret)
181 			goto exit_free_rbtree;
182 	}
183 
184 exit:
185 	mutex_unlock(&session->stat_mutex);
186 	return ret;
187 
188 exit_free_rbtree:
189 	reset_stat_session(session);
190 	mutex_unlock(&session->stat_mutex);
191 	return ret;
192 }
193 
194 
195 static void *stat_seq_start(struct seq_file *s, loff_t *pos)
196 {
197 	struct stat_session *session = s->private;
198 	struct rb_node *node;
199 	int i;
200 
201 	/* Prevent from tracer switch or rbtree modification */
202 	mutex_lock(&session->stat_mutex);
203 
204 	/* If we are in the beginning of the file, print the headers */
205 	if (!*pos && session->ts->stat_headers)
206 		return SEQ_START_TOKEN;
207 
208 	node = rb_first(&session->stat_root);
209 	for (i = 0; node && i < *pos; i++)
210 		node = rb_next(node);
211 
212 	return node;
213 }
214 
215 static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
216 {
217 	struct stat_session *session = s->private;
218 	struct rb_node *node = p;
219 
220 	(*pos)++;
221 
222 	if (p == SEQ_START_TOKEN)
223 		return rb_first(&session->stat_root);
224 
225 	return rb_next(node);
226 }
227 
228 static void stat_seq_stop(struct seq_file *s, void *p)
229 {
230 	struct stat_session *session = s->private;
231 	mutex_unlock(&session->stat_mutex);
232 }
233 
234 static int stat_seq_show(struct seq_file *s, void *v)
235 {
236 	struct stat_session *session = s->private;
237 	struct stat_node *l = container_of(v, struct stat_node, node);
238 
239 	if (v == SEQ_START_TOKEN)
240 		return session->ts->stat_headers(s);
241 
242 	return session->ts->stat_show(s, l->stat);
243 }
244 
245 static const struct seq_operations trace_stat_seq_ops = {
246 	.start		= stat_seq_start,
247 	.next		= stat_seq_next,
248 	.stop		= stat_seq_stop,
249 	.show		= stat_seq_show
250 };
251 
252 /* The session stat is refilled and resorted at each stat file opening */
253 static int tracing_stat_open(struct inode *inode, struct file *file)
254 {
255 	int ret;
256 
257 	struct stat_session *session = inode->i_private;
258 
259 	ret = seq_open(file, &trace_stat_seq_ops);
260 	if (!ret) {
261 		struct seq_file *m = file->private_data;
262 		m->private = session;
263 		ret = stat_seq_init(session);
264 	}
265 
266 	return ret;
267 }
268 
269 /*
270  * Avoid consuming memory with our now useless rbtree.
271  */
272 static int tracing_stat_release(struct inode *i, struct file *f)
273 {
274 	struct stat_session *session = i->i_private;
275 
276 	mutex_lock(&session->stat_mutex);
277 	reset_stat_session(session);
278 	mutex_unlock(&session->stat_mutex);
279 
280 	return 0;
281 }
282 
283 static const struct file_operations tracing_stat_fops = {
284 	.open		= tracing_stat_open,
285 	.read		= seq_read,
286 	.llseek		= seq_lseek,
287 	.release	= tracing_stat_release
288 };
289 
290 static int tracing_stat_init(void)
291 {
292 	struct dentry *d_tracing;
293 
294 	d_tracing = tracing_init_dentry();
295 
296 	stat_dir = debugfs_create_dir("trace_stat", d_tracing);
297 	if (!stat_dir)
298 		pr_warning("Could not create debugfs "
299 			   "'trace_stat' entry\n");
300 	return 0;
301 }
302 
303 static int init_stat_file(struct stat_session *session)
304 {
305 	if (!stat_dir && tracing_stat_init())
306 		return -ENODEV;
307 
308 	session->file = debugfs_create_file(session->ts->name, 0644,
309 					    stat_dir,
310 					    session, &tracing_stat_fops);
311 	if (!session->file)
312 		return -ENOMEM;
313 	return 0;
314 }
315 
316 int register_stat_tracer(struct tracer_stat *trace)
317 {
318 	struct stat_session *session, *node;
319 	int ret;
320 
321 	if (!trace)
322 		return -EINVAL;
323 
324 	if (!trace->stat_start || !trace->stat_next || !trace->stat_show)
325 		return -EINVAL;
326 
327 	/* Already registered? */
328 	mutex_lock(&all_stat_sessions_mutex);
329 	list_for_each_entry(node, &all_stat_sessions, session_list) {
330 		if (node->ts == trace) {
331 			mutex_unlock(&all_stat_sessions_mutex);
332 			return -EINVAL;
333 		}
334 	}
335 	mutex_unlock(&all_stat_sessions_mutex);
336 
337 	/* Init the session */
338 	session = kzalloc(sizeof(*session), GFP_KERNEL);
339 	if (!session)
340 		return -ENOMEM;
341 
342 	session->ts = trace;
343 	INIT_LIST_HEAD(&session->session_list);
344 	mutex_init(&session->stat_mutex);
345 
346 	ret = init_stat_file(session);
347 	if (ret) {
348 		destroy_session(session);
349 		return ret;
350 	}
351 
352 	/* Register */
353 	mutex_lock(&all_stat_sessions_mutex);
354 	list_add_tail(&session->session_list, &all_stat_sessions);
355 	mutex_unlock(&all_stat_sessions_mutex);
356 
357 	return 0;
358 }
359 
360 void unregister_stat_tracer(struct tracer_stat *trace)
361 {
362 	struct stat_session *node, *tmp;
363 
364 	mutex_lock(&all_stat_sessions_mutex);
365 	list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) {
366 		if (node->ts == trace) {
367 			list_del(&node->session_list);
368 			destroy_session(node);
369 			break;
370 		}
371 	}
372 	mutex_unlock(&all_stat_sessions_mutex);
373 }
374