xref: /linux-6.15/kernel/trace/trace_functions.c (revision 4413e16d)
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
 * Copyright (C) 2008 Ingo Molnar <[email protected]>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/pstore.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int			ftrace_function_enabled;

static struct trace_array	*func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

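/*
 * Tracer lifecycle callbacks: .init is invoked by the tracing core when
 * the "function" tracer is selected, .reset when it is deselected, and
 * .start when tracing is restarted (it clears the per-CPU buffers).
 */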
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

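/*
 * Function entry callback used when the "preempt-only" iterator flag is
 * set: it disables preemption (but not interrupts) around the ring
 * buffer write, and uses data->disabled to avoid nested recording on
 * the same CPU.
 */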
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
	TRACE_FUNC_OPT_PSTORE	= 0x2,
};

static struct tracer_flags func_flags;

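/*
 * Default function entry callback: record the caller/callee pair in the
 * ring buffer with local interrupts disabled.  When the func_pstore
 * option is set, the entry is also mirrored into the persistent store.
 */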
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		/*
		 * So far tracing doesn't support multiple buffers, so
		 * we make an explicit call for now.
		 */
		if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
			pstore_ftrace_call(ip, parent_ip);
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

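/*
 * Variant of function_trace_call() used when the func_stack_trace option
 * is set: after logging the function entry it also records a stack
 * trace, skipping the tracing internals at the top of the stack.
 */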
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
#ifdef CONFIG_PSTORE_FTRACE
	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

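/*
 * Register the ftrace callback: the plain or stack-tracing ftrace_ops is
 * chosen from the current option flags, and the preempt-only iterator
 * flag redirects the plain callback to its preempt-only variant.
 */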
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

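/*
 * Tear down: clear the enabled flag first so the callbacks bail out,
 * then unregister whichever ftrace_ops is currently attached.
 */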
static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}

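/*
 * Called when one of the tracer options is toggled via the options
 * files.  Flipping func_stack_trace swaps the registered ftrace_ops;
 * func_pstore needs no action beyond the flag update done by the core.
 */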
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		break;
	case TRACE_FUNC_OPT_PSTORE:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

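/*
 * The "function" tracer definition handed to register_tracer() below.
 * Enable it with: echo function > /sys/kernel/debug/tracing/current_tracer
 */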
static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
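/*
 * Probe callbacks for the traceon/traceoff triggers set via
 * set_ftrace_filter.  They switch the ring buffer on or off when the
 * probed function is hit; *data holds the remaining hit count, with -1
 * meaning unlimited.
 */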
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_trace_onoff_print,
};

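/*
 * seq_file helper that lists a registered probe, e.g.
 * "schedule:traceoff:count=5", when set_ftrace_filter is read.
 */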
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

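/*
 * Remove a previously registered probe; reached when the command is
 * written with a leading '!' (e.g. "!schedule:traceoff").
 */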
static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}

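/*
 * Parse a "<glob>:traceon[:count]" or "<glob>:traceoff[:count]" command
 * written to set_ftrace_filter and attach the matching probe, e.g.:
 *
 *   echo 'schedule:traceoff:5' > set_ftrace_filter
 *
 * With no count the probe fires an unlimited number of times.
 */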
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

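/* set_ftrace_filter commands that route to the callback above. */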
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

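/*
 * Register both commands; if the second registration fails, unwind the
 * first so the pair is installed all-or-nothing.
 */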
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

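/*
 * Boot-time hook: register the traceon/traceoff commands and then the
 * "function" tracer itself.
 */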
static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
device_initcall(init_function_trace);