xref: /linux-6.15/kernel/trace/trace_functions.c (revision 18d14ebd)
// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}
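
/*
 * Note: for trace instances, the ops above is allocated when the
 * instance directory is created, e.g. (a sketch, assuming tracefs is
 * mounted at /sys/kernel/tracing):
 *
 *   mkdir /sys/kernel/tracing/instances/foo
 *
 * The instance-creation code in trace.c then calls
 * ftrace_allocate_ftrace_ops() for the new trace_array.
 */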

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;
	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}
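
/*
 * Example: selecting this tracer from user space (assuming tracefs is
 * mounted at /sys/kernel/tracing):
 *
 *   echo function > /sys/kernel/tracing/current_tracer
 *   cat /sys/kernel/tracing/trace
 */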

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/* Bail if this callback would recurse into itself */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	/* Sample the preempt count before we bump it ourselves */
	pc = preempt_count();
	preempt_disable_notrace();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	ftrace_test_recursion_unlock(bit);
	preempt_enable_notrace();
}
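
/*
 * Note the ordering above: the recursion lock is taken before
 * preemption is disabled and released before preemption is re-enabled,
 * while the per-CPU "disabled" counter gates the actual ring buffer
 * write. The preempt count is sampled before preempt_disable_notrace()
 * so the recorded context does not include this callback's own
 * preemption disabling.
 */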

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	/* Only trace on the first (non-recursive) entry on this CPU */
	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		__trace_stack(tr, flags, STACK_SKIP, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
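
/*
 * Example: stack traces are added to each function entry via the
 * func_stack_trace tracer option (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *   echo function > /sys/kernel/tracing/current_tracer
 *   echo 1 > /sys/kernel/tracing/options/func_stack_trace
 *
 * As noted in function_trace_init(), only the global (top level)
 * instance supports this option.
 */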

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}
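
/*
 * Note: function_enabled is cleared before registering the ops and set
 * only after registration completes, so callbacks that fire while the
 * ops is being installed or torn down bail out early on the
 * !tr->function_enabled check.
 */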

static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
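
/*
 * With .allow_instances set, each tracefs instance can run its own
 * copy of this tracer with its own filter. Example (paths assume
 * tracefs is mounted at /sys/kernel/tracing; the instance name is
 * arbitrary):
 *
 *   mkdir /sys/kernel/tracing/instances/foo
 *   echo 'kmem_*' > /sys/kernel/tracing/instances/foo/set_ftrace_filter
 *   echo function > /sys/kernel/tracing/instances/foo/current_tracer
 */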

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before the counter is updated to
	 * one less than its old value. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}
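
/*
 * Illustration of the barrier pairing above (a hypothetical
 * interleaving; count starts at 1, tracing is on, and two CPUs hit a
 * traceoff probe at the same time):
 *
 *   CPU0                            CPU1
 *   old_count = *count  (1)
 *   smp_rmb()
 *   tracer_tracing_off(tr)
 *                                   old_count = *count  (1)
 *                                   smp_rmb()
 *                                   tracing already off -> return
 *   smp_wmb()
 *   *count = old_count - 1  (0)
 *
 * Either CPU may flip the tracing state, but the count is only
 * decremented once.
 */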

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}
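
/*
 * Example: these probes are attached through set_ftrace_filter
 * (assuming tracefs is mounted at /sys/kernel/tracing). Turn tracing
 * off when schedule() is hit, at most five times:
 *
 *   echo 'schedule:traceoff:5' > /sys/kernel/tracing/set_ftrace_filter
 *
 * Without the count, the probe fires on every hit:
 *
 *   echo 'schedule:traceoff' > /sys/kernel/tracing/set_ftrace_filter
 */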

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}
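
/*
 * Note: the cmpxchg() loop above makes the decrement-and-trace atomic
 * per hit: only the CPU whose cmpxchg() succeeds (the returned value
 * equals old_count) emits the stack trace; a loser re-reads the
 * updated count and retries. Example, limiting schedule() stack traces
 * to five:
 *
 *   echo 'schedule:stacktrace:5' > /sys/kernel/tracing/set_ftrace_filter
 */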

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}
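
/*
 * Example: dump the ring buffer to the console the first time
 * schedule() is hit (the callbacks below force a count of 1):
 *
 *   echo 'schedule:dump' > /sys/kernel/tracing/set_ftrace_filter
 *
 * "cpudump" behaves the same, but dumps only the buffer of the CPU
 * that triggered the probe (DUMP_ORIG).
 */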

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}
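
/*
 * Reading set_ftrace_filter lists the attached probes via the print
 * callbacks above, e.g. (output sketch):
 *
 *   schedule:traceoff:count=4
 *   wake_up_process:stacktrace:unlimited
 */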

static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}
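
/*
 * All of the commands below share the grammar parsed by
 * ftrace_trace_probe_callback() when written to set_ftrace_filter:
 *
 *   [!]<glob>:<command>[:<count>]
 *
 * A leading '!' unregisters a previously attached probe.
 */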

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}