xref: /linux-6.15/kernel/trace/trace_functions.c (revision 6e4eb9cb)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ring buffer based function tracer
4  *
5  * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
6  * Copyright (C) 2008 Ingo Molnar <[email protected]>
7  *
8  * Based on code from the latency_tracer, that is:
9  *
10  *  Copyright (C) 2004-2006 Ingo Molnar
11  *  Copyright (C) 2004 Nadia Yvette Chambers
12  */
13 #include <linux/ring_buffer.h>
14 #include <linux/debugfs.h>
15 #include <linux/uaccess.h>
16 #include <linux/ftrace.h>
17 #include <linux/slab.h>
18 #include <linux/fs.h>
19 
20 #include "trace.h"
21 
22 static void tracing_start_function_trace(struct trace_array *tr);
23 static void tracing_stop_function_trace(struct trace_array *tr);
24 static void
25 function_trace_call(unsigned long ip, unsigned long parent_ip,
26 		    struct ftrace_ops *op, struct pt_regs *pt_regs);
27 static void
28 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
29 			  struct ftrace_ops *op, struct pt_regs *pt_regs);
30 static struct tracer_flags func_flags;
31 
32 /* Our option */
33 enum {
34 	TRACE_FUNC_OPT_STACK	= 0x1,
35 };
36 
37 int ftrace_allocate_ftrace_ops(struct trace_array *tr)
38 {
39 	struct ftrace_ops *ops;
40 
41 	/* The top level array uses the "global_ops" */
42 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
43 		return 0;
44 
45 	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
46 	if (!ops)
47 		return -ENOMEM;
48 
49 	/* Currently only the non-stack version is supported */
50 	ops->func = function_trace_call;
51 	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
52 
53 	tr->ops = ops;
54 	ops->private = tr;
55 
56 	return 0;
57 }
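/*
 * For illustration: a per-instance ftrace_ops like the one allocated above
 * is used when a trace instance is created from user space, for example
 * (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   mkdir /sys/kernel/tracing/instances/foo
 *   echo function > /sys/kernel/tracing/instances/foo/current_tracer
 *
 * The top level buffer keeps using the built-in "global_ops" instead.
 */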
58 
59 void ftrace_free_ftrace_ops(struct trace_array *tr)
60 {
61 	kfree(tr->ops);
62 	tr->ops = NULL;
63 }
64 
65 int ftrace_create_function_files(struct trace_array *tr,
66 				 struct dentry *parent)
67 {
68 	/*
69 	 * The top level array uses the "global_ops", and the files are
70 	 * created on boot up.
71 	 */
72 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
73 		return 0;
74 
75 	if (!tr->ops)
76 		return -EINVAL;
77 
78 	ftrace_create_filter_files(tr->ops, parent);
79 
80 	return 0;
81 }
82 
83 void ftrace_destroy_function_files(struct trace_array *tr)
84 {
85 	ftrace_destroy_filter_files(tr->ops);
86 	ftrace_free_ftrace_ops(tr);
87 }
88 
89 static int function_trace_init(struct trace_array *tr)
90 {
91 	ftrace_func_t func;
92 
93 	/*
94 	 * Instance trace_arrays get their ops allocated
95 	 * at instance creation, unless that
96 	 * allocation failed.
97 	 */
98 	if (!tr->ops)
99 		return -ENOMEM;
100 
101 	/* Currently only the global instance can do stack tracing */
102 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
103 	    func_flags.val & TRACE_FUNC_OPT_STACK)
104 		func = function_stack_trace_call;
105 	else
106 		func = function_trace_call;
107 
108 	ftrace_init_array_ops(tr, func);
109 
110 	tr->array_buffer.cpu = get_cpu();
111 	put_cpu();
112 
113 	tracing_start_cmdline_record();
114 	tracing_start_function_trace(tr);
115 	return 0;
116 }
117 
118 static void function_trace_reset(struct trace_array *tr)
119 {
120 	tracing_stop_function_trace(tr);
121 	tracing_stop_cmdline_record();
122 	ftrace_reset_array_ops(tr);
123 }
124 
125 static void function_trace_start(struct trace_array *tr)
126 {
127 	tracing_reset_online_cpus(&tr->array_buffer);
128 }
129 
130 static void
131 function_trace_call(unsigned long ip, unsigned long parent_ip,
132 		    struct ftrace_ops *op, struct pt_regs *pt_regs)
133 {
134 	struct trace_array *tr = op->private;
135 	struct trace_array_cpu *data;
136 	unsigned long flags;
137 	int bit;
138 	int cpu;
139 	int pc;
140 
141 	if (unlikely(!tr->function_enabled))
142 		return;
143 
144 	bit = ftrace_test_recursion_trylock();
145 	if (bit < 0)
146 		return;
147 
148 	pc = preempt_count();
149 	preempt_disable_notrace();
150 
151 	cpu = smp_processor_id();
152 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
153 	if (!atomic_read(&data->disabled)) {
154 		local_save_flags(flags);
155 		trace_function(tr, ip, parent_ip, flags, pc);
156 	}
157 	ftrace_test_recursion_unlock(bit);
158 	preempt_enable_notrace();
159 }
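/*
 * Each call recorded by function_trace_call() becomes one function entry in
 * the ring buffer; read back through the "trace" file it is rendered roughly
 * as (illustrative only, exact fields depend on trace options):
 *
 *            bash-1234  [000] ....   123.456789: kmalloc <-do_sys_open
 *
 * i.e. comm-pid, CPU, irq/preempt flags, timestamp, function and its caller.
 */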
160 
161 #ifdef CONFIG_UNWINDER_ORC
162 /*
163  * Skip 2:
164  *
165  *   function_stack_trace_call()
166  *   ftrace_call()
167  */
168 #define STACK_SKIP 2
169 #else
170 /*
171  * Skip 3:
172  *   __trace_stack()
173  *   function_stack_trace_call()
174  *   ftrace_call()
175  */
176 #define STACK_SKIP 3
177 #endif
178 
179 static void
180 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
181 			  struct ftrace_ops *op, struct pt_regs *pt_regs)
182 {
183 	struct trace_array *tr = op->private;
184 	struct trace_array_cpu *data;
185 	unsigned long flags;
186 	long disabled;
187 	int cpu;
188 	int pc;
189 
190 	if (unlikely(!tr->function_enabled))
191 		return;
192 
193 	/*
194 	 * Need to use raw_smp_processor_id(), since this must be
195 	 * called before the recursion protection is performed.
196 	 */
197 	local_irq_save(flags);
198 	cpu = raw_smp_processor_id();
199 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
200 	disabled = atomic_inc_return(&data->disabled);
201 
202 	if (likely(disabled == 1)) {
203 		pc = preempt_count();
204 		trace_function(tr, ip, parent_ip, flags, pc);
205 		__trace_stack(tr, flags, STACK_SKIP, pc);
206 	}
207 
208 	atomic_dec(&data->disabled);
209 	local_irq_restore(flags);
210 }
211 
212 static struct tracer_opt func_opts[] = {
213 #ifdef CONFIG_STACKTRACE
214 	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
215 #endif
216 	{ } /* Always set a last empty entry */
217 };
218 
219 static struct tracer_flags func_flags = {
220 	.val = 0, /* By default: all flags disabled */
221 	.opts = func_opts
222 };
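/*
 * With the function tracer selected, the "func_stack_trace" option above is
 * toggled from user space, e.g. (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *   echo 1 > /sys/kernel/tracing/options/func_stack_trace
 *
 * which ends up in func_set_flag() below and switches the callback to
 * function_stack_trace_call(). Documentation/trace/ftrace.rst recommends
 * setting a function filter first, as stack tracing every function is
 * very expensive.
 */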
223 
224 static void tracing_start_function_trace(struct trace_array *tr)
225 {
226 	tr->function_enabled = 0;
227 	register_ftrace_function(tr->ops);
228 	tr->function_enabled = 1;
229 }
230 
231 static void tracing_stop_function_trace(struct trace_array *tr)
232 {
233 	tr->function_enabled = 0;
234 	unregister_ftrace_function(tr->ops);
235 }
236 
237 static struct tracer function_trace;
238 
239 static int
240 func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
241 {
242 	switch (bit) {
243 	case TRACE_FUNC_OPT_STACK:
244 		/* do nothing if already set */
245 		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
246 			break;
247 
248 		/* If the function tracer is not running, just update the flag. */
249 		if (tr->current_trace != &function_trace)
250 			break;
251 
252 		unregister_ftrace_function(tr->ops);
253 
254 		if (set) {
255 			tr->ops->func = function_stack_trace_call;
256 			register_ftrace_function(tr->ops);
257 		} else {
258 			tr->ops->func = function_trace_call;
259 			register_ftrace_function(tr->ops);
260 		}
261 
262 		break;
263 	default:
264 		return -EINVAL;
265 	}
266 
267 	return 0;
268 }
269 
270 static struct tracer function_trace __tracer_data =
271 {
272 	.name		= "function",
273 	.init		= function_trace_init,
274 	.reset		= function_trace_reset,
275 	.start		= function_trace_start,
276 	.flags		= &func_flags,
277 	.set_flag	= func_set_flag,
278 	.allow_instances = true,
279 #ifdef CONFIG_FTRACE_SELFTEST
280 	.selftest	= trace_selftest_startup_function,
281 #endif
282 };
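/*
 * The tracer registered above is selected from user space, e.g. (assuming
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo function > /sys/kernel/tracing/current_tracer
 *   cat /sys/kernel/tracing/trace
 *
 * and stopped again with "echo nop > current_tracer".
 */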
283 
284 #ifdef CONFIG_DYNAMIC_FTRACE
285 static void update_traceon_count(struct ftrace_probe_ops *ops,
286 				 unsigned long ip,
287 				 struct trace_array *tr, bool on,
288 				 void *data)
289 {
290 	struct ftrace_func_mapper *mapper = data;
291 	long *count;
292 	long old_count;
293 
294 	/*
295 	 * Tracing gets disabled (or enabled) once per count.
296 	 * This function can be called at the same time on multiple CPUs.
297 	 * It is fine if both disable (or enable) tracing, as disabling
298 	 * (or enabling) the second time doesn't do anything as the
299 	 * state of the tracer is already disabled (or enabled).
300 	 * What needs to be synchronized in this case is that the count
301 	 * only gets decremented once, even if the tracer is disabled
302 	 * (or enabled) twice, as the second one is really a nop.
303 	 *
304 	 * The memory barriers guarantee that we only decrement the
305 	 * counter once. First the count is read to a local variable
306 	 * and a read barrier is used to make sure that it is loaded
307 	 * before checking if the tracer is in the state we want.
308 	 * If the tracer is not in the state we want, then the count
309 	 * is guaranteed to be the old count.
310 	 *
311 	 * Next the tracer is set to the state we want (disabled or enabled)
312 	 * then a write memory barrier is used to make sure that
313 	 * the new state is visible before the counter is updated to
314 	 * the old count minus one. This guarantees that another CPU
315 	 * executing this code will see the new state before seeing
316 	 * the new counter value, and will not do anything if the new
317 	 * counter is seen.
318 	 *
319 	 * Note, there is no synchronization between this and a user
320 	 * setting the tracing_on file. But we currently don't care
321 	 * about that.
322 	 */
323 	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
324 	old_count = *count;
325 
326 	if (old_count <= 0)
327 		return;
328 
329 	/* Make sure we see count before checking tracing state */
330 	smp_rmb();
331 
332 	if (on == !!tracer_tracing_is_on(tr))
333 		return;
334 
335 	if (on)
336 		tracer_tracing_on(tr);
337 	else
338 		tracer_tracing_off(tr);
339 
340 	/* Make sure tracing state is visible before updating count */
341 	smp_wmb();
342 
343 	*count = old_count - 1;
344 }
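/*
 * A rough sketch of the barrier pairing described in the comment above, for
 * a "traceoff" probe racing on two CPUs (illustrative only):
 *
 *   CPU0                                 CPU1
 *   ----                                 ----
 *   old_count = *count;
 *   smp_rmb();
 *   tracing is on -> tracer_tracing_off()
 *   smp_wmb();
 *   *count = old_count - 1;
 *                                        old_count = *count;  (new value)
 *                                        smp_rmb();
 *                                        tracing already off -> return
 *
 * If CPU1 observes the decremented count, the smp_wmb()/smp_rmb() pair
 * guarantees it also observes the new tracing state, so it bails out at the
 * state check instead of decrementing the count a second time.
 */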
345 
346 static void
347 ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
348 		     struct trace_array *tr, struct ftrace_probe_ops *ops,
349 		     void *data)
350 {
351 	update_traceon_count(ops, ip, tr, 1, data);
352 }
353 
354 static void
355 ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
356 		      struct trace_array *tr, struct ftrace_probe_ops *ops,
357 		      void *data)
358 {
359 	update_traceon_count(ops, ip, tr, 0, data);
360 }
361 
362 static void
363 ftrace_traceon(unsigned long ip, unsigned long parent_ip,
364 	       struct trace_array *tr, struct ftrace_probe_ops *ops,
365 	       void *data)
366 {
367 	if (tracer_tracing_is_on(tr))
368 		return;
369 
370 	tracer_tracing_on(tr);
371 }
372 
373 static void
374 ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
375 		struct trace_array *tr, struct ftrace_probe_ops *ops,
376 		void *data)
377 {
378 	if (!tracer_tracing_is_on(tr))
379 		return;
380 
381 	tracer_tracing_off(tr);
382 }
383 
384 #ifdef CONFIG_UNWINDER_ORC
385 /*
386  * Skip 3:
387  *
388  *   function_trace_probe_call()
389  *   ftrace_ops_assist_func()
390  *   ftrace_call()
391  */
392 #define FTRACE_STACK_SKIP 3
393 #else
394 /*
395  * Skip 5:
396  *
397  *   __trace_stack()
398  *   ftrace_stacktrace()
399  *   function_trace_probe_call()
400  *   ftrace_ops_assist_func()
401  *   ftrace_call()
402  */
403 #define FTRACE_STACK_SKIP 5
404 #endif
405 
406 static __always_inline void trace_stack(struct trace_array *tr)
407 {
408 	unsigned long flags;
409 	int pc;
410 
411 	local_save_flags(flags);
412 	pc = preempt_count();
413 
414 	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
415 }
416 
417 static void
418 ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
419 		  struct trace_array *tr, struct ftrace_probe_ops *ops,
420 		  void *data)
421 {
422 	trace_stack(tr);
423 }
424 
425 static void
426 ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
427 			struct trace_array *tr, struct ftrace_probe_ops *ops,
428 			void *data)
429 {
430 	struct ftrace_func_mapper *mapper = data;
431 	long *count;
432 	long old_count;
433 	long new_count;
434 
435 	if (!tracing_is_on())
436 		return;
437 
438 	/* unlimited? */
439 	if (!mapper) {
440 		trace_stack(tr);
441 		return;
442 	}
443 
444 	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
445 
446 	/*
447 	 * A stack trace should only be recorded the number of times
448 	 * the user specified in the counter.
449 	 */
450 	do {
451 		old_count = *count;
452 
453 		if (!old_count)
454 			return;
455 
456 		new_count = old_count - 1;
457 		new_count = cmpxchg(count, old_count, new_count);
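		/*
		 * cmpxchg() returns the value that was in *count; if it still
		 * equals old_count the decrement won the race and the stack
		 * trace is recorded, otherwise the loop re-reads the count
		 * and retries.
		 */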
458 		if (new_count == old_count)
459 			trace_stack(tr);
460 
461 		if (!tracing_is_on())
462 			return;
463 
464 	} while (new_count != old_count);
465 }
466 
467 static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
468 			void *data)
469 {
470 	struct ftrace_func_mapper *mapper = data;
471 	long *count = NULL;
472 
473 	if (mapper)
474 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
475 
476 	if (count) {
477 		if (*count <= 0)
478 			return 0;
479 		(*count)--;
480 	}
481 
482 	return 1;
483 }
484 
485 static void
486 ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
487 		  struct trace_array *tr, struct ftrace_probe_ops *ops,
488 		  void *data)
489 {
490 	if (update_count(ops, ip, data))
491 		ftrace_dump(DUMP_ALL);
492 }
493 
494 /* Only dump the current CPU buffer. */
495 static void
496 ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
497 		     struct trace_array *tr, struct ftrace_probe_ops *ops,
498 		     void *data)
499 {
500 	if (update_count(ops, ip, data))
501 		ftrace_dump(DUMP_ORIG);
502 }
503 
504 static int
505 ftrace_probe_print(const char *name, struct seq_file *m,
506 		   unsigned long ip, struct ftrace_probe_ops *ops,
507 		   void *data)
508 {
509 	struct ftrace_func_mapper *mapper = data;
510 	long *count = NULL;
511 
512 	seq_printf(m, "%ps:%s", (void *)ip, name);
513 
514 	if (mapper)
515 		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
516 
517 	if (count)
518 		seq_printf(m, ":count=%ld\n", *count);
519 	else
520 		seq_puts(m, ":unlimited\n");
521 
522 	return 0;
523 }
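/*
 * When an installed probe is listed, the helper above renders it roughly as
 *
 *   schedule:traceoff:count=4
 *   kfree:stacktrace:unlimited
 *
 * i.e. the probed function, the command name, then the remaining count (or
 * "unlimited" when no count was given).
 */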
524 
525 static int
526 ftrace_traceon_print(struct seq_file *m, unsigned long ip,
527 		     struct ftrace_probe_ops *ops,
528 		     void *data)
529 {
530 	return ftrace_probe_print("traceon", m, ip, ops, data);
531 }
532 
533 static int
534 ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
535 			 struct ftrace_probe_ops *ops, void *data)
536 {
537 	return ftrace_probe_print("traceoff", m, ip, ops, data);
538 }
539 
540 static int
541 ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
542 			struct ftrace_probe_ops *ops, void *data)
543 {
544 	return ftrace_probe_print("stacktrace", m, ip, ops, data);
545 }
546 
547 static int
548 ftrace_dump_print(struct seq_file *m, unsigned long ip,
549 			struct ftrace_probe_ops *ops, void *data)
550 {
551 	return ftrace_probe_print("dump", m, ip, ops, data);
552 }
553 
554 static int
555 ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
556 			struct ftrace_probe_ops *ops, void *data)
557 {
558 	return ftrace_probe_print("cpudump", m, ip, ops, data);
559 }
560 
561 
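/*
 * The init_data handed to ftrace_count_init() is the count parsed from a
 * "<function>:<command>:<count>" string in ftrace_trace_probe_callback()
 * below; it is stored per instruction pointer in the ftrace_func_mapper so
 * the probe callbacks above can look it up and decrement it.
 */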
562 static int
563 ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
564 		  unsigned long ip, void *init_data, void **data)
565 {
566 	struct ftrace_func_mapper *mapper = *data;
567 
568 	if (!mapper) {
569 		mapper = allocate_ftrace_func_mapper();
570 		if (!mapper)
571 			return -ENOMEM;
572 		*data = mapper;
573 	}
574 
575 	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
576 }
577 
578 static void
579 ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
580 		  unsigned long ip, void *data)
581 {
582 	struct ftrace_func_mapper *mapper = data;
583 
584 	if (!ip) {
585 		free_ftrace_func_mapper(mapper, NULL);
586 		return;
587 	}
588 
589 	ftrace_func_mapper_remove_ip(mapper, ip);
590 }
591 
592 static struct ftrace_probe_ops traceon_count_probe_ops = {
593 	.func			= ftrace_traceon_count,
594 	.print			= ftrace_traceon_print,
595 	.init			= ftrace_count_init,
596 	.free			= ftrace_count_free,
597 };
598 
599 static struct ftrace_probe_ops traceoff_count_probe_ops = {
600 	.func			= ftrace_traceoff_count,
601 	.print			= ftrace_traceoff_print,
602 	.init			= ftrace_count_init,
603 	.free			= ftrace_count_free,
604 };
605 
606 static struct ftrace_probe_ops stacktrace_count_probe_ops = {
607 	.func			= ftrace_stacktrace_count,
608 	.print			= ftrace_stacktrace_print,
609 	.init			= ftrace_count_init,
610 	.free			= ftrace_count_free,
611 };
612 
613 static struct ftrace_probe_ops dump_probe_ops = {
614 	.func			= ftrace_dump_probe,
615 	.print			= ftrace_dump_print,
616 	.init			= ftrace_count_init,
617 	.free			= ftrace_count_free,
618 };
619 
620 static struct ftrace_probe_ops cpudump_probe_ops = {
621 	.func			= ftrace_cpudump_probe,
622 	.print			= ftrace_cpudump_print,
623 };
624 
625 static struct ftrace_probe_ops traceon_probe_ops = {
626 	.func			= ftrace_traceon,
627 	.print			= ftrace_traceon_print,
628 };
629 
630 static struct ftrace_probe_ops traceoff_probe_ops = {
631 	.func			= ftrace_traceoff,
632 	.print			= ftrace_traceoff_print,
633 };
634 
635 static struct ftrace_probe_ops stacktrace_probe_ops = {
636 	.func			= ftrace_stacktrace,
637 	.print			= ftrace_stacktrace_print,
638 };
639 
640 static int
641 ftrace_trace_probe_callback(struct trace_array *tr,
642 			    struct ftrace_probe_ops *ops,
643 			    struct ftrace_hash *hash, char *glob,
644 			    char *cmd, char *param, int enable)
645 {
646 	void *count = (void *)-1;
647 	char *number;
648 	int ret;
649 
650 	/* hash funcs only work with set_ftrace_filter */
651 	if (!enable)
652 		return -EINVAL;
653 
654 	if (glob[0] == '!')
655 		return unregister_ftrace_function_probe_func(glob+1, tr, ops);
656 
657 	if (!param)
658 		goto out_reg;
659 
660 	number = strsep(&param, ":");
661 
662 	if (!strlen(number))
663 		goto out_reg;
664 
665 	/*
666 	 * We use the callback data field (which is a pointer)
667 	 * as our counter.
668 	 */
669 	ret = kstrtoul(number, 0, (unsigned long *)&count);
670 	if (ret)
671 		return ret;
672 
673  out_reg:
674 	ret = register_ftrace_function_probe(glob, tr, ops, count);
675 
676 	return ret < 0 ? ret : 0;
677 }
678 
679 static int
680 ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
681 			    char *glob, char *cmd, char *param, int enable)
682 {
683 	struct ftrace_probe_ops *ops;
684 
685 	if (!tr)
686 		return -ENODEV;
687 
688 	/* we register both traceon and traceoff to this callback */
689 	if (strcmp(cmd, "traceon") == 0)
690 		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
691 	else
692 		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
693 
694 	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
695 					   param, enable);
696 }
697 
698 static int
699 ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
700 			   char *glob, char *cmd, char *param, int enable)
701 {
702 	struct ftrace_probe_ops *ops;
703 
704 	if (!tr)
705 		return -ENODEV;
706 
707 	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
708 
709 	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
710 					   param, enable);
711 }
712 
713 static int
714 ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
715 			   char *glob, char *cmd, char *param, int enable)
716 {
717 	struct ftrace_probe_ops *ops;
718 
719 	if (!tr)
720 		return -ENODEV;
721 
722 	ops = &dump_probe_ops;
723 
724 	/* Only dump once. */
725 	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
726 					   "1", enable);
727 }
728 
729 static int
730 ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
731 			   char *glob, char *cmd, char *param, int enable)
732 {
733 	struct ftrace_probe_ops *ops;
734 
735 	if (!tr)
736 		return -ENODEV;
737 
738 	ops = &cpudump_probe_ops;
739 
740 	/* Only dump once. */
741 	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
742 					   "1", enable);
743 }
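/*
 * The callbacks above implement the set_ftrace_filter commands, e.g.
 * (assuming tracefs is mounted at /sys/kernel/tracing and run from that
 * directory; the function names are just examples):
 *
 *   echo 'schedule:traceoff' > set_ftrace_filter     # trace off at schedule()
 *   echo 'schedule:traceoff:5' > set_ftrace_filter   # only the first 5 hits
 *   echo 'kfree:stacktrace' > set_ftrace_filter      # stack trace on kfree()
 *   echo 'oops_enter:dump' > set_ftrace_filter       # dump buffers once
 *   echo '!schedule:traceoff' > set_ftrace_filter    # remove the probe
 */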
744 
745 static struct ftrace_func_command ftrace_traceon_cmd = {
746 	.name			= "traceon",
747 	.func			= ftrace_trace_onoff_callback,
748 };
749 
750 static struct ftrace_func_command ftrace_traceoff_cmd = {
751 	.name			= "traceoff",
752 	.func			= ftrace_trace_onoff_callback,
753 };
754 
755 static struct ftrace_func_command ftrace_stacktrace_cmd = {
756 	.name			= "stacktrace",
757 	.func			= ftrace_stacktrace_callback,
758 };
759 
760 static struct ftrace_func_command ftrace_dump_cmd = {
761 	.name			= "dump",
762 	.func			= ftrace_dump_callback,
763 };
764 
765 static struct ftrace_func_command ftrace_cpudump_cmd = {
766 	.name			= "cpudump",
767 	.func			= ftrace_cpudump_callback,
768 };
769 
770 static int __init init_func_cmd_traceon(void)
771 {
772 	int ret;
773 
774 	ret = register_ftrace_command(&ftrace_traceoff_cmd);
775 	if (ret)
776 		return ret;
777 
778 	ret = register_ftrace_command(&ftrace_traceon_cmd);
779 	if (ret)
780 		goto out_free_traceoff;
781 
782 	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
783 	if (ret)
784 		goto out_free_traceon;
785 
786 	ret = register_ftrace_command(&ftrace_dump_cmd);
787 	if (ret)
788 		goto out_free_stacktrace;
789 
790 	ret = register_ftrace_command(&ftrace_cpudump_cmd);
791 	if (ret)
792 		goto out_free_dump;
793 
794 	return 0;
795 
796  out_free_dump:
797 	unregister_ftrace_command(&ftrace_dump_cmd);
798  out_free_stacktrace:
799 	unregister_ftrace_command(&ftrace_stacktrace_cmd);
800  out_free_traceon:
801 	unregister_ftrace_command(&ftrace_traceon_cmd);
802  out_free_traceoff:
803 	unregister_ftrace_command(&ftrace_traceoff_cmd);
804 
805 	return ret;
806 }
807 #else
808 static inline int init_func_cmd_traceon(void)
809 {
810 	return 0;
811 }
812 #endif /* CONFIG_DYNAMIC_FTRACE */
813 
814 __init int init_function_trace(void)
815 {
816 	init_func_cmd_traceon();
817 	return register_tracer(&function_trace);
818 }
819