xref: /linux-6.15/kernel/trace/trace_functions.c (revision 4994891e)
// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
 * Copyright (C) 2008 Ingo Molnar <[email protected]>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_NO_OPTS	= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK	= 0x1,
};

#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_STACK)

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}

static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	default:
		return NULL;
	}
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;
	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
	preempt_enable_notrace();
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif
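
/*
 * As the two skip lists above show, the frame-pointer unwind records
 * __trace_stack()'s own frame in the saved trace while the ORC unwind
 * does not, hence the one extra entry to skip in the non-ORC case.
 */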

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};
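
/*
 * While the function tracer is the current tracer, the stack option is
 * toggled from user space via tracefs (paths below assume the usual
 * /sys/kernel/tracing mount point):
 *
 *   # echo 1 > /sys/kernel/tracing/options/func_stack_trace
 *
 * which reaches func_set_flag() below and swaps the registered
 * callback over to function_stack_trace_call().
 */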
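/*
 * function_enabled is cleared before registering the ftrace_ops so
 * that callbacks arriving while registration is still in progress
 * return immediately from function_trace_call(); tracing is switched
 * on only once the ops is fully registered.
 */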
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
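
/*
 * The tracer above is selected from user space with, e.g.:
 *
 *   # echo function > /sys/kernel/tracing/current_tracer
 *
 * after which each traced function emits one line of the form (a
 * sketch; the exact columns depend on the enabled trace options):
 *
 *   <task>-<pid>   [cpu]  ....  <timestamp>: <function> <-<parent>
 */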

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or
	 * enabled), then a write memory barrier is used to make sure
	 * that the new state is visible before the counter is updated
	 * to one less than its old value. This guarantees that another
	 * CPU executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}
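
/*
 * A concrete interleaving that the barrier pairing above prevents
 * (count starts at 2, tracing on, probe is traceoff):
 *
 *   CPU0: old_count = 2; sees tracing on; tracer_tracing_off();
 *         smp_wmb(); *count = 1;
 *   CPU1: old_count = 1; smp_rmb(); is now guaranteed to see tracing
 *         off, so it returns at the state check above.
 *
 * Without the barriers, CPU1 could read the new count but a stale
 * "tracing on" state, disable tracing a second time, and store
 * *count = 0, decrementing the counter twice for one traceoff event.
 */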

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter. cmpxchg() returns the value
	 * that was actually found at *count: if it equals old_count,
	 * this CPU won the decrement and records the stack trace;
	 * otherwise another CPU raced in and the loop retries.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}
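
/*
 * These probes are installed from user space through set_ftrace_filter
 * (again assuming the usual /sys/kernel/tracing mount point), e.g.:
 *
 *   # echo 'schedule:traceoff' > /sys/kernel/tracing/set_ftrace_filter
 *   # echo 'schedule:traceoff:5' > /sys/kernel/tracing/set_ftrace_filter
 *   # echo '!schedule:traceoff' > /sys/kernel/tracing/set_ftrace_filter
 *
 * The optional ":5" is the count parsed above, and a leading '!'
 * removes the probe again.
 */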

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
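
/*
 * Both dump callbacks hard-code a count of "1", so e.g.:
 *
 *   # echo 'oops_enter:dump' > /sys/kernel/tracing/set_ftrace_filter
 *
 * writes the ring buffer to the console only on the first hit of
 * oops_enter(), no matter how often it runs afterwards.
 */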

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}