1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
29849ed4dSMike Frysinger /*
39849ed4dSMike Frysinger * Ftrace header. For implementation details beyond the random comments
45fb94e9cSMauro Carvalho Chehab * scattered below, see: Documentation/trace/ftrace-design.rst
59849ed4dSMike Frysinger */
69849ed4dSMike Frysinger
716444a8aSArnaldo Carvalho de Melo #ifndef _LINUX_FTRACE_H
816444a8aSArnaldo Carvalho de Melo #define _LINUX_FTRACE_H
916444a8aSArnaldo Carvalho de Melo
100264c8c9SSteven Rostedt (VMware) #include <linux/trace_recursion.h>
110012693aSFrederic Weisbecker #include <linux/trace_clock.h>
1218bfee32SChristophe Leroy #include <linux/jump_label.h>
135601020fSFrederic Weisbecker #include <linux/kallsyms.h>
140012693aSFrederic Weisbecker #include <linux/linkage.h>
15ea4e2bc4SSteven Rostedt #include <linux/bitops.h>
16a1e2e31dSSteven Rostedt #include <linux/ptrace.h>
170012693aSFrederic Weisbecker #include <linux/ktime.h>
1821a8c466SFrederic Weisbecker #include <linux/sched.h>
190012693aSFrederic Weisbecker #include <linux/types.h>
200012693aSFrederic Weisbecker #include <linux/init.h>
210012693aSFrederic Weisbecker #include <linux/fs.h>
2216444a8aSArnaldo Carvalho de Melo
23c79a61f5SUwe Kleine-Koenig #include <asm/ftrace.h>
24c79a61f5SUwe Kleine-Koenig
252f5f6ad9SSteven Rostedt /*
262f5f6ad9SSteven Rostedt * If the arch supports passing the variable contents of
272f5f6ad9SSteven Rostedt * function_trace_op as the third parameter back from the
282f5f6ad9SSteven Rostedt * mcount call, then the arch should define this as 1.
292f5f6ad9SSteven Rostedt */
302f5f6ad9SSteven Rostedt #ifndef ARCH_SUPPORTS_FTRACE_OPS
312f5f6ad9SSteven Rostedt #define ARCH_SUPPORTS_FTRACE_OPS 0
322f5f6ad9SSteven Rostedt #endif
332f5f6ad9SSteven Rostedt
34380af29bSSteven Rostedt (Google) #ifdef CONFIG_TRACING
35380af29bSSteven Rostedt (Google) extern void ftrace_boot_snapshot(void);
36380af29bSSteven Rostedt (Google) #else
/* No-op when CONFIG_TRACING is disabled. */
static inline void ftrace_boot_snapshot(void) { }
38380af29bSSteven Rostedt (Google) #endif
39380af29bSSteven Rostedt (Google)
4034cdd18bSSteven Rostedt (VMware) struct ftrace_ops;
4134cdd18bSSteven Rostedt (VMware) struct ftrace_regs;
42cbad0fb2SMark Rutland struct dyn_ftrace;
439705bc70SMark Rutland
447d8b31b7SArnd Bergmann char *arch_ftrace_match_adjust(char *str, const char *search);
457d8b31b7SArnd Bergmann
46a3ed4157SMasami Hiramatsu (Google) #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
47a3ed4157SMasami Hiramatsu (Google) unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs);
487d8b31b7SArnd Bergmann #else
497d8b31b7SArnd Bergmann unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
507d8b31b7SArnd Bergmann #endif
517d8b31b7SArnd Bergmann
529705bc70SMark Rutland #ifdef CONFIG_FUNCTION_TRACER
53ccf3672dSSteven Rostedt /*
54ccf3672dSSteven Rostedt * If the arch's mcount caller does not support all of ftrace's
55ccf3672dSSteven Rostedt * features, then it must call an indirect function that
56f2cc020dSIngo Molnar * does. Or at least does enough to prevent any unwelcome side effects.
5734cdd18bSSteven Rostedt (VMware) *
5834cdd18bSSteven Rostedt (VMware) * Also define the function prototype that these architectures use
5934cdd18bSSteven Rostedt (VMware) * to call the ftrace_ops_list_func().
60ccf3672dSSteven Rostedt */
617544256aSSteven Rostedt (Red Hat) #if !ARCH_SUPPORTS_FTRACE_OPS
62ccf3672dSSteven Rostedt # define FTRACE_FORCE_LIST_FUNC 1
6334cdd18bSSteven Rostedt (VMware) void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
64ccf3672dSSteven Rostedt #else
65ccf3672dSSteven Rostedt # define FTRACE_FORCE_LIST_FUNC 0
6634cdd18bSSteven Rostedt (VMware) void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6734cdd18bSSteven Rostedt (VMware) struct ftrace_ops *op, struct ftrace_regs *fregs);
68ccf3672dSSteven Rostedt #endif
69cbad0fb2SMark Rutland extern const struct ftrace_ops ftrace_nop_ops;
70cbad0fb2SMark Rutland extern const struct ftrace_ops ftrace_list_ops;
71cbad0fb2SMark Rutland struct ftrace_ops *ftrace_find_unique_ops(struct dyn_ftrace *rec);
7234cdd18bSSteven Rostedt (VMware) #endif /* CONFIG_FUNCTION_TRACER */
73ccf3672dSSteven Rostedt
745f893b26SSteven Rostedt (Red Hat) /* Main tracing buffer and events set up */
755f893b26SSteven Rostedt (Red Hat) #ifdef CONFIG_TRACING
765f893b26SSteven Rostedt (Red Hat) void trace_init(void);
77e725c731SSteven Rostedt (VMware) void early_trace_init(void);
785f893b26SSteven Rostedt (Red Hat) #else
/* No-op stubs when CONFIG_TRACING is disabled. */
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
815f893b26SSteven Rostedt (Red Hat) #endif
82ccf3672dSSteven Rostedt
83de477254SPaul Gortmaker struct module;
8404da85b8SSteven Rostedt struct ftrace_hash;
8504da85b8SSteven Rostedt
86aba4b5c2SSteven Rostedt (VMware) #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
87aba4b5c2SSteven Rostedt (VMware) defined(CONFIG_DYNAMIC_FTRACE)
887e1f4eb9SArnd Bergmann int
89aba4b5c2SSteven Rostedt (VMware) ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
90aba4b5c2SSteven Rostedt (VMware) unsigned long *off, char **modname, char *sym);
91aba4b5c2SSteven Rostedt (VMware) #else
/*
 * Stub used when module address lookup via dynamic ftrace is not built
 * in: always reports "no symbol found" by returning 0.
 */
static inline int ftrace_mod_address_lookup(unsigned long addr,
					    unsigned long *size,
					    unsigned long *off,
					    char **modname, char *sym)
{
	return 0;
}
98fc0ea795SAdrian Hunter #endif
99fc0ea795SAdrian Hunter
100fc0ea795SAdrian Hunter #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
101fc0ea795SAdrian Hunter int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
102fc0ea795SAdrian Hunter char *type, char *name,
103fc0ea795SAdrian Hunter char *module_name, int *exported);
104fc0ea795SAdrian Hunter #else
/*
 * Stub used when dynamic ftrace is not built in: there are no
 * ftrace-only module symbols to enumerate, so always fail with -1.
 */
static inline int ftrace_mod_get_kallsym(unsigned int symnum,
					 unsigned long *value, char *type,
					 char *name, char *module_name,
					 int *exported)
{
	return -1;
}
111aba4b5c2SSteven Rostedt (VMware) #endif
112aba4b5c2SSteven Rostedt (VMware)
113606576ceSSteven Rostedt #ifdef CONFIG_FUNCTION_TRACER
1143e1932adSIngo Molnar
115e4cf33caSSteven Rostedt #include <linux/ftrace_regs.h>
116e4cf33caSSteven Rostedt
117b0fc494fSSteven Rostedt extern int ftrace_enabled;
118b0fc494fSSteven Rostedt
119a370b72eSMasami Hiramatsu (Google) /**
120a370b72eSMasami Hiramatsu (Google) * ftrace_regs - ftrace partial/optimal register set
121a370b72eSMasami Hiramatsu (Google) *
122a370b72eSMasami Hiramatsu (Google) * ftrace_regs represents a group of registers which is used at the
123a370b72eSMasami Hiramatsu (Google) * function entry and exit. There are three types of registers.
124a370b72eSMasami Hiramatsu (Google) *
125a370b72eSMasami Hiramatsu (Google) * - Registers for passing the parameters to callee, including the stack
126a370b72eSMasami Hiramatsu (Google) * pointer. (e.g. rcx, rdx, rdi, rsi, r8, r9 and rsp on x86_64)
127a370b72eSMasami Hiramatsu (Google) * - Registers for passing the return values to caller.
128a370b72eSMasami Hiramatsu (Google) * (e.g. rax and rdx on x86_64)
129a370b72eSMasami Hiramatsu (Google) * - Registers for hooking the function call and return including the
130a370b72eSMasami Hiramatsu (Google) * frame pointer (the frame pointer is architecture/config dependent)
131a370b72eSMasami Hiramatsu (Google) * (e.g. rip, rbp and rsp for x86_64)
132a370b72eSMasami Hiramatsu (Google) *
133a370b72eSMasami Hiramatsu (Google) * Also, architecture dependent fields can be used for internal process.
134a370b72eSMasami Hiramatsu (Google) * (e.g. orig_ax on x86_64)
135a370b72eSMasami Hiramatsu (Google) *
136a3ed4157SMasami Hiramatsu (Google) * Basically, ftrace_regs stores the registers related to the context.
137a3ed4157SMasami Hiramatsu (Google) * On function entry, registers for function parameters and hooking the
138a3ed4157SMasami Hiramatsu (Google) * function call are stored, and on function exit, registers for function
139a3ed4157SMasami Hiramatsu (Google) * return value and frame pointers are stored.
140a3ed4157SMasami Hiramatsu (Google) *
 * Also, which registers are restored from the ftrace_regs depends on
 * the context.
 * On the function entry, those registers will be restored except for
 * the stack pointer, so that user can change the function parameters
 * and instruction pointer (e.g. live patching.)
 * On the function exit, only the registers which are used for return
 * values are restored.
148a370b72eSMasami Hiramatsu (Google) *
149a370b72eSMasami Hiramatsu (Google) * NOTE: user *must not* access regs directly, only do it via APIs, because
150a370b72eSMasami Hiramatsu (Google) * the member can be changed according to the architecture.
1517888af41SSteven Rostedt * This is why the structure is empty here, so that nothing accesses
1527888af41SSteven Rostedt * the ftrace_regs directly.
153a370b72eSMasami Hiramatsu (Google) */
/* Deliberately empty: the real layout is struct __arch_ftrace_regs. */
struct ftrace_regs {
	/* Nothing to see here, use the accessor functions! */
};
1577888af41SSteven Rostedt
1587888af41SSteven Rostedt #define ftrace_regs_size() sizeof(struct __arch_ftrace_regs)
1597888af41SSteven Rostedt
1607888af41SSteven Rostedt #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
161e4cf33caSSteven Rostedt /*
162e4cf33caSSteven Rostedt * Architectures that define HAVE_DYNAMIC_FTRACE_WITH_ARGS must define their own
163e4cf33caSSteven Rostedt * arch_ftrace_get_regs() where it only returns pt_regs *if* it is fully
164e4cf33caSSteven Rostedt * populated. It should return NULL otherwise.
165e4cf33caSSteven Rostedt */
static inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
{
	/* Without HAVE_DYNAMIC_FTRACE_WITH_ARGS the pt_regs is always complete. */
	return &arch_ftrace_regs(fregs)->regs;
}
17002a474caSSteven Rostedt (VMware)
1712860cd8aSSteven Rostedt (VMware) /*
 * ftrace_regs_set_instruction_pointer() is to be defined by the architecture
 * if it is to allow setting the instruction pointer from the ftrace_regs when
 * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel patching.
1752860cd8aSSteven Rostedt (VMware) */
1760ef86097SMark Rutland #define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
17702a474caSSteven Rostedt (VMware) #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
178d19ad077SSteven Rostedt (VMware)
179762abbc0SMasami Hiramatsu (Google) #ifdef CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS
180762abbc0SMasami Hiramatsu (Google)
181762abbc0SMasami Hiramatsu (Google) static_assert(sizeof(struct pt_regs) == ftrace_regs_size());
182762abbc0SMasami Hiramatsu (Google)
183762abbc0SMasami Hiramatsu (Google) #endif /* CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */
184762abbc0SMasami Hiramatsu (Google)
ftrace_get_regs(struct ftrace_regs * fregs)185d19ad077SSteven Rostedt (VMware) static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
186d19ad077SSteven Rostedt (VMware) {
187d19ad077SSteven Rostedt (VMware) if (!fregs)
188d19ad077SSteven Rostedt (VMware) return NULL;
189d19ad077SSteven Rostedt (VMware)
19002a474caSSteven Rostedt (VMware) return arch_ftrace_get_regs(fregs);
191d19ad077SSteven Rostedt (VMware) }
192d19ad077SSteven Rostedt (VMware)
193b9b55c89SMasami Hiramatsu (Google) #if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) || \
194b9b55c89SMasami Hiramatsu (Google) defined(CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS)
195b9b55c89SMasami Hiramatsu (Google)
static __always_inline struct pt_regs *
ftrace_partial_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
{
	/*
	 * If CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS=y, the ftrace_regs memory
	 * layout includes pt_regs, so always return that address.
	 * Since arch_ftrace_get_regs() will check some members and may return
	 * NULL, we can not use it.
	 */
	return &arch_ftrace_regs(fregs)->regs;
}
207b9b55c89SMasami Hiramatsu (Google)
208b9b55c89SMasami Hiramatsu (Google) #endif /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS || CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */
209b9b55c89SMasami Hiramatsu (Google)
210d5d01b71SMasami Hiramatsu (Google) #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
211d5d01b71SMasami Hiramatsu (Google)
212d5d01b71SMasami Hiramatsu (Google) /*
213d5d01b71SMasami Hiramatsu (Google) * Please define arch dependent pt_regs which compatible to the
214d5d01b71SMasami Hiramatsu (Google) * perf_arch_fetch_caller_regs() but based on ftrace_regs.
215d5d01b71SMasami Hiramatsu (Google) * This requires
216d5d01b71SMasami Hiramatsu (Google) * - user_mode(_regs) returns false (always kernel mode).
217d5d01b71SMasami Hiramatsu (Google) * - able to use the _regs for stack trace.
218d5d01b71SMasami Hiramatsu (Google) */
219d5d01b71SMasami Hiramatsu (Google) #ifndef arch_ftrace_fill_perf_regs
220d5d01b71SMasami Hiramatsu (Google) /* As same as perf_arch_fetch_caller_regs(), do nothing by default */
221d5d01b71SMasami Hiramatsu (Google) #define arch_ftrace_fill_perf_regs(fregs, _regs) do {} while (0)
222d5d01b71SMasami Hiramatsu (Google) #endif
223d5d01b71SMasami Hiramatsu (Google)
/* Populate @regs from @fregs (arch-specific) for use by perf, and return it. */
static __always_inline struct pt_regs *
ftrace_fill_perf_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
{
	arch_ftrace_fill_perf_regs(fregs, regs);
	return regs;
}
230d5d01b71SMasami Hiramatsu (Google)
231d5d01b71SMasami Hiramatsu (Google) #else /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
232d5d01b71SMasami Hiramatsu (Google)
static __always_inline struct pt_regs *
ftrace_fill_perf_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
{
	/* ftrace_regs already embeds a full pt_regs; hand that back directly. */
	return &arch_ftrace_regs(fregs)->regs;
}
238d5d01b71SMasami Hiramatsu (Google)
239d5d01b71SMasami Hiramatsu (Google) #endif
240d5d01b71SMasami Hiramatsu (Google)
24194d095ffSMark Rutland /*
24294d095ffSMark Rutland * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
24394d095ffSMark Rutland * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
24494d095ffSMark Rutland */
ftrace_regs_has_args(struct ftrace_regs * fregs)24594d095ffSMark Rutland static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
24694d095ffSMark Rutland {
24794d095ffSMark Rutland if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS))
24894d095ffSMark Rutland return true;
24994d095ffSMark Rutland
25094d095ffSMark Rutland return ftrace_get_regs(fregs) != NULL;
25194d095ffSMark Rutland }
25294d095ffSMark Rutland
2530566cefeSMasami Hiramatsu (Google) #ifdef CONFIG_HAVE_REGS_AND_STACK_ACCESS_API
2540566cefeSMasami Hiramatsu (Google) static __always_inline unsigned long
ftrace_regs_get_kernel_stack_nth(struct ftrace_regs * fregs,unsigned int nth)2550566cefeSMasami Hiramatsu (Google) ftrace_regs_get_kernel_stack_nth(struct ftrace_regs *fregs, unsigned int nth)
2560566cefeSMasami Hiramatsu (Google) {
2570566cefeSMasami Hiramatsu (Google) unsigned long *stackp;
2580566cefeSMasami Hiramatsu (Google)
2590566cefeSMasami Hiramatsu (Google) stackp = (unsigned long *)ftrace_regs_get_stack_pointer(fregs);
2600566cefeSMasami Hiramatsu (Google) if (((unsigned long)(stackp + nth) & ~(THREAD_SIZE - 1)) ==
2610566cefeSMasami Hiramatsu (Google) ((unsigned long)stackp & ~(THREAD_SIZE - 1)))
2620566cefeSMasami Hiramatsu (Google) return *(stackp + nth);
2630566cefeSMasami Hiramatsu (Google)
2640566cefeSMasami Hiramatsu (Google) return 0;
2650566cefeSMasami Hiramatsu (Google) }
2660566cefeSMasami Hiramatsu (Google) #else /* !CONFIG_HAVE_REGS_AND_STACK_ACCESS_API */
2670566cefeSMasami Hiramatsu (Google) #define ftrace_regs_get_kernel_stack_nth(fregs, nth) (0L)
2680566cefeSMasami Hiramatsu (Google) #endif /* CONFIG_HAVE_REGS_AND_STACK_ACCESS_API */
2690566cefeSMasami Hiramatsu (Google)
2702f5f6ad9SSteven Rostedt typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
271d19ad077SSteven Rostedt (VMware) struct ftrace_ops *op, struct ftrace_regs *fregs);
27216444a8aSArnaldo Carvalho de Melo
27387354059SSteven Rostedt (Red Hat) ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
27487354059SSteven Rostedt (Red Hat)
275e248491aSJiri Olsa /*
276e248491aSJiri Olsa * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
277e248491aSJiri Olsa * set in the flags member.
278a25d036dSSteven Rostedt (VMware) * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
279f8b8be8aSMasami Hiramatsu * IPMODIFY are a kind of attribute flags which can be set only before
280f8b8be8aSMasami Hiramatsu * registering the ftrace_ops, and can not be modified while registered.
281ad61dd30SStephen Boyd * Changing those attribute flags after registering ftrace_ops will
282f8b8be8aSMasami Hiramatsu * cause unexpected results.
283e248491aSJiri Olsa *
284e248491aSJiri Olsa * ENABLED - set/unset when ftrace_ops is registered/unregistered
285e248491aSJiri Olsa * DYNAMIC - set when ftrace_ops is registered to denote dynamically
286e248491aSJiri Olsa * allocated ftrace_ops which need special care
28708f6fba5SSteven Rostedt * SAVE_REGS - The ftrace_ops wants regs saved at each function called
28808f6fba5SSteven Rostedt * and passed to the callback. If this flag is set, but the
28908f6fba5SSteven Rostedt * architecture does not support passing regs
29006aeaaeaSMasami Hiramatsu * (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
29108f6fba5SSteven Rostedt * ftrace_ops will fail to register, unless the next flag
29208f6fba5SSteven Rostedt * is set.
29308f6fba5SSteven Rostedt * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
29408f6fba5SSteven Rostedt * handler can handle an arch that does not save regs
29508f6fba5SSteven Rostedt * (the handler tests if regs == NULL), then it can set
29608f6fba5SSteven Rostedt * this flag instead. It will not fail registering the ftrace_ops
29708f6fba5SSteven Rostedt * but, the regs field will be NULL if the arch does not support
29808f6fba5SSteven Rostedt * passing regs to the handler.
29908f6fba5SSteven Rostedt * Note, if this flag is set, the SAVE_REGS flag will automatically
30008f6fba5SSteven Rostedt * get set upon registering the ftrace_ops, if the arch supports it.
301a25d036dSSteven Rostedt (VMware) * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
302a25d036dSSteven Rostedt (VMware) * that the call back needs recursion protection. If it does
303a25d036dSSteven Rostedt (VMware) * not set this, then the ftrace infrastructure will assume
304a25d036dSSteven Rostedt (VMware) * that the callback can handle recursion on its own.
305395b97a3SSteven Rostedt (Red Hat) * STUB - The ftrace_ops is just a place holder.
306f04f24fbSMasami Hiramatsu * INITIALIZED - The ftrace_ops has already been initialized (first use time
307f04f24fbSMasami Hiramatsu * register_ftrace_function() is called, it will initialized the ops)
308591dffdaSSteven Rostedt (Red Hat) * DELETED - The ops are being deleted, do not let them be registered again.
309e1effa01SSteven Rostedt (Red Hat) * ADDING - The ops is in the process of being added.
310e1effa01SSteven Rostedt (Red Hat) * REMOVING - The ops is in the process of being removed.
311e1effa01SSteven Rostedt (Red Hat) * MODIFYING - The ops is in the process of changing its filter functions.
312f3bea491SSteven Rostedt (Red Hat) * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
313f3bea491SSteven Rostedt (Red Hat) * The arch specific code sets this flag when it allocated a
314f3bea491SSteven Rostedt (Red Hat) * trampoline. This lets the arch know that it can update the
315f3bea491SSteven Rostedt (Red Hat) * trampoline in case the callback function changes.
316f3bea491SSteven Rostedt (Red Hat) * The ftrace_ops trampoline can be set by the ftrace users, and
317f3bea491SSteven Rostedt (Red Hat) * in such cases the arch must not modify it. Only the arch ftrace
318f3bea491SSteven Rostedt (Red Hat) * core code should set this flag.
319f8b8be8aSMasami Hiramatsu * IPMODIFY - The ops can modify the IP register. This can only be set with
320f8b8be8aSMasami Hiramatsu * SAVE_REGS. If another ops with this flag set is already registered
321f8b8be8aSMasami Hiramatsu * for any of the functions that this ops will be registered for, then
322f8b8be8aSMasami Hiramatsu * this ops will fail to register or set_filter_ip.
323e3eea140SSteven Rostedt (Red Hat) * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
324d0ba52f1SSteven Rostedt (VMware) * RCU - Set when the ops can only be called when RCU is watching.
3258c08f0d5SSteven Rostedt (VMware) * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
3267162431dSMiroslav Benes * PERMANENT - Set when the ops is permanent and should not be affected by
3277162431dSMiroslav Benes * ftrace_enabled.
328763e34e7SSteven Rostedt (VMware) * DIRECT - Used by the direct ftrace_ops helper for direct functions
329763e34e7SSteven Rostedt (VMware) * (internal ftrace only, should not be used by others)
330d9bbfbd1SSteven Rostedt (Google) * SUBOP - Is controlled by another op in field managed.
331e248491aSJiri Olsa */
/* See the FTRACE_OPS_FL_* comment block above for each flag's semantics. */
enum {
	FTRACE_OPS_FL_ENABLED			= BIT(0),
	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
	FTRACE_OPS_FL_RECURSION			= BIT(4),
	FTRACE_OPS_FL_STUB			= BIT(5),
	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
	FTRACE_OPS_FL_DELETED			= BIT(7),
	FTRACE_OPS_FL_ADDING			= BIT(8),
	FTRACE_OPS_FL_REMOVING			= BIT(9),
	FTRACE_OPS_FL_MODIFYING			= BIT(10),
	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
	FTRACE_OPS_FL_PID			= BIT(13),
	FTRACE_OPS_FL_RCU			= BIT(14),
	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
	FTRACE_OPS_FL_PERMANENT			= BIT(16),
	FTRACE_OPS_FL_DIRECT			= BIT(17),
	FTRACE_OPS_FL_SUBOP			= BIT(18),
};
353b848914cSSteven Rostedt
35460c89718SFlorent Revest #ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
35560c89718SFlorent Revest #define FTRACE_OPS_FL_SAVE_ARGS FTRACE_OPS_FL_SAVE_REGS
35660c89718SFlorent Revest #else
35760c89718SFlorent Revest #define FTRACE_OPS_FL_SAVE_ARGS 0
35860c89718SFlorent Revest #endif
35960c89718SFlorent Revest
36053cd885bSSong Liu /*
36153cd885bSSong Liu * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes
36253cd885bSSong Liu * to a ftrace_ops. Note, the requests may fail.
36353cd885bSSong Liu *
36453cd885bSSong Liu * ENABLE_SHARE_IPMODIFY_SELF - enable a DIRECT ops to work on the same
36553cd885bSSong Liu * function as an ops with IPMODIFY. Called
36653cd885bSSong Liu * when the DIRECT ops is being registered.
36753cd885bSSong Liu * This is called with both direct_mutex and
36853cd885bSSong Liu * ftrace_lock are locked.
36953cd885bSSong Liu *
37053cd885bSSong Liu * ENABLE_SHARE_IPMODIFY_PEER - enable a DIRECT ops to work on the same
37153cd885bSSong Liu * function as an ops with IPMODIFY. Called
37253cd885bSSong Liu * when the other ops (the one with IPMODIFY)
37353cd885bSSong Liu * is being registered.
37453cd885bSSong Liu * This is called with direct_mutex locked.
37553cd885bSSong Liu *
37653cd885bSSong Liu * DISABLE_SHARE_IPMODIFY_PEER - disable a DIRECT ops to work on the same
37753cd885bSSong Liu * function as an ops with IPMODIFY. Called
37853cd885bSSong Liu * when the other ops (the one with IPMODIFY)
37953cd885bSSong Liu * is being unregistered.
38053cd885bSSong Liu * This is called with direct_mutex locked.
38153cd885bSSong Liu */
/* See the FTRACE_OPS_CMD_* comment block above for when each is issued. */
enum ftrace_ops_cmd {
	FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF,
	FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER,
	FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER,
};
38753cd885bSSong Liu
38853cd885bSSong Liu /*
38953cd885bSSong Liu * For most ftrace_ops_cmd,
39053cd885bSSong Liu * Returns:
39153cd885bSSong Liu * 0 - Success.
39253cd885bSSong Liu * Negative on failure. The return value is dependent on the
39353cd885bSSong Liu * callback.
39453cd885bSSong Liu */
39553cd885bSSong Liu typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);
39653cd885bSSong Liu
39733b7f99cSSteven Rostedt (Red Hat) #ifdef CONFIG_DYNAMIC_FTRACE
39833b7f99cSSteven Rostedt (Red Hat) /* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;	/* functions not to trace */
	struct ftrace_hash __rcu	*filter_hash;	/* functions to trace */
	/* NOTE(review): appears to serialize updates to the two hashes — confirm against users */
	struct mutex			regex_lock;
};
40442c269c8SSteven Rostedt (VMware)
405b80f0f6cSSteven Rostedt (VMware) void ftrace_free_init_mem(void);
406aba4b5c2SSteven Rostedt (VMware) void ftrace_free_mem(struct module *mod, void *start, void *end);
40742c269c8SSteven Rostedt (VMware) #else
static inline void ftrace_free_init_mem(void)
{
	/* No dynamic-ftrace records to free; only trigger the boot snapshot. */
	ftrace_boot_snapshot();
}
/* No-op when CONFIG_DYNAMIC_FTRACE is disabled. */
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
41333b7f99cSSteven Rostedt (Red Hat) #endif
41433b7f99cSSteven Rostedt (Red Hat)
415b7e00a6cSSteven Rostedt (Red Hat) /*
416ba27f2bcSSteven Rostedt (Red Hat) * Note, ftrace_ops can be referenced outside of RCU protection, unless
417ba27f2bcSSteven Rostedt (Red Hat) * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
418ba27f2bcSSteven Rostedt (Red Hat) * core data, the unregistering of it will perform a scheduling on all CPUs
419ba27f2bcSSteven Rostedt (Red Hat) * to make sure that there are no more users. Depending on the load of the
420ba27f2bcSSteven Rostedt (Red Hat) * system that may take a bit of time.
421b7e00a6cSSteven Rostedt (Red Hat) *
422b7e00a6cSSteven Rostedt (Red Hat) * Any private data added must also take care not to be freed and if private
423b7e00a6cSSteven Rostedt (Red Hat) * data is added to a ftrace_ops that is in core code, the user of the
424b7e00a6cSSteven Rostedt (Red Hat) * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
425b7e00a6cSSteven Rostedt (Red Hat) */
struct ftrace_ops {
	ftrace_func_t			func;		/* callback invoked for traced functions */
	struct ftrace_ops __rcu		*next;		/* linkage on ftrace_ops_list */
	unsigned long			flags;		/* FTRACE_OPS_FL_* bits (see above) */
	void				*private;	/* opaque user data; see lifetime note above */
	/* NOTE(review): appears to cache func while it is temporarily substituted — confirm */
	ftrace_func_t			saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;	/* see FTRACE_OPS_FL_ALLOC_TRAMP above */
	unsigned long			trampoline_size;
	struct list_head		list;
	struct list_head		subop_list;
	ftrace_ops_func_t		ops_func;	/* handles enum ftrace_ops_cmd requests */
	struct ftrace_ops		*managed;	/* controlling op when FTRACE_OPS_FL_SUBOP is set */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	unsigned long			direct_call;
#endif
#endif
};
44716444a8aSArnaldo Carvalho de Melo
44859566b0bSSteven Rostedt (VMware) extern struct ftrace_ops __rcu *ftrace_ops_list;
44959566b0bSSteven Rostedt (VMware) extern struct ftrace_ops ftrace_list_end;
45059566b0bSSteven Rostedt (VMware)
45159566b0bSSteven Rostedt (VMware) /*
45240dc4a42SWei Yang * Traverse the ftrace_ops_list, invoking all entries. The reason that we
45359566b0bSSteven Rostedt (VMware) * can use rcu_dereference_raw_check() is that elements removed from this list
45459566b0bSSteven Rostedt (VMware) * are simply leaked, so there is no need to interact with a grace-period
45559566b0bSSteven Rostedt (VMware) * mechanism. The rcu_dereference_raw_check() calls are needed to handle
45640dc4a42SWei Yang * concurrent insertions into the ftrace_ops_list.
45759566b0bSSteven Rostedt (VMware) *
45859566b0bSSteven Rostedt (VMware) * Silly Alpha and silly pointer-speculation compiler optimizations!
45959566b0bSSteven Rostedt (VMware) */
46059566b0bSSteven Rostedt (VMware) #define do_for_each_ftrace_op(op, list) \
46159566b0bSSteven Rostedt (VMware) op = rcu_dereference_raw_check(list); \
46259566b0bSSteven Rostedt (VMware) do
46359566b0bSSteven Rostedt (VMware)
46459566b0bSSteven Rostedt (VMware) /*
46559566b0bSSteven Rostedt (VMware) * Optimized for just a single item in the list (as that is the normal case).
46659566b0bSSteven Rostedt (VMware) */
46759566b0bSSteven Rostedt (VMware) #define while_for_each_ftrace_op(op) \
46859566b0bSSteven Rostedt (VMware) while (likely(op = rcu_dereference_raw_check((op)->next)) && \
46959566b0bSSteven Rostedt (VMware) unlikely((op) != &ftrace_list_end))
47059566b0bSSteven Rostedt (VMware)
471e7d3737eSFrederic Weisbecker /*
472e7d3737eSFrederic Weisbecker * Type of the current tracing.
473e7d3737eSFrederic Weisbecker */
/* Which point of a function is being hooked: its entry or its return. */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0,		/* Hook the call of the function */
	FTRACE_TYPE_RETURN,		/* Hook the return of the function */
};
478e7d3737eSFrederic Weisbecker
479e7d3737eSFrederic Weisbecker /* Current tracing type, default is FTRACE_TYPE_ENTER */
480e7d3737eSFrederic Weisbecker extern enum ftrace_tracing_type_t ftrace_tracing_type;
481e7d3737eSFrederic Weisbecker
48216444a8aSArnaldo Carvalho de Melo /*
48316444a8aSArnaldo Carvalho de Melo * The ftrace_ops must be a static and should also
48416444a8aSArnaldo Carvalho de Melo * be read_mostly. These functions do modify read_mostly variables
48516444a8aSArnaldo Carvalho de Melo * so use them sparely. Never free an ftrace_op or modify the
48616444a8aSArnaldo Carvalho de Melo * next pointer after it has been registered. Even after unregistering
48716444a8aSArnaldo Carvalho de Melo * it, the next pointer may still be used internally.
48816444a8aSArnaldo Carvalho de Melo */
48916444a8aSArnaldo Carvalho de Melo int register_ftrace_function(struct ftrace_ops *ops);
49016444a8aSArnaldo Carvalho de Melo int unregister_ftrace_function(struct ftrace_ops *ops);
49116444a8aSArnaldo Carvalho de Melo
492a1e2e31dSSteven Rostedt extern void ftrace_stub(unsigned long a0, unsigned long a1,
493d19ad077SSteven Rostedt (VMware) struct ftrace_ops *op, struct ftrace_regs *fregs);
49416444a8aSArnaldo Carvalho de Melo
495bed0d9a5SJiri Olsa
496bed0d9a5SJiri Olsa int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
497606576ceSSteven Rostedt #else /* !CONFIG_FUNCTION_TRACER */
4984dbf6bc2SSteven Rostedt /*
4994dbf6bc2SSteven Rostedt * (un)register_ftrace_function must be a macro since the ops parameter
5004dbf6bc2SSteven Rostedt * must not be evaluated.
5014dbf6bc2SSteven Rostedt */
5024dbf6bc2SSteven Rostedt #define register_ftrace_function(ops) ({ 0; })
5034dbf6bc2SSteven Rostedt #define unregister_ftrace_function(ops) ({ 0; })
/* Stubs used when CONFIG_FUNCTION_TRACER is not set: all are no-ops. */
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
/*
 * !CONFIG_FUNCTION_TRACER stub: batch symbol lookup needs the function
 * tracer infrastructure, so report the operation as unsupported.
 */
static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt,
					unsigned long *addrs)
{
	return -EOPNOTSUPP;
}
511606576ceSSteven Rostedt #endif /* CONFIG_FUNCTION_TRACER */
512352ad25aSSteven Rostedt
/* Hash entry keyed by call-site address, with an optional direct caller. */
struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;	/* address of the traced call site */
	unsigned long direct;	/* for direct lookup only */
};
518ea806eb3SSteven Rostedt (VMware)
519763e34e7SSteven Rostedt (VMware) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
520ff205766SAlexei Starovoitov unsigned long ftrace_find_rec_direct(unsigned long ip);
521da8bdfbdSFlorent Revest int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
522da8bdfbdSFlorent Revest int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
52359495740SFlorent Revest bool free_filters);
524da8bdfbdSFlorent Revest int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
525da8bdfbdSFlorent Revest int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);
526ccf5a89eSJiri Olsa
527fee86a4eSMark Rutland void ftrace_stub_direct_tramp(void);
528f64dd462SJiri Olsa
529763e34e7SSteven Rostedt (VMware) #else
530f64dd462SJiri Olsa struct ftrace_ops;
/* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=n: no direct trampoline exists. */
static inline unsigned long ftrace_find_rec_direct(unsigned long ip) { return 0; }
/*
 * Stubs for the direct-call API when CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 * is not enabled: every operation fails with -ENODEV.
 */
static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
					   bool free_filters)
{
	return -ENODEV;
}
static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
552763e34e7SSteven Rostedt (VMware)
553763e34e7SSteven Rostedt (VMware) /*
554763e34e7SSteven Rostedt (VMware) * This must be implemented by the architecture.
555763e34e7SSteven Rostedt (VMware) * It is the way the ftrace direct_ops helper, when called
556763e34e7SSteven Rostedt (VMware) * via ftrace (because there's other callbacks besides the
557763e34e7SSteven Rostedt (VMware) * direct call), can inform the architecture's trampoline that this
558763e34e7SSteven Rostedt (VMware) * routine has a direct caller, and what the caller is.
559562955feSSteven Rostedt (VMware) *
560562955feSSteven Rostedt (VMware) * For example, in x86, it returns the direct caller
561562955feSSteven Rostedt (VMware) * callback function via the regs->orig_ax parameter.
562562955feSSteven Rostedt (VMware) * Then in the ftrace trampoline, if this is set, it makes
563562955feSSteven Rostedt (VMware) * the return from the trampoline jump to the direct caller
564562955feSSteven Rostedt (VMware) * instead of going back to the function it just traced.
565763e34e7SSteven Rostedt (VMware) */
/* Direct calls disabled: nothing for the arch trampoline to record. */
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
						 unsigned long addr) { }
5689705bc70SMark Rutland #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
569763e34e7SSteven Rostedt (VMware)
570f38f1d2aSSteven Rostedt #ifdef CONFIG_STACK_TRACER
571bb99d8ccSAKASHI Takahiro
572f38f1d2aSSteven Rostedt extern int stack_tracer_enabled;
5733d9a8072SThomas Gleixner
57478eb4ea2SJoel Granados int stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
5757ff0d449SChristoph Hellwig size_t *lenp, loff_t *ppos);
5765367278cSSteven Rostedt (VMware)
5778aaf1ee7SSteven Rostedt (VMware) /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
5788aaf1ee7SSteven Rostedt (VMware) DECLARE_PER_CPU(int, disable_stack_tracer);
5798aaf1ee7SSteven Rostedt (VMware)
5808aaf1ee7SSteven Rostedt (VMware) /**
5818aaf1ee7SSteven Rostedt (VMware) * stack_tracer_disable - temporarily disable the stack tracer
5828aaf1ee7SSteven Rostedt (VMware) *
5838aaf1ee7SSteven Rostedt (VMware) * There's a few locations (namely in RCU) where stack tracing
5848aaf1ee7SSteven Rostedt (VMware) * cannot be executed. This function is used to disable stack
5858aaf1ee7SSteven Rostedt (VMware) * tracing during those critical sections.
5868aaf1ee7SSteven Rostedt (VMware) *
5878aaf1ee7SSteven Rostedt (VMware) * This function must be called with preemption or interrupts
5888aaf1ee7SSteven Rostedt (VMware) * disabled and stack_tracer_enable() must be called shortly after
5898aaf1ee7SSteven Rostedt (VMware) * while preemption or interrupts are still disabled.
5908aaf1ee7SSteven Rostedt (VMware) */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	/* Per-CPU counter: nests, and must be balanced by stack_tracer_enable() */
	this_cpu_inc(disable_stack_tracer);
}
5988aaf1ee7SSteven Rostedt (VMware)
5998aaf1ee7SSteven Rostedt (VMware) /**
6008aaf1ee7SSteven Rostedt (VMware) * stack_tracer_enable - re-enable the stack tracer
6018aaf1ee7SSteven Rostedt (VMware) *
6028aaf1ee7SSteven Rostedt (VMware) * After stack_tracer_disable() is called, stack_tracer_enable()
6038aaf1ee7SSteven Rostedt (VMware) * must be called shortly afterward.
6048aaf1ee7SSteven Rostedt (VMware) */
static inline void stack_tracer_enable(void)
{
	/* Same context requirement as stack_tracer_disable() */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	/* Balances the this_cpu_inc() done in stack_tracer_disable() */
	this_cpu_dec(disable_stack_tracer);
}
6115367278cSSteven Rostedt (VMware) #else
/* !CONFIG_STACK_TRACER: the stack tracer never runs, nothing to disable. */
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
614f38f1d2aSSteven Rostedt #endif
615f38f1d2aSSteven Rostedt
/*
 * Command bits passed to the code-modification paths (see
 * ftrace_modify_all_code()/arch_ftrace_update_code() below).
 */
enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
	FTRACE_MAY_SLEEP		= (1 << 5),
};
624ab6b8463SSteven Rostedt (VMware)
625*2bc56fdaSMasami Hiramatsu (Google) /* Arches can override ftrace_get_symaddr() to convert fentry_ip to symaddr. */
626*2bc56fdaSMasami Hiramatsu (Google) #ifndef ftrace_get_symaddr
627*2bc56fdaSMasami Hiramatsu (Google) /**
628*2bc56fdaSMasami Hiramatsu (Google) * ftrace_get_symaddr - return the symbol address from fentry_ip
629*2bc56fdaSMasami Hiramatsu (Google) * @fentry_ip: the address of ftrace location
630*2bc56fdaSMasami Hiramatsu (Google) *
631*2bc56fdaSMasami Hiramatsu (Google) * Get the symbol address from @fentry_ip (fast path). If there is no fast
632*2bc56fdaSMasami Hiramatsu (Google) * search path, this returns 0.
633*2bc56fdaSMasami Hiramatsu (Google) * User may need to use kallsyms API to find the symbol address.
634*2bc56fdaSMasami Hiramatsu (Google) */
635*2bc56fdaSMasami Hiramatsu (Google) #define ftrace_get_symaddr(fentry_ip) (0)
636*2bc56fdaSMasami Hiramatsu (Google) #endif
637*2bc56fdaSMasami Hiramatsu (Google)
6383d083395SSteven Rostedt #ifdef CONFIG_DYNAMIC_FTRACE
63931e88909SSteven Rostedt
6403a2bfec0SLi kunyu void ftrace_arch_code_modify_prepare(void);
6413a2bfec0SLi kunyu void ftrace_arch_code_modify_post_process(void);
642000ab691SSteven Rostedt
/* Failure classification recorded for ftrace_bug() reporting. */
enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
65002a392a0SSteven Rostedt (Red Hat) extern enum ftrace_bug_type ftrace_bug_type;
65102a392a0SSteven Rostedt (Red Hat)
652b05086c7SSteven Rostedt (Red Hat) /*
653b05086c7SSteven Rostedt (Red Hat) * Archs can set this to point to a variable that holds the value that was
654b05086c7SSteven Rostedt (Red Hat) * expected at the call site before calling ftrace_bug().
655b05086c7SSteven Rostedt (Red Hat) */
656b05086c7SSteven Rostedt (Red Hat) extern const void *ftrace_expected;
657b05086c7SSteven Rostedt (Red Hat)
6584fd3279bSSteven Rostedt (Red Hat) void ftrace_bug(int err, struct dyn_ftrace *rec);
659c88fd863SSteven Rostedt
660809dcf29SSteven Rostedt struct seq_file;
661809dcf29SSteven Rostedt
662d88471cbSSasha Levin extern int ftrace_text_reserved(const void *start, const void *end);
6632cfa1978SMasami Hiramatsu
6646be7fa3cSSteven Rostedt (VMware) struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
6656be7fa3cSSteven Rostedt (VMware)
666aec0be2dSSteven Rostedt (Red Hat) bool is_ftrace_trampoline(unsigned long addr);
667aec0be2dSSteven Rostedt (Red Hat)
66808f6fba5SSteven Rostedt /*
66908f6fba5SSteven Rostedt * The dyn_ftrace record's flags field is split into two parts.
67008f6fba5SSteven Rostedt * the first part which is '0-FTRACE_REF_MAX' is a counter of
67108f6fba5SSteven Rostedt * the number of callbacks that have registered the function that
67208f6fba5SSteven Rostedt * the dyn_ftrace descriptor represents.
67308f6fba5SSteven Rostedt *
67408f6fba5SSteven Rostedt * The second part is a mask:
67508f6fba5SSteven Rostedt * ENABLED - the function is being traced
67608f6fba5SSteven Rostedt * REGS - the record wants the function to save regs
67708f6fba5SSteven Rostedt * REGS_EN - the function is set up to save regs.
678f8b8be8aSMasami Hiramatsu * IPMODIFY - the record allows for the IP address to be changed.
679b7ffffbbSSteven Rostedt (Red Hat) * DISABLED - the record is not ready to be touched yet
680763e34e7SSteven Rostedt (VMware) * DIRECT - there is a direct function to call
681cbad0fb2SMark Rutland * CALL_OPS - the record can use callsite-specific ops
682cbad0fb2SMark Rutland * CALL_OPS_EN - the function is set up to use callsite-specific ops
683e11b521aSSteven Rostedt (Google) * TOUCHED - A callback was added since boot up
6846ce2c04fSSteven Rostedt (Google) * MODIFIED - The function had IPMODIFY or DIRECT attached to it
68508f6fba5SSteven Rostedt *
68608f6fba5SSteven Rostedt * When a new ftrace_ops is registered and wants a function to save
68702dae28fSWei Yang * pt_regs, the rec->flags REGS is set. When the function has been
68808f6fba5SSteven Rostedt * set up to save regs, the REG_EN flag is set. Once a function
68908f6fba5SSteven Rostedt * starts saving regs it will do so until all ftrace_ops are removed
69008f6fba5SSteven Rostedt * from tracing that function.
69108f6fba5SSteven Rostedt */
/* Flag bits in dyn_ftrace::flags; semantics described in the comment above. */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
	FTRACE_FL_DIRECT	= (1UL << 24),
	FTRACE_FL_DIRECT_EN	= (1UL << 23),
	FTRACE_FL_CALL_OPS	= (1UL << 22),
	FTRACE_FL_CALL_OPS_EN	= (1UL << 21),
	FTRACE_FL_TOUCHED	= (1UL << 20),
	FTRACE_FL_MODIFIED	= (1UL << 19),
};

/*
 * The low FTRACE_REF_MAX_SHIFT bits of dyn_ftrace::flags hold the callback
 * reference count; the shift must stay below the lowest flag bit (19 above).
 */
#define FTRACE_REF_MAX_SHIFT	19
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

/* Number of callbacks registered on the function @rec represents */
#define ftrace_rec_count(rec)	((rec)->flags & FTRACE_REF_MAX)
7120376bde1SSteven Rostedt (Red Hat)
/* One record per patchable call site. */
struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags; /* ref count + FTRACE_FL_* bits (see above) */
	struct dyn_arch_ftrace	arch; /* arch-specific patching state */
};
7183d083395SSteven Rostedt
719647664eaSMasami Hiramatsu int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
720647664eaSMasami Hiramatsu int remove, int reset);
7214f554e95SJiri Olsa int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
7224f554e95SJiri Olsa unsigned int cnt, int remove, int reset);
723ac483c44SJiri Olsa int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
724936e074bSSteven Rostedt int len, int reset);
725ac483c44SJiri Olsa int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
726936e074bSSteven Rostedt int len, int reset);
727936e074bSSteven Rostedt void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
728936e074bSSteven Rostedt void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
7295500fa51SJiri Olsa void ftrace_free_filter(struct ftrace_ops *ops);
730d032ae89SJoel Fernandes void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
731e1c08bddSSteven Rostedt
73208f6fba5SSteven Rostedt /*
73308f6fba5SSteven Rostedt * The FTRACE_UPDATE_* enum is used to pass information back
73408f6fba5SSteven Rostedt * from the ftrace_update_record() and ftrace_test_record()
73508f6fba5SSteven Rostedt * functions. These are called by the code update routines
73608f6fba5SSteven Rostedt * to find out what is to be done for a given function.
73708f6fba5SSteven Rostedt *
73808f6fba5SSteven Rostedt * IGNORE - The function is already what we want it to be
73908f6fba5SSteven Rostedt * MAKE_CALL - Start tracing the function
74008f6fba5SSteven Rostedt * MODIFY_CALL - Stop saving regs for the function
74108f6fba5SSteven Rostedt * MAKE_NOP - Stop tracing the function
74208f6fba5SSteven Rostedt */
enum {
	FTRACE_UPDATE_IGNORE,		/* already in the desired state */
	FTRACE_UPDATE_MAKE_CALL,	/* start tracing the function */
	FTRACE_UPDATE_MODIFY_CALL,	/* stop saving regs for the function */
	FTRACE_UPDATE_MAKE_NOP,		/* stop tracing the function */
};
749c88fd863SSteven Rostedt
/*
 * Iterator state flags; presumably used by the tracefs filter-file
 * open/read paths (ftrace_regex_open() below takes a @flag argument).
 */
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
	FTRACE_ITER_TOUCHED	= (1 << 7),
	FTRACE_ITER_ADDRS	= (1 << 8),
};
761fc13cb0cSSteven Rostedt
762c88fd863SSteven Rostedt void arch_ftrace_update_code(int command);
76389f579ceSYi Wang void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
76489f579ceSYi Wang void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
76589f579ceSYi Wang void arch_ftrace_trampoline_free(struct ftrace_ops *ops);
766c88fd863SSteven Rostedt
767c88fd863SSteven Rostedt struct ftrace_rec_iter;
768c88fd863SSteven Rostedt
769c88fd863SSteven Rostedt struct ftrace_rec_iter *ftrace_rec_iter_start(void);
770c88fd863SSteven Rostedt struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
771c88fd863SSteven Rostedt struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
772c88fd863SSteven Rostedt
/* Walk every dyn_ftrace record; @iter becomes NULL when none remain. */
#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
77708d636b6SSteven Rostedt
77808d636b6SSteven Rostedt
7797375dca1SSteven Rostedt (VMware) int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
7807375dca1SSteven Rostedt (VMware) int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
781c88fd863SSteven Rostedt void ftrace_run_stop_machine(int command);
782f0cf973aSSteven Rostedt unsigned long ftrace_location(unsigned long ip);
78304cf31a7SMichael Ellerman unsigned long ftrace_location_range(unsigned long start, unsigned long end);
7847413af1fSSteven Rostedt (Red Hat) unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
7857413af1fSSteven Rostedt (Red Hat) unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
786c88fd863SSteven Rostedt
787c88fd863SSteven Rostedt extern ftrace_func_t ftrace_trace_function;
788c88fd863SSteven Rostedt
789fc13cb0cSSteven Rostedt int ftrace_regex_open(struct ftrace_ops *ops, int flag,
790fc13cb0cSSteven Rostedt struct inode *inode, struct file *file);
791fc13cb0cSSteven Rostedt ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
792fc13cb0cSSteven Rostedt size_t cnt, loff_t *ppos);
793fc13cb0cSSteven Rostedt ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
794fc13cb0cSSteven Rostedt size_t cnt, loff_t *ppos);
795fc13cb0cSSteven Rostedt int ftrace_regex_release(struct inode *inode, struct file *file);
796fc13cb0cSSteven Rostedt
7972a85a37fSSteven Rostedt void __init
7982a85a37fSSteven Rostedt ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
7992a85a37fSSteven Rostedt
8003d083395SSteven Rostedt /* defined in arch */
8013a36cb11SJiri Slaby extern int ftrace_dyn_arch_init(void);
802e4f5d544SSteven Rostedt extern void ftrace_replace_code(int enable);
803d61f82d0SSteven Rostedt extern int ftrace_update_ftrace_func(ftrace_func_t func);
804d61f82d0SSteven Rostedt extern void ftrace_caller(void);
80508f6fba5SSteven Rostedt extern void ftrace_regs_caller(void);
806d61f82d0SSteven Rostedt extern void ftrace_call(void);
80708f6fba5SSteven Rostedt extern void ftrace_regs_call(void);
808d61f82d0SSteven Rostedt extern void mcount_call(void);
809f0001207SShaohua Li
8108ed3e2cfSSteven Rostedt void ftrace_modify_all_code(int command);
8118ed3e2cfSSteven Rostedt
812f0001207SShaohua Li #ifndef FTRACE_ADDR
813f0001207SShaohua Li #define FTRACE_ADDR ((unsigned long)ftrace_caller)
814f0001207SShaohua Li #endif
81508f6fba5SSteven Rostedt
81679922b80SSteven Rostedt (Red Hat) #ifndef FTRACE_GRAPH_ADDR
81779922b80SSteven Rostedt (Red Hat) #define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
81879922b80SSteven Rostedt (Red Hat) #endif
81979922b80SSteven Rostedt (Red Hat)
82008f6fba5SSteven Rostedt #ifndef FTRACE_REGS_ADDR
82106aeaaeaSMasami Hiramatsu #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
82208f6fba5SSteven Rostedt # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
82308f6fba5SSteven Rostedt #else
82408f6fba5SSteven Rostedt # define FTRACE_REGS_ADDR FTRACE_ADDR
82508f6fba5SSteven Rostedt #endif
82608f6fba5SSteven Rostedt #endif
82708f6fba5SSteven Rostedt
828646d7043SSteven Rostedt (Red Hat) /*
829646d7043SSteven Rostedt (Red Hat) * If an arch would like functions that are only traced
830646d7043SSteven Rostedt (Red Hat) * by the function graph tracer to jump directly to its own
831646d7043SSteven Rostedt (Red Hat) * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
832646d7043SSteven Rostedt (Red Hat) * to be that address to jump to.
833646d7043SSteven Rostedt (Red Hat) */
834646d7043SSteven Rostedt (Red Hat) #ifndef FTRACE_GRAPH_TRAMP_ADDR
835646d7043SSteven Rostedt (Red Hat) #define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
836646d7043SSteven Rostedt (Red Hat) #endif
837646d7043SSteven Rostedt (Red Hat)
838fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
839fb52607aSFrederic Weisbecker extern void ftrace_graph_caller(void);
8405a45cfe1SSteven Rostedt extern int ftrace_enable_ftrace_graph_caller(void);
8415a45cfe1SSteven Rostedt extern int ftrace_disable_ftrace_graph_caller(void);
8425a45cfe1SSteven Rostedt #else
/* !CONFIG_FUNCTION_GRAPH_TRACER stubs: nothing to patch, report success. */
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
845e7d3737eSFrederic Weisbecker #endif
846ad90c0e3SSteven Rostedt
847593eb8a2SSteven Rostedt /**
84857794a9dSWenji Huang * ftrace_make_nop - convert code into nop
84931e88909SSteven Rostedt * @mod: module structure if called by module load initialization
850fbf6c73cSMark Rutland * @rec: the call site record (e.g. mcount/fentry)
85131e88909SSteven Rostedt * @addr: the address that the call site should be calling
852593eb8a2SSteven Rostedt *
853593eb8a2SSteven Rostedt * This is a very sensitive operation and great care needs
854593eb8a2SSteven Rostedt * to be taken by the arch. The operation should carefully
855593eb8a2SSteven Rostedt * read the location, check to see if what is read is indeed
856593eb8a2SSteven Rostedt * what we expect it to be, and then on success of the compare,
857593eb8a2SSteven Rostedt * it should write to the location.
858593eb8a2SSteven Rostedt *
85931e88909SSteven Rostedt * The code segment at @rec->ip should be a caller to @addr
86031e88909SSteven Rostedt *
861593eb8a2SSteven Rostedt * Return must be:
862593eb8a2SSteven Rostedt * 0 on success
863593eb8a2SSteven Rostedt * -EFAULT on error reading the location
864593eb8a2SSteven Rostedt * -EINVAL on a failed compare of the contents
865593eb8a2SSteven Rostedt * -EPERM on error writing to the location
866593eb8a2SSteven Rostedt * Any other value will be considered a failure.
867593eb8a2SSteven Rostedt */
86831e88909SSteven Rostedt extern int ftrace_make_nop(struct module *mod,
86931e88909SSteven Rostedt struct dyn_ftrace *rec, unsigned long addr);
87031e88909SSteven Rostedt
87167ccddf8SIlya Leoshkevich /**
87267ccddf8SIlya Leoshkevich * ftrace_need_init_nop - return whether nop call sites should be initialized
87367ccddf8SIlya Leoshkevich *
87467ccddf8SIlya Leoshkevich * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
87567ccddf8SIlya Leoshkevich * need to call ftrace_init_nop() if the code is built with that flag.
87667ccddf8SIlya Leoshkevich * Architectures where this is not always the case may define their own
87767ccddf8SIlya Leoshkevich * condition.
87867ccddf8SIlya Leoshkevich *
87967ccddf8SIlya Leoshkevich * Return must be:
88067ccddf8SIlya Leoshkevich * 0 if ftrace_init_nop() should be called
88167ccddf8SIlya Leoshkevich * Nonzero if ftrace_init_nop() should not be called
88267ccddf8SIlya Leoshkevich */
88367ccddf8SIlya Leoshkevich
88467ccddf8SIlya Leoshkevich #ifndef ftrace_need_init_nop
88567ccddf8SIlya Leoshkevich #define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
88667ccddf8SIlya Leoshkevich #endif
887fbf6c73cSMark Rutland
888fbf6c73cSMark Rutland /**
889fbf6c73cSMark Rutland * ftrace_init_nop - initialize a nop call site
890fbf6c73cSMark Rutland * @mod: module structure if called by module load initialization
891fbf6c73cSMark Rutland * @rec: the call site record (e.g. mcount/fentry)
892fbf6c73cSMark Rutland *
893fbf6c73cSMark Rutland * This is a very sensitive operation and great care needs
894fbf6c73cSMark Rutland * to be taken by the arch. The operation should carefully
895fbf6c73cSMark Rutland * read the location, check to see if what is read is indeed
896fbf6c73cSMark Rutland * what we expect it to be, and then on success of the compare,
897fbf6c73cSMark Rutland * it should write to the location.
898fbf6c73cSMark Rutland *
899fbf6c73cSMark Rutland * The code segment at @rec->ip should contain the contents created by
900fbf6c73cSMark Rutland * the compiler
901fbf6c73cSMark Rutland *
902fbf6c73cSMark Rutland * Return must be:
903fbf6c73cSMark Rutland * 0 on success
904fbf6c73cSMark Rutland * -EFAULT on error reading the location
905fbf6c73cSMark Rutland * -EINVAL on a failed compare of the contents
906fbf6c73cSMark Rutland * -EPERM on error writing to the location
907fbf6c73cSMark Rutland * Any other value will be considered a failure.
908fbf6c73cSMark Rutland */
909fbf6c73cSMark Rutland #ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	/* Default: treat the compiler-emitted call site as a call to patch out. */
	return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
914fbf6c73cSMark Rutland #endif
915fbf6c73cSMark Rutland
91631e88909SSteven Rostedt /**
91731e88909SSteven Rostedt * ftrace_make_call - convert a nop call site into a call to addr
918fbf6c73cSMark Rutland * @rec: the call site record (e.g. mcount/fentry)
91931e88909SSteven Rostedt * @addr: the address that the call site should call
92031e88909SSteven Rostedt *
92131e88909SSteven Rostedt * This is a very sensitive operation and great care needs
92231e88909SSteven Rostedt * to be taken by the arch. The operation should carefully
92331e88909SSteven Rostedt * read the location, check to see if what is read is indeed
92431e88909SSteven Rostedt * what we expect it to be, and then on success of the compare,
92531e88909SSteven Rostedt * it should write to the location.
92631e88909SSteven Rostedt *
92731e88909SSteven Rostedt * The code segment at @rec->ip should be a nop
92831e88909SSteven Rostedt *
92931e88909SSteven Rostedt * Return must be:
93031e88909SSteven Rostedt * 0 on success
93131e88909SSteven Rostedt * -EFAULT on error reading the location
93231e88909SSteven Rostedt * -EINVAL on a failed compare of the contents
93331e88909SSteven Rostedt * -EPERM on error writing to the location
93431e88909SSteven Rostedt * Any other value will be considered a failure.
93531e88909SSteven Rostedt */
93631e88909SSteven Rostedt extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
93731e88909SSteven Rostedt
938cbad0fb2SMark Rutland #if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || \
9397caa9765SPuranjay Mohan defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) || \
9407caa9765SPuranjay Mohan defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS)
94108f6fba5SSteven Rostedt /**
94208f6fba5SSteven Rostedt * ftrace_modify_call - convert from one addr to another (no nop)
943fbf6c73cSMark Rutland * @rec: the call site record (e.g. mcount/fentry)
94408f6fba5SSteven Rostedt * @old_addr: the address expected to be currently called to
94508f6fba5SSteven Rostedt * @addr: the address to change to
94608f6fba5SSteven Rostedt *
94708f6fba5SSteven Rostedt * This is a very sensitive operation and great care needs
94808f6fba5SSteven Rostedt * to be taken by the arch. The operation should carefully
94908f6fba5SSteven Rostedt * read the location, check to see if what is read is indeed
95008f6fba5SSteven Rostedt * what we expect it to be, and then on success of the compare,
95108f6fba5SSteven Rostedt * it should write to the location.
95208f6fba5SSteven Rostedt *
953cbad0fb2SMark Rutland * When using call ops, this is called when the associated ops change, even
954cbad0fb2SMark Rutland * when (addr == old_addr).
955cbad0fb2SMark Rutland *
95608f6fba5SSteven Rostedt * The code segment at @rec->ip should be a caller to @old_addr
95708f6fba5SSteven Rostedt *
95808f6fba5SSteven Rostedt * Return must be:
95908f6fba5SSteven Rostedt * 0 on success
96008f6fba5SSteven Rostedt * -EFAULT on error reading the location
96108f6fba5SSteven Rostedt * -EINVAL on a failed compare of the contents
96208f6fba5SSteven Rostedt * -EPERM on error writing to the location
96308f6fba5SSteven Rostedt * Any other value will be considered a failure.
96408f6fba5SSteven Rostedt */
96508f6fba5SSteven Rostedt extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
96608f6fba5SSteven Rostedt unsigned long addr);
96708f6fba5SSteven Rostedt #else
96808f6fba5SSteven Rostedt /* Should never be called */
static inline int
ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		   unsigned long addr)
{
	/* No config above selected call modification: reject outright. */
	return -EINVAL;
}
97408f6fba5SSteven Rostedt #endif
97508f6fba5SSteven Rostedt
976ecea656dSAbhishek Sagar extern int skip_trace(unsigned long ip);
977a949ae56SSteven Rostedt (Red Hat) extern void ftrace_module_init(struct module *mod);
9787dcd182bSJessica Yu extern void ftrace_module_enable(struct module *mod);
979049fb9bdSSteven Rostedt (Red Hat) extern void ftrace_release_mod(struct module *mod);
9804dc93676SSteven Rostedt #else /* CONFIG_DYNAMIC_FTRACE */
/* !CONFIG_DYNAMIC_FTRACE: no call-site records exist, so these are no-ops. */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
/* No dynamic call sites exist, so no text range is ever reserved. */
static inline int ftrace_text_reserved(const void *start, const void *end) { return 0; }
/* Without dynamic ftrace there are no patchable sites; report "not found". */
static inline unsigned long ftrace_location(unsigned long ip) { return 0; }
993fc13cb0cSSteven Rostedt
/*
 * Again, users of functions that take ftrace_ops may not have them
 * defined when ftrace is not enabled, but these functions may still
 * be called. Use macros instead of inlines.
 */
/* The value-returning stubs yield -ENODEV; the void ones expand to no-ops. */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)
1007fc13cb0cSSteven Rostedt
/* Without dynamic ftrace the filter control files do not exist. */
static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
					  size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
					   size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
1014aec0be2dSSteven Rostedt (Red Hat)
/* No ftrace trampolines are ever allocated when dynamic ftrace is disabled. */
static inline bool is_ftrace_trampoline(unsigned long addr) { return false; }
1019ecea656dSAbhishek Sagar #endif /* CONFIG_DYNAMIC_FTRACE */
1020352ad25aSSteven Rostedt
10210c0593b4SSteven Rostedt (VMware) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
10220c0593b4SSteven Rostedt (VMware) #ifndef ftrace_graph_func
10230c0593b4SSteven Rostedt (VMware) #define ftrace_graph_func ftrace_stub
10240c0593b4SSteven Rostedt (VMware) #define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
10250c0593b4SSteven Rostedt (VMware) #else
10260c0593b4SSteven Rostedt (VMware) #define FTRACE_OPS_GRAPH_STUB 0
10270c0593b4SSteven Rostedt (VMware) #endif
10280c0593b4SSteven Rostedt (VMware) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
10290c0593b4SSteven Rostedt (VMware)
/* totally disable ftrace - cannot re-enable after this */
1031aeaee8a2SIngo Molnar void ftrace_kill(void);
1032aeaee8a2SIngo Molnar
/* Disable the function tracer (when configured in) by clearing ftrace_enabled. */
static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}
1039f43fdad8SIngo Molnar
/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
/*
 * Remember the current ftrace_enabled state and force tracing off.
 * Returns the saved state for a later __ftrace_enabled_restore().
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}
10559bdeb7b5SHuang Ying
/* Put ftrace_enabled back to the value __ftrace_enabled_save() returned. */
static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
10629bdeb7b5SHuang Ying
1063eed542d6SAKASHI Takahiro /* All archs should have this, but we define it for consistency */
1064eed542d6SAKASHI Takahiro #ifndef ftrace_return_address0
1065eed542d6SAKASHI Takahiro # define ftrace_return_address0 __builtin_return_address(0)
1066352ad25aSSteven Rostedt #endif
1067eed542d6SAKASHI Takahiro
1068eed542d6SAKASHI Takahiro /* Archs may use other ways for ADDR1 and beyond */
1069eed542d6SAKASHI Takahiro #ifndef ftrace_return_address
1070eed542d6SAKASHI Takahiro # ifdef CONFIG_FRAME_POINTER
1071eed542d6SAKASHI Takahiro # define ftrace_return_address(n) __builtin_return_address(n)
1072eed542d6SAKASHI Takahiro # else
1073eed542d6SAKASHI Takahiro # define ftrace_return_address(n) 0UL
1074eed542d6SAKASHI Takahiro # endif
1075eed542d6SAKASHI Takahiro #endif
1076eed542d6SAKASHI Takahiro
1077eed542d6SAKASHI Takahiro #define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
1078eed542d6SAKASHI Takahiro #define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
1079eed542d6SAKASHI Takahiro #define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
1080eed542d6SAKASHI Takahiro #define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
1081eed542d6SAKASHI Takahiro #define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
1082eed542d6SAKASHI Takahiro #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
1083eed542d6SAKASHI Takahiro #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
1084352ad25aSSteven Rostedt
/*
 * Return the first caller address (checking up to three frames) that is
 * not inside a locking function, so callers see the real instigator
 * rather than the lock primitive itself.
 */
static __always_inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	/* Give up after three frames and return this one regardless. */
	return CALLER_ADDR2;
}
1096f904f582SSebastian Andrzej Siewior
1097c3bc8fd6SJoel Fernandes (Google) #ifdef CONFIG_TRACE_PREEMPT_TOGGLE
1098489f1396SIngo Molnar extern void trace_preempt_on(unsigned long a0, unsigned long a1);
1099489f1396SIngo Molnar extern void trace_preempt_off(unsigned long a0, unsigned long a1);
11006cd8a4bbSSteven Rostedt #else
1101b02ee9a3SMinho Ban /*
1102b02ee9a3SMinho Ban * Use defines instead of static inlines because some arches will make code out
1103b02ee9a3SMinho Ban * of the CALLER_ADDR, when we really want these to be a real nop.
1104b02ee9a3SMinho Ban */
1105b02ee9a3SMinho Ban # define trace_preempt_on(a0, a1) do { } while (0)
1106b02ee9a3SMinho Ban # define trace_preempt_off(a0, a1) do { } while (0)
11076cd8a4bbSSteven Rostedt #endif
11086cd8a4bbSSteven Rostedt
110968bf21aaSSteven Rostedt #ifdef CONFIG_FTRACE_MCOUNT_RECORD
111068bf21aaSSteven Rostedt extern void ftrace_init(void);
1111a1326b17SMark Rutland #ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
1112a1326b17SMark Rutland #define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
1113a1326b17SMark Rutland #else
1114a1326b17SMark Rutland #define FTRACE_CALLSITE_SECTION "__mcount_loc"
1115a1326b17SMark Rutland #endif
111668bf21aaSSteven Rostedt #else
ftrace_init(void)111768bf21aaSSteven Rostedt static inline void ftrace_init(void) { }
111868bf21aaSSteven Rostedt #endif
111968bf21aaSSteven Rostedt
112071566a0dSFrederic Weisbecker /*
1121287b6e68SFrederic Weisbecker * Structure that defines an entry function trace.
1122a4a551b8SNamhyung Kim * It's already packed but the attribute "packed" is needed
1123a4a551b8SNamhyung Kim * to remove extra padding at the end.
1124287b6e68SFrederic Weisbecker */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth; /* call depth of this entry */
} __packed;
1129dd0e545fSSteven Rostedt
113071566a0dSFrederic Weisbecker /*
113121e92806SDonglin Peng * Structure that defines an entry function trace with retaddr.
113221e92806SDonglin Peng * It's already packed but the attribute "packed" is needed
113321e92806SDonglin Peng * to remove extra padding at the end.
113421e92806SDonglin Peng */
struct fgraph_retaddr_ent {
	unsigned long func; /* Current function */
	int depth; /* call depth of this entry */
	unsigned long retaddr; /* Return address */
} __packed;
114021e92806SDonglin Peng
114121e92806SDonglin Peng /*
1142caf4b323SFrederic Weisbecker * Structure that defines a return function trace.
1143a4a551b8SNamhyung Kim * It's already packed but the attribute "packed" is needed
1144a4a551b8SNamhyung Kim * to remove extra padding at the end.
1145caf4b323SFrederic Weisbecker */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	unsigned long retval; /* the function's return value */
#endif
	int depth; /* call depth at this exit */
	/* Number of functions that overran the depth limit for current task */
	unsigned int overrun;
} __packed;
1155caf4b323SFrederic Weisbecker
115637238abeSSteven Rostedt (VMware) struct fgraph_ops;
115762b915f1SJiri Olsa
/* Types of the callback handlers for tracing the function graph */
115937238abeSSteven Rostedt (VMware) typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *,
11602ca8c112SMasami Hiramatsu (Google) struct fgraph_ops *,
11612ca8c112SMasami Hiramatsu (Google) struct ftrace_regs *); /* return */
116237238abeSSteven Rostedt (VMware) typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *,
116341705c42SMasami Hiramatsu (Google) struct fgraph_ops *,
116441705c42SMasami Hiramatsu (Google) struct ftrace_regs *); /* entry */
116537238abeSSteven Rostedt (VMware)
116621e92806SDonglin Peng extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
116741705c42SMasami Hiramatsu (Google) struct fgraph_ops *gops,
116841705c42SMasami Hiramatsu (Google) struct ftrace_regs *fregs);
1169df3ec5daSSteven Rostedt (Google) bool ftrace_pids_enabled(struct ftrace_ops *ops);
1170e8025babSSteven Rostedt (VMware)
1171fb52607aSFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11728b96f011SFrederic Weisbecker
struct fgraph_ops {
	trace_func_graph_ent_t entryfunc; /* called on function entry */
	trace_func_graph_ret_t retfunc; /* called on function return */
	struct ftrace_ops ops; /* for the hash lists */
	void *private; /* caller-owned opaque data */
	/* NOTE(review): looks like entryfunc is stashed here while pid
	 * filtering is active (see ftrace_pids_enabled()) — confirm. */
	trace_func_graph_ent_t saved_func;
	int idx; /* slot index assigned when registered — TODO confirm */
};
1181688f7089SSteven Rostedt (VMware)
118291c46b0aSSteven Rostedt (VMware) void *fgraph_reserve_data(int idx, int size_bytes);
118391c46b0aSSteven Rostedt (VMware) void *fgraph_retrieve_data(int idx, int *size_bytes);
1184a312a0f7SSteven Rostedt void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth);
118591c46b0aSSteven Rostedt (VMware)
11868b96f011SFrederic Weisbecker /*
1187712406a6SSteven Rostedt * Stack of return addresses for functions
1188712406a6SSteven Rostedt * of a thread.
1189712406a6SSteven Rostedt * Used in struct thread_info
1190712406a6SSteven Rostedt */
struct ftrace_ret_stack {
	unsigned long ret; /* original return address */
	unsigned long func; /* the traced function */
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp; /* frame pointer, for the consistency test */
#endif
	unsigned long *retp; /* where on the stack @ret lived — see ftrace_graph_ret_addr() */
};
1199712406a6SSteven Rostedt
/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
1205712406a6SSteven Rostedt extern void return_to_handler(void);
1206712406a6SSteven Rostedt
1207712406a6SSteven Rostedt extern int
120841705c42SMasami Hiramatsu (Google) function_graph_enter_regs(unsigned long ret, unsigned long func,
120941705c42SMasami Hiramatsu (Google) unsigned long frame_pointer, unsigned long *retp,
121041705c42SMasami Hiramatsu (Google) struct ftrace_regs *fregs);
121141705c42SMasami Hiramatsu (Google)
/* Convenience wrapper: function_graph_enter_regs() with no ftrace_regs. */
static inline int function_graph_enter(unsigned long ret, unsigned long func,
				       unsigned long fp, unsigned long *retp)
{
	return function_graph_enter_regs(ret, func, fp, retp, NULL);
}
1217712406a6SSteven Rostedt
1218b0e21a61SSteven Rostedt (VMware) struct ftrace_ret_stack *
12197aa1eaefSSteven Rostedt (VMware) ftrace_graph_get_ret_stack(struct task_struct *task, int skip);
12200a6c61bcSMasami Hiramatsu (Google) unsigned long ftrace_graph_top_ret_addr(struct task_struct *task);
1221b0e21a61SSteven Rostedt (VMware)
1222223918e3SJosh Poimboeuf unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
1223223918e3SJosh Poimboeuf unsigned long ret, unsigned long *retp);
12244497412aSSteven Rostedt (VMware) unsigned long *fgraph_get_task_var(struct fgraph_ops *gops);
1225223918e3SJosh Poimboeuf
/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
12318b96f011SFrederic Weisbecker #define __notrace_funcgraph notrace
12328b96f011SFrederic Weisbecker
1233f201ae23SFrederic Weisbecker #define FTRACE_RETFUNC_DEPTH 50
1234f201ae23SFrederic Weisbecker #define FTRACE_RETSTACK_ALLOC_SIZE 32
1235688f7089SSteven Rostedt (VMware)
1236688f7089SSteven Rostedt (VMware) extern int register_ftrace_graph(struct fgraph_ops *ops);
1237688f7089SSteven Rostedt (VMware) extern void unregister_ftrace_graph(struct fgraph_ops *ops);
1238f201ae23SFrederic Weisbecker
123918bfee32SChristophe Leroy /**
124018bfee32SChristophe Leroy * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
124118bfee32SChristophe Leroy *
124218bfee32SChristophe Leroy * ftrace_graph_stop() is called when a severe error is detected in
124318bfee32SChristophe Leroy * the function graph tracing. This function is called by the critical
124418bfee32SChristophe Leroy * paths of function graph to keep those paths from doing any more harm.
124518bfee32SChristophe Leroy */
124618bfee32SChristophe Leroy DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);
124718bfee32SChristophe Leroy
static inline bool ftrace_graph_is_dead(void)
{
	/* kill_ftrace_graph is raised by ftrace_graph_stop() on severe errors. */
	return static_branch_unlikely(&kill_ftrace_graph);
}
125218bfee32SChristophe Leroy
125314a866c5SSteven Rostedt extern void ftrace_graph_stop(void);
125414a866c5SSteven Rostedt
1255287b6e68SFrederic Weisbecker /* The current handlers in use */
1256287b6e68SFrederic Weisbecker extern trace_func_graph_ret_t ftrace_graph_return;
1257287b6e68SFrederic Weisbecker extern trace_func_graph_ent_t ftrace_graph_entry;
1258287b6e68SFrederic Weisbecker
1259fb52607aSFrederic Weisbecker extern void ftrace_graph_init_task(struct task_struct *t);
1260fb52607aSFrederic Weisbecker extern void ftrace_graph_exit_task(struct task_struct *t);
1261868baf07SSteven Rostedt extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
126221a8c466SFrederic Weisbecker
12631d5f0222SSteven Rostedt (Google) /* Used by assembly, but to quiet sparse warnings */
12641d5f0222SSteven Rostedt (Google) extern struct ftrace_ops *function_trace_op;
12651d5f0222SSteven Rostedt (Google)
/* Pause graph tracing for current. Counting atomic: pauses nest, pair with unpause. */
static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}
1270380c4b14SFrederic Weisbecker
/* Undo one pause_graph_tracing() for current. */
static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
12755ac9f622SSteven Rostedt #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
12768b96f011SFrederic Weisbecker
12778b96f011SFrederic Weisbecker #define __notrace_funcgraph
12788b96f011SFrederic Weisbecker
/* !CONFIG_FUNCTION_GRAPH_TRACER: no per-task graph state to set up or tear down. */
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
128221a8c466SFrederic Weisbecker
1283688f7089SSteven Rostedt (VMware) /* Define as macros as fgraph_ops may not be defined */
1284688f7089SSteven Rostedt (VMware) #define register_ftrace_graph(ops) ({ -1; })
1285688f7089SSteven Rostedt (VMware) #define unregister_ftrace_graph(ops) do { } while (0)
1286380c4b14SFrederic Weisbecker
/* Without the graph tracer no return addresses are replaced; @ret is already real. */
static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}
1293223918e3SJosh Poimboeuf
/* Graph tracer not configured: pausing is a no-op. */
static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
12965ac9f622SSteven Rostedt #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1297caf4b323SFrederic Weisbecker
1298ea4e2bc4SSteven Rostedt #ifdef CONFIG_TRACING
1299cecbca96SFrederic Weisbecker enum ftrace_dump_mode;
1300cecbca96SFrederic Weisbecker
130119f0423fSHuang Yiwei #define MAX_TRACER_SIZE 100
130219f0423fSHuang Yiwei extern char ftrace_dump_on_oops[];
130319f0423fSHuang Yiwei extern int ftrace_dump_on_oops_enabled(void);
13040daa2302SSteven Rostedt (Red Hat) extern int tracepoint_printk;
1305526211bcSIngo Molnar
1306de7edd31SSteven Rostedt (Red Hat) extern void disable_trace_on_warning(void);
1307de7edd31SSteven Rostedt (Red Hat) extern int __disable_trace_on_warning;
1308de7edd31SSteven Rostedt (Red Hat)
130978eb4ea2SJoel Granados int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
131032927393SChristoph Hellwig void *buffer, size_t *lenp, loff_t *ppos);
131142391745SSteven Rostedt (Red Hat)
1312de7edd31SSteven Rostedt (Red Hat) #else /* CONFIG_TRACING */
disable_trace_on_warning(void)1313de7edd31SSteven Rostedt (Red Hat) static inline void disable_trace_on_warning(void) { }
1314ea4e2bc4SSteven Rostedt #endif /* CONFIG_TRACING */
1315ea4e2bc4SSteven Rostedt
1316e7b8e675SMike Frysinger #ifdef CONFIG_FTRACE_SYSCALLS
1317e7b8e675SMike Frysinger
1318e7b8e675SMike Frysinger unsigned long arch_syscall_addr(int nr);
1319e7b8e675SMike Frysinger
1320e7b8e675SMike Frysinger #endif /* CONFIG_FTRACE_SYSCALLS */
1321e7b8e675SMike Frysinger
132216444a8aSArnaldo Carvalho de Melo #endif /* _LINUX_FTRACE_H */
1323