/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
struct ftrace_direct_func;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
	defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	return NULL;
}
#endif

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported);
#else
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are attribute flags that can only be set before
 * registering the ftrace_ops, and cannot be modified while registered.
 * Changing those attribute flags after registering the ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote a dynamically
 *           allocated ftrace_ops which needs special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each traced function
 *            call and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. Registering the ftrace_ops will not fail,
 *            but the regs argument will be NULL if the arch does not
 *            support passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback has its own recursion protection. If it does
 *            not set this, then the ftrace infrastructure will add recursion
 *            protection on the callback's behalf.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops is being deleted, do not let it be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocates a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can also be set by the ftrace users,
 *            and in such cases the arch must not modify it. Only the arch
 *            ftrace core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already registered
 *            for any of the functions that this ops will be registered for, then
 *            registering this ops (or calling set_filter_ip on it) will fail.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *             ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *            (internal ftrace only, should not be used by others)
 */
enum {
	FTRACE_OPS_FL_ENABLED			= BIT(0),
	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
	FTRACE_OPS_FL_RECURSION_SAFE		= BIT(4),
	FTRACE_OPS_FL_STUB			= BIT(5),
	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
	FTRACE_OPS_FL_DELETED			= BIT(7),
	FTRACE_OPS_FL_ADDING			= BIT(8),
	FTRACE_OPS_FL_REMOVING			= BIT(9),
	FTRACE_OPS_FL_MODIFYING			= BIT(10),
	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
	FTRACE_OPS_FL_PID			= BIT(13),
	FTRACE_OPS_FL_RCU			= BIT(14),
	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
	FTRACE_OPS_FL_PERMANENT			= BIT(16),
	FTRACE_OPS_FL_DIRECT			= BIT(17),
};

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know which functions the callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;
	struct ftrace_hash __rcu	*filter_hash;
	struct mutex			regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, unregistering it will perform a scheduling on all CPUs
 * to make sure that there are no more users. Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed, and if
 * private data is added to a ftrace_ops that is in core code, the user
 * of the ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops __rcu		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
	struct list_head		list;
#endif
};

extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;

/*
 * Traverse the ftrace_ops list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_check(list);			\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))

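/*
 * Example (an illustrative sketch modeled on how kernel/trace/ftrace.c
 * uses these macros; the surrounding function and the ip/parent_ip/regs
 * values are assumed to exist in the caller):
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		// stub entries need not be called
 *		if (op->flags & FTRACE_OPS_FL_STUB)
 *			continue;
 *		op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 */
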
/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly. Never free a ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);
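
/*
 * Example of the expected usage (a minimal sketch; my_callback and
 * my_ops are hypothetical names, not part of this API):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// with SAVE_REGS_IF_SUPPORTED, regs may be NULL here
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *	};
 *
 *	// in init code:	register_ftrace_function(&my_ops);
 *	// in teardown:		unregister_ftrace_function(&my_ops);
 */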

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
	unsigned long direct; /* for direct lookup only */
};

struct dyn_ftrace;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
int register_ftrace_direct(unsigned long ip, unsigned long addr);
int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
				struct dyn_ftrace *rec,
				unsigned long old_addr,
				unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
#else
# define ftrace_direct_func_count 0
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int modify_ftrace_direct(unsigned long ip,
				       unsigned long old_addr, unsigned long new_addr)
{
	return -ENOTSUPP;
}
static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
{
	return NULL;
}
static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
					      struct dyn_ftrace *rec,
					      unsigned long old_addr,
					      unsigned long new_addr)
{
	return -ENODEV;
}
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

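/*
 * Example (a hedged sketch: my_tramp stands in for an arch-level
 * trampoline that saves and restores any registers it clobbers; it is
 * not something this header provides):
 *
 *	unsigned long ip = ftrace_location((unsigned long)some_function);
 *	int ret;
 *
 *	// -ENOTSUPP when the config is off; can also fail if the site
 *	// is already claimed by another direct or IPMODIFY user
 *	ret = register_ftrace_direct(ip, (unsigned long)my_tramp);
 *	...
 *	unregister_ftrace_direct(ip, (unsigned long)my_tramp);
 */
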
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, on x86, it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
						 unsigned long addr) { }
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
		       size_t *lenp, loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif

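/*
 * Usage sketch (assuming the caller pairs the calls under the same
 * preemption-disabled region, as the comments above require):
 *
 *	preempt_disable_notrace();
 *	stack_tracer_disable();
 *	// ... section that must not recurse into the stack tracer ...
 *	stack_tracer_enable();
 *	preempt_enable_notrace();
 */
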
#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, ranging from 0 to FTRACE_REF_MAX, is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *  DIRECT   - there is a direct function to call
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the REGS flag is set in rec->flags. When the function has
 * been set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
	FTRACE_FL_DIRECT	= (1UL << 24),
	FTRACE_FL_DIRECT_EN	= (1UL << 23),
};

#define FTRACE_REF_MAX_SHIFT	23
#define FTRACE_FL_BITS		9
#define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & ~FTRACE_FL_MASK)

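/*
 * With the values above, the top FTRACE_FL_BITS (9) bits of
 * dyn_ftrace.flags hold the FTRACE_FL_* mask and the low 23 bits hold
 * the registered-callback count. An illustrative check (sketch):
 *
 *	if (ftrace_rec_count(rec) == 1 && (rec->flags & FTRACE_FL_ENABLED))
 *		// exactly one ftrace_ops traces this function, and its
 *		// call site has been patched into a call
 */
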
struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);

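/*
 * Example (sketch, reusing the hypothetical my_ops from above; filters
 * are typically set up before the ops is registered):
 *
 *	// trace only vfs_read (reset == 1 clears any previous filter)
 *	ftrace_set_filter(&my_ops, "vfs_read", strlen("vfs_read"), 1);
 *	// or pin the filter to a known address instead:
 *	ftrace_set_filter_ip(&my_ops, (unsigned long)vfs_read, 0, 1);
 *	register_ftrace_function(&my_ops);
 */
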
enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
	FTRACE_MAY_SLEEP		= (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE           - The function is already what we want it to be
 *  MAKE_CALL        - Start tracing the function
 *  MODIFY_CALL      - Convert the call site to a different ftrace caller
 *                     (e.g. to start or stop saving regs)
 *  MAKE_NOP         - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))

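/*
 * Example (sketch of how core/arch code can walk every ftrace call-site
 * record; the printout is illustrative only):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		pr_info("call site at %ps\n", (void *)rec->ip);
 *	}
 */
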
int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr.
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

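/*
 * A hedged sketch of the read/compare/write contract above, in the
 * shape most arch implementations take. make_call_insn(), make_nop_insn()
 * and patch_insn() are stand-ins for arch-specific helpers, not real APIs:
 *
 *	unsigned char expect[MCOUNT_INSN_SIZE], nop[MCOUNT_INSN_SIZE];
 *	unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *	make_call_insn(expect, rec->ip, addr);	// what should be there now
 *	make_nop_insn(nop);			// what to write
 *	if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *		return -EFAULT;
 *	if (memcmp(cur, expect, MCOUNT_INSN_SIZE))
 *		return -EINVAL;
 *	if (patch_insn((void *)rec->ip, nop, MCOUNT_INSN_SIZE))
 *		return -EPERM;
 *	return 0;
 */
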
/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler.
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop.
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address that the call site is expected to currently call
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr.
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again, users of functions that take a ftrace_ops may not
 * have it defined when ftrace is not enabled, but these
 * functions may still be called. Use macros instead of inlines.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}

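/*
 * Usage sketch (the caller supplies the serialization described above):
 *
 *	int saved = __ftrace_enabled_save();
 *	// ... fragile section with function tracing forced off ...
 *	__ftrace_enabled_restore(saved);
 */
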
/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will generate
 * code for the CALLER_ADDR macros, when we really want these to be a
 * real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION	"__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION	"__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	unsigned long long calltime;
	unsigned long long rettime;
	int depth;
} __packed;

/* Types of the callback handlers for tracing the function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
	trace_func_graph_ent_t		entryfunc;
	trace_func_graph_ret_t		retfunc;
};

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
		     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it to be traced by the normal
 * function tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);

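/*
 * Example (sketch): a minimal function graph client. The names are
 * hypothetical; returning 0 from the entry handler tells the core not
 * to trace that particular function invocation.
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// record this entry
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *		// trace->rettime - trace->calltime is the time spent
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	// register_ftrace_graph(&my_gops) / unregister_ftrace_graph(&my_gops)
 */
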
extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
961 
962 #define __notrace_funcgraph
963 
964 static inline void ftrace_graph_init_task(struct task_struct *t) { }
965 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
966 static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
967 
968 /* Define as macros as fgraph_ops may not be defined */
969 #define register_ftrace_graph(ops) ({ -1; })
970 #define unregister_ftrace_graph(ops) do { } while (0)
971 
972 static inline unsigned long
973 ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
974 		      unsigned long *retp)
975 {
976 	return ret;
977 }
978 
979 static inline void pause_graph_tracing(void) { }
980 static inline void unpause_graph_tracing(void) { }
981 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
982 
#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */