/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header. For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_recursion.h>
#include <linux/trace_clock.h>
#include <linux/jump_label.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

#ifdef CONFIG_TRACING
extern void ftrace_boot_snapshot(void);
#else
static inline void ftrace_boot_snapshot(void) { }
#endif

#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops;
struct ftrace_regs;
/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that does,
 * or at least does enough to prevent any unwelcome side effects.
 *
 * Also define the function prototype that these architectures use
 * to call the ftrace_ops_list_func().
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
#else
# define FTRACE_FORCE_LIST_FUNC 0
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif
#endif /* CONFIG_FUNCTION_TRACER */

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
struct ftrace_direct_func;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
	defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym)
{
	return NULL;
}
#endif

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported);
#else
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void *buffer, size_t *lenp, loff_t *ppos);

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS

struct ftrace_regs {
	struct pt_regs		regs;
};
#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)
/*
 * ftrace_instruction_pointer_set() is to be defined by the architecture
 * if it allows setting of the instruction pointer from the ftrace_regs when
 * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports
 * live kernel patching.
 */
#define ftrace_instruction_pointer_set(fregs, ip) do { } while (0)
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
{
	if (!fregs)
		return NULL;

	return arch_ftrace_get_regs(fregs);
}

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct ftrace_regs *fregs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
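/*
 * A minimal sketch of an ftrace_func_t callback (illustrative only; the
 * names my_callback/my_ops below are hypothetical). ftrace_get_regs()
 * only returns a non-NULL pt_regs when the ops was registered with
 * FTRACE_OPS_FL_SAVE_REGS on an arch that supports saving regs:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		struct pt_regs *regs = ftrace_get_regs(fregs);
 *
 *		if (regs) {
 *			// full register state of the traced call site
 *		}
 *	}
 */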
/*
 * FTRACE_OPS_FL_* bits denote the state of the ftrace_ops struct and are
 * set in the flags member.
 * SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and IPMODIFY are
 * attribute flags that can only be set before registering the ftrace_ops,
 * and cannot be modified while it is registered. Changing those attribute
 * flags after registering the ftrace_ops will cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *            allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops
 *            but the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback needs recursion protection. If it does
 *            not set this, then the ftrace infrastructure will assume
 *            that the callback can handle recursion on its own.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first
 *            time register_ftrace_function() is called on it, the ops
 *            gets initialized)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocated a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already registered
 *            for any of the functions that this ops will be registered for, then
 *            this ops will fail to register or set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *             ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *            (internal ftrace only, should not be used by others)
 */
enum {
	FTRACE_OPS_FL_ENABLED			= BIT(0),
	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
	FTRACE_OPS_FL_RECURSION			= BIT(4),
	FTRACE_OPS_FL_STUB			= BIT(5),
	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
	FTRACE_OPS_FL_DELETED			= BIT(7),
	FTRACE_OPS_FL_ADDING			= BIT(8),
	FTRACE_OPS_FL_REMOVING			= BIT(9),
	FTRACE_OPS_FL_MODIFYING			= BIT(10),
	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
	FTRACE_OPS_FL_PID			= BIT(13),
	FTRACE_OPS_FL_RCU			= BIT(14),
	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
	FTRACE_OPS_FL_PERMANENT			= BIT(16),
	FTRACE_OPS_FL_DIRECT			= BIT(17),
};

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;
	struct ftrace_hash __rcu	*filter_hash;
	struct mutex			regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void)
{
	ftrace_boot_snapshot();
}
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, unregistering it will perform a scheduling on all CPUs
 * to make sure that there are no more users. Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed, and if
 * private data is added to a ftrace_ops that is in core code, the user
 * of the ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops __rcu		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
	struct list_head		list;
#endif
};

extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;

/*
 * Traverse the ftrace_ops_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the ftrace_ops_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_check(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_check((op)->next)) && \
	       unlikely((op) != &ftrace_list_end))
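/*
 * Illustrative use of the iteration pair above, mirroring how the core
 * list function invokes every registered ops (sketch only; ip, parent_ip
 * and fregs come from the caller's context):
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		op->func(ip, parent_ip, op, fregs);
 *	} while_for_each_ftrace_op(op);
 */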
/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct ftrace_regs *fregs);

int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
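/*
 * A minimal registration sketch (hypothetical names, error handling
 * elided). The ops must stay alive for as long as it is registered:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// called for every function the ops' filter matches
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */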
#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
	unsigned long direct; /* for direct lookup only */
};

struct dyn_ftrace;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
int register_ftrace_direct(unsigned long ip, unsigned long addr);
int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
				struct dyn_ftrace *rec,
				unsigned long old_addr,
				unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);

#else
struct ftrace_ops;
# define ftrace_direct_func_count 0
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int modify_ftrace_direct(unsigned long ip,
				       unsigned long old_addr, unsigned long new_addr)
{
	return -ENOTSUPP;
}
static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
{
	return NULL;
}
static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
					      struct dyn_ftrace *rec,
					      unsigned long old_addr,
					      unsigned long new_addr)
{
	return -ENODEV;
}
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	return 0;
}
static inline int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
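/*
 * Illustrative sketch of attaching a direct trampoline to one function
 * entry, in the style of samples/ftrace/ftrace-direct.c (my_tramp is a
 * hypothetical arch-specific assembly stub that preserves the calling
 * convention and eventually returns to the traced function):
 *
 *	ret = register_ftrace_direct((unsigned long)wake_up_process,
 *				     (unsigned long)my_tramp);
 *	...
 *	ret = unregister_ftrace_direct((unsigned long)wake_up_process,
 *				       (unsigned long)my_tramp);
 */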
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, in x86, it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
						 unsigned long addr) { }
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
		       size_t *lenp, loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, holding values 0..FTRACE_REF_MAX, is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED  - the function is being traced
 *  REGS     - the record wants the function to save regs
 *  REGS_EN  - the function is set up to save regs.
 *  TRAMP    - the record can use a custom trampoline
 *  TRAMP_EN - the custom trampoline is set up
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *  DIRECT   - there is a direct function to call
 *  DIRECT_EN - the direct call is set up
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
	FTRACE_FL_DIRECT	= (1UL << 24),
	FTRACE_FL_DIRECT_EN	= (1UL << 23),
};

#define FTRACE_REF_MAX_SHIFT	23
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & FTRACE_REF_MAX)

struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
			  unsigned int cnt, int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
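/*
 * Filtering sketch: restrict a (hypothetical) my_ops to a single function
 * before registering it. A non-zero reset clears any previously set
 * filter first:
 *
 *	ret = ftrace_set_filter(&my_ops, "vfs_read", strlen("vfs_read"), 1);
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 */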
enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
	FTRACE_MAY_SLEEP		= (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE      - The function is already what we want it to be
 *  MAKE_CALL   - Start tracing the function
 *  MODIFY_CALL - Convert the call site to call a different ftrace
 *                trampoline (e.g. to start or stop saving regs)
 *  MAKE_NOP    - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
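/*
 * Sketch of how arch code can walk every patchable call site, typically
 * from arch_ftrace_update_code() with ftrace_lock held (illustrative):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// inspect or patch the call site at rec->ip
 *	}
 */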
int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_need_init_nop - return whether nop call sites should be initialized
 *
 * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
 * need to call ftrace_init_nop() if the code is built with that flag.
 * Architectures where this is not always the case may define their own
 * condition.
 *
 * Return must be:
 *  0 if ftrace_init_nop() should be called
 *  Nonzero if ftrace_init_nop() should not be called
 */
#ifndef ftrace_need_init_nop
#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
#endif

/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif
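/*
 * The arch implementation pattern that the ftrace_make_*() and
 * ftrace_modify_call() kernel-doc above and below describes is read,
 * compare, then write. Pseudo-sketch only: "expected" and "new" are the
 * opcodes the arch computes, and patch_text() stands in for the arch's
 * own text-poking primitive:
 *
 *	unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *	if (copy_from_kernel_nofault(cur, (void *)rec->ip, sizeof(cur)))
 *		return -EFAULT;		// could not read the location
 *	if (memcmp(cur, expected, sizeof(cur)))
 *		return -EINVAL;		// contents are not what we expect
 *	if (patch_text((void *)rec->ip, new, sizeof(cur)))
 *		return -EPERM;		// could not write the location
 *	return 0;
 */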
/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again, users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of an inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
					  size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
					   size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifndef ftrace_graph_func
#define ftrace_graph_func ftrace_stub
#define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
#else
#define FTRACE_OPS_GRAPH_STUB 0
#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/* totally disable ftrace - cannot be re-enabled after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
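/*
 * Usage sketch (the caller provides the synchronization mentioned above):
 *
 *	int saved;
 *
 *	saved = __ftrace_enabled_save();
 *	// ... region that must not be function traced ...
 *	__ftrace_enabled_restore(saved);
 */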
/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code
 * out of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION	"__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION	"__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	int depth;
	/* Number of functions that overran the depth limit for current task */
	unsigned int overrun;
	unsigned long long calltime;
	unsigned long long rettime;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
	trace_func_graph_ent_t	entryfunc;
	trace_func_graph_ret_t	retfunc;
};

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
		     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but still want it traced by the normal function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);
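/*
 * Minimal registration sketch (hypothetical names). The entry handler's
 * return value decides whether the matching return is traced:
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// nonzero: also hook this function's return
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *		// trace->rettime - trace->calltime is the time spent
 *	}
 *
 *	static struct fgraph_ops my_fgraph_ops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	register_ftrace_graph(&my_fgraph_ops);
 *	...
 *	unregister_ftrace_graph(&my_fgraph_ops);
 */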
/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);

static inline bool ftrace_graph_is_dead(void)
{
	return static_branch_unlikely(&kill_ftrace_graph);
}

extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

/* Define as macros as fgraph_ops may not be defined */
#define register_ftrace_graph(ops) ({ -1; })
#define unregister_ftrace_graph(ops) do { } while (0)

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */