/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

struct ftrace_hash;

enum {
	FTRACE_OPS_FL_ENABLED	= 1 << 0,
	FTRACE_OPS_FL_GLOBAL	= 1 << 1,
	FTRACE_OPS_FL_DYNAMIC	= 1 << 2,
};

struct ftrace_ops {
	ftrace_func_t		func;
	struct ftrace_ops	*next;
	unsigned long		flags;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_hash	*notrace_hash;
	struct ftrace_hash	*filter_hash;
#endif
};

extern int function_trace_stop;

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,    /* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/**
 * ftrace_stop - stop function tracer.
 *
 * A quick way to stop the function tracer.  Note that this is an on/off
 * switch; it is not recursive like preempt_disable.
 * This does not disable the calling of mcount; it only stops the
 * calling of functions from mcount.
 */
static inline void ftrace_stop(void)
{
	function_trace_stop = 1;
}

/**
 * ftrace_start - start the function tracer.
 *
 * This function is the inverse of ftrace_stop.  It does not enable
 * function tracing if the function tracer is disabled.  It only
 * sets the function tracer flag to continue calling the functions
 * from mcount.
 */
static inline void ftrace_start(void)
{
	function_trace_stop = 0;
}

/*
 * The ftrace_ops must be static and should also be read_mostly.
 * These functions do modify read_mostly variables, so use them
 * sparingly.  Never free an ftrace_ops or modify its next pointer
 * after it has been registered; even after unregistering it, the
 * next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);

extern void ftrace_stub(unsigned long a0, unsigned long a1);
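/*
 * Minimal usage sketch for register_ftrace_function().  This is purely
 * illustrative and not part of this header; my_trace_func and my_ops
 * are hypothetical names.  The callback runs on every traced function,
 * so it must be fast and reentrant.
 */
#if 0
static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* ip is the traced function, parent_ip its call site */
}

static struct ftrace_ops my_ops __read_mostly = {
	.func = my_trace_func,
};

static int __init my_tracer_init(void)
{
	/* my_ops must be static and must never be freed once registered */
	return register_ftrace_function(&my_ops);
}
#endif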
#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro, since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
static inline void ftrace_start(void) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);
#endif

struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(char *func, char *cmd,
					char *params, int enable);
};

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

struct seq_file;

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					void **data);
	int			(*callback)(unsigned long ip, void **data);
	void			(*free)(void **data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);

extern int ftrace_text_reserved(void *start, void *end);

enum {
	FTRACE_FL_ENABLED	= (1 << 30),
	FTRACE_FL_FREE		= (1 << 31),
};

#define FTRACE_FL_MASK		(0x3UL << 30)
#define FTRACE_REF_MAX		((1 << 30) - 1)

struct dyn_ftrace {
	union {
		unsigned long		ip; /* address of mcount call-site */
		struct dyn_ftrace	*freelist;
	};
	union {
		unsigned long		flags;
		struct dyn_ftrace	*newlist;
	};
	struct dyn_arch_ftrace		arch;
};

int ftrace_force_update(void);
void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void *data);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif
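/*
 * Illustrative sketch (not part of this header): limiting an ftrace_ops
 * to a subset of functions with ftrace_set_filter() before registering
 * it.  The names my_ops and my_trace_func are hypothetical.
 */
#if 0
static struct ftrace_ops my_ops __read_mostly = {
	.func = my_trace_func,
};

static int __init my_filtered_init(void)
{
	/* reset any previous filter, then trace only "vfs_*" matches */
	ftrace_set_filter(&my_ops, (unsigned char *)"vfs_*", 5, 1);
	return register_ftrace_function(&my_ops);
}
#endif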
/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr.
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop.
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
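/*
 * Sketch of the read/compare/write pattern the two kernel-doc comments
 * above require from an arch implementation.  This is not real arch
 * code: ftrace_nop_replace() and ftrace_call_replace() stand in for
 * hypothetical arch-specific helpers that produce the expected and the
 * new instruction bytes, and MCOUNT_INSN_SIZE for the call-site length.
 */
#if 0
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char expect[MCOUNT_INSN_SIZE];	/* a nop */
	unsigned char new[MCOUNT_INSN_SIZE];	/* call to addr */
	unsigned char cur[MCOUNT_INSN_SIZE];

	ftrace_nop_replace(expect);
	ftrace_call_replace(new, rec->ip, addr);

	if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
		return -EFAULT;	/* could not read the location */
	if (memcmp(cur, expect, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;	/* contents are not what we expected */
	if (probe_kernel_write((void *)rec->ip, new, MCOUNT_INSN_SIZE))
		return -EPERM;	/* could not write the location */
	return 0;
}
#endif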
/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
				     int len, int reset)
{
}
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline int ftrace_text_reserved(void *start, void *end)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock.  Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable and restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
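/*
 * Typical (illustrative) pairing of the save/restore helpers above:
 * disable ftrace around a fragile region, then put back whatever state
 * was there before.  Callers must provide their own serialization, as
 * noted above; my_fragile_operation is a hypothetical name.
 */
#if 0
void my_fragile_operation(void)
{
	int saved = __ftrace_enabled_save();

	/* ... code that must run without the function tracer ... */

	__ftrace_enabled_restore(saved);
}
#endif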
#ifndef HAVE_ARCH_CALLER_ADDR
# ifdef CONFIG_FRAME_POINTER
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
#  define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
#  define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
#  define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
#  define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
#  define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
# else
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 0UL
#  define CALLER_ADDR2 0UL
#  define CALLER_ADDR3 0UL
#  define CALLER_ADDR4 0UL
#  define CALLER_ADDR5 0UL
#  define CALLER_ADDR6 0UL
# endif
#endif /* ifndef HAVE_ARCH_CALLER_ADDR */

#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	unsigned long long calltime;
	unsigned long long rettime;
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;
};

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH		.ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
	unsigned long long subtime;
	unsigned long fp;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it traced by the normal function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

/*
 * We want to know which function is an entry point of a hardirq.
 * That will help us to flag it in the output.
 */
#define __irq_entry		__attribute__((__section__(".irqentry.text")))

/* Limits of hardirq entry points */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				 trace_func_graph_ent_t entryfunc);

extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define __irq_entry
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
					trace_func_graph_ent_t entryfunc)
{
	return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
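/*
 * Illustrative sketch of registering function graph entry/return
 * handlers (hypothetical names; only meaningful when
 * CONFIG_FUNCTION_GRAPH_TRACER is enabled).  Returning nonzero from
 * the entry handler asks the core to trace this function's return too.
 */
#if 0
static int my_graph_entry(struct ftrace_graph_ent *ent)
{
	return 1;	/* trace the matching return as well */
}

static void my_graph_return(struct ftrace_graph_ret *ret)
{
	/* ret->rettime - ret->calltime is the duration, children included */
}

static int __init my_graph_init(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}
#endif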
#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION		.trace_recursion = 0,
#endif

#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */