// SPDX-License-Identifier: GPL-2.0

#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/trace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/glob.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <linux/ctype.h>
#include <linux/once_lite.h>

#include "pid_list.h"

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_syscalls */
#include <asm/syscall.h>	/* some archs define it here */
#endif

#define TRACE_MODE_WRITE	0640
#define TRACE_MODE_READ		0440

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_GRAPH_RETADDR_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,
	TRACE_HWLAT,
	TRACE_OSNOISE,
	TRACE_TIMERLAT,
	TRACE_RAW_DATA,
	TRACE_FUNC_REPEATS,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type item;

#undef __field_fn
#define __field_fn(type, item)		type item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __field_packed
#define __field_packed(type, container, item)

#undef __array
#define __array(type, item, size)	type item[size];

/*
 * For backward compatibility, older user space expects to see the
 * kernel_stack event with a fixed size caller field. But today the
 * fixed size is ignored by the kernel, and the real structure is
 * dynamic. Expose to user space: "unsigned long caller[8];" but the
 * real structure will be "unsigned long caller[] __counted_by(size)".
 */
#undef __stack_array
#define __stack_array(type, item, size, field)	type item[] __counted_by(field);

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type item[];

#undef __rel_dynamic_array
#define __rel_dynamic_array(type, item)	type item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)	\
	struct struct_name {					\
		struct trace_entry ent;				\
		tstruct						\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn)	\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed

#include "trace_entries.h"

/* Use this for memory failure errors */
#define MEM_FAIL(condition, fmt, ...)					\
	DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)
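/*
 * Illustrative usage sketch (not from this file): MEM_FAIL evaluates the
 * condition, logs at most once when it is true, and yields the condition's
 * truth value, so it can guard an error path directly:
 *
 *	if (MEM_FAIL(!buf, "failed to allocate trace buffer\n"))
 *		return -ENOMEM;
 */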
#define FAULT_STRING "(fault)"

#define HIST_STACKTRACE_DEPTH	16
#define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
#define HIST_STACKTRACE_SKIP	5

/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry ent;
	int nr;
	unsigned long args[];
};

struct syscall_trace_exit {
	struct trace_entry ent;
	int nr;
	long ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry ent;
	unsigned long ip;
};

struct eprobe_trace_entry_head {
	struct trace_entry ent;
};

struct kretprobe_trace_entry_head {
	struct trace_entry ent;
	unsigned long func;
	unsigned long ret_ip;
};

struct fentry_trace_entry_head {
	struct trace_entry ent;
	unsigned long ip;
};

struct fexit_trace_entry_head {
	struct trace_entry ent;
	unsigned long func;
	unsigned long ret_ip;
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace, etc.).
 */
struct trace_array_cpu {
	atomic_t disabled;
	void *buffer_page;	/* ring buffer spare */

	unsigned long entries;
	unsigned long saved_latency;
	unsigned long critical_start;
	unsigned long critical_end;
	unsigned long critical_sequence;
	unsigned long nice;
	unsigned long policy;
	unsigned long rt_priority;
	unsigned long skipped_entries;
	u64 preempt_timestamp;
	pid_t pid;
	kuid_t uid;
	char comm[TASK_COMM_LEN];

#ifdef CONFIG_FUNCTION_TRACER
	int ftrace_ignore_pid;
#endif
	bool ignore_pid;
};

struct tracer;
struct trace_option_dentry;

struct array_buffer {
	struct trace_array *tr;
	struct trace_buffer *buffer;
	struct trace_array_cpu __percpu *data;
	u64 time_start;
	int cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer *tracer;
	struct trace_option_dentry *topts;
};

struct trace_pid_list *trace_pid_list_alloc(void);
void trace_pid_list_free(struct trace_pid_list *pid_list);
bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid);
int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid);
int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid);
int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid);
int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid,
			unsigned int *next);

enum {
	TRACE_PIDS		= BIT(0),
	TRACE_NO_PIDS		= BIT(1),
};

static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
				    struct trace_pid_list *no_pid_list)
{
	/* Return true if the pid list in type has pids */
	return ((type & TRACE_PIDS) && pid_list) ||
		((type & TRACE_NO_PIDS) && no_pid_list);
}

static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
					 struct trace_pid_list *no_pid_list)
{
	/*
	 * Turning off what is in @type, return true if the "other"
	 * pid list still has pids in it.
	 */
	return (!(type & TRACE_PIDS) && pid_list) ||
		(!(type & TRACE_NO_PIDS) && no_pid_list);
}
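/*
 * Illustrative sketch (not from this file) of walking a trace_pid_list
 * with the API above, assuming the first/next helpers return 0 on
 * success and non-zero when no (further) pid is set, which is not
 * spelled out in this header:
 *
 *	unsigned int pid;
 *	int ret;
 *
 *	for (ret = trace_pid_list_first(pid_list, &pid); !ret;
 *	     ret = trace_pid_list_next(pid_list, pid + 1, &pid))
 *		pr_info("pid %u is being traced\n", pid);
 */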
typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);

/**
 * struct cond_snapshot - conditional snapshot data and callback
 *
 * The cond_snapshot structure encapsulates a callback function and
 * data associated with the snapshot for a given tracing instance.
 *
 * When a snapshot is taken conditionally, by invoking
 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
 * passed in turn to the cond_snapshot.update() function. That data
 * can be compared by the update() implementation with the cond_data
 * contained within the struct cond_snapshot instance associated with
 * the trace_array. Because the tr->max_lock is held throughout the
 * update() call, the update() function can directly retrieve the
 * cond_snapshot and cond_data associated with the per-instance
 * snapshot associated with the trace_array.
 *
 * The cond_snapshot.update() implementation can save data to be
 * associated with the snapshot if it decides to, and returns 'true'
 * in that case, or it returns 'false' if the conditional snapshot
 * shouldn't be taken.
 *
 * The cond_snapshot instance is created and associated with the
 * user-defined cond_data by tracing_cond_snapshot_enable().
 * Likewise, the cond_snapshot instance is destroyed and is no longer
 * associated with the trace instance by
 * tracing_cond_snapshot_disable().
 *
 * The method below is required.
 *
 * @update: When a conditional snapshot is invoked, the update()
 *	callback function is invoked with the tr->max_lock held. The
 *	update() implementation signals whether or not to actually
 *	take the snapshot, by returning 'true' if so, 'false' if no
 *	snapshot should be taken. Because the max_lock is held for
 *	the duration of update(), the implementation is safe to
 *	directly retrieve and save any implementation data it needs
 *	to in association with the snapshot.
 */
struct cond_snapshot {
	void *cond_data;
	cond_update_fn_t update;
};
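/*
 * Illustrative sketch (not from this file): a cond_update_fn_t that only
 * allows the snapshot when the value passed to tracing_snapshot_cond()
 * exceeds a threshold handed to tracing_cond_snapshot_enable() earlier
 * (assuming the tracing_cond_snapshot_data() accessor to fetch it):
 *
 *	static bool my_snapshot_update(struct trace_array *tr, void *cond_data)
 *	{
 *		unsigned long *threshold = tracing_cond_snapshot_data(tr);
 *
 *		return threshold && (unsigned long)cond_data > *threshold;
 *	}
 */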
/*
 * struct trace_func_repeats - used to keep track of the consecutive
 * (on the same CPU) calls of a single function.
 */
struct trace_func_repeats {
	unsigned long ip;
	unsigned long parent_ip;
	unsigned long count;
	u64 ts_last_call;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well.
 */
struct trace_array {
	struct list_head list;
	char *name;
	struct array_buffer array_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * array_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the array_buffer
	 * and the buffers are reset for the array_buffer so the
	 * tracing can continue.
	 */
	struct array_buffer max_buffer;
	bool allocated_snapshot;
	spinlock_t snapshot_trigger_lock;
	unsigned int snapshot;
	unsigned long max_latency;
#ifdef CONFIG_FSNOTIFY
	struct dentry *d_max_latency;
	struct work_struct fsnotify_work;
	struct irq_work fsnotify_irqwork;
#endif
#endif
	/* The below is for the memory mapped ring buffer */
	unsigned int mapped;
	unsigned long range_addr_start;
	unsigned long range_addr_size;
	long text_delta;
	long data_delta;

	struct trace_pid_list __rcu *filtered_pids;
	struct trace_pid_list __rcu *filtered_no_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr(),
	 * so it needs to be defined outside of
	 * CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t max_lock;
	int buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int sys_refcount_enter;
	int sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int stop_count;
	int clock_id;
	int nr_topts;
	bool clear_trace;
	int buffer_percent;
	unsigned int n_err_log_entries;
	struct tracer *current_trace;
	unsigned int trace_flags;
	unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int flags;
	raw_spinlock_t start_lock;
	const char *system_names;
	struct list_head err_log;
	struct dentry *dir;
	struct dentry *options;
	struct dentry *percpu_dir;
	struct eventfs_inode *event_dir;
	struct trace_options *topts;
	struct list_head systems;
	struct list_head events;
	struct trace_event_file *trace_marker_file;
	cpumask_var_t tracing_cpumask;	/* only trace on set CPUs */
	/* one per_cpu trace_pipe can be opened by only one user */
	cpumask_var_t pipe_cpumask;
	int ref;
	int trace_ref;
#ifdef CONFIG_MODULES
	struct list_head mod_events;
#endif
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops *ops;
	struct trace_pid_list __rcu *function_pids;
	struct trace_pid_list __rcu *function_no_pids;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	struct fgraph_ops *gops;
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
	/* All of these are protected by the ftrace_lock */
	struct list_head func_probes;
	struct list_head mod_trace;
	struct list_head mod_notrace;
#endif
	/* function tracing enabled */
	int function_enabled;
#endif
	int no_filter_buffering_ref;
	struct list_head hist_vars;
#ifdef CONFIG_TRACER_SNAPSHOT
	struct cond_snapshot *cond_snapshot;
#endif
	struct trace_func_repeats __percpu *last_func_repeats;
	/*
	 * On boot up, the ring buffer is set to the minimum size, so that
	 * we do not waste memory on systems that are not using tracing.
	 */
	bool ring_buffer_expanded;
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= BIT(0),
	TRACE_ARRAY_FL_BOOT	= BIT(1),
};

#ifdef CONFIG_MODULES
bool module_exists(const char *module);
#else
static inline bool module_exists(const char *module)
{
	return false;
}
#endif

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern int tracing_check_open_get_tr(struct trace_array *tr);
extern struct trace_array *trace_array_find(const char *instance);
extern struct trace_array *trace_array_find_get(const char *instance);

extern u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe);
extern int tracing_set_filter_buffering(struct trace_array *tr, bool set);
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);

extern bool trace_clock_in_ns(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)			\
	if (FTRACE_CMP_TYPE(var, etype)) {			\
		var = (typeof(var))(entry);			\
		WARN_ON(id != 0 && (entry)->type != id);	\
		break;						\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types, simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
		IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct fgraph_retaddr_ent_entry,	\
			  TRACE_GRAPH_RETADDR_ENT);			\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct func_repeats_entry,		\
			  TRACE_FUNC_REPEATS);				\
		__ftrace_bad_type();					\
	} while (0)
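/*
 * Illustrative sketch (not from this file): typical use in an output
 * callback, casting the generic entry to its concrete type before
 * touching type-specific fields:
 *
 *	struct print_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%s", field->buf);
 */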
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char *name;	/* Will appear on the trace_options file */
	u32 bit;		/* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer has
 * to set the initial value of the flags val.
 */
struct tracer_flags {
	u32 val;
	struct tracer_opt *opts;
	struct tracer *trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b


struct trace_option_dentry {
	struct tracer_opt *opt;
	struct tracer_flags *flags;
	struct trace_array *tr;
	struct dentry *entry;
};

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char *name;
	int (*init)(struct trace_array *tr);
	void (*reset)(struct trace_array *tr);
	void (*start)(struct trace_array *tr);
	void (*stop)(struct trace_array *tr);
	int (*update_thresh)(struct trace_array *tr);
	void (*open)(struct trace_iterator *iter);
	void (*pipe_open)(struct trace_iterator *iter);
	void (*close)(struct trace_iterator *iter);
	void (*pipe_close)(struct trace_iterator *iter);
	ssize_t (*read)(struct trace_iterator *iter,
			struct file *filp, char __user *ubuf,
			size_t cnt, loff_t *ppos);
	ssize_t (*splice_read)(struct trace_iterator *iter,
			       struct file *filp,
			       loff_t *ppos,
			       struct pipe_inode_info *pipe,
			       size_t len,
			       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int (*selftest)(struct tracer *trace,
			struct trace_array *tr);
#endif
	void (*print_header)(struct seq_file *m);
	enum print_line_t (*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int (*set_flag)(struct trace_array *tr,
			u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int (*flag_changed)(struct trace_array *tr,
			    u32 mask, int set);
	struct tracer *next;
	struct tracer_flags *flags;
	int enabled;
	bool print_max;
	bool allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool use_max_tr;
#endif
	/* True if tracer cannot be enabled in kernel param */
	bool noboot;
};
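/*
 * Illustrative sketch (not from this file): the bare minimum a tracer
 * needs before it can be registered (all names here are hypothetical):
 *
 *	static int mytrace_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void mytrace_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer mytrace __tracer_data = {
 *		.name	= "mytrace",
 *		.init	= mytrace_init,
 *		.reset	= mytrace_reset,
 *	};
 *
 * A call to register_tracer(&mytrace) from an __init function then makes
 * it selectable via the available_tracers file.
 */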
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset_online_cpus(struct array_buffer *buf);
void tracing_reset_all_online_cpus(void);
void tracing_reset_all_online_cpus_unlocked(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
int tracing_release_generic_tr(struct inode *inode, struct file *file);
int tracing_open_file_tr(struct inode *inode, struct file *filp);
int tracing_release_file_tr(struct inode *inode, struct file *filp);
int tracing_single_release_file_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
void tracer_tracing_off(struct trace_array *tr);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

int tracing_init_dentry(void);
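/*
 * Illustrative sketch (not from this file): creating a read-only tracefs
 * file with the TRACE_MODE_READ mode defined above; my_stats_fops is a
 * hypothetical file_operations instance:
 *
 *	trace_create_file("my_stats", TRACE_MODE_READ, d_tracer,
 *			  tr, &my_stats_fops);
 */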
struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned int trace_ctx);

int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
					struct ring_buffer_event *event);

bool trace_is_tracepoint_string(const char *str);
const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
char *trace_iter_expand_format(struct trace_iterator *iter);
bool ignore_event(struct trace_iterator *iter);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
unsigned long trace_total_entries(struct trace_array *tr);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned int trace_ctx);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned int trace_ctx);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops);
int trace_graph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_start_tgid_record(void);
void tracing_stop_tgid_record(void);

int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

/* PID filtering */

extern int pid_max;

bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct trace_pid_list *filtered_no_pids,
			    struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt);

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
		   void *cond_data);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

#ifdef CONFIG_FSNOTIFY
#define LATENCY_FS_NOTIFY
#endif
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef LATENCY_FS_NOTIFY
void latency_fsnotify(struct trace_array *tr);
#else
static inline void latency_fsnotify(struct trace_array *tr) { }
#endif

#ifdef CONFIG_STACKTRACE
void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
#else
static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
				 int skip)
{
}
#endif /* CONFIG_STACKTRACE */

void trace_last_func_repeats(struct trace_array *tr,
			     struct trace_func_repeats *last_info,
			     unsigned int trace_ctx);

extern u64 ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern int trace_find_tgid(int pid);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
extern unsigned long ftrace_number_of_pages;
extern unsigned long ftrace_number_of_groups;
extern u64 ftrace_update_time;
extern u64 ftrace_total_mod_time;
void ftrace_init_trace_array(struct trace_array *tr);
#else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern void trace_set_ring_buffer_expanded(struct trace_array *tr);
extern bool tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern void __init disable_tracing_selftest(const char *reason);

extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
#define __tracer_data __refdata
#else
static inline void __init disable_tracing_selftest(const char *reason)
{
}
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data __read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(u64 nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk_buf(struct trace_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);

struct ftrace_hash;

struct ftrace_mod_load {
	struct list_head list;
	char *func;
	char *module;
	int enable;
};

enum {
	FTRACE_HASH_FL_MOD	= (1 << 0),
};

struct ftrace_hash {
	unsigned long size_bits;
	struct hlist_head *buckets;
	unsigned long count;
	unsigned long flags;
	struct rcu_head rcu;
};

struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);

static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
}

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_REL_TIME	0x40
#define TRACE_GRAPH_PRINT_IRQS		0x80
#define TRACE_GRAPH_PRINT_TAIL		0x100
#define TRACE_GRAPH_SLEEP_TIME		0x200
#define TRACE_GRAPH_GRAPH_TIME		0x400
#define TRACE_GRAPH_PRINT_RETVAL	0x800
#define TRACE_GRAPH_PRINT_RETVAL_HEX	0x1000
#define TRACE_GRAPH_PRINT_RETADDR	0x2000
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);

#ifdef CONFIG_FUNCTION_PROFILER
extern void ftrace_graph_graph_time_control(bool enable);
#else
static inline void ftrace_graph_graph_time_control(bool enable) { }
#endif

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned int trace_ctx);
extern int __trace_graph_retaddr_entry(struct trace_array *tr,
				       struct ftrace_graph_ent *trace,
				       unsigned int trace_ctx,
				       unsigned long retaddr);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned int trace_ctx);
extern void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
extern int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
extern void free_fgraph_ops(struct trace_array *tr);

enum {
	TRACE_GRAPH_FL		= 1,

	/*
	 * In the very unlikely case that an interrupt came in
	 * at a start of graph tracing, and we want to trace
	 * the function in that interrupt, the depth can be greater
	 * than zero, because of the preempted start of a previous
	 * trace. In an even more unlikely case, depth could be 2
	 * if a softirq interrupted the start of graph tracing,
	 * followed by an interrupt preempting a start of graph
	 * tracing in the softirq, and depth can even be 3
	 * if an NMI came in at the start of an interrupt function
	 * that preempted a softirq start of a function that
	 * preempted normal context!!!! Luckily, it can't be
	 * greater than 3, so the next two bits are a mask
	 * of what the depth is when we set TRACE_GRAPH_FL.
	 */

	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * To implement set_graph_notrace, if this bit is set, we ignore
	 * function graph tracing of called functions, until the return
	 * function is called to clear it.
	 */
	TRACE_GRAPH_NOTRACE_BIT,
};

#define TRACE_GRAPH_NOTRACE	(1 << TRACE_GRAPH_NOTRACE_BIT)

static inline unsigned long ftrace_graph_depth(unsigned long *task_var)
{
	return (*task_var >> TRACE_GRAPH_DEPTH_START_BIT) & 3;
}

static inline void ftrace_graph_set_depth(unsigned long *task_var, int depth)
{
	*task_var &= ~(3 << TRACE_GRAPH_DEPTH_START_BIT);
	*task_var |= (depth & 3) << TRACE_GRAPH_DEPTH_START_BIT;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash __rcu *ftrace_graph_hash;
extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;

static inline int
ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
{
	unsigned long addr = trace->func;
	int ret = 0;
	struct ftrace_hash *hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());

	if (ftrace_hash_empty(hash)) {
		ret = 1;
		goto out;
	}

	if (ftrace_lookup_ip(hash, addr)) {
		/*
		 * This needs to be cleared on the return functions
		 * when the depth is zero.
		 */
		*task_var |= TRACE_GRAPH_FL;
		ftrace_graph_set_depth(task_var, trace->depth);

		/*
		 * If no irqs are to be traced, but a set_graph_function
		 * is set, and called by an interrupt handler, we still
		 * want to trace it.
		 */
		if (in_hardirq())
			trace_recursion_set(TRACE_IRQ_BIT);
		else
			trace_recursion_clear(TRACE_IRQ_BIT);
		ret = 1;
	}

out:
	preempt_enable_notrace();
	return ret;
}

static inline void
ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace)
{
	unsigned long *task_var = fgraph_get_task_var(gops);

	if ((*task_var & TRACE_GRAPH_FL) &&
	    trace->depth == ftrace_graph_depth(task_var))
		*task_var &= ~TRACE_GRAPH_FL;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int ret = 0;
	struct ftrace_hash *notrace_hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
						 !preemptible());

	if (ftrace_lookup_ip(notrace_hash, addr))
		ret = 1;

	preempt_enable_notrace();
	return ret;
}
#else
static inline int ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
static inline void ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace)
{ }
#endif /* CONFIG_DYNAMIC_FTRACE */

extern unsigned int fgraph_max_depth;
extern bool fgraph_sleep_time;

static inline bool
ftrace_graph_ignore_func(struct fgraph_ops *gops, struct ftrace_graph_ent *trace)
{
	unsigned long *task_var = fgraph_get_task_var(gops);

	/*
	 * Trace the function if it is nested in an already-traced
	 * function, or if it is itself an enabled function.
	 */
	return !((*task_var & TRACE_GRAPH_FL) ||
		 ftrace_graph_addr(task_var, trace)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}

void fgraph_init_ops(struct ftrace_ops *dst_ops,
		     struct ftrace_ops *src_ops);

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
static inline void free_fgraph_ops(struct trace_array *tr) { }
/* ftrace_ops may not be defined */
#define init_array_fgraph_ops(tr, ops) do { } while (0)
#define allocate_fgraph_ops(tr, ops) ({ 0; })
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER

#define FTRACE_PID_IGNORE	-1
#define FTRACE_PID_TRACE	-2

struct ftrace_func_command {
	struct list_head list;
	char *name;
	int (*func)(struct trace_array *tr,
		    struct ftrace_hash *hash,
		    char *func, char *cmd,
		    char *params, int enable);
};
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
		FTRACE_PID_IGNORE;
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
int ftrace_allocate_ftrace_ops(struct trace_array *tr);
void ftrace_free_ftrace_ops(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
				  struct dentry *d_tracer);
void ftrace_clear_pids(struct trace_array *tr);
int init_function_trace(void);
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	return 0;
}
static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)

struct ftrace_probe_ops {
	void (*func)(unsigned long ip,
		     unsigned long parent_ip,
		     struct trace_array *tr,
		     struct ftrace_probe_ops *ops,
		     void *data);
	int (*init)(struct ftrace_probe_ops *ops,
		    struct trace_array *tr,
		    unsigned long ip, void *init_data,
		    void **data);
	void (*free)(struct ftrace_probe_ops *ops,
		     struct trace_array *tr,
		     unsigned long ip, void *data);
	int (*print)(struct seq_file *m,
		     unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data);
};

struct ftrace_func_mapper;
typedef int (*ftrace_mapper_func)(void *data);

struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
				  unsigned long ip);
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
			      unsigned long ip, void *data);
void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
				   unsigned long ip);
void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
			     ftrace_mapper_func free_func);
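/*
 * Illustrative sketch (not from this file): attaching per-ip data to
 * probed functions with the mapper API above, in the style of a
 * countdown kept per function:
 *
 *	struct ftrace_func_mapper *mapper = allocate_ftrace_func_mapper();
 *	long *count;
 *
 *	ftrace_func_mapper_add_ip(mapper, ip, (void *)5L);
 *	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 *	if (count && *count)
 *		(*count)--;
 */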
extern int
register_ftrace_function_probe(char *glob, struct trace_array *tr,
			       struct ftrace_probe_ops *ops, void *data);
extern int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
				      struct ftrace_probe_ops *ops);
extern void clear_ftrace_function_probes(struct trace_array *tr);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);

extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
			     int len, int reset);
extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			      int len, int reset);
#else
struct ftrace_func_command;

static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}

/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool cont;
	char *buffer;
	unsigned idx;
	unsigned size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);
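/*
 * Illustrative sketch (not from this file): reading space-separated
 * tokens from user space, in the style of the filter file writers;
 * handle_token() is a hypothetical consumer:
 *
 *	struct trace_parser parser;
 *	int read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser))
 *		handle_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 */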
/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),	\
		C(FUNC_FORK,		"function-fork"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
# define TRACE_ITER_FUNC_FORK		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(FIELDS,		"fields"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(RECORD_TGID,		"record-tgid"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		C(TRACE_PRINTK,		"trace_printk_dest"),	\
		C(PAUSE_ON_TRACE,	"pause-on-trace"),	\
		C(HASH_PTR,		"hash-ptr"),	/* Print hashed pointer */ \
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };
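/*
 * Illustrative sketch (not from this file): after the two expansions of
 * the C() macro above, each option is both a bit index and a mask, so an
 * option can be tested directly against tr->trace_flags:
 *
 *	if (tr->trace_flags & TRACE_ITER_VERBOSE)
 *		trace_seq_puts(s, "verbose output enabled\n");
 */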
/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(struct trace_array *tr);

union trace_synth_field {
	u8 as_u8;
	u16 as_u16;
	u32 as_u32;
	u64 as_u64;
	struct trace_dynamic_info as_dynamic;
};

struct ftrace_event_field {
	struct list_head link;
	const char *name;
	const char *type;
	int filter_type;
	int offset;
	int size;
	unsigned int is_signed:1;
	unsigned int needs_test:1;
	int len;
};

struct prog_entry;

struct event_filter {
	struct prog_entry __rcu *prog;
	char *filter_string;
};

struct event_subsystem {
	struct list_head list;
	const char *name;
	struct event_filter *filter;
	int ref_count;
};

struct trace_subsystem_dir {
	struct list_head list;
	struct event_subsystem *subsystem;
	struct trace_array *tr;
	struct eventfs_inode *ei;
	int ref_count;
	int nr_events;
};

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned int trace_ctx,
				     struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct trace_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned int trace_ctx)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
}

DECLARE_PER_CPU(bool, trace_taskinfo_save);
int trace_save_cmdline(struct task_struct *tsk);
int trace_create_savedcmd(void);
int trace_alloc_tgid_map(void);
void trace_free_saved_cmdlines_buffer(void);

extern const struct file_operations tracing_saved_cmdlines_fops;
extern const struct file_operations tracing_saved_tgids_fops;
extern const struct file_operations tracing_saved_cmdlines_size_fops;

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);

void early_enable_events(struct trace_array *tr, char *buf, bool disable_first);

static inline void
__trace_event_discard_commit(struct trace_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer and enable preemption */
		this_cpu_dec(trace_buffered_event_cnt);
		preempt_enable_notrace();
		return;
	}
	/* ring_buffer_discard_commit() enables preemption */
	ring_buffer_discard_commit(buffer, event);
}

/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct trace_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, buffer, entry, event);

	if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
				    EVENT_FILE_FL_FILTERED |
				    EVENT_FILE_FL_PID_FILTER))))
		return false;

	if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
		goto discard;

	if (file->flags & EVENT_FILE_FL_FILTERED &&
	    !filter_match_preds(file->filter, entry))
		goto discard;

	if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(file))
		goto discard;

	return false;
discard:
	__trace_event_discard_commit(buffer, event);
	return true;
}

/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @trace_ctx: The tracing context flags.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct trace_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned int trace_ctx)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);

	if (tt)
		event_triggers_post_call(file, tt);
}

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED	16384

struct filter_pred;
struct regex;

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
	MATCH_GLOB,
	MATCH_INDEX,
};

struct regex {
	char pattern[MAX_FILTER_STR_VAL];
	int len;
	int field_len;
	regex_match_func match;
};

static inline bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_RDYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING ||
	       field->filter_type == FILTER_COMM;
}

static inline bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct trace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct trace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct trace_array *tr,
			       struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern void trace_event_enable_tgid_record(bool enable);

extern int event_trace_init(void);
extern int init_events(void);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);
extern void __trace_early_add_events(struct trace_array *tr);

extern struct trace_event_file *__find_event_file(struct trace_array *tr,
						  const char *system,
						  const char *event);
extern struct trace_event_file *find_event_file(struct trace_array *tr,
						const char *system,
						const char *event);

static inline void *event_file_data(struct file *filp)
{
	return READ_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

/*
 * When the trace_event_file is the filp->i_private pointer,
 * it must be taken under the event_mutex lock, and then checked
 * if the EVENT_FILE_FL_FREED flag is set. If it is, then the
 * data pointed to by the trace_event_file can not be trusted.
 *
 * Use event_file_file() to access the trace_event_file from
 * the filp the first time under the event_mutex and check for
 * NULL. If it is needed to be retrieved again and the event_mutex
 * is still held, then event_file_data() can be used and it
 * is guaranteed to be valid.
 */
static inline struct trace_event_file *event_file_file(struct file *filp)
{
	struct trace_event_file *file;

	lockdep_assert_held(&event_mutex);
	file = READ_ONCE(file_inode(filp)->i_private);
	if (!file || file->flags & EVENT_FILE_FL_FREED)
		return NULL;
	return file;
}
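/*
 * Illustrative sketch (not from this file): typical open-time pattern
 * for files whose i_private points at a trace_event_file; do_open() is
 * a hypothetical helper:
 *
 *	int ret = -ENODEV;
 *
 *	mutex_lock(&event_mutex);
 *	file = event_file_file(filp);
 *	if (file)
 *		ret = do_open(filp, file);
 *	mutex_unlock(&event_mutex);
 *	return ret;
 */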
1677 */ 1678 static inline struct trace_event_file *event_file_file(struct file *filp) 1679 { 1680 struct trace_event_file *file; 1681 1682 lockdep_assert_held(&event_mutex); 1683 file = READ_ONCE(file_inode(filp)->i_private); 1684 if (!file || file->flags & EVENT_FILE_FL_FREED) 1685 return NULL; 1686 return file; 1687 } 1688 1689 extern const struct file_operations event_trigger_fops; 1690 extern const struct file_operations event_hist_fops; 1691 extern const struct file_operations event_hist_debug_fops; 1692 extern const struct file_operations event_inject_fops; 1693 1694 #ifdef CONFIG_HIST_TRIGGERS 1695 extern int register_trigger_hist_cmd(void); 1696 extern int register_trigger_hist_enable_disable_cmds(void); 1697 #else 1698 static inline int register_trigger_hist_cmd(void) { return 0; } 1699 static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; } 1700 #endif 1701 1702 extern int register_trigger_cmds(void); 1703 extern void clear_event_triggers(struct trace_array *tr); 1704 1705 enum { 1706 EVENT_TRIGGER_FL_PROBE = BIT(0), 1707 }; 1708 1709 struct event_trigger_data { 1710 unsigned long count; 1711 int ref; 1712 int flags; 1713 struct event_trigger_ops *ops; 1714 struct event_command *cmd_ops; 1715 struct event_filter __rcu *filter; 1716 char *filter_str; 1717 void *private_data; 1718 bool paused; 1719 bool paused_tmp; 1720 struct list_head list; 1721 char *name; 1722 struct list_head named_list; 1723 struct event_trigger_data *named_data; 1724 }; 1725 1726 /* Avoid typos */ 1727 #define ENABLE_EVENT_STR "enable_event" 1728 #define DISABLE_EVENT_STR "disable_event" 1729 #define ENABLE_HIST_STR "enable_hist" 1730 #define DISABLE_HIST_STR "disable_hist" 1731 1732 struct enable_trigger_data { 1733 struct trace_event_file *file; 1734 bool enable; 1735 bool hist; 1736 }; 1737 1738 extern int event_enable_trigger_print(struct seq_file *m, 1739 struct event_trigger_data *data); 1740 extern void event_enable_trigger_free(struct event_trigger_data *data); 1741 extern int event_enable_trigger_parse(struct event_command *cmd_ops, 1742 struct trace_event_file *file, 1743 char *glob, char *cmd, 1744 char *param_and_filter); 1745 extern int event_enable_register_trigger(char *glob, 1746 struct event_trigger_data *data, 1747 struct trace_event_file *file); 1748 extern void event_enable_unregister_trigger(char *glob, 1749 struct event_trigger_data *test, 1750 struct trace_event_file *file); 1751 extern void trigger_data_free(struct event_trigger_data *data); 1752 extern int event_trigger_init(struct event_trigger_data *data); 1753 extern int trace_event_trigger_enable_disable(struct trace_event_file *file, 1754 int trigger_enable); 1755 extern void update_cond_flag(struct trace_event_file *file); 1756 extern int set_trigger_filter(char *filter_str, 1757 struct event_trigger_data *trigger_data, 1758 struct trace_event_file *file); 1759 extern struct event_trigger_data *find_named_trigger(const char *name); 1760 extern bool is_named_trigger(struct event_trigger_data *test); 1761 extern int save_named_trigger(const char *name, 1762 struct event_trigger_data *data); 1763 extern void del_named_trigger(struct event_trigger_data *data); 1764 extern void pause_named_trigger(struct event_trigger_data *data); 1765 extern void unpause_named_trigger(struct event_trigger_data *data); 1766 extern void set_named_trigger_data(struct event_trigger_data *data, 1767 struct event_trigger_data *named_data); 1768 extern struct event_trigger_data * 1769 get_named_trigger_data(struct 
extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);
extern bool event_trigger_check_remove(const char *glob);
extern bool event_trigger_empty_param(const char *param);
extern int event_trigger_separate_filter(char *param_and_filter, char **param,
					 char **filter, bool param_required);
extern struct event_trigger_data *
event_trigger_alloc(struct event_command *cmd_ops,
		    char *cmd,
		    char *param,
		    void *private_data);
extern int event_trigger_parse_num(char *trigger,
				   struct event_trigger_data *trigger_data);
extern int event_trigger_set_filter(struct event_command *cmd_ops,
				    struct trace_event_file *file,
				    char *param,
				    struct event_trigger_data *trigger_data);
extern void event_trigger_reset_filter(struct event_command *cmd_ops,
				       struct event_trigger_data *trigger_data);
extern int event_trigger_register(struct event_command *cmd_ops,
				  struct trace_event_file *file,
				  char *glob,
				  struct event_trigger_data *trigger_data);
extern void event_trigger_unregister(struct event_command *cmd_ops,
				     struct trace_event_file *file,
				     char *glob,
				     struct event_trigger_data *trigger_data);

extern void event_file_get(struct trace_event_file *file);
extern void event_file_put(struct trace_event_file *file);

/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * The @init and @free methods are used during trigger setup and
 * teardown, typically called from an event_command's @parse()
 * function implementation.
 *
 * The @print method is used to print the trigger spec.
 *
 * The @trigger method is the function that actually implements the
 * trigger and is called in the context of the triggering event
 * whenever that event occurs.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @trigger: The trigger 'probe' function called when the triggering
 *	event occurs. The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command) along with
 *	the trace record, rec.
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command @reg()
 *	function). This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @unreg() function). This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself. This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void			(*trigger)(struct event_trigger_data *data,
					   struct trace_buffer *buffer,
					   void *rec,
					   struct ring_buffer_event *rbe);
	int			(*init)(struct event_trigger_data *data);
	void			(*free)(struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_data *data);
};
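
/*
 * A minimal sketch of an event_trigger_ops instance (hypothetical
 * "mytrigger"; real instances live in trace_events_trigger.c, and
 * mytrigger_print is an illustrative name):
 *
 *	static void mytrigger_trigger(struct event_trigger_data *data,
 *				      struct trace_buffer *buffer,
 *				      void *rec,
 *				      struct ring_buffer_event *rbe)
 *	{
 *		// act on the triggering event, e.g. flip a tracing flag
 *	}
 *
 *	static struct event_trigger_ops mytrigger_ops = {
 *		.trigger	= mytrigger_trigger,
 *		.print		= mytrigger_print,	// @print is required
 *		.init		= event_trigger_init,	// generic refcounting
 *		.free		= event_trigger_free,	// generic teardown
 *	};
 */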

/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event. The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event. When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @flags, must be set for
 * each event command.
 *
 * @name: The unique name that identifies the event command. This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'. This value has two purposes, the first to ensure that
 *	only one trigger of the same type can be set at a given time
 *	for a particular event e.g. it doesn't make sense to have both
 *	a traceon and traceoff trigger attached to a single event at
 *	the same time, so traceon and traceoff have the same type
 *	though they have different names. The @trigger_type value is
 *	also used as a bit value for deferring the actual trigger
 *	action until after the current event is finished. Some
 *	commands need to do this if they themselves log to the trace
 *	buffer (see the EVENT_CMD_FL_POST_TRIGGER flag below).
 *	@trigger_type values are defined by adding new values to the
 *	trigger_type enum in include/linux/trace_events.h.
 *
 * @flags: See the enum event_command_flags below.
 *
 * All the methods below, except for @set_filter() and @unreg_all(),
 * must be implemented.
 *
 * @parse: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user. It allocates the trigger instance and registers it with
 *	the appropriate trace event. It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_event_triggers.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed. This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_event_triggers.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	releasing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_event_triggers.c).
 *
 * @unreg_all: An optional function called to remove all the triggers
 *	from the list of triggers associated with the event. Called
 *	when a trigger file is opened in truncate mode.
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger. If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored. This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_event_triggers.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 *	This callback function allows a single event_command to
 *	support multiple trigger implementations via different sets of
 *	event_trigger_ops, depending on the value of the @param
 *	string.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	int			flags;
	int			(*parse)(struct event_command *cmd_ops,
					 struct trace_event_file *file,
					 char *glob, char *cmd,
					 char *param_and_filter);
	int			(*reg)(char *glob,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	void			(*unreg_all)(struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
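
/*
 * A minimal sketch of an event_command definition and registration
 * (hypothetical "mycmd" command; the mycmd_* callback names are
 * illustrative, real ones are static to trace_events_trigger.c):
 *
 *	static struct event_command trigger_mycmd_cmd = {
 *		.name			= "mycmd",
 *		.trigger_type		= ETT_TRACE_ONOFF,
 *		.parse			= mycmd_trigger_parse,
 *		.reg			= mycmd_register_trigger,
 *		.unreg			= mycmd_unregister_trigger,
 *		.get_trigger_ops	= mycmd_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 *	register_event_command(&trigger_mycmd_cmd);
 */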

/**
 * enum event_command_flags - flags for struct event_command
 *
 * @POST_TRIGGER: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed. Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer. Thus
 *	we make sure the current event is committed before invoking
 *	those triggers. To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @post_trigger flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger. Once all commands have been
 *	either invoked or set their return flag, the current record is
 *	either committed or discarded. At that point, if any commands
 *	have deferred their triggers, those commands are finally
 *	invoked following the close of the current event. In other
 *	words, if the event_trigger_ops @trigger() probe implementation
 *	itself logs to the trace buffer, this flag should be set,
 *	otherwise it can be left unspecified.
 *
 * @NEEDS_REC: A flag that says whether or not this command needs
 *	access to the trace record in order to perform its function,
 *	regardless of whether or not it has a filter associated with
 *	it (filters make a trigger require access to the trace record
 *	but are not always present).
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER	= 1,
	EVENT_CMD_FL_NEEDS_REC		= 2,
};

static inline bool event_command_post_trigger(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
}

static inline bool event_command_needs_rec(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
}
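
/*
 * A condensed sketch of the two-phase invocation described above
 * (hypothetical caller; the real logic is in event_triggers_call()
 * and event_triggers_post_call() in trace_events_trigger.c):
 *
 *	enum event_trigger_type tt = ETT_NONE;
 *
 *	list_for_each_entry(data, &file->triggers, list) {
 *		if (event_command_post_trigger(data->cmd_ops))
 *			tt |= data->cmd_ops->trigger_type;	// defer
 *		else
 *			data->ops->trigger(data, buffer, rec, rbe);
 *	}
 *	// ... current record is committed or discarded ...
 *	if (tt)
 *		event_triggers_post_call(file, tt);	// run deferred ones
 */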

extern int trace_event_enable_disable(struct trace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);
extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
					cond_update_fn_t update);
extern int tracing_snapshot_cond_disable(struct trace_array *tr);
extern void *tracing_cond_snapshot_data(struct trace_array *tr);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_control(bool enabled);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/* Used from boot time tracer */
extern int trace_set_options(struct trace_array *tr, char *option);
extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id);
extern int tracing_set_cpumask(struct trace_array *tr,
			       cpumask_var_t tracing_cpumask_new);

#define MAX_EVENT_NAME_LEN	64

extern ssize_t trace_parse_run_command(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos,
		int (*createfn)(const char *));

extern unsigned int err_pos(char *cmd, const char *str);
extern void tracing_log_err(struct trace_array *tr,
			    const char *loc, const char *cmd,
			    const char **errs, u8 type, u16 pos);
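
/*
 * A minimal sketch of how a parser reports an error through
 * tracing_log_err() (hypothetical error table and location string;
 * see trace_events_hist.c for real usage):
 *
 *	static const char *err_text[] = { "Duplicate field", ... };
 *	enum { ERR_DUPLICATE_FIELD };	// index into err_text
 *
 *	tracing_log_err(tr, "hist:mysys:myevent", cmd, err_text,
 *			ERR_DUPLICATE_FIELD, err_pos(cmd, bad_field));
 *
 * The error then shows up in the tracefs error_log file, with a
 * caret pointing at byte @pos of @cmd.
 */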

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)		\
	extern struct trace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))

#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
#endif

#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_eval_update(struct trace_eval_map **map, int len);
/* Used from boot time tracer */
extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
extern int trigger_process_regex(struct trace_event_file *file, char *buff);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
#endif

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance(struct trace_array *tr);
int tracing_alloc_snapshot_instance(struct trace_array *tr);
int tracing_arm_snapshot(struct trace_array *tr);
void tracing_disarm_snapshot(struct trace_array *tr);
#else
static inline void tracing_snapshot_instance(struct trace_array *tr) { }
static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	return 0;
}
static inline int tracing_arm_snapshot(struct trace_array *tr) { return 0; }
static inline void tracing_disarm_snapshot(struct trace_array *tr) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1);
void tracer_preempt_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
#endif
#ifdef CONFIG_IRQSOFF_TRACER
void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

/*
 * Reset the state of the trace_iterator so that it can read consumed data.
 * Normally, the trace_iterator is used for reading the data when it is not
 * consumed, and must retain state.
 */
static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
{
	memset_startat(iter, 0, seq);
	iter->pos = -1;
}

/* Check the name is good for event/group/fields */
static inline bool __is_good_name(const char *name, bool hash_ok)
{
	if (!isalpha(*name) && *name != '_' && (!hash_ok || *name != '-'))
		return false;
	while (*++name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_' &&
		    (!hash_ok || *name != '-'))
			return false;
	}
	return true;
}

/* Check the name is good for event/group/fields */
static inline bool is_good_name(const char *name)
{
	return __is_good_name(name, false);
}

/* Check the name is good for system */
static inline bool is_good_system_name(const char *name)
{
	return __is_good_name(name, true);
}
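
/*
 * Illustrative results (the '-' character is only accepted when
 * hash_ok is set, i.e. for system names):
 *
 *	is_good_name("sched_switch")		-> true
 *	is_good_name("1st_event")		-> false (leading digit)
 *	is_good_name("my-event")		-> false ('-' not allowed)
 *	is_good_system_name("my-system")	-> true
 */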

/* Convert certain expected symbols into '_' when generating event names */
static inline void sanitize_event_name(char *name)
{
	while (*name++ != '\0')
		if (*name == ':' || *name == '.')
			*name = '_';
}

/*
 * This is a generic way to read and write a u64 value from a file in tracefs.
 *
 * The value is stored in the variable pointed to by *val. The value needs
 * to be at least *min and at most *max. The write is protected by an
 * existing *lock.
 */
struct trace_min_max_param {
	struct mutex	*lock;
	u64		*val;
	u64		*min;
	u64		*max;
};

#define U64_STR_SIZE		24	/* 20 digits max */

extern const struct file_operations trace_min_max_fops;

#ifdef CONFIG_RV
extern int rv_init_interface(void);
#else
static inline int rv_init_interface(void)
{
	return 0;
}
#endif

/*
 * This is only used to distinguish a real function address from
 * trampoline code, so the value itself has no meaning.
 */
#define FTRACE_TRAMPOLINE_MARKER  ((unsigned long) INT_MAX)

#endif /* _LINUX_KERNEL_TRACE_H */