// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <[email protected]>
 * Copyright (C) 2008 Ingo Molnar <[email protected]>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <[email protected]>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
112 * 113 * It is default off, but you can enable it with either specifying 114 * "ftrace_dump_on_oops" in the kernel command line, or setting 115 * /proc/sys/kernel/ftrace_dump_on_oops 116 * Set 1 if you want to dump buffers of all CPUs 117 * Set 2 if you want to dump the buffer of the CPU that triggered oops 118 */ 119 120 enum ftrace_dump_mode ftrace_dump_on_oops; 121 122 /* When set, tracing will stop when a WARN*() is hit */ 123 int __disable_trace_on_warning; 124 125 #ifdef CONFIG_TRACE_EVAL_MAP_FILE 126 /* Map of enums to their values, for "eval_map" file */ 127 struct trace_eval_map_head { 128 struct module *mod; 129 unsigned long length; 130 }; 131 132 union trace_eval_map_item; 133 134 struct trace_eval_map_tail { 135 /* 136 * "end" is first and points to NULL as it must be different 137 * than "mod" or "eval_string" 138 */ 139 union trace_eval_map_item *next; 140 const char *end; /* points to NULL */ 141 }; 142 143 static DEFINE_MUTEX(trace_eval_mutex); 144 145 /* 146 * The trace_eval_maps are saved in an array with two extra elements, 147 * one at the beginning, and one at the end. The beginning item contains 148 * the count of the saved maps (head.length), and the module they 149 * belong to if not built in (head.mod). The ending item contains a 150 * pointer to the next array of saved eval_map items. 151 */ 152 union trace_eval_map_item { 153 struct trace_eval_map map; 154 struct trace_eval_map_head head; 155 struct trace_eval_map_tail tail; 156 }; 157 158 static union trace_eval_map_item *trace_eval_maps; 159 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ 160 161 static int tracing_set_tracer(struct trace_array *tr, const char *buf); 162 163 #define MAX_TRACER_SIZE 100 164 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; 165 static char *default_bootup_tracer; 166 167 static bool allocate_snapshot; 168 169 static int __init set_cmdline_ftrace(char *str) 170 { 171 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); 172 default_bootup_tracer = bootup_tracer_buf; 173 /* We are using ftrace early, expand it */ 174 ring_buffer_expanded = true; 175 return 1; 176 } 177 __setup("ftrace=", set_cmdline_ftrace); 178 179 static int __init set_ftrace_dump_on_oops(char *str) 180 { 181 if (*str++ != '=' || !*str) { 182 ftrace_dump_on_oops = DUMP_ALL; 183 return 1; 184 } 185 186 if (!strcmp("orig_cpu", str)) { 187 ftrace_dump_on_oops = DUMP_ORIG; 188 return 1; 189 } 190 191 return 0; 192 } 193 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); 194 195 static int __init stop_trace_on_warning(char *str) 196 { 197 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) 198 __disable_trace_on_warning = 1; 199 return 1; 200 } 201 __setup("traceoff_on_warning", stop_trace_on_warning); 202 203 static int __init boot_alloc_snapshot(char *str) 204 { 205 allocate_snapshot = true; 206 /* We also need the main ring buffer expanded */ 207 ring_buffer_expanded = true; 208 return 1; 209 } 210 __setup("alloc_snapshot", boot_alloc_snapshot); 211 212 213 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata; 214 215 static int __init set_trace_boot_options(char *str) 216 { 217 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); 218 return 0; 219 } 220 __setup("trace_options=", set_trace_boot_options); 221 222 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata; 223 static char *trace_boot_clock __initdata; 224 225 static int __init set_trace_boot_clock(char *str) 226 { 227 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE); 228 trace_boot_clock = trace_boot_clock_buf; 229 
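	/*
	 * Illustrative boot usage (hypothetical command line, shown only
	 * as an example of this handler being invoked):
	 *
	 *	trace_clock=global
	 *
	 * Only the clock name is stashed here; the actual clock switch
	 * happens later in boot, once the tracing buffers exist.
	 */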
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
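 *
 * A minimal caller sketch (hypothetical, for illustration only): skip
 * recording an event when the current task is filtered out:
 *
 *	if (trace_ignore_this_task(pid_list, current))
 *		return;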
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
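 *
 * Sketch of a seq_file user of the helpers above (the wrapper callbacks
 * and my_pid_list are hypothetical, shown only for illustration):
 *
 *	static void *my_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *my_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations my_seq_ops = {
 *		.start	= my_start,
 *		.next	= my_next,
 *		.stop	= my_stop,
 *		.show	= trace_pid_show,
 *	};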
464 */ 465 int trace_pid_show(struct seq_file *m, void *v) 466 { 467 unsigned long pid = (unsigned long)v - 1; 468 469 seq_printf(m, "%lu\n", pid); 470 return 0; 471 } 472 473 /* 128 should be much more than enough */ 474 #define PID_BUF_SIZE 127 475 476 int trace_pid_write(struct trace_pid_list *filtered_pids, 477 struct trace_pid_list **new_pid_list, 478 const char __user *ubuf, size_t cnt) 479 { 480 struct trace_pid_list *pid_list; 481 struct trace_parser parser; 482 unsigned long val; 483 int nr_pids = 0; 484 ssize_t read = 0; 485 ssize_t ret = 0; 486 loff_t pos; 487 pid_t pid; 488 489 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1)) 490 return -ENOMEM; 491 492 /* 493 * Always recreate a new array. The write is an all or nothing 494 * operation. Always create a new array when adding new pids by 495 * the user. If the operation fails, then the current list is 496 * not modified. 497 */ 498 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL); 499 if (!pid_list) 500 return -ENOMEM; 501 502 pid_list->pid_max = READ_ONCE(pid_max); 503 504 /* Only truncating will shrink pid_max */ 505 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max) 506 pid_list->pid_max = filtered_pids->pid_max; 507 508 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3); 509 if (!pid_list->pids) { 510 kfree(pid_list); 511 return -ENOMEM; 512 } 513 514 if (filtered_pids) { 515 /* copy the current bits to the new max */ 516 for_each_set_bit(pid, filtered_pids->pids, 517 filtered_pids->pid_max) { 518 set_bit(pid, pid_list->pids); 519 nr_pids++; 520 } 521 } 522 523 while (cnt > 0) { 524 525 pos = 0; 526 527 ret = trace_get_user(&parser, ubuf, cnt, &pos); 528 if (ret < 0 || !trace_parser_loaded(&parser)) 529 break; 530 531 read += ret; 532 ubuf += ret; 533 cnt -= ret; 534 535 ret = -EINVAL; 536 if (kstrtoul(parser.buffer, 0, &val)) 537 break; 538 if (val >= pid_list->pid_max) 539 break; 540 541 pid = (pid_t)val; 542 543 set_bit(pid, pid_list->pids); 544 nr_pids++; 545 546 trace_parser_clear(&parser); 547 ret = 0; 548 } 549 trace_parser_put(&parser); 550 551 if (ret < 0) { 552 trace_free_pid_list(pid_list); 553 return ret; 554 } 555 556 if (!nr_pids) { 557 /* Cleared the list of pids */ 558 trace_free_pid_list(pid_list); 559 read = ret; 560 pid_list = NULL; 561 } 562 563 *new_pid_list = pid_list; 564 565 return read; 566 } 567 568 static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu) 569 { 570 u64 ts; 571 572 /* Early boot up does not have a buffer yet */ 573 if (!buf->buffer) 574 return trace_clock_local(); 575 576 ts = ring_buffer_time_stamp(buf->buffer, cpu); 577 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts); 578 579 return ts; 580 } 581 582 u64 ftrace_now(int cpu) 583 { 584 return buffer_ftrace_now(&global_trace.trace_buffer, cpu); 585 } 586 587 /** 588 * tracing_is_enabled - Show if global_trace has been disabled 589 * 590 * Shows if the global trace has been enabled or not. It uses the 591 * mirror flag "buffer_disabled" to be used in fast paths such as for 592 * the irqsoff tracer. But it may be inaccurate due to races. If you 593 * need to know the accurate state, use tracing_is_on() which is a little 594 * slower, but accurate. 595 */ 596 int tracing_is_enabled(void) 597 { 598 /* 599 * For quick access (irqsoff uses this in fast path), just 600 * return the mirror variable of the state of the ring buffer. 601 * It's a little racy, but we don't really care. 
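	 * The smp_rmb() below is intended to pair with the smp_wmb() in
	 * tracer_tracing_on() and tracer_tracing_off(), which publish
	 * updates to buffer_disabled.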
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a link list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multi read-only access is also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer.
*/ 667 mutex_lock(&per_cpu(cpu_access_lock, cpu)); 668 } 669 } 670 671 static inline void trace_access_unlock(int cpu) 672 { 673 if (cpu == RING_BUFFER_ALL_CPUS) { 674 up_write(&all_cpu_access_lock); 675 } else { 676 mutex_unlock(&per_cpu(cpu_access_lock, cpu)); 677 up_read(&all_cpu_access_lock); 678 } 679 } 680 681 static inline void trace_access_lock_init(void) 682 { 683 int cpu; 684 685 for_each_possible_cpu(cpu) 686 mutex_init(&per_cpu(cpu_access_lock, cpu)); 687 } 688 689 #else 690 691 static DEFINE_MUTEX(access_lock); 692 693 static inline void trace_access_lock(int cpu) 694 { 695 (void)cpu; 696 mutex_lock(&access_lock); 697 } 698 699 static inline void trace_access_unlock(int cpu) 700 { 701 (void)cpu; 702 mutex_unlock(&access_lock); 703 } 704 705 static inline void trace_access_lock_init(void) 706 { 707 } 708 709 #endif 710 711 #ifdef CONFIG_STACKTRACE 712 static void __ftrace_trace_stack(struct ring_buffer *buffer, 713 unsigned long flags, 714 int skip, int pc, struct pt_regs *regs); 715 static inline void ftrace_trace_stack(struct trace_array *tr, 716 struct ring_buffer *buffer, 717 unsigned long flags, 718 int skip, int pc, struct pt_regs *regs); 719 720 #else 721 static inline void __ftrace_trace_stack(struct ring_buffer *buffer, 722 unsigned long flags, 723 int skip, int pc, struct pt_regs *regs) 724 { 725 } 726 static inline void ftrace_trace_stack(struct trace_array *tr, 727 struct ring_buffer *buffer, 728 unsigned long flags, 729 int skip, int pc, struct pt_regs *regs) 730 { 731 } 732 733 #endif 734 735 static __always_inline void 736 trace_event_setup(struct ring_buffer_event *event, 737 int type, unsigned long flags, int pc) 738 { 739 struct trace_entry *ent = ring_buffer_event_data(event); 740 741 tracing_generic_entry_update(ent, flags, pc); 742 ent->type = type; 743 } 744 745 static __always_inline struct ring_buffer_event * 746 __trace_buffer_lock_reserve(struct ring_buffer *buffer, 747 int type, 748 unsigned long len, 749 unsigned long flags, int pc) 750 { 751 struct ring_buffer_event *event; 752 753 event = ring_buffer_lock_reserve(buffer, len); 754 if (event != NULL) 755 trace_event_setup(event, type, flags, pc); 756 757 return event; 758 } 759 760 void tracer_tracing_on(struct trace_array *tr) 761 { 762 if (tr->trace_buffer.buffer) 763 ring_buffer_record_on(tr->trace_buffer.buffer); 764 /* 765 * This flag is looked at when buffers haven't been allocated 766 * yet, or by some tracers (like irqsoff), that just want to 767 * know if the ring buffer has been disabled, but it can handle 768 * races of where it gets disabled but we still do a record. 769 * As the check is in the fast path of the tracers, it is more 770 * important to be fast than accurate. 771 */ 772 tr->buffer_disabled = 0; 773 /* Make the flag seen by readers */ 774 smp_wmb(); 775 } 776 777 /** 778 * tracing_on - enable tracing buffers 779 * 780 * This function enables tracing buffers that may have been 781 * disabled with tracing_off. 
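 *
 * A common (illustrative) debugging pattern is to bracket a suspect
 * code path so that only that window is recorded:
 *
 *	tracing_on();
 *	do_something_interesting();
 *	tracing_off();
 *
 * do_something_interesting() is a placeholder, not a real function.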
782 */ 783 void tracing_on(void) 784 { 785 tracer_tracing_on(&global_trace); 786 } 787 EXPORT_SYMBOL_GPL(tracing_on); 788 789 790 static __always_inline void 791 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) 792 { 793 __this_cpu_write(trace_taskinfo_save, true); 794 795 /* If this is the temp buffer, we need to commit fully */ 796 if (this_cpu_read(trace_buffered_event) == event) { 797 /* Length is in event->array[0] */ 798 ring_buffer_write(buffer, event->array[0], &event->array[1]); 799 /* Release the temp buffer */ 800 this_cpu_dec(trace_buffered_event_cnt); 801 } else 802 ring_buffer_unlock_commit(buffer, event); 803 } 804 805 /** 806 * __trace_puts - write a constant string into the trace buffer. 807 * @ip: The address of the caller 808 * @str: The constant string to write 809 * @size: The size of the string. 810 */ 811 int __trace_puts(unsigned long ip, const char *str, int size) 812 { 813 struct ring_buffer_event *event; 814 struct ring_buffer *buffer; 815 struct print_entry *entry; 816 unsigned long irq_flags; 817 int alloc; 818 int pc; 819 820 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) 821 return 0; 822 823 pc = preempt_count(); 824 825 if (unlikely(tracing_selftest_running || tracing_disabled)) 826 return 0; 827 828 alloc = sizeof(*entry) + size + 2; /* possible \n added */ 829 830 local_save_flags(irq_flags); 831 buffer = global_trace.trace_buffer.buffer; 832 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 833 irq_flags, pc); 834 if (!event) 835 return 0; 836 837 entry = ring_buffer_event_data(event); 838 entry->ip = ip; 839 840 memcpy(&entry->buf, str, size); 841 842 /* Add a newline if necessary */ 843 if (entry->buf[size - 1] != '\n') { 844 entry->buf[size] = '\n'; 845 entry->buf[size + 1] = '\0'; 846 } else 847 entry->buf[size] = '\0'; 848 849 __buffer_unlock_commit(buffer, event); 850 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL); 851 852 return size; 853 } 854 EXPORT_SYMBOL_GPL(__trace_puts); 855 856 /** 857 * __trace_bputs - write the pointer to a constant string into trace buffer 858 * @ip: The address of the caller 859 * @str: The constant string to write to the buffer to 860 */ 861 int __trace_bputs(unsigned long ip, const char *str) 862 { 863 struct ring_buffer_event *event; 864 struct ring_buffer *buffer; 865 struct bputs_entry *entry; 866 unsigned long irq_flags; 867 int size = sizeof(struct bputs_entry); 868 int pc; 869 870 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) 871 return 0; 872 873 pc = preempt_count(); 874 875 if (unlikely(tracing_selftest_running || tracing_disabled)) 876 return 0; 877 878 local_save_flags(irq_flags); 879 buffer = global_trace.trace_buffer.buffer; 880 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, 881 irq_flags, pc); 882 if (!event) 883 return 0; 884 885 entry = ring_buffer_event_data(event); 886 entry->ip = ip; 887 entry->str = str; 888 889 __buffer_unlock_commit(buffer, event); 890 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL); 891 892 return 1; 893 } 894 EXPORT_SYMBOL_GPL(__trace_bputs); 895 896 #ifdef CONFIG_TRACER_SNAPSHOT 897 void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data) 898 { 899 struct tracer *tracer = tr->current_trace; 900 unsigned long flags; 901 902 if (in_nmi()) { 903 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n"); 904 internal_trace_puts("*** snapshot is being ignored ***\n"); 905 return; 906 } 907 908 if (!tr->allocated_snapshot) { 909 
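		/*
		 * No spare buffer was allocated, so there is nothing to
		 * swap into: note it in the trace and turn tracing off
		 * below so the existing contents are preserved.
		 */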
internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n"); 910 internal_trace_puts("*** stopping trace here! ***\n"); 911 tracing_off(); 912 return; 913 } 914 915 /* Note, snapshot can not be used when the tracer uses it */ 916 if (tracer->use_max_tr) { 917 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n"); 918 internal_trace_puts("*** Can not use snapshot (sorry) ***\n"); 919 return; 920 } 921 922 local_irq_save(flags); 923 update_max_tr(tr, current, smp_processor_id(), cond_data); 924 local_irq_restore(flags); 925 } 926 927 void tracing_snapshot_instance(struct trace_array *tr) 928 { 929 tracing_snapshot_instance_cond(tr, NULL); 930 } 931 932 /** 933 * tracing_snapshot - take a snapshot of the current buffer. 934 * 935 * This causes a swap between the snapshot buffer and the current live 936 * tracing buffer. You can use this to take snapshots of the live 937 * trace when some condition is triggered, but continue to trace. 938 * 939 * Note, make sure to allocate the snapshot with either 940 * a tracing_snapshot_alloc(), or by doing it manually 941 * with: echo 1 > /sys/kernel/debug/tracing/snapshot 942 * 943 * If the snapshot buffer is not allocated, it will stop tracing. 944 * Basically making a permanent snapshot. 945 */ 946 void tracing_snapshot(void) 947 { 948 struct trace_array *tr = &global_trace; 949 950 tracing_snapshot_instance(tr); 951 } 952 EXPORT_SYMBOL_GPL(tracing_snapshot); 953 954 /** 955 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer. 956 * @tr: The tracing instance to snapshot 957 * @cond_data: The data to be tested conditionally, and possibly saved 958 * 959 * This is the same as tracing_snapshot() except that the snapshot is 960 * conditional - the snapshot will only happen if the 961 * cond_snapshot.update() implementation receiving the cond_data 962 * returns true, which means that the trace array's cond_snapshot 963 * update() operation used the cond_data to determine whether the 964 * snapshot should be taken, and if it was, presumably saved it along 965 * with the snapshot. 966 */ 967 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) 968 { 969 tracing_snapshot_instance_cond(tr, cond_data); 970 } 971 EXPORT_SYMBOL_GPL(tracing_snapshot_cond); 972 973 /** 974 * tracing_snapshot_cond_data - get the user data associated with a snapshot 975 * @tr: The tracing instance 976 * 977 * When the user enables a conditional snapshot using 978 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved 979 * with the snapshot. This accessor is used to retrieve it. 980 * 981 * Should not be called from cond_snapshot.update(), since it takes 982 * the tr->max_lock lock, which the code calling 983 * cond_snapshot.update() has already done. 984 * 985 * Returns the cond_data associated with the trace array's snapshot. 
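 *
 * Illustrative use (the caller and data type are hypothetical, after a
 * conditional snapshot was set up with tracing_snapshot_cond_enable()):
 *
 *	struct my_cond_data *d = tracing_cond_snapshot_data(tr);
 *
 *	if (d)
 *		pr_info("snapshot condition value: %d\n", d->value);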
986 */ 987 void *tracing_cond_snapshot_data(struct trace_array *tr) 988 { 989 void *cond_data = NULL; 990 991 arch_spin_lock(&tr->max_lock); 992 993 if (tr->cond_snapshot) 994 cond_data = tr->cond_snapshot->cond_data; 995 996 arch_spin_unlock(&tr->max_lock); 997 998 return cond_data; 999 } 1000 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); 1001 1002 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, 1003 struct trace_buffer *size_buf, int cpu_id); 1004 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val); 1005 1006 int tracing_alloc_snapshot_instance(struct trace_array *tr) 1007 { 1008 int ret; 1009 1010 if (!tr->allocated_snapshot) { 1011 1012 /* allocate spare buffer */ 1013 ret = resize_buffer_duplicate_size(&tr->max_buffer, 1014 &tr->trace_buffer, RING_BUFFER_ALL_CPUS); 1015 if (ret < 0) 1016 return ret; 1017 1018 tr->allocated_snapshot = true; 1019 } 1020 1021 return 0; 1022 } 1023 1024 static void free_snapshot(struct trace_array *tr) 1025 { 1026 /* 1027 * We don't free the ring buffer. instead, resize it because 1028 * The max_tr ring buffer has some state (e.g. ring->clock) and 1029 * we want preserve it. 1030 */ 1031 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); 1032 set_buffer_entries(&tr->max_buffer, 1); 1033 tracing_reset_online_cpus(&tr->max_buffer); 1034 tr->allocated_snapshot = false; 1035 } 1036 1037 /** 1038 * tracing_alloc_snapshot - allocate snapshot buffer. 1039 * 1040 * This only allocates the snapshot buffer if it isn't already 1041 * allocated - it doesn't also take a snapshot. 1042 * 1043 * This is meant to be used in cases where the snapshot buffer needs 1044 * to be set up for events that can't sleep but need to be able to 1045 * trigger a snapshot. 1046 */ 1047 int tracing_alloc_snapshot(void) 1048 { 1049 struct trace_array *tr = &global_trace; 1050 int ret; 1051 1052 ret = tracing_alloc_snapshot_instance(tr); 1053 WARN_ON(ret < 0); 1054 1055 return ret; 1056 } 1057 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot); 1058 1059 /** 1060 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer. 1061 * 1062 * This is similar to tracing_snapshot(), but it will allocate the 1063 * snapshot buffer if it isn't already allocated. Use this only 1064 * where it is safe to sleep, as the allocation may sleep. 1065 * 1066 * This causes a swap between the snapshot buffer and the current live 1067 * tracing buffer. You can use this to take snapshots of the live 1068 * trace when some condition is triggered, but continue to trace. 1069 */ 1070 void tracing_snapshot_alloc(void) 1071 { 1072 int ret; 1073 1074 ret = tracing_alloc_snapshot(); 1075 if (ret < 0) 1076 return; 1077 1078 tracing_snapshot(); 1079 } 1080 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); 1081 1082 /** 1083 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance 1084 * @tr: The tracing instance 1085 * @cond_data: User data to associate with the snapshot 1086 * @update: Implementation of the cond_snapshot update function 1087 * 1088 * Check whether the conditional snapshot for the given instance has 1089 * already been enabled, or if the current tracer is already using a 1090 * snapshot; if so, return -EBUSY, else create a cond_snapshot and 1091 * save the cond_data and update function inside. 1092 * 1093 * Returns 0 if successful, error otherwise. 
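 *
 * Sketch of a caller (hypothetical update callback and data; the
 * signature follows cond_update_fn_t as invoked by update_max_tr()
 * in this file):
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_cond_data *d = cond_data;
 *
 *		return d->hits++ > d->threshold;
 *	}
 *
 *	err = tracing_snapshot_cond_enable(tr, &my_data, my_update);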
1094 */ 1095 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, 1096 cond_update_fn_t update) 1097 { 1098 struct cond_snapshot *cond_snapshot; 1099 int ret = 0; 1100 1101 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL); 1102 if (!cond_snapshot) 1103 return -ENOMEM; 1104 1105 cond_snapshot->cond_data = cond_data; 1106 cond_snapshot->update = update; 1107 1108 mutex_lock(&trace_types_lock); 1109 1110 ret = tracing_alloc_snapshot_instance(tr); 1111 if (ret) 1112 goto fail_unlock; 1113 1114 if (tr->current_trace->use_max_tr) { 1115 ret = -EBUSY; 1116 goto fail_unlock; 1117 } 1118 1119 /* 1120 * The cond_snapshot can only change to NULL without the 1121 * trace_types_lock. We don't care if we race with it going 1122 * to NULL, but we want to make sure that it's not set to 1123 * something other than NULL when we get here, which we can 1124 * do safely with only holding the trace_types_lock and not 1125 * having to take the max_lock. 1126 */ 1127 if (tr->cond_snapshot) { 1128 ret = -EBUSY; 1129 goto fail_unlock; 1130 } 1131 1132 arch_spin_lock(&tr->max_lock); 1133 tr->cond_snapshot = cond_snapshot; 1134 arch_spin_unlock(&tr->max_lock); 1135 1136 mutex_unlock(&trace_types_lock); 1137 1138 return ret; 1139 1140 fail_unlock: 1141 mutex_unlock(&trace_types_lock); 1142 kfree(cond_snapshot); 1143 return ret; 1144 } 1145 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); 1146 1147 /** 1148 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance 1149 * @tr: The tracing instance 1150 * 1151 * Check whether the conditional snapshot for the given instance is 1152 * enabled; if so, free the cond_snapshot associated with it, 1153 * otherwise return -EINVAL. 1154 * 1155 * Returns 0 if successful, error otherwise. 
1156 */ 1157 int tracing_snapshot_cond_disable(struct trace_array *tr) 1158 { 1159 int ret = 0; 1160 1161 arch_spin_lock(&tr->max_lock); 1162 1163 if (!tr->cond_snapshot) 1164 ret = -EINVAL; 1165 else { 1166 kfree(tr->cond_snapshot); 1167 tr->cond_snapshot = NULL; 1168 } 1169 1170 arch_spin_unlock(&tr->max_lock); 1171 1172 return ret; 1173 } 1174 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); 1175 #else 1176 void tracing_snapshot(void) 1177 { 1178 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used"); 1179 } 1180 EXPORT_SYMBOL_GPL(tracing_snapshot); 1181 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) 1182 { 1183 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used"); 1184 } 1185 EXPORT_SYMBOL_GPL(tracing_snapshot_cond); 1186 int tracing_alloc_snapshot(void) 1187 { 1188 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used"); 1189 return -ENODEV; 1190 } 1191 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot); 1192 void tracing_snapshot_alloc(void) 1193 { 1194 /* Give warning */ 1195 tracing_snapshot(); 1196 } 1197 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); 1198 void *tracing_cond_snapshot_data(struct trace_array *tr) 1199 { 1200 return NULL; 1201 } 1202 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); 1203 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) 1204 { 1205 return -ENODEV; 1206 } 1207 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); 1208 int tracing_snapshot_cond_disable(struct trace_array *tr) 1209 { 1210 return false; 1211 } 1212 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); 1213 #endif /* CONFIG_TRACER_SNAPSHOT */ 1214 1215 void tracer_tracing_off(struct trace_array *tr) 1216 { 1217 if (tr->trace_buffer.buffer) 1218 ring_buffer_record_off(tr->trace_buffer.buffer); 1219 /* 1220 * This flag is looked at when buffers haven't been allocated 1221 * yet, or by some tracers (like irqsoff), that just want to 1222 * know if the ring buffer has been disabled, but it can handle 1223 * races of where it gets disabled but we still do a record. 1224 * As the check is in the fast path of the tracers, it is more 1225 * important to be fast than accurate. 1226 */ 1227 tr->buffer_disabled = 1; 1228 /* Make the flag seen by readers */ 1229 smp_wmb(); 1230 } 1231 1232 /** 1233 * tracing_off - turn off tracing buffers 1234 * 1235 * This function stops the tracing buffers from recording data. 1236 * It does not disable any overhead the tracers themselves may 1237 * be causing. This function simply causes all recording to 1238 * the ring buffers to fail. 1239 */ 1240 void tracing_off(void) 1241 { 1242 tracer_tracing_off(&global_trace); 1243 } 1244 EXPORT_SYMBOL_GPL(tracing_off); 1245 1246 void disable_trace_on_warning(void) 1247 { 1248 if (__disable_trace_on_warning) 1249 tracing_off(); 1250 } 1251 1252 /** 1253 * tracer_tracing_is_on - show real state of ring buffer enabled 1254 * @tr : the trace array to know if ring buffer is enabled 1255 * 1256 * Shows real state of the ring buffer if it is enabled or not. 
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};

bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
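 *
 * Typical use is a loop over a user buffer, as in trace_pid_write() in
 * this file (abridged sketch only):
 *
 *	while (cnt > 0) {
 *		pos = 0;
 *		ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *		if (ret < 0 || !trace_parser_loaded(&parser))
 *			break;
 *		ubuf += ret;
 *		cnt -= ret;
 *		(use parser.buffer, then trace_parser_clear())
 *	}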
1382 */ 1383 int trace_get_user(struct trace_parser *parser, const char __user *ubuf, 1384 size_t cnt, loff_t *ppos) 1385 { 1386 char ch; 1387 size_t read = 0; 1388 ssize_t ret; 1389 1390 if (!*ppos) 1391 trace_parser_clear(parser); 1392 1393 ret = get_user(ch, ubuf++); 1394 if (ret) 1395 goto out; 1396 1397 read++; 1398 cnt--; 1399 1400 /* 1401 * The parser is not finished with the last write, 1402 * continue reading the user input without skipping spaces. 1403 */ 1404 if (!parser->cont) { 1405 /* skip white space */ 1406 while (cnt && isspace(ch)) { 1407 ret = get_user(ch, ubuf++); 1408 if (ret) 1409 goto out; 1410 read++; 1411 cnt--; 1412 } 1413 1414 parser->idx = 0; 1415 1416 /* only spaces were written */ 1417 if (isspace(ch) || !ch) { 1418 *ppos += read; 1419 ret = read; 1420 goto out; 1421 } 1422 } 1423 1424 /* read the non-space input */ 1425 while (cnt && !isspace(ch) && ch) { 1426 if (parser->idx < parser->size - 1) 1427 parser->buffer[parser->idx++] = ch; 1428 else { 1429 ret = -EINVAL; 1430 goto out; 1431 } 1432 ret = get_user(ch, ubuf++); 1433 if (ret) 1434 goto out; 1435 read++; 1436 cnt--; 1437 } 1438 1439 /* We either got finished input or we have to wait for another call. */ 1440 if (isspace(ch) || !ch) { 1441 parser->buffer[parser->idx] = 0; 1442 parser->cont = false; 1443 } else if (parser->idx < parser->size - 1) { 1444 parser->cont = true; 1445 parser->buffer[parser->idx++] = ch; 1446 /* Make sure the parsed string always terminates with '\0'. */ 1447 parser->buffer[parser->idx] = 0; 1448 } else { 1449 ret = -EINVAL; 1450 goto out; 1451 } 1452 1453 *ppos += read; 1454 ret = read; 1455 1456 out: 1457 return ret; 1458 } 1459 1460 /* TODO add a seq_buf_to_buffer() */ 1461 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) 1462 { 1463 int len; 1464 1465 if (trace_seq_used(s) <= s->seq.readpos) 1466 return -EBUSY; 1467 1468 len = trace_seq_used(s) - s->seq.readpos; 1469 if (cnt > len) 1470 cnt = len; 1471 memcpy(buf, s->buffer + s->seq.readpos, cnt); 1472 1473 s->seq.readpos += cnt; 1474 return cnt; 1475 } 1476 1477 unsigned long __read_mostly tracing_thresh; 1478 1479 #ifdef CONFIG_TRACER_MAX_TRACE 1480 /* 1481 * Copy the new maximum trace into the separate maximum-trace 1482 * structure. (this way the maximum trace is permanently saved, 1483 * for later retrieval via /sys/kernel/tracing/tracing_max_latency) 1484 */ 1485 static void 1486 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) 1487 { 1488 struct trace_buffer *trace_buf = &tr->trace_buffer; 1489 struct trace_buffer *max_buf = &tr->max_buffer; 1490 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu); 1491 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu); 1492 1493 max_buf->cpu = cpu; 1494 max_buf->time_start = data->preempt_timestamp; 1495 1496 max_data->saved_latency = tr->max_latency; 1497 max_data->critical_start = data->critical_start; 1498 max_data->critical_end = data->critical_end; 1499 1500 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN); 1501 max_data->pid = tsk->pid; 1502 /* 1503 * If tsk == current, then use current_uid(), as that does not use 1504 * RCU. The irq tracer can be called out of RCU scope. 
1505 */ 1506 if (tsk == current) 1507 max_data->uid = current_uid(); 1508 else 1509 max_data->uid = task_uid(tsk); 1510 1511 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; 1512 max_data->policy = tsk->policy; 1513 max_data->rt_priority = tsk->rt_priority; 1514 1515 /* record this tasks comm */ 1516 tracing_record_cmdline(tsk); 1517 } 1518 1519 /** 1520 * update_max_tr - snapshot all trace buffers from global_trace to max_tr 1521 * @tr: tracer 1522 * @tsk: the task with the latency 1523 * @cpu: The cpu that initiated the trace. 1524 * @cond_data: User data associated with a conditional snapshot 1525 * 1526 * Flip the buffers between the @tr and the max_tr and record information 1527 * about which task was the cause of this latency. 1528 */ 1529 void 1530 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, 1531 void *cond_data) 1532 { 1533 if (tr->stop_count) 1534 return; 1535 1536 WARN_ON_ONCE(!irqs_disabled()); 1537 1538 if (!tr->allocated_snapshot) { 1539 /* Only the nop tracer should hit this when disabling */ 1540 WARN_ON_ONCE(tr->current_trace != &nop_trace); 1541 return; 1542 } 1543 1544 arch_spin_lock(&tr->max_lock); 1545 1546 /* Inherit the recordable setting from trace_buffer */ 1547 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer)) 1548 ring_buffer_record_on(tr->max_buffer.buffer); 1549 else 1550 ring_buffer_record_off(tr->max_buffer.buffer); 1551 1552 #ifdef CONFIG_TRACER_SNAPSHOT 1553 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) 1554 goto out_unlock; 1555 #endif 1556 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer); 1557 1558 __update_max_tr(tr, tsk, cpu); 1559 1560 out_unlock: 1561 arch_spin_unlock(&tr->max_lock); 1562 } 1563 1564 /** 1565 * update_max_tr_single - only copy one trace over, and reset the rest 1566 * @tr - tracer 1567 * @tsk - task with the latency 1568 * @cpu - the cpu of the buffer to copy. 1569 * 1570 * Flip the trace of a single CPU buffer between the @tr and the max_tr. 1571 */ 1572 void 1573 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) 1574 { 1575 int ret; 1576 1577 if (tr->stop_count) 1578 return; 1579 1580 WARN_ON_ONCE(!irqs_disabled()); 1581 if (!tr->allocated_snapshot) { 1582 /* Only the nop tracer should hit this when disabling */ 1583 WARN_ON_ONCE(tr->current_trace != &nop_trace); 1584 return; 1585 } 1586 1587 arch_spin_lock(&tr->max_lock); 1588 1589 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu); 1590 1591 if (ret == -EBUSY) { 1592 /* 1593 * We failed to swap the buffer due to a commit taking 1594 * place on this CPU. We fail to record, but we reset 1595 * the max trace buffer (no one writes directly to it) 1596 * and flag that it failed. 
1597 */ 1598 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, 1599 "Failed to swap buffers due to commit in progress\n"); 1600 } 1601 1602 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); 1603 1604 __update_max_tr(tr, tsk, cpu); 1605 arch_spin_unlock(&tr->max_lock); 1606 } 1607 #endif /* CONFIG_TRACER_MAX_TRACE */ 1608 1609 static int wait_on_pipe(struct trace_iterator *iter, int full) 1610 { 1611 /* Iterators are static, they should be filled or empty */ 1612 if (trace_buffer_iter(iter, iter->cpu_file)) 1613 return 0; 1614 1615 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file, 1616 full); 1617 } 1618 1619 #ifdef CONFIG_FTRACE_STARTUP_TEST 1620 static bool selftests_can_run; 1621 1622 struct trace_selftests { 1623 struct list_head list; 1624 struct tracer *type; 1625 }; 1626 1627 static LIST_HEAD(postponed_selftests); 1628 1629 static int save_selftest(struct tracer *type) 1630 { 1631 struct trace_selftests *selftest; 1632 1633 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL); 1634 if (!selftest) 1635 return -ENOMEM; 1636 1637 selftest->type = type; 1638 list_add(&selftest->list, &postponed_selftests); 1639 return 0; 1640 } 1641 1642 static int run_tracer_selftest(struct tracer *type) 1643 { 1644 struct trace_array *tr = &global_trace; 1645 struct tracer *saved_tracer = tr->current_trace; 1646 int ret; 1647 1648 if (!type->selftest || tracing_selftest_disabled) 1649 return 0; 1650 1651 /* 1652 * If a tracer registers early in boot up (before scheduling is 1653 * initialized and such), then do not run its selftests yet. 1654 * Instead, run it a little later in the boot process. 1655 */ 1656 if (!selftests_can_run) 1657 return save_selftest(type); 1658 1659 /* 1660 * Run a selftest on this tracer. 1661 * Here we reset the trace buffer, and set the current 1662 * tracer to be this tracer. The tracer can then run some 1663 * internal tracing to verify that everything is in order. 1664 * If we fail, we do not register this tracer. 
1665 */ 1666 tracing_reset_online_cpus(&tr->trace_buffer); 1667 1668 tr->current_trace = type; 1669 1670 #ifdef CONFIG_TRACER_MAX_TRACE 1671 if (type->use_max_tr) { 1672 /* If we expanded the buffers, make sure the max is expanded too */ 1673 if (ring_buffer_expanded) 1674 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, 1675 RING_BUFFER_ALL_CPUS); 1676 tr->allocated_snapshot = true; 1677 } 1678 #endif 1679 1680 /* the test is responsible for initializing and enabling */ 1681 pr_info("Testing tracer %s: ", type->name); 1682 ret = type->selftest(type, tr); 1683 /* the test is responsible for resetting too */ 1684 tr->current_trace = saved_tracer; 1685 if (ret) { 1686 printk(KERN_CONT "FAILED!\n"); 1687 /* Add the warning after printing 'FAILED' */ 1688 WARN_ON(1); 1689 return -1; 1690 } 1691 /* Only reset on passing, to avoid touching corrupted buffers */ 1692 tracing_reset_online_cpus(&tr->trace_buffer); 1693 1694 #ifdef CONFIG_TRACER_MAX_TRACE 1695 if (type->use_max_tr) { 1696 tr->allocated_snapshot = false; 1697 1698 /* Shrink the max buffer again */ 1699 if (ring_buffer_expanded) 1700 ring_buffer_resize(tr->max_buffer.buffer, 1, 1701 RING_BUFFER_ALL_CPUS); 1702 } 1703 #endif 1704 1705 printk(KERN_CONT "PASSED\n"); 1706 return 0; 1707 } 1708 1709 static __init int init_trace_selftests(void) 1710 { 1711 struct trace_selftests *p, *n; 1712 struct tracer *t, **last; 1713 int ret; 1714 1715 selftests_can_run = true; 1716 1717 mutex_lock(&trace_types_lock); 1718 1719 if (list_empty(&postponed_selftests)) 1720 goto out; 1721 1722 pr_info("Running postponed tracer tests:\n"); 1723 1724 list_for_each_entry_safe(p, n, &postponed_selftests, list) { 1725 ret = run_tracer_selftest(p->type); 1726 /* If the test fails, then warn and remove from available_tracers */ 1727 if (ret < 0) { 1728 WARN(1, "tracer: %s failed selftest, disabling\n", 1729 p->type->name); 1730 last = &trace_types; 1731 for (t = trace_types; t; t = t->next) { 1732 if (t == p->type) { 1733 *last = t->next; 1734 break; 1735 } 1736 last = &t->next; 1737 } 1738 } 1739 list_del(&p->list); 1740 kfree(p); 1741 } 1742 1743 out: 1744 mutex_unlock(&trace_types_lock); 1745 1746 return 0; 1747 } 1748 core_initcall(init_trace_selftests); 1749 #else 1750 static inline int run_tracer_selftest(struct tracer *type) 1751 { 1752 return 0; 1753 } 1754 #endif /* CONFIG_FTRACE_STARTUP_TEST */ 1755 1756 static void add_tracer_options(struct trace_array *tr, struct tracer *t); 1757 1758 static void __init apply_trace_boot_options(void); 1759 1760 /** 1761 * register_tracer - register a tracer with the ftrace system. 1762 * @type - the plugin for the tracer 1763 * 1764 * Register a new plugin tracer. 
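 *
 * Minimal registration sketch (hypothetical tracer; only a few of the
 * struct tracer callbacks are shown):
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	register_tracer(&my_tracer);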
1765 */ 1766 int __init register_tracer(struct tracer *type) 1767 { 1768 struct tracer *t; 1769 int ret = 0; 1770 1771 if (!type->name) { 1772 pr_info("Tracer must have a name\n"); 1773 return -1; 1774 } 1775 1776 if (strlen(type->name) >= MAX_TRACER_SIZE) { 1777 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); 1778 return -1; 1779 } 1780 1781 mutex_lock(&trace_types_lock); 1782 1783 tracing_selftest_running = true; 1784 1785 for (t = trace_types; t; t = t->next) { 1786 if (strcmp(type->name, t->name) == 0) { 1787 /* already found */ 1788 pr_info("Tracer %s already registered\n", 1789 type->name); 1790 ret = -1; 1791 goto out; 1792 } 1793 } 1794 1795 if (!type->set_flag) 1796 type->set_flag = &dummy_set_flag; 1797 if (!type->flags) { 1798 /*allocate a dummy tracer_flags*/ 1799 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL); 1800 if (!type->flags) { 1801 ret = -ENOMEM; 1802 goto out; 1803 } 1804 type->flags->val = 0; 1805 type->flags->opts = dummy_tracer_opt; 1806 } else 1807 if (!type->flags->opts) 1808 type->flags->opts = dummy_tracer_opt; 1809 1810 /* store the tracer for __set_tracer_option */ 1811 type->flags->trace = type; 1812 1813 ret = run_tracer_selftest(type); 1814 if (ret < 0) 1815 goto out; 1816 1817 type->next = trace_types; 1818 trace_types = type; 1819 add_tracer_options(&global_trace, type); 1820 1821 out: 1822 tracing_selftest_running = false; 1823 mutex_unlock(&trace_types_lock); 1824 1825 if (ret || !default_bootup_tracer) 1826 goto out_unlock; 1827 1828 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) 1829 goto out_unlock; 1830 1831 printk(KERN_INFO "Starting tracer '%s'\n", type->name); 1832 /* Do we want this tracer to start on bootup? */ 1833 tracing_set_tracer(&global_trace, type->name); 1834 default_bootup_tracer = NULL; 1835 1836 apply_trace_boot_options(); 1837 1838 /* disable other selftests, since this will break it. 
*/ 1839 tracing_selftest_disabled = true; 1840 #ifdef CONFIG_FTRACE_STARTUP_TEST 1841 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n", 1842 type->name); 1843 #endif 1844 1845 out_unlock: 1846 return ret; 1847 } 1848 1849 void tracing_reset(struct trace_buffer *buf, int cpu) 1850 { 1851 struct ring_buffer *buffer = buf->buffer; 1852 1853 if (!buffer) 1854 return; 1855 1856 ring_buffer_record_disable(buffer); 1857 1858 /* Make sure all commits have finished */ 1859 synchronize_rcu(); 1860 ring_buffer_reset_cpu(buffer, cpu); 1861 1862 ring_buffer_record_enable(buffer); 1863 } 1864 1865 void tracing_reset_online_cpus(struct trace_buffer *buf) 1866 { 1867 struct ring_buffer *buffer = buf->buffer; 1868 int cpu; 1869 1870 if (!buffer) 1871 return; 1872 1873 ring_buffer_record_disable(buffer); 1874 1875 /* Make sure all commits have finished */ 1876 synchronize_rcu(); 1877 1878 buf->time_start = buffer_ftrace_now(buf, buf->cpu); 1879 1880 for_each_online_cpu(cpu) 1881 ring_buffer_reset_cpu(buffer, cpu); 1882 1883 ring_buffer_record_enable(buffer); 1884 } 1885 1886 /* Must have trace_types_lock held */ 1887 void tracing_reset_all_online_cpus(void) 1888 { 1889 struct trace_array *tr; 1890 1891 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 1892 if (!tr->clear_trace) 1893 continue; 1894 tr->clear_trace = false; 1895 tracing_reset_online_cpus(&tr->trace_buffer); 1896 #ifdef CONFIG_TRACER_MAX_TRACE 1897 tracing_reset_online_cpus(&tr->max_buffer); 1898 #endif 1899 } 1900 } 1901 1902 static int *tgid_map; 1903 1904 #define SAVED_CMDLINES_DEFAULT 128 1905 #define NO_CMDLINE_MAP UINT_MAX 1906 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; 1907 struct saved_cmdlines_buffer { 1908 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; 1909 unsigned *map_cmdline_to_pid; 1910 unsigned cmdline_num; 1911 int cmdline_idx; 1912 char *saved_cmdlines; 1913 }; 1914 static struct saved_cmdlines_buffer *savedcmd; 1915 1916 /* temporary disable recording */ 1917 static atomic_t trace_record_taskinfo_disabled __read_mostly; 1918 1919 static inline char *get_saved_cmdlines(int idx) 1920 { 1921 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN]; 1922 } 1923 1924 static inline void set_cmdline(int idx, const char *cmdline) 1925 { 1926 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN); 1927 } 1928 1929 static int allocate_cmdlines_buffer(unsigned int val, 1930 struct saved_cmdlines_buffer *s) 1931 { 1932 s->map_cmdline_to_pid = kmalloc_array(val, 1933 sizeof(*s->map_cmdline_to_pid), 1934 GFP_KERNEL); 1935 if (!s->map_cmdline_to_pid) 1936 return -ENOMEM; 1937 1938 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL); 1939 if (!s->saved_cmdlines) { 1940 kfree(s->map_cmdline_to_pid); 1941 return -ENOMEM; 1942 } 1943 1944 s->cmdline_idx = 0; 1945 s->cmdline_num = val; 1946 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP, 1947 sizeof(s->map_pid_to_cmdline)); 1948 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP, 1949 val * sizeof(*s->map_cmdline_to_pid)); 1950 1951 return 0; 1952 } 1953 1954 static int trace_create_savedcmd(void) 1955 { 1956 int ret; 1957 1958 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL); 1959 if (!savedcmd) 1960 return -ENOMEM; 1961 1962 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd); 1963 if (ret < 0) { 1964 kfree(savedcmd); 1965 savedcmd = NULL; 1966 return -ENOMEM; 1967 } 1968 1969 return 0; 1970 } 1971 1972 int is_tracing_stopped(void) 1973 { 1974 return global_trace.stop_count; 1975 } 1976 1977 /** 1978 * 
tracing_start - quick start of the tracer 1979 * 1980 * If tracing is enabled but was stopped by tracing_stop, 1981 * this will start the tracer back up. 1982 */ 1983 void tracing_start(void) 1984 { 1985 struct ring_buffer *buffer; 1986 unsigned long flags; 1987 1988 if (tracing_disabled) 1989 return; 1990 1991 raw_spin_lock_irqsave(&global_trace.start_lock, flags); 1992 if (--global_trace.stop_count) { 1993 if (global_trace.stop_count < 0) { 1994 /* Someone screwed up their debugging */ 1995 WARN_ON_ONCE(1); 1996 global_trace.stop_count = 0; 1997 } 1998 goto out; 1999 } 2000 2001 /* Prevent the buffers from switching */ 2002 arch_spin_lock(&global_trace.max_lock); 2003 2004 buffer = global_trace.trace_buffer.buffer; 2005 if (buffer) 2006 ring_buffer_record_enable(buffer); 2007 2008 #ifdef CONFIG_TRACER_MAX_TRACE 2009 buffer = global_trace.max_buffer.buffer; 2010 if (buffer) 2011 ring_buffer_record_enable(buffer); 2012 #endif 2013 2014 arch_spin_unlock(&global_trace.max_lock); 2015 2016 out: 2017 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); 2018 } 2019 2020 static void tracing_start_tr(struct trace_array *tr) 2021 { 2022 struct ring_buffer *buffer; 2023 unsigned long flags; 2024 2025 if (tracing_disabled) 2026 return; 2027 2028 /* If global, we need to also start the max tracer */ 2029 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) 2030 return tracing_start(); 2031 2032 raw_spin_lock_irqsave(&tr->start_lock, flags); 2033 2034 if (--tr->stop_count) { 2035 if (tr->stop_count < 0) { 2036 /* Someone screwed up their debugging */ 2037 WARN_ON_ONCE(1); 2038 tr->stop_count = 0; 2039 } 2040 goto out; 2041 } 2042 2043 buffer = tr->trace_buffer.buffer; 2044 if (buffer) 2045 ring_buffer_record_enable(buffer); 2046 2047 out: 2048 raw_spin_unlock_irqrestore(&tr->start_lock, flags); 2049 } 2050 2051 /** 2052 * tracing_stop - quick stop of the tracer 2053 * 2054 * Light weight way to stop tracing. Use in conjunction with 2055 * tracing_start. 
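 *
 * Calls nest via stop_count, so a caller can (illustratively) do:
 *
 *	tracing_stop();
 *	inspect_buffers();
 *	tracing_start();
 *
 * inspect_buffers() is a placeholder for whatever work needs the
 * buffers quiesced.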
2056 */ 2057 void tracing_stop(void) 2058 { 2059 struct ring_buffer *buffer; 2060 unsigned long flags; 2061 2062 raw_spin_lock_irqsave(&global_trace.start_lock, flags); 2063 if (global_trace.stop_count++) 2064 goto out; 2065 2066 /* Prevent the buffers from switching */ 2067 arch_spin_lock(&global_trace.max_lock); 2068 2069 buffer = global_trace.trace_buffer.buffer; 2070 if (buffer) 2071 ring_buffer_record_disable(buffer); 2072 2073 #ifdef CONFIG_TRACER_MAX_TRACE 2074 buffer = global_trace.max_buffer.buffer; 2075 if (buffer) 2076 ring_buffer_record_disable(buffer); 2077 #endif 2078 2079 arch_spin_unlock(&global_trace.max_lock); 2080 2081 out: 2082 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); 2083 } 2084 2085 static void tracing_stop_tr(struct trace_array *tr) 2086 { 2087 struct ring_buffer *buffer; 2088 unsigned long flags; 2089 2090 /* If global, we need to also stop the max tracer */ 2091 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) 2092 return tracing_stop(); 2093 2094 raw_spin_lock_irqsave(&tr->start_lock, flags); 2095 if (tr->stop_count++) 2096 goto out; 2097 2098 buffer = tr->trace_buffer.buffer; 2099 if (buffer) 2100 ring_buffer_record_disable(buffer); 2101 2102 out: 2103 raw_spin_unlock_irqrestore(&tr->start_lock, flags); 2104 } 2105 2106 static int trace_save_cmdline(struct task_struct *tsk) 2107 { 2108 unsigned pid, idx; 2109 2110 /* treat recording of idle task as a success */ 2111 if (!tsk->pid) 2112 return 1; 2113 2114 if (unlikely(tsk->pid > PID_MAX_DEFAULT)) 2115 return 0; 2116 2117 /* 2118 * It's not the end of the world if we don't get 2119 * the lock, but we also don't want to spin 2120 * nor do we want to disable interrupts, 2121 * so if we miss here, then better luck next time. 2122 */ 2123 if (!arch_spin_trylock(&trace_cmdline_lock)) 2124 return 0; 2125 2126 idx = savedcmd->map_pid_to_cmdline[tsk->pid]; 2127 if (idx == NO_CMDLINE_MAP) { 2128 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num; 2129 2130 /* 2131 * Check whether the cmdline buffer at idx has a pid 2132 * mapped. We are going to overwrite that entry so we 2133 * need to clear the map_pid_to_cmdline. Otherwise we 2134 * would read the new comm for the old pid. 
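 *
 * Worked example with hypothetical values: if this slot currently
 * holds the comm of pid 1234 and is about to be reused for pid 5678,
 * map_pid_to_cmdline[1234] is reset to NO_CMDLINE_MAP here; otherwise
 * a later lookup of pid 1234 would return 5678's comm.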
2135 */ 2136 pid = savedcmd->map_cmdline_to_pid[idx]; 2137 if (pid != NO_CMDLINE_MAP) 2138 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP; 2139 2140 savedcmd->map_cmdline_to_pid[idx] = tsk->pid; 2141 savedcmd->map_pid_to_cmdline[tsk->pid] = idx; 2142 2143 savedcmd->cmdline_idx = idx; 2144 } 2145 2146 set_cmdline(idx, tsk->comm); 2147 2148 arch_spin_unlock(&trace_cmdline_lock); 2149 2150 return 1; 2151 } 2152 2153 static void __trace_find_cmdline(int pid, char comm[]) 2154 { 2155 unsigned map; 2156 2157 if (!pid) { 2158 strcpy(comm, "<idle>"); 2159 return; 2160 } 2161 2162 if (WARN_ON_ONCE(pid < 0)) { 2163 strcpy(comm, "<XXX>"); 2164 return; 2165 } 2166 2167 if (pid > PID_MAX_DEFAULT) { 2168 strcpy(comm, "<...>"); 2169 return; 2170 } 2171 2172 map = savedcmd->map_pid_to_cmdline[pid]; 2173 if (map != NO_CMDLINE_MAP) 2174 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN); 2175 else 2176 strcpy(comm, "<...>"); 2177 } 2178 2179 void trace_find_cmdline(int pid, char comm[]) 2180 { 2181 preempt_disable(); 2182 arch_spin_lock(&trace_cmdline_lock); 2183 2184 __trace_find_cmdline(pid, comm); 2185 2186 arch_spin_unlock(&trace_cmdline_lock); 2187 preempt_enable(); 2188 } 2189 2190 int trace_find_tgid(int pid) 2191 { 2192 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT)) 2193 return 0; 2194 2195 return tgid_map[pid]; 2196 } 2197 2198 static int trace_save_tgid(struct task_struct *tsk) 2199 { 2200 /* treat recording of idle task as a success */ 2201 if (!tsk->pid) 2202 return 1; 2203 2204 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT)) 2205 return 0; 2206 2207 tgid_map[tsk->pid] = tsk->tgid; 2208 return 1; 2209 } 2210 2211 static bool tracing_record_taskinfo_skip(int flags) 2212 { 2213 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID)))) 2214 return true; 2215 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on()) 2216 return true; 2217 if (!__this_cpu_read(trace_taskinfo_save)) 2218 return true; 2219 return false; 2220 } 2221 2222 /** 2223 * tracing_record_taskinfo - record the task info of a task 2224 * 2225 * @task - task to record 2226 * @flags - TRACE_RECORD_CMDLINE for recording comm 2227 * - TRACE_RECORD_TGID for recording tgid 2228 */ 2229 void tracing_record_taskinfo(struct task_struct *task, int flags) 2230 { 2231 bool done; 2232 2233 if (tracing_record_taskinfo_skip(flags)) 2234 return; 2235 2236 /* 2237 * Record as much task information as possible. If some fail, continue 2238 * to try to record the others. 2239 */ 2240 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task); 2241 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task); 2242 2243 /* If recording any information failed, retry again soon. */ 2244 if (!done) 2245 return; 2246 2247 __this_cpu_write(trace_taskinfo_save, false); 2248 } 2249 2250 /** 2251 * tracing_record_taskinfo_sched_switch - record task info for sched_switch 2252 * 2253 * @prev - previous task during sched_switch 2254 * @next - next task during sched_switch 2255 * @flags - TRACE_RECORD_CMDLINE for recording comm 2256 * TRACE_RECORD_TGID for recording tgid 2257 */ 2258 void tracing_record_taskinfo_sched_switch(struct task_struct *prev, 2259 struct task_struct *next, int flags) 2260 { 2261 bool done; 2262 2263 if (tracing_record_taskinfo_skip(flags)) 2264 return; 2265 2266 /* 2267 * Record as much task information as possible. If some fail, continue 2268 * to try to record the others. 
2269 */ 2270 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev); 2271 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next); 2272 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev); 2273 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next); 2274 2275 /* If recording any information failed, retry again soon. */ 2276 if (!done) 2277 return; 2278 2279 __this_cpu_write(trace_taskinfo_save, false); 2280 } 2281 2282 /* Helpers to record a specific task information */ 2283 void tracing_record_cmdline(struct task_struct *task) 2284 { 2285 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE); 2286 } 2287 2288 void tracing_record_tgid(struct task_struct *task) 2289 { 2290 tracing_record_taskinfo(task, TRACE_RECORD_TGID); 2291 } 2292 2293 /* 2294 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq 2295 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function 2296 * simplifies those functions and keeps them in sync. 2297 */ 2298 enum print_line_t trace_handle_return(struct trace_seq *s) 2299 { 2300 return trace_seq_has_overflowed(s) ? 2301 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; 2302 } 2303 EXPORT_SYMBOL_GPL(trace_handle_return); 2304 2305 void 2306 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, 2307 int pc) 2308 { 2309 struct task_struct *tsk = current; 2310 2311 entry->preempt_count = pc & 0xff; 2312 entry->pid = (tsk) ? tsk->pid : 0; 2313 entry->flags = 2314 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT 2315 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | 2316 #else 2317 TRACE_FLAG_IRQS_NOSUPPORT | 2318 #endif 2319 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | 2320 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | 2321 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | 2322 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | 2323 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); 2324 } 2325 EXPORT_SYMBOL_GPL(tracing_generic_entry_update); 2326 2327 struct ring_buffer_event * 2328 trace_buffer_lock_reserve(struct ring_buffer *buffer, 2329 int type, 2330 unsigned long len, 2331 unsigned long flags, int pc) 2332 { 2333 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc); 2334 } 2335 2336 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event); 2337 DEFINE_PER_CPU(int, trace_buffered_event_cnt); 2338 static int trace_buffered_event_ref; 2339 2340 /** 2341 * trace_buffered_event_enable - enable buffering events 2342 * 2343 * When events are being filtered, it is quicker to use a temporary 2344 * buffer to write the event data into if there's a likely chance 2345 * that it will not be committed. The discard of the ring buffer 2346 * is not as fast as committing, and is much slower than copying 2347 * a commit. 2348 * 2349 * When an event is to be filtered, allocate per cpu buffers to 2350 * write the event data into, and if the event is filtered and discarded 2351 * it is simply dropped, otherwise, the entire data is to be committed 2352 * in one shot. 
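 *
 * Illustrative call sequence (a sketch, not code from this file); both
 * calls expect event_mutex to be held, which they check with
 * WARN_ON_ONCE():
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();	// a filter was just attached
 *	mutex_unlock(&event_mutex);
 *	...
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_disable();	// the last filter was removed
 *	mutex_unlock(&event_mutex);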
2353 */ 2354 void trace_buffered_event_enable(void) 2355 { 2356 struct ring_buffer_event *event; 2357 struct page *page; 2358 int cpu; 2359 2360 WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); 2361 2362 if (trace_buffered_event_ref++) 2363 return; 2364 2365 for_each_tracing_cpu(cpu) { 2366 page = alloc_pages_node(cpu_to_node(cpu), 2367 GFP_KERNEL | __GFP_NORETRY, 0); 2368 if (!page) 2369 goto failed; 2370 2371 event = page_address(page); 2372 memset(event, 0, sizeof(*event)); 2373 2374 per_cpu(trace_buffered_event, cpu) = event; 2375 2376 preempt_disable(); 2377 if (cpu == smp_processor_id() && 2378 this_cpu_read(trace_buffered_event) != 2379 per_cpu(trace_buffered_event, cpu)) 2380 WARN_ON_ONCE(1); 2381 preempt_enable(); 2382 } 2383 2384 return; 2385 failed: 2386 trace_buffered_event_disable(); 2387 } 2388 2389 static void enable_trace_buffered_event(void *data) 2390 { 2391 /* Probably not needed, but do it anyway */ 2392 smp_rmb(); 2393 this_cpu_dec(trace_buffered_event_cnt); 2394 } 2395 2396 static void disable_trace_buffered_event(void *data) 2397 { 2398 this_cpu_inc(trace_buffered_event_cnt); 2399 } 2400 2401 /** 2402 * trace_buffered_event_disable - disable buffering events 2403 * 2404 * When a filter is removed, it is faster to not use the buffered 2405 * events, and to commit directly into the ring buffer. Free up 2406 * the temp buffers when there are no more users. This requires 2407 * special synchronization with current events. 2408 */ 2409 void trace_buffered_event_disable(void) 2410 { 2411 int cpu; 2412 2413 WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); 2414 2415 if (WARN_ON_ONCE(!trace_buffered_event_ref)) 2416 return; 2417 2418 if (--trace_buffered_event_ref) 2419 return; 2420 2421 preempt_disable(); 2422 /* For each CPU, set the buffer as used. */ 2423 smp_call_function_many(tracing_buffer_mask, 2424 disable_trace_buffered_event, NULL, 1); 2425 preempt_enable(); 2426 2427 /* Wait for all current users to finish */ 2428 synchronize_rcu(); 2429 2430 for_each_tracing_cpu(cpu) { 2431 free_page((unsigned long)per_cpu(trace_buffered_event, cpu)); 2432 per_cpu(trace_buffered_event, cpu) = NULL; 2433 } 2434 /* 2435 * Make sure trace_buffered_event is NULL before clearing 2436 * trace_buffered_event_cnt. 2437 */ 2438 smp_wmb(); 2439 2440 preempt_disable(); 2441 /* Do the work on each cpu */ 2442 smp_call_function_many(tracing_buffer_mask, 2443 enable_trace_buffered_event, NULL, 1); 2444 preempt_enable(); 2445 } 2446 2447 static struct ring_buffer *temp_buffer; 2448 2449 struct ring_buffer_event * 2450 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, 2451 struct trace_event_file *trace_file, 2452 int type, unsigned long len, 2453 unsigned long flags, int pc) 2454 { 2455 struct ring_buffer_event *entry; 2456 int val; 2457 2458 *current_rb = trace_file->tr->trace_buffer.buffer; 2459 2460 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags & 2461 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) && 2462 (entry = this_cpu_read(trace_buffered_event))) { 2463 /* Try to use the per cpu buffer first */ 2464 val = this_cpu_inc_return(trace_buffered_event_cnt); 2465 if (val == 1) { 2466 trace_event_setup(entry, type, flags, pc); 2467 entry->array[0] = len; 2468 return entry; 2469 } 2470 this_cpu_dec(trace_buffered_event_cnt); 2471 } 2472 2473 entry = __trace_buffer_lock_reserve(*current_rb, 2474 type, len, flags, pc); 2475 /* 2476 * If tracing is off, but we have triggers enabled 2477 * we still need to look at the event data. 
Use the temp_buffer 2478 * to store the trace event for the trigger to use. It's recursion 2479 * safe and will not be recorded anywhere. 2480 */ 2481 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { 2482 *current_rb = temp_buffer; 2483 entry = __trace_buffer_lock_reserve(*current_rb, 2484 type, len, flags, pc); 2485 } 2486 return entry; 2487 } 2488 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); 2489 2490 static DEFINE_SPINLOCK(tracepoint_iter_lock); 2491 static DEFINE_MUTEX(tracepoint_printk_mutex); 2492 2493 static void output_printk(struct trace_event_buffer *fbuffer) 2494 { 2495 struct trace_event_call *event_call; 2496 struct trace_event *event; 2497 unsigned long flags; 2498 struct trace_iterator *iter = tracepoint_print_iter; 2499 2500 /* We should never get here if iter is NULL */ 2501 if (WARN_ON_ONCE(!iter)) 2502 return; 2503 2504 event_call = fbuffer->trace_file->event_call; 2505 if (!event_call || !event_call->event.funcs || 2506 !event_call->event.funcs->trace) 2507 return; 2508 2509 event = &fbuffer->trace_file->event_call->event; 2510 2511 spin_lock_irqsave(&tracepoint_iter_lock, flags); 2512 trace_seq_init(&iter->seq); 2513 iter->ent = fbuffer->entry; 2514 event_call->event.funcs->trace(iter, 0, event); 2515 trace_seq_putc(&iter->seq, 0); 2516 printk("%s", iter->seq.buffer); 2517 2518 spin_unlock_irqrestore(&tracepoint_iter_lock, flags); 2519 } 2520 2521 int tracepoint_printk_sysctl(struct ctl_table *table, int write, 2522 void __user *buffer, size_t *lenp, 2523 loff_t *ppos) 2524 { 2525 int save_tracepoint_printk; 2526 int ret; 2527 2528 mutex_lock(&tracepoint_printk_mutex); 2529 save_tracepoint_printk = tracepoint_printk; 2530 2531 ret = proc_dointvec(table, write, buffer, lenp, ppos); 2532 2533 /* 2534 * This will force exiting early, as tracepoint_printk 2535 * is always zero when tracepoint_print_iter is not allocated 2536 */ 2537 if (!tracepoint_print_iter) 2538 tracepoint_printk = 0; 2539 2540 if (save_tracepoint_printk == tracepoint_printk) 2541 goto out; 2542 2543 if (tracepoint_printk) 2544 static_key_enable(&tracepoint_printk_key.key); 2545 else 2546 static_key_disable(&tracepoint_printk_key.key); 2547 2548 out: 2549 mutex_unlock(&tracepoint_printk_mutex); 2550 2551 return ret; 2552 } 2553 2554 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer) 2555 { 2556 if (static_key_false(&tracepoint_printk_key.key)) 2557 output_printk(fbuffer); 2558 2559 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer, 2560 fbuffer->event, fbuffer->entry, 2561 fbuffer->flags, fbuffer->pc); 2562 } 2563 EXPORT_SYMBOL_GPL(trace_event_buffer_commit); 2564 2565 /* 2566 * Skip 3: 2567 * 2568 * trace_buffer_unlock_commit_regs() 2569 * trace_event_buffer_commit() 2570 * trace_event_raw_event_xxx() 2571 */ 2572 # define STACK_SKIP 3 2573 2574 void trace_buffer_unlock_commit_regs(struct trace_array *tr, 2575 struct ring_buffer *buffer, 2576 struct ring_buffer_event *event, 2577 unsigned long flags, int pc, 2578 struct pt_regs *regs) 2579 { 2580 __buffer_unlock_commit(buffer, event); 2581 2582 /* 2583 * If regs is not set, then skip the necessary functions. 2584 * Note, we can still get here via blktrace, wakeup tracer 2585 * and mmiotrace, but that's ok if they lose a function or 2586 * two. They are not that meaningful. 2587 */ 2588 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs); 2589 ftrace_trace_userstack(buffer, flags, pc); 2590 } 2591 2592 /* 2593 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2594 */ 2595 void 2596 trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer, 2597 struct ring_buffer_event *event) 2598 { 2599 __buffer_unlock_commit(buffer, event); 2600 } 2601 2602 static void 2603 trace_process_export(struct trace_export *export, 2604 struct ring_buffer_event *event) 2605 { 2606 struct trace_entry *entry; 2607 unsigned int size = 0; 2608 2609 entry = ring_buffer_event_data(event); 2610 size = ring_buffer_event_length(event); 2611 export->write(export, entry, size); 2612 } 2613 2614 static DEFINE_MUTEX(ftrace_export_lock); 2615 2616 static struct trace_export __rcu *ftrace_exports_list __read_mostly; 2617 2618 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled); 2619 2620 static inline void ftrace_exports_enable(void) 2621 { 2622 static_branch_enable(&ftrace_exports_enabled); 2623 } 2624 2625 static inline void ftrace_exports_disable(void) 2626 { 2627 static_branch_disable(&ftrace_exports_enabled); 2628 } 2629 2630 static void ftrace_exports(struct ring_buffer_event *event) 2631 { 2632 struct trace_export *export; 2633 2634 preempt_disable_notrace(); 2635 2636 export = rcu_dereference_raw_notrace(ftrace_exports_list); 2637 while (export) { 2638 trace_process_export(export, event); 2639 export = rcu_dereference_raw_notrace(export->next); 2640 } 2641 2642 preempt_enable_notrace(); 2643 } 2644 2645 static inline void 2646 add_trace_export(struct trace_export **list, struct trace_export *export) 2647 { 2648 rcu_assign_pointer(export->next, *list); 2649 /* 2650 * We are entering export into the list but another 2651 * CPU might be walking that list. We need to make sure 2652 * the export->next pointer is valid before another CPU sees 2653 * the export pointer included into the list. 2654 */ 2655 rcu_assign_pointer(*list, export); 2656 } 2657 2658 static inline int 2659 rm_trace_export(struct trace_export **list, struct trace_export *export) 2660 { 2661 struct trace_export **p; 2662 2663 for (p = list; *p != NULL; p = &(*p)->next) 2664 if (*p == export) 2665 break; 2666 2667 if (*p != export) 2668 return -1; 2669 2670 rcu_assign_pointer(*p, (*p)->next); 2671 2672 return 0; 2673 } 2674 2675 static inline void 2676 add_ftrace_export(struct trace_export **list, struct trace_export *export) 2677 { 2678 if (*list == NULL) 2679 ftrace_exports_enable(); 2680 2681 add_trace_export(list, export); 2682 } 2683 2684 static inline int 2685 rm_ftrace_export(struct trace_export **list, struct trace_export *export) 2686 { 2687 int ret; 2688 2689 ret = rm_trace_export(list, export); 2690 if (*list == NULL) 2691 ftrace_exports_disable(); 2692 2693 return ret; 2694 } 2695 2696 int register_ftrace_export(struct trace_export *export) 2697 { 2698 if (WARN_ON_ONCE(!export->write)) 2699 return -1; 2700 2701 mutex_lock(&ftrace_export_lock); 2702 2703 add_ftrace_export(&ftrace_exports_list, export); 2704 2705 mutex_unlock(&ftrace_export_lock); 2706 2707 return 0; 2708 } 2709 EXPORT_SYMBOL_GPL(register_ftrace_export); 2710 2711 int unregister_ftrace_export(struct trace_export *export) 2712 { 2713 int ret; 2714 2715 mutex_lock(&ftrace_export_lock); 2716 2717 ret = rm_ftrace_export(&ftrace_exports_list, export); 2718 2719 mutex_unlock(&ftrace_export_lock); 2720 2721 return ret; 2722 } 2723 EXPORT_SYMBOL_GPL(unregister_ftrace_export); 2724 2725 void 2726 trace_function(struct trace_array *tr, 2727 unsigned long ip, unsigned long parent_ip, unsigned long flags, 2728 int pc) 2729 { 2730 struct trace_event_call *call = &event_function; 2731 struct ring_buffer *buffer = tr->trace_buffer.buffer; 
2732 struct ring_buffer_event *event; 2733 struct ftrace_entry *entry; 2734 2735 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), 2736 flags, pc); 2737 if (!event) 2738 return; 2739 entry = ring_buffer_event_data(event); 2740 entry->ip = ip; 2741 entry->parent_ip = parent_ip; 2742 2743 if (!call_filter_check_discard(call, entry, buffer, event)) { 2744 if (static_branch_unlikely(&ftrace_exports_enabled)) 2745 ftrace_exports(event); 2746 __buffer_unlock_commit(buffer, event); 2747 } 2748 } 2749 2750 #ifdef CONFIG_STACKTRACE 2751 2752 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long)) 2753 struct ftrace_stack { 2754 unsigned long calls[FTRACE_STACK_MAX_ENTRIES]; 2755 }; 2756 2757 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack); 2758 static DEFINE_PER_CPU(int, ftrace_stack_reserve); 2759 2760 static void __ftrace_trace_stack(struct ring_buffer *buffer, 2761 unsigned long flags, 2762 int skip, int pc, struct pt_regs *regs) 2763 { 2764 struct trace_event_call *call = &event_kernel_stack; 2765 struct ring_buffer_event *event; 2766 struct stack_entry *entry; 2767 struct stack_trace trace; 2768 int use_stack; 2769 int size = FTRACE_STACK_ENTRIES; 2770 2771 trace.nr_entries = 0; 2772 trace.skip = skip; 2773 2774 /* 2775 * Add one, for this function and the call to save_stack_trace() 2776 * If regs is set, then these functions will not be in the way. 2777 */ 2778 #ifndef CONFIG_UNWINDER_ORC 2779 if (!regs) 2780 trace.skip++; 2781 #endif 2782 2783 /* 2784 * Since events can happen in NMIs there's no safe way to 2785 * use the per cpu ftrace_stacks. We reserve it and if an interrupt 2786 * or NMI comes in, it will just have to use the default 2787 * FTRACE_STACK_SIZE. 2788 */ 2789 preempt_disable_notrace(); 2790 2791 use_stack = __this_cpu_inc_return(ftrace_stack_reserve); 2792 /* 2793 * We don't need any atomic variables, just a barrier. 2794 * If an interrupt comes in, we don't care, because it would 2795 * have exited and put the counter back to what we want. 2796 * We just need a barrier to keep gcc from moving things 2797 * around. 
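 *
 * For example, if this path is re-entered from an NMI while
 * ftrace_stack_reserve is already held, use_stack ends up greater
 * than 1 and the nested call saves its trace directly into the ring
 * buffer event below (capped at FTRACE_STACK_ENTRIES) instead of
 * using the per cpu ftrace_stack.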
2798 */ 2799 barrier(); 2800 if (use_stack == 1) { 2801 trace.entries = this_cpu_ptr(ftrace_stack.calls); 2802 trace.max_entries = FTRACE_STACK_MAX_ENTRIES; 2803 2804 if (regs) 2805 save_stack_trace_regs(regs, &trace); 2806 else 2807 save_stack_trace(&trace); 2808 2809 if (trace.nr_entries > size) 2810 size = trace.nr_entries; 2811 } else 2812 /* From now on, use_stack is a boolean */ 2813 use_stack = 0; 2814 2815 size *= sizeof(unsigned long); 2816 2817 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, 2818 sizeof(*entry) + size, flags, pc); 2819 if (!event) 2820 goto out; 2821 entry = ring_buffer_event_data(event); 2822 2823 memset(&entry->caller, 0, size); 2824 2825 if (use_stack) 2826 memcpy(&entry->caller, trace.entries, 2827 trace.nr_entries * sizeof(unsigned long)); 2828 else { 2829 trace.max_entries = FTRACE_STACK_ENTRIES; 2830 trace.entries = entry->caller; 2831 if (regs) 2832 save_stack_trace_regs(regs, &trace); 2833 else 2834 save_stack_trace(&trace); 2835 } 2836 2837 entry->size = trace.nr_entries; 2838 2839 if (!call_filter_check_discard(call, entry, buffer, event)) 2840 __buffer_unlock_commit(buffer, event); 2841 2842 out: 2843 /* Again, don't let gcc optimize things here */ 2844 barrier(); 2845 __this_cpu_dec(ftrace_stack_reserve); 2846 preempt_enable_notrace(); 2847 2848 } 2849 2850 static inline void ftrace_trace_stack(struct trace_array *tr, 2851 struct ring_buffer *buffer, 2852 unsigned long flags, 2853 int skip, int pc, struct pt_regs *regs) 2854 { 2855 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) 2856 return; 2857 2858 __ftrace_trace_stack(buffer, flags, skip, pc, regs); 2859 } 2860 2861 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, 2862 int pc) 2863 { 2864 struct ring_buffer *buffer = tr->trace_buffer.buffer; 2865 2866 if (rcu_is_watching()) { 2867 __ftrace_trace_stack(buffer, flags, skip, pc, NULL); 2868 return; 2869 } 2870 2871 /* 2872 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(), 2873 * but if the above rcu_is_watching() failed, then the NMI 2874 * triggered someplace critical, and rcu_irq_enter() should 2875 * not be called from NMI. 2876 */ 2877 if (unlikely(in_nmi())) 2878 return; 2879 2880 rcu_irq_enter_irqson(); 2881 __ftrace_trace_stack(buffer, flags, skip, pc, NULL); 2882 rcu_irq_exit_irqson(); 2883 } 2884 2885 /** 2886 * trace_dump_stack - record a stack back trace in the trace buffer 2887 * @skip: Number of functions to skip (helper handlers) 2888 */ 2889 void trace_dump_stack(int skip) 2890 { 2891 unsigned long flags; 2892 2893 if (tracing_disabled || tracing_selftest_running) 2894 return; 2895 2896 local_save_flags(flags); 2897 2898 #ifndef CONFIG_UNWINDER_ORC 2899 /* Skip 1 to skip this function. */ 2900 skip++; 2901 #endif 2902 __ftrace_trace_stack(global_trace.trace_buffer.buffer, 2903 flags, skip, preempt_count(), NULL); 2904 } 2905 EXPORT_SYMBOL_GPL(trace_dump_stack); 2906 2907 static DEFINE_PER_CPU(int, user_stack_count); 2908 2909 void 2910 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) 2911 { 2912 struct trace_event_call *call = &event_user_stack; 2913 struct ring_buffer_event *event; 2914 struct userstack_entry *entry; 2915 struct stack_trace trace; 2916 2917 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE)) 2918 return; 2919 2920 /* 2921 * NMIs can not handle page faults, even with fix ups. 2922 * The save user stack can (and often does) fault. 
2923 */ 2924 if (unlikely(in_nmi())) 2925 return; 2926 2927 /* 2928 * prevent recursion, since the user stack tracing may 2929 * trigger other kernel events. 2930 */ 2931 preempt_disable(); 2932 if (__this_cpu_read(user_stack_count)) 2933 goto out; 2934 2935 __this_cpu_inc(user_stack_count); 2936 2937 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, 2938 sizeof(*entry), flags, pc); 2939 if (!event) 2940 goto out_drop_count; 2941 entry = ring_buffer_event_data(event); 2942 2943 entry->tgid = current->tgid; 2944 memset(&entry->caller, 0, sizeof(entry->caller)); 2945 2946 trace.nr_entries = 0; 2947 trace.max_entries = FTRACE_STACK_ENTRIES; 2948 trace.skip = 0; 2949 trace.entries = entry->caller; 2950 2951 save_stack_trace_user(&trace); 2952 if (!call_filter_check_discard(call, entry, buffer, event)) 2953 __buffer_unlock_commit(buffer, event); 2954 2955 out_drop_count: 2956 __this_cpu_dec(user_stack_count); 2957 out: 2958 preempt_enable(); 2959 } 2960 2961 #ifdef UNUSED 2962 static void __trace_userstack(struct trace_array *tr, unsigned long flags) 2963 { 2964 ftrace_trace_userstack(tr, flags, preempt_count()); 2965 } 2966 #endif /* UNUSED */ 2967 2968 #endif /* CONFIG_STACKTRACE */ 2969 2970 /* created for use with alloc_percpu */ 2971 struct trace_buffer_struct { 2972 int nesting; 2973 char buffer[4][TRACE_BUF_SIZE]; 2974 }; 2975 2976 static struct trace_buffer_struct *trace_percpu_buffer; 2977 2978 /* 2979 * This allows for lockless recording. If we're nested too deeply, then 2980 * this returns NULL. 2981 */ 2982 static char *get_trace_buf(void) 2983 { 2984 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer); 2985 2986 if (!buffer || buffer->nesting >= 4) 2987 return NULL; 2988 2989 buffer->nesting++; 2990 2991 /* Interrupts must see nesting incremented before we use the buffer */ 2992 barrier(); 2993 return &buffer->buffer[buffer->nesting][0]; 2994 } 2995 2996 static void put_trace_buf(void) 2997 { 2998 /* Don't let the decrement of nesting leak before this */ 2999 barrier(); 3000 this_cpu_dec(trace_percpu_buffer->nesting); 3001 } 3002 3003 static int alloc_percpu_trace_buffer(void) 3004 { 3005 struct trace_buffer_struct *buffers; 3006 3007 buffers = alloc_percpu(struct trace_buffer_struct); 3008 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer")) 3009 return -ENOMEM; 3010 3011 trace_percpu_buffer = buffers; 3012 return 0; 3013 } 3014 3015 static int buffers_allocated; 3016 3017 void trace_printk_init_buffers(void) 3018 { 3019 if (buffers_allocated) 3020 return; 3021 3022 if (alloc_percpu_trace_buffer()) 3023 return; 3024 3025 /* trace_printk() is for debug use only. Don't use it in production. */ 3026 3027 pr_warn("\n"); 3028 pr_warn("**********************************************************\n"); 3029 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3030 pr_warn("** **\n"); 3031 pr_warn("** trace_printk() being used. Allocating extra memory. **\n"); 3032 pr_warn("** **\n"); 3033 pr_warn("** This means that this is a DEBUG kernel and it is **\n"); 3034 pr_warn("** unsafe for production use. **\n"); 3035 pr_warn("** **\n"); 3036 pr_warn("** If you see this message and you are not debugging **\n"); 3037 pr_warn("** the kernel, report this immediately to your vendor!
**\n"); 3038 pr_warn("** **\n"); 3039 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3040 pr_warn("**********************************************************\n"); 3041 3042 /* Expand the buffers to set size */ 3043 tracing_update_buffers(); 3044 3045 buffers_allocated = 1; 3046 3047 /* 3048 * trace_printk_init_buffers() can be called by modules. 3049 * If that happens, then we need to start cmdline recording 3050 * directly here. If the global_trace.buffer is already 3051 * allocated here, then this was called by module code. 3052 */ 3053 if (global_trace.trace_buffer.buffer) 3054 tracing_start_cmdline_record(); 3055 } 3056 EXPORT_SYMBOL_GPL(trace_printk_init_buffers); 3057 3058 void trace_printk_start_comm(void) 3059 { 3060 /* Start tracing comms if trace printk is set */ 3061 if (!buffers_allocated) 3062 return; 3063 tracing_start_cmdline_record(); 3064 } 3065 3066 static void trace_printk_start_stop_comm(int enabled) 3067 { 3068 if (!buffers_allocated) 3069 return; 3070 3071 if (enabled) 3072 tracing_start_cmdline_record(); 3073 else 3074 tracing_stop_cmdline_record(); 3075 } 3076 3077 /** 3078 * trace_vbprintk - write binary msg to tracing buffer 3079 * 3080 */ 3081 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 3082 { 3083 struct trace_event_call *call = &event_bprint; 3084 struct ring_buffer_event *event; 3085 struct ring_buffer *buffer; 3086 struct trace_array *tr = &global_trace; 3087 struct bprint_entry *entry; 3088 unsigned long flags; 3089 char *tbuffer; 3090 int len = 0, size, pc; 3091 3092 if (unlikely(tracing_selftest_running || tracing_disabled)) 3093 return 0; 3094 3095 /* Don't pollute graph traces with trace_vprintk internals */ 3096 pause_graph_tracing(); 3097 3098 pc = preempt_count(); 3099 preempt_disable_notrace(); 3100 3101 tbuffer = get_trace_buf(); 3102 if (!tbuffer) { 3103 len = 0; 3104 goto out_nobuffer; 3105 } 3106 3107 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); 3108 3109 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) 3110 goto out; 3111 3112 local_save_flags(flags); 3113 size = sizeof(*entry) + sizeof(u32) * len; 3114 buffer = tr->trace_buffer.buffer; 3115 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, 3116 flags, pc); 3117 if (!event) 3118 goto out; 3119 entry = ring_buffer_event_data(event); 3120 entry->ip = ip; 3121 entry->fmt = fmt; 3122 3123 memcpy(entry->buf, tbuffer, sizeof(u32) * len); 3124 if (!call_filter_check_discard(call, entry, buffer, event)) { 3125 __buffer_unlock_commit(buffer, event); 3126 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL); 3127 } 3128 3129 out: 3130 put_trace_buf(); 3131 3132 out_nobuffer: 3133 preempt_enable_notrace(); 3134 unpause_graph_tracing(); 3135 3136 return len; 3137 } 3138 EXPORT_SYMBOL_GPL(trace_vbprintk); 3139 3140 __printf(3, 0) 3141 static int 3142 __trace_array_vprintk(struct ring_buffer *buffer, 3143 unsigned long ip, const char *fmt, va_list args) 3144 { 3145 struct trace_event_call *call = &event_print; 3146 struct ring_buffer_event *event; 3147 int len = 0, size, pc; 3148 struct print_entry *entry; 3149 unsigned long flags; 3150 char *tbuffer; 3151 3152 if (tracing_disabled || tracing_selftest_running) 3153 return 0; 3154 3155 /* Don't pollute graph traces with trace_vprintk internals */ 3156 pause_graph_tracing(); 3157 3158 pc = preempt_count(); 3159 preempt_disable_notrace(); 3160 3161 3162 tbuffer = get_trace_buf(); 3163 if (!tbuffer) { 3164 len = 0; 3165 goto out_nobuffer; 3166 } 3167 3168 len = 
vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); 3169 3170 local_save_flags(flags); 3171 size = sizeof(*entry) + len + 1; 3172 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 3173 flags, pc); 3174 if (!event) 3175 goto out; 3176 entry = ring_buffer_event_data(event); 3177 entry->ip = ip; 3178 3179 memcpy(&entry->buf, tbuffer, len + 1); 3180 if (!call_filter_check_discard(call, entry, buffer, event)) { 3181 __buffer_unlock_commit(buffer, event); 3182 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL); 3183 } 3184 3185 out: 3186 put_trace_buf(); 3187 3188 out_nobuffer: 3189 preempt_enable_notrace(); 3190 unpause_graph_tracing(); 3191 3192 return len; 3193 } 3194 3195 __printf(3, 0) 3196 int trace_array_vprintk(struct trace_array *tr, 3197 unsigned long ip, const char *fmt, va_list args) 3198 { 3199 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); 3200 } 3201 3202 __printf(3, 0) 3203 int trace_array_printk(struct trace_array *tr, 3204 unsigned long ip, const char *fmt, ...) 3205 { 3206 int ret; 3207 va_list ap; 3208 3209 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) 3210 return 0; 3211 3212 va_start(ap, fmt); 3213 ret = trace_array_vprintk(tr, ip, fmt, ap); 3214 va_end(ap); 3215 return ret; 3216 } 3217 EXPORT_SYMBOL_GPL(trace_array_printk); 3218 3219 __printf(3, 4) 3220 int trace_array_printk_buf(struct ring_buffer *buffer, 3221 unsigned long ip, const char *fmt, ...) 3222 { 3223 int ret; 3224 va_list ap; 3225 3226 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) 3227 return 0; 3228 3229 va_start(ap, fmt); 3230 ret = __trace_array_vprintk(buffer, ip, fmt, ap); 3231 va_end(ap); 3232 return ret; 3233 } 3234 3235 __printf(2, 0) 3236 int trace_vprintk(unsigned long ip, const char *fmt, va_list args) 3237 { 3238 return trace_array_vprintk(&global_trace, ip, fmt, args); 3239 } 3240 EXPORT_SYMBOL_GPL(trace_vprintk); 3241 3242 static void trace_iterator_increment(struct trace_iterator *iter) 3243 { 3244 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); 3245 3246 iter->idx++; 3247 if (buf_iter) 3248 ring_buffer_read(buf_iter, NULL); 3249 } 3250 3251 static struct trace_entry * 3252 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, 3253 unsigned long *lost_events) 3254 { 3255 struct ring_buffer_event *event; 3256 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); 3257 3258 if (buf_iter) 3259 event = ring_buffer_iter_peek(buf_iter, ts); 3260 else 3261 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts, 3262 lost_events); 3263 3264 if (event) { 3265 iter->ent_size = ring_buffer_event_length(event); 3266 return ring_buffer_event_data(event); 3267 } 3268 iter->ent_size = 0; 3269 return NULL; 3270 } 3271 3272 static struct trace_entry * 3273 __find_next_entry(struct trace_iterator *iter, int *ent_cpu, 3274 unsigned long *missing_events, u64 *ent_ts) 3275 { 3276 struct ring_buffer *buffer = iter->trace_buffer->buffer; 3277 struct trace_entry *ent, *next = NULL; 3278 unsigned long lost_events = 0, next_lost = 0; 3279 int cpu_file = iter->cpu_file; 3280 u64 next_ts = 0, ts; 3281 int next_cpu = -1; 3282 int next_size = 0; 3283 int cpu; 3284 3285 /* 3286 * If we are in a per_cpu trace file, don't bother by iterating over 3287 * all cpu and peek directly. 
3288 */ 3289 if (cpu_file > RING_BUFFER_ALL_CPUS) { 3290 if (ring_buffer_empty_cpu(buffer, cpu_file)) 3291 return NULL; 3292 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); 3293 if (ent_cpu) 3294 *ent_cpu = cpu_file; 3295 3296 return ent; 3297 } 3298 3299 for_each_tracing_cpu(cpu) { 3300 3301 if (ring_buffer_empty_cpu(buffer, cpu)) 3302 continue; 3303 3304 ent = peek_next_entry(iter, cpu, &ts, &lost_events); 3305 3306 /* 3307 * Pick the entry with the smallest timestamp: 3308 */ 3309 if (ent && (!next || ts < next_ts)) { 3310 next = ent; 3311 next_cpu = cpu; 3312 next_ts = ts; 3313 next_lost = lost_events; 3314 next_size = iter->ent_size; 3315 } 3316 } 3317 3318 iter->ent_size = next_size; 3319 3320 if (ent_cpu) 3321 *ent_cpu = next_cpu; 3322 3323 if (ent_ts) 3324 *ent_ts = next_ts; 3325 3326 if (missing_events) 3327 *missing_events = next_lost; 3328 3329 return next; 3330 } 3331 3332 /* Find the next real entry, without updating the iterator itself */ 3333 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, 3334 int *ent_cpu, u64 *ent_ts) 3335 { 3336 return __find_next_entry(iter, ent_cpu, NULL, ent_ts); 3337 } 3338 3339 /* Find the next real entry, and increment the iterator to the next entry */ 3340 void *trace_find_next_entry_inc(struct trace_iterator *iter) 3341 { 3342 iter->ent = __find_next_entry(iter, &iter->cpu, 3343 &iter->lost_events, &iter->ts); 3344 3345 if (iter->ent) 3346 trace_iterator_increment(iter); 3347 3348 return iter->ent ? iter : NULL; 3349 } 3350 3351 static void trace_consume(struct trace_iterator *iter) 3352 { 3353 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts, 3354 &iter->lost_events); 3355 } 3356 3357 static void *s_next(struct seq_file *m, void *v, loff_t *pos) 3358 { 3359 struct trace_iterator *iter = m->private; 3360 int i = (int)*pos; 3361 void *ent; 3362 3363 WARN_ON_ONCE(iter->leftover); 3364 3365 (*pos)++; 3366 3367 /* can't go backwards */ 3368 if (iter->idx > i) 3369 return NULL; 3370 3371 if (iter->idx < 0) 3372 ent = trace_find_next_entry_inc(iter); 3373 else 3374 ent = iter; 3375 3376 while (ent && iter->idx < i) 3377 ent = trace_find_next_entry_inc(iter); 3378 3379 iter->pos = *pos; 3380 3381 return ent; 3382 } 3383 3384 void tracing_iter_reset(struct trace_iterator *iter, int cpu) 3385 { 3386 struct ring_buffer_event *event; 3387 struct ring_buffer_iter *buf_iter; 3388 unsigned long entries = 0; 3389 u64 ts; 3390 3391 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0; 3392 3393 buf_iter = trace_buffer_iter(iter, cpu); 3394 if (!buf_iter) 3395 return; 3396 3397 ring_buffer_iter_reset(buf_iter); 3398 3399 /* 3400 * We could have the case with the max latency tracers 3401 * that a reset never took place on a cpu. This is evident 3402 * by the timestamp being before the start of the buffer. 3403 */ 3404 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { 3405 if (ts >= iter->trace_buffer->time_start) 3406 break; 3407 entries++; 3408 ring_buffer_read(buf_iter, NULL); 3409 } 3410 3411 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; 3412 } 3413 3414 /* 3415 * The current tracer is copied to avoid a global locking 3416 * all around. 3417 */ 3418 static void *s_start(struct seq_file *m, loff_t *pos) 3419 { 3420 struct trace_iterator *iter = m->private; 3421 struct trace_array *tr = iter->tr; 3422 int cpu_file = iter->cpu_file; 3423 void *p = NULL; 3424 loff_t l = 0; 3425 int cpu; 3426 3427 /* 3428 * copy the tracer to avoid using a global lock all around. 
3429 * iter->trace is a copy of current_trace, the pointer to the 3430 * name may be used instead of a strcmp(), as iter->trace->name 3431 * will point to the same string as current_trace->name. 3432 */ 3433 mutex_lock(&trace_types_lock); 3434 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) 3435 *iter->trace = *tr->current_trace; 3436 mutex_unlock(&trace_types_lock); 3437 3438 #ifdef CONFIG_TRACER_MAX_TRACE 3439 if (iter->snapshot && iter->trace->use_max_tr) 3440 return ERR_PTR(-EBUSY); 3441 #endif 3442 3443 if (!iter->snapshot) 3444 atomic_inc(&trace_record_taskinfo_disabled); 3445 3446 if (*pos != iter->pos) { 3447 iter->ent = NULL; 3448 iter->cpu = 0; 3449 iter->idx = -1; 3450 3451 if (cpu_file == RING_BUFFER_ALL_CPUS) { 3452 for_each_tracing_cpu(cpu) 3453 tracing_iter_reset(iter, cpu); 3454 } else 3455 tracing_iter_reset(iter, cpu_file); 3456 3457 iter->leftover = 0; 3458 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) 3459 ; 3460 3461 } else { 3462 /* 3463 * If we overflowed the seq_file before, then we want 3464 * to just reuse the trace_seq buffer again. 3465 */ 3466 if (iter->leftover) 3467 p = iter; 3468 else { 3469 l = *pos - 1; 3470 p = s_next(m, p, &l); 3471 } 3472 } 3473 3474 trace_event_read_lock(); 3475 trace_access_lock(cpu_file); 3476 return p; 3477 } 3478 3479 static void s_stop(struct seq_file *m, void *p) 3480 { 3481 struct trace_iterator *iter = m->private; 3482 3483 #ifdef CONFIG_TRACER_MAX_TRACE 3484 if (iter->snapshot && iter->trace->use_max_tr) 3485 return; 3486 #endif 3487 3488 if (!iter->snapshot) 3489 atomic_dec(&trace_record_taskinfo_disabled); 3490 3491 trace_access_unlock(iter->cpu_file); 3492 trace_event_read_unlock(); 3493 } 3494 3495 static void 3496 get_total_entries(struct trace_buffer *buf, 3497 unsigned long *total, unsigned long *entries) 3498 { 3499 unsigned long count; 3500 int cpu; 3501 3502 *total = 0; 3503 *entries = 0; 3504 3505 for_each_tracing_cpu(cpu) { 3506 count = ring_buffer_entries_cpu(buf->buffer, cpu); 3507 /* 3508 * If this buffer has skipped entries, then we hold all 3509 * entries for the trace and we need to ignore the 3510 * ones before the time stamp. 3511 */ 3512 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { 3513 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; 3514 /* total is the same as the entries */ 3515 *total += count; 3516 } else 3517 *total += count + 3518 ring_buffer_overrun_cpu(buf->buffer, cpu); 3519 *entries += count; 3520 } 3521 } 3522 3523 static void print_lat_help_header(struct seq_file *m) 3524 { 3525 seq_puts(m, "# _------=> CPU# \n" 3526 "# / _-----=> irqs-off \n" 3527 "# | / _----=> need-resched \n" 3528 "# || / _---=> hardirq/softirq \n" 3529 "# ||| / _--=> preempt-depth \n" 3530 "# |||| / delay \n" 3531 "# cmd pid ||||| time | caller \n" 3532 "# \\ / ||||| \\ | / \n"); 3533 } 3534 3535 static void print_event_info(struct trace_buffer *buf, struct seq_file *m) 3536 { 3537 unsigned long total; 3538 unsigned long entries; 3539 3540 get_total_entries(buf, &total, &entries); 3541 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", 3542 entries, total, num_online_cpus()); 3543 seq_puts(m, "#\n"); 3544 } 3545 3546 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m, 3547 unsigned int flags) 3548 { 3549 bool tgid = flags & TRACE_ITER_RECORD_TGID; 3550 3551 print_event_info(buf, m); 3552 3553 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); 3554 seq_printf(m, "# | | %s | | |\n", tgid ? 
" | " : ""); 3555 } 3556 3557 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m, 3558 unsigned int flags) 3559 { 3560 bool tgid = flags & TRACE_ITER_RECORD_TGID; 3561 const char tgid_space[] = " "; 3562 const char space[] = " "; 3563 3564 print_event_info(buf, m); 3565 3566 seq_printf(m, "# %s _-----=> irqs-off\n", 3567 tgid ? tgid_space : space); 3568 seq_printf(m, "# %s / _----=> need-resched\n", 3569 tgid ? tgid_space : space); 3570 seq_printf(m, "# %s| / _---=> hardirq/softirq\n", 3571 tgid ? tgid_space : space); 3572 seq_printf(m, "# %s|| / _--=> preempt-depth\n", 3573 tgid ? tgid_space : space); 3574 seq_printf(m, "# %s||| / delay\n", 3575 tgid ? tgid_space : space); 3576 seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n", 3577 tgid ? " TGID " : space); 3578 seq_printf(m, "# | | %s | |||| | |\n", 3579 tgid ? " | " : space); 3580 } 3581 3582 void 3583 print_trace_header(struct seq_file *m, struct trace_iterator *iter) 3584 { 3585 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); 3586 struct trace_buffer *buf = iter->trace_buffer; 3587 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); 3588 struct tracer *type = iter->trace; 3589 unsigned long entries; 3590 unsigned long total; 3591 const char *name = "preemption"; 3592 3593 name = type->name; 3594 3595 get_total_entries(buf, &total, &entries); 3596 3597 seq_printf(m, "# %s latency trace v1.1.5 on %s\n", 3598 name, UTS_RELEASE); 3599 seq_puts(m, "# -----------------------------------" 3600 "---------------------------------\n"); 3601 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" 3602 " (M:%s VP:%d, KP:%d, SP:%d HP:%d", 3603 nsecs_to_usecs(data->saved_latency), 3604 entries, 3605 total, 3606 buf->cpu, 3607 #if defined(CONFIG_PREEMPT_NONE) 3608 "server", 3609 #elif defined(CONFIG_PREEMPT_VOLUNTARY) 3610 "desktop", 3611 #elif defined(CONFIG_PREEMPT) 3612 "preempt", 3613 #else 3614 "unknown", 3615 #endif 3616 /* These are reserved for later use */ 3617 0, 0, 0, 0); 3618 #ifdef CONFIG_SMP 3619 seq_printf(m, " #P:%d)\n", num_online_cpus()); 3620 #else 3621 seq_puts(m, ")\n"); 3622 #endif 3623 seq_puts(m, "# -----------------\n"); 3624 seq_printf(m, "# | task: %.16s-%d " 3625 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", 3626 data->comm, data->pid, 3627 from_kuid_munged(seq_user_ns(m), data->uid), data->nice, 3628 data->policy, data->rt_priority); 3629 seq_puts(m, "# -----------------\n"); 3630 3631 if (data->critical_start) { 3632 seq_puts(m, "# => started at: "); 3633 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); 3634 trace_print_seq(m, &iter->seq); 3635 seq_puts(m, "\n# => ended at: "); 3636 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); 3637 trace_print_seq(m, &iter->seq); 3638 seq_puts(m, "\n#\n"); 3639 } 3640 3641 seq_puts(m, "#\n"); 3642 } 3643 3644 static void test_cpu_buff_start(struct trace_iterator *iter) 3645 { 3646 struct trace_seq *s = &iter->seq; 3647 struct trace_array *tr = iter->tr; 3648 3649 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) 3650 return; 3651 3652 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 3653 return; 3654 3655 if (cpumask_available(iter->started) && 3656 cpumask_test_cpu(iter->cpu, iter->started)) 3657 return; 3658 3659 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) 3660 return; 3661 3662 if (cpumask_available(iter->started)) 3663 cpumask_set_cpu(iter->cpu, iter->started); 3664 3665 /* Don't print started cpu buffer for the first entry of the trace */ 3666 if 
(iter->idx > 1) 3667 trace_seq_printf(s, "##### CPU %u buffer started ####\n", 3668 iter->cpu); 3669 } 3670 3671 static enum print_line_t print_trace_fmt(struct trace_iterator *iter) 3672 { 3673 struct trace_array *tr = iter->tr; 3674 struct trace_seq *s = &iter->seq; 3675 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); 3676 struct trace_entry *entry; 3677 struct trace_event *event; 3678 3679 entry = iter->ent; 3680 3681 test_cpu_buff_start(iter); 3682 3683 event = ftrace_find_event(entry->type); 3684 3685 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 3686 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 3687 trace_print_lat_context(iter); 3688 else 3689 trace_print_context(iter); 3690 } 3691 3692 if (trace_seq_has_overflowed(s)) 3693 return TRACE_TYPE_PARTIAL_LINE; 3694 3695 if (event) 3696 return event->funcs->trace(iter, sym_flags, event); 3697 3698 trace_seq_printf(s, "Unknown type %d\n", entry->type); 3699 3700 return trace_handle_return(s); 3701 } 3702 3703 static enum print_line_t print_raw_fmt(struct trace_iterator *iter) 3704 { 3705 struct trace_array *tr = iter->tr; 3706 struct trace_seq *s = &iter->seq; 3707 struct trace_entry *entry; 3708 struct trace_event *event; 3709 3710 entry = iter->ent; 3711 3712 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) 3713 trace_seq_printf(s, "%d %d %llu ", 3714 entry->pid, iter->cpu, iter->ts); 3715 3716 if (trace_seq_has_overflowed(s)) 3717 return TRACE_TYPE_PARTIAL_LINE; 3718 3719 event = ftrace_find_event(entry->type); 3720 if (event) 3721 return event->funcs->raw(iter, 0, event); 3722 3723 trace_seq_printf(s, "%d ?\n", entry->type); 3724 3725 return trace_handle_return(s); 3726 } 3727 3728 static enum print_line_t print_hex_fmt(struct trace_iterator *iter) 3729 { 3730 struct trace_array *tr = iter->tr; 3731 struct trace_seq *s = &iter->seq; 3732 unsigned char newline = '\n'; 3733 struct trace_entry *entry; 3734 struct trace_event *event; 3735 3736 entry = iter->ent; 3737 3738 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 3739 SEQ_PUT_HEX_FIELD(s, entry->pid); 3740 SEQ_PUT_HEX_FIELD(s, iter->cpu); 3741 SEQ_PUT_HEX_FIELD(s, iter->ts); 3742 if (trace_seq_has_overflowed(s)) 3743 return TRACE_TYPE_PARTIAL_LINE; 3744 } 3745 3746 event = ftrace_find_event(entry->type); 3747 if (event) { 3748 enum print_line_t ret = event->funcs->hex(iter, 0, event); 3749 if (ret != TRACE_TYPE_HANDLED) 3750 return ret; 3751 } 3752 3753 SEQ_PUT_FIELD(s, newline); 3754 3755 return trace_handle_return(s); 3756 } 3757 3758 static enum print_line_t print_bin_fmt(struct trace_iterator *iter) 3759 { 3760 struct trace_array *tr = iter->tr; 3761 struct trace_seq *s = &iter->seq; 3762 struct trace_entry *entry; 3763 struct trace_event *event; 3764 3765 entry = iter->ent; 3766 3767 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 3768 SEQ_PUT_FIELD(s, entry->pid); 3769 SEQ_PUT_FIELD(s, iter->cpu); 3770 SEQ_PUT_FIELD(s, iter->ts); 3771 if (trace_seq_has_overflowed(s)) 3772 return TRACE_TYPE_PARTIAL_LINE; 3773 } 3774 3775 event = ftrace_find_event(entry->type); 3776 return event ? 
event->funcs->binary(iter, 0, event) : 3777 TRACE_TYPE_HANDLED; 3778 } 3779 3780 int trace_empty(struct trace_iterator *iter) 3781 { 3782 struct ring_buffer_iter *buf_iter; 3783 int cpu; 3784 3785 /* If we are looking at one CPU buffer, only check that one */ 3786 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 3787 cpu = iter->cpu_file; 3788 buf_iter = trace_buffer_iter(iter, cpu); 3789 if (buf_iter) { 3790 if (!ring_buffer_iter_empty(buf_iter)) 3791 return 0; 3792 } else { 3793 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) 3794 return 0; 3795 } 3796 return 1; 3797 } 3798 3799 for_each_tracing_cpu(cpu) { 3800 buf_iter = trace_buffer_iter(iter, cpu); 3801 if (buf_iter) { 3802 if (!ring_buffer_iter_empty(buf_iter)) 3803 return 0; 3804 } else { 3805 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) 3806 return 0; 3807 } 3808 } 3809 3810 return 1; 3811 } 3812 3813 /* Called with trace_event_read_lock() held. */ 3814 enum print_line_t print_trace_line(struct trace_iterator *iter) 3815 { 3816 struct trace_array *tr = iter->tr; 3817 unsigned long trace_flags = tr->trace_flags; 3818 enum print_line_t ret; 3819 3820 if (iter->lost_events) { 3821 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", 3822 iter->cpu, iter->lost_events); 3823 if (trace_seq_has_overflowed(&iter->seq)) 3824 return TRACE_TYPE_PARTIAL_LINE; 3825 } 3826 3827 if (iter->trace && iter->trace->print_line) { 3828 ret = iter->trace->print_line(iter); 3829 if (ret != TRACE_TYPE_UNHANDLED) 3830 return ret; 3831 } 3832 3833 if (iter->ent->type == TRACE_BPUTS && 3834 trace_flags & TRACE_ITER_PRINTK && 3835 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 3836 return trace_print_bputs_msg_only(iter); 3837 3838 if (iter->ent->type == TRACE_BPRINT && 3839 trace_flags & TRACE_ITER_PRINTK && 3840 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 3841 return trace_print_bprintk_msg_only(iter); 3842 3843 if (iter->ent->type == TRACE_PRINT && 3844 trace_flags & TRACE_ITER_PRINTK && 3845 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 3846 return trace_print_printk_msg_only(iter); 3847 3848 if (trace_flags & TRACE_ITER_BIN) 3849 return print_bin_fmt(iter); 3850 3851 if (trace_flags & TRACE_ITER_HEX) 3852 return print_hex_fmt(iter); 3853 3854 if (trace_flags & TRACE_ITER_RAW) 3855 return print_raw_fmt(iter); 3856 3857 return print_trace_fmt(iter); 3858 } 3859 3860 void trace_latency_header(struct seq_file *m) 3861 { 3862 struct trace_iterator *iter = m->private; 3863 struct trace_array *tr = iter->tr; 3864 3865 /* print nothing if the buffers are empty */ 3866 if (trace_empty(iter)) 3867 return; 3868 3869 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 3870 print_trace_header(m, iter); 3871 3872 if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) 3873 print_lat_help_header(m); 3874 } 3875 3876 void trace_default_header(struct seq_file *m) 3877 { 3878 struct trace_iterator *iter = m->private; 3879 struct trace_array *tr = iter->tr; 3880 unsigned long trace_flags = tr->trace_flags; 3881 3882 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) 3883 return; 3884 3885 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 3886 /* print nothing if the buffers are empty */ 3887 if (trace_empty(iter)) 3888 return; 3889 print_trace_header(m, iter); 3890 if (!(trace_flags & TRACE_ITER_VERBOSE)) 3891 print_lat_help_header(m); 3892 } else { 3893 if (!(trace_flags & TRACE_ITER_VERBOSE)) { 3894 if (trace_flags & TRACE_ITER_IRQ_INFO) 3895 print_func_help_header_irq(iter->trace_buffer, 3896 m, trace_flags); 3897 else 3898 print_func_help_header(iter->trace_buffer, m, 3899 
trace_flags); 3900 } 3901 } 3902 } 3903 3904 static void test_ftrace_alive(struct seq_file *m) 3905 { 3906 if (!ftrace_is_dead()) 3907 return; 3908 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n" 3909 "# MAY BE MISSING FUNCTION EVENTS\n"); 3910 } 3911 3912 #ifdef CONFIG_TRACER_MAX_TRACE 3913 static void show_snapshot_main_help(struct seq_file *m) 3914 { 3915 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n" 3916 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" 3917 "# Takes a snapshot of the main buffer.\n" 3918 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n" 3919 "# (Doesn't have to be '2' works with any number that\n" 3920 "# is not a '0' or '1')\n"); 3921 } 3922 3923 static void show_snapshot_percpu_help(struct seq_file *m) 3924 { 3925 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); 3926 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 3927 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" 3928 "# Takes a snapshot of the main buffer for this cpu.\n"); 3929 #else 3930 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n" 3931 "# Must use main snapshot file to allocate.\n"); 3932 #endif 3933 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n" 3934 "# (Doesn't have to be '2' works with any number that\n" 3935 "# is not a '0' or '1')\n"); 3936 } 3937 3938 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) 3939 { 3940 if (iter->tr->allocated_snapshot) 3941 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n"); 3942 else 3943 seq_puts(m, "#\n# * Snapshot is freed *\n#\n"); 3944 3945 seq_puts(m, "# Snapshot commands:\n"); 3946 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 3947 show_snapshot_main_help(m); 3948 else 3949 show_snapshot_percpu_help(m); 3950 } 3951 #else 3952 /* Should never be called */ 3953 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } 3954 #endif 3955 3956 static int s_show(struct seq_file *m, void *v) 3957 { 3958 struct trace_iterator *iter = v; 3959 int ret; 3960 3961 if (iter->ent == NULL) { 3962 if (iter->tr) { 3963 seq_printf(m, "# tracer: %s\n", iter->trace->name); 3964 seq_puts(m, "#\n"); 3965 test_ftrace_alive(m); 3966 } 3967 if (iter->snapshot && trace_empty(iter)) 3968 print_snapshot_help(m, iter); 3969 else if (iter->trace && iter->trace->print_header) 3970 iter->trace->print_header(m); 3971 else 3972 trace_default_header(m); 3973 3974 } else if (iter->leftover) { 3975 /* 3976 * If we filled the seq_file buffer earlier, we 3977 * want to just show it now. 3978 */ 3979 ret = trace_print_seq(m, &iter->seq); 3980 3981 /* ret should this time be zero, but you never know */ 3982 iter->leftover = ret; 3983 3984 } else { 3985 print_trace_line(iter); 3986 ret = trace_print_seq(m, &iter->seq); 3987 /* 3988 * If we overflow the seq_file buffer, then it will 3989 * ask us for this data again at start up. 3990 * Use that instead. 3991 * ret is 0 if seq_file write succeeded. 3992 * -1 otherwise. 3993 */ 3994 iter->leftover = ret; 3995 } 3996 3997 return 0; 3998 } 3999 4000 /* 4001 * Should be used after trace_array_get(), trace_types_lock 4002 * ensures that i_cdev was already initialized. 
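 *
 * The cpu number is stored biased by one in i_cdev (see
 * trace_create_cpu_file()), so, for example, a NULL i_cdev selects
 * RING_BUFFER_ALL_CPUS while an i_cdev of (void *)3 means cpu 2.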
4003 */ 4004 static inline int tracing_get_cpu(struct inode *inode) 4005 { 4006 if (inode->i_cdev) /* See trace_create_cpu_file() */ 4007 return (long)inode->i_cdev - 1; 4008 return RING_BUFFER_ALL_CPUS; 4009 } 4010 4011 static const struct seq_operations tracer_seq_ops = { 4012 .start = s_start, 4013 .next = s_next, 4014 .stop = s_stop, 4015 .show = s_show, 4016 }; 4017 4018 static struct trace_iterator * 4019 __tracing_open(struct inode *inode, struct file *file, bool snapshot) 4020 { 4021 struct trace_array *tr = inode->i_private; 4022 struct trace_iterator *iter; 4023 int cpu; 4024 4025 if (tracing_disabled) 4026 return ERR_PTR(-ENODEV); 4027 4028 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); 4029 if (!iter) 4030 return ERR_PTR(-ENOMEM); 4031 4032 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter), 4033 GFP_KERNEL); 4034 if (!iter->buffer_iter) 4035 goto release; 4036 4037 /* 4038 * We make a copy of the current tracer to avoid concurrent 4039 * changes on it while we are reading. 4040 */ 4041 mutex_lock(&trace_types_lock); 4042 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); 4043 if (!iter->trace) 4044 goto fail; 4045 4046 *iter->trace = *tr->current_trace; 4047 4048 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) 4049 goto fail; 4050 4051 iter->tr = tr; 4052 4053 #ifdef CONFIG_TRACER_MAX_TRACE 4054 /* Currently only the top directory has a snapshot */ 4055 if (tr->current_trace->print_max || snapshot) 4056 iter->trace_buffer = &tr->max_buffer; 4057 else 4058 #endif 4059 iter->trace_buffer = &tr->trace_buffer; 4060 iter->snapshot = snapshot; 4061 iter->pos = -1; 4062 iter->cpu_file = tracing_get_cpu(inode); 4063 mutex_init(&iter->mutex); 4064 4065 /* Notify the tracer early; before we stop tracing. */ 4066 if (iter->trace && iter->trace->open) 4067 iter->trace->open(iter); 4068 4069 /* Annotate start of buffers if we had overruns */ 4070 if (ring_buffer_overruns(iter->trace_buffer->buffer)) 4071 iter->iter_flags |= TRACE_FILE_ANNOTATE; 4072 4073 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 4074 if (trace_clocks[tr->clock_id].in_ns) 4075 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 4076 4077 /* stop the trace while dumping if we are not opening "snapshot" */ 4078 if (!iter->snapshot) 4079 tracing_stop_tr(tr); 4080 4081 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { 4082 for_each_tracing_cpu(cpu) { 4083 iter->buffer_iter[cpu] = 4084 ring_buffer_read_prepare(iter->trace_buffer->buffer, 4085 cpu, GFP_KERNEL); 4086 } 4087 ring_buffer_read_prepare_sync(); 4088 for_each_tracing_cpu(cpu) { 4089 ring_buffer_read_start(iter->buffer_iter[cpu]); 4090 tracing_iter_reset(iter, cpu); 4091 } 4092 } else { 4093 cpu = iter->cpu_file; 4094 iter->buffer_iter[cpu] = 4095 ring_buffer_read_prepare(iter->trace_buffer->buffer, 4096 cpu, GFP_KERNEL); 4097 ring_buffer_read_prepare_sync(); 4098 ring_buffer_read_start(iter->buffer_iter[cpu]); 4099 tracing_iter_reset(iter, cpu); 4100 } 4101 4102 mutex_unlock(&trace_types_lock); 4103 4104 return iter; 4105 4106 fail: 4107 mutex_unlock(&trace_types_lock); 4108 kfree(iter->trace); 4109 kfree(iter->buffer_iter); 4110 release: 4111 seq_release_private(inode, file); 4112 return ERR_PTR(-ENOMEM); 4113 } 4114 4115 int tracing_open_generic(struct inode *inode, struct file *filp) 4116 { 4117 if (tracing_disabled) 4118 return -ENODEV; 4119 4120 filp->private_data = inode->i_private; 4121 return 0; 4122 } 4123 4124 bool tracing_is_disabled(void) 4125 { 4126 return (tracing_disabled) ? 
true: false; 4127 } 4128 4129 /* 4130 * Open and update trace_array ref count. 4131 * Must have the current trace_array passed to it. 4132 */ 4133 static int tracing_open_generic_tr(struct inode *inode, struct file *filp) 4134 { 4135 struct trace_array *tr = inode->i_private; 4136 4137 if (tracing_disabled) 4138 return -ENODEV; 4139 4140 if (trace_array_get(tr) < 0) 4141 return -ENODEV; 4142 4143 filp->private_data = inode->i_private; 4144 4145 return 0; 4146 } 4147 4148 static int tracing_release(struct inode *inode, struct file *file) 4149 { 4150 struct trace_array *tr = inode->i_private; 4151 struct seq_file *m = file->private_data; 4152 struct trace_iterator *iter; 4153 int cpu; 4154 4155 if (!(file->f_mode & FMODE_READ)) { 4156 trace_array_put(tr); 4157 return 0; 4158 } 4159 4160 /* Writes do not use seq_file */ 4161 iter = m->private; 4162 mutex_lock(&trace_types_lock); 4163 4164 for_each_tracing_cpu(cpu) { 4165 if (iter->buffer_iter[cpu]) 4166 ring_buffer_read_finish(iter->buffer_iter[cpu]); 4167 } 4168 4169 if (iter->trace && iter->trace->close) 4170 iter->trace->close(iter); 4171 4172 if (!iter->snapshot) 4173 /* reenable tracing if it was previously enabled */ 4174 tracing_start_tr(tr); 4175 4176 __trace_array_put(tr); 4177 4178 mutex_unlock(&trace_types_lock); 4179 4180 mutex_destroy(&iter->mutex); 4181 free_cpumask_var(iter->started); 4182 kfree(iter->trace); 4183 kfree(iter->buffer_iter); 4184 seq_release_private(inode, file); 4185 4186 return 0; 4187 } 4188 4189 static int tracing_release_generic_tr(struct inode *inode, struct file *file) 4190 { 4191 struct trace_array *tr = inode->i_private; 4192 4193 trace_array_put(tr); 4194 return 0; 4195 } 4196 4197 static int tracing_single_release_tr(struct inode *inode, struct file *file) 4198 { 4199 struct trace_array *tr = inode->i_private; 4200 4201 trace_array_put(tr); 4202 4203 return single_release(inode, file); 4204 } 4205 4206 static int tracing_open(struct inode *inode, struct file *file) 4207 { 4208 struct trace_array *tr = inode->i_private; 4209 struct trace_iterator *iter; 4210 int ret = 0; 4211 4212 if (trace_array_get(tr) < 0) 4213 return -ENODEV; 4214 4215 /* If this file was open for write, then erase contents */ 4216 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 4217 int cpu = tracing_get_cpu(inode); 4218 struct trace_buffer *trace_buf = &tr->trace_buffer; 4219 4220 #ifdef CONFIG_TRACER_MAX_TRACE 4221 if (tr->current_trace->print_max) 4222 trace_buf = &tr->max_buffer; 4223 #endif 4224 4225 if (cpu == RING_BUFFER_ALL_CPUS) 4226 tracing_reset_online_cpus(trace_buf); 4227 else 4228 tracing_reset(trace_buf, cpu); 4229 } 4230 4231 if (file->f_mode & FMODE_READ) { 4232 iter = __tracing_open(inode, file, false); 4233 if (IS_ERR(iter)) 4234 ret = PTR_ERR(iter); 4235 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 4236 iter->iter_flags |= TRACE_FILE_LAT_FMT; 4237 } 4238 4239 if (ret < 0) 4240 trace_array_put(tr); 4241 4242 return ret; 4243 } 4244 4245 /* 4246 * Some tracers are not suitable for instance buffers. 4247 * A tracer is always available for the global array (toplevel) 4248 * or if it explicitly states that it is. 
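 * A tracer opts in via the ->allow_instances flag checked below; a rough
 * sketch (hypothetical tracer, field values assumed):
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name			= "my_tracer",
 *		.allow_instances	= true,
 *	};
 *
 * Everything else is offered only on the top level trace_array.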
4249 */ 4250 static bool 4251 trace_ok_for_array(struct tracer *t, struct trace_array *tr) 4252 { 4253 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; 4254 } 4255 4256 /* Find the next tracer that this trace array may use */ 4257 static struct tracer * 4258 get_tracer_for_array(struct trace_array *tr, struct tracer *t) 4259 { 4260 while (t && !trace_ok_for_array(t, tr)) 4261 t = t->next; 4262 4263 return t; 4264 } 4265 4266 static void * 4267 t_next(struct seq_file *m, void *v, loff_t *pos) 4268 { 4269 struct trace_array *tr = m->private; 4270 struct tracer *t = v; 4271 4272 (*pos)++; 4273 4274 if (t) 4275 t = get_tracer_for_array(tr, t->next); 4276 4277 return t; 4278 } 4279 4280 static void *t_start(struct seq_file *m, loff_t *pos) 4281 { 4282 struct trace_array *tr = m->private; 4283 struct tracer *t; 4284 loff_t l = 0; 4285 4286 mutex_lock(&trace_types_lock); 4287 4288 t = get_tracer_for_array(tr, trace_types); 4289 for (; t && l < *pos; t = t_next(m, t, &l)) 4290 ; 4291 4292 return t; 4293 } 4294 4295 static void t_stop(struct seq_file *m, void *p) 4296 { 4297 mutex_unlock(&trace_types_lock); 4298 } 4299 4300 static int t_show(struct seq_file *m, void *v) 4301 { 4302 struct tracer *t = v; 4303 4304 if (!t) 4305 return 0; 4306 4307 seq_puts(m, t->name); 4308 if (t->next) 4309 seq_putc(m, ' '); 4310 else 4311 seq_putc(m, '\n'); 4312 4313 return 0; 4314 } 4315 4316 static const struct seq_operations show_traces_seq_ops = { 4317 .start = t_start, 4318 .next = t_next, 4319 .stop = t_stop, 4320 .show = t_show, 4321 }; 4322 4323 static int show_traces_open(struct inode *inode, struct file *file) 4324 { 4325 struct trace_array *tr = inode->i_private; 4326 struct seq_file *m; 4327 int ret; 4328 4329 if (tracing_disabled) 4330 return -ENODEV; 4331 4332 ret = seq_open(file, &show_traces_seq_ops); 4333 if (ret) 4334 return ret; 4335 4336 m = file->private_data; 4337 m->private = tr; 4338 4339 return 0; 4340 } 4341 4342 static ssize_t 4343 tracing_write_stub(struct file *filp, const char __user *ubuf, 4344 size_t count, loff_t *ppos) 4345 { 4346 return count; 4347 } 4348 4349 loff_t tracing_lseek(struct file *file, loff_t offset, int whence) 4350 { 4351 int ret; 4352 4353 if (file->f_mode & FMODE_READ) 4354 ret = seq_lseek(file, offset, whence); 4355 else 4356 file->f_pos = ret = 0; 4357 4358 return ret; 4359 } 4360 4361 static const struct file_operations tracing_fops = { 4362 .open = tracing_open, 4363 .read = seq_read, 4364 .write = tracing_write_stub, 4365 .llseek = tracing_lseek, 4366 .release = tracing_release, 4367 }; 4368 4369 static const struct file_operations show_traces_fops = { 4370 .open = show_traces_open, 4371 .read = seq_read, 4372 .release = seq_release, 4373 .llseek = seq_lseek, 4374 }; 4375 4376 static ssize_t 4377 tracing_cpumask_read(struct file *filp, char __user *ubuf, 4378 size_t count, loff_t *ppos) 4379 { 4380 struct trace_array *tr = file_inode(filp)->i_private; 4381 char *mask_str; 4382 int len; 4383 4384 len = snprintf(NULL, 0, "%*pb\n", 4385 cpumask_pr_args(tr->tracing_cpumask)) + 1; 4386 mask_str = kmalloc(len, GFP_KERNEL); 4387 if (!mask_str) 4388 return -ENOMEM; 4389 4390 len = snprintf(mask_str, len, "%*pb\n", 4391 cpumask_pr_args(tr->tracing_cpumask)); 4392 if (len >= count) { 4393 count = -EINVAL; 4394 goto out_err; 4395 } 4396 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len); 4397 4398 out_err: 4399 kfree(mask_str); 4400 4401 return count; 4402 } 4403 4404 static ssize_t 4405 tracing_cpumask_write(struct file *filp, const 
char __user *ubuf, 4406 size_t count, loff_t *ppos) 4407 { 4408 struct trace_array *tr = file_inode(filp)->i_private; 4409 cpumask_var_t tracing_cpumask_new; 4410 int err, cpu; 4411 4412 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) 4413 return -ENOMEM; 4414 4415 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 4416 if (err) 4417 goto err_unlock; 4418 4419 local_irq_disable(); 4420 arch_spin_lock(&tr->max_lock); 4421 for_each_tracing_cpu(cpu) { 4422 /* 4423 * Increase/decrease the disabled counter if we are 4424 * about to flip a bit in the cpumask: 4425 */ 4426 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && 4427 !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 4428 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); 4429 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); 4430 } 4431 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && 4432 cpumask_test_cpu(cpu, tracing_cpumask_new)) { 4433 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); 4434 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); 4435 } 4436 } 4437 arch_spin_unlock(&tr->max_lock); 4438 local_irq_enable(); 4439 4440 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); 4441 free_cpumask_var(tracing_cpumask_new); 4442 4443 return count; 4444 4445 err_unlock: 4446 free_cpumask_var(tracing_cpumask_new); 4447 4448 return err; 4449 } 4450 4451 static const struct file_operations tracing_cpumask_fops = { 4452 .open = tracing_open_generic_tr, 4453 .read = tracing_cpumask_read, 4454 .write = tracing_cpumask_write, 4455 .release = tracing_release_generic_tr, 4456 .llseek = generic_file_llseek, 4457 }; 4458 4459 static int tracing_trace_options_show(struct seq_file *m, void *v) 4460 { 4461 struct tracer_opt *trace_opts; 4462 struct trace_array *tr = m->private; 4463 u32 tracer_flags; 4464 int i; 4465 4466 mutex_lock(&trace_types_lock); 4467 tracer_flags = tr->current_trace->flags->val; 4468 trace_opts = tr->current_trace->flags->opts; 4469 4470 for (i = 0; trace_options[i]; i++) { 4471 if (tr->trace_flags & (1 << i)) 4472 seq_printf(m, "%s\n", trace_options[i]); 4473 else 4474 seq_printf(m, "no%s\n", trace_options[i]); 4475 } 4476 4477 for (i = 0; trace_opts[i].name; i++) { 4478 if (tracer_flags & trace_opts[i].bit) 4479 seq_printf(m, "%s\n", trace_opts[i].name); 4480 else 4481 seq_printf(m, "no%s\n", trace_opts[i].name); 4482 } 4483 mutex_unlock(&trace_types_lock); 4484 4485 return 0; 4486 } 4487 4488 static int __set_tracer_option(struct trace_array *tr, 4489 struct tracer_flags *tracer_flags, 4490 struct tracer_opt *opts, int neg) 4491 { 4492 struct tracer *trace = tracer_flags->trace; 4493 int ret; 4494 4495 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); 4496 if (ret) 4497 return ret; 4498 4499 if (neg) 4500 tracer_flags->val &= ~opts->bit; 4501 else 4502 tracer_flags->val |= opts->bit; 4503 return 0; 4504 } 4505 4506 /* Try to assign a tracer specific option */ 4507 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) 4508 { 4509 struct tracer *trace = tr->current_trace; 4510 struct tracer_flags *tracer_flags = trace->flags; 4511 struct tracer_opt *opts = NULL; 4512 int i; 4513 4514 for (i = 0; tracer_flags->opts[i].name; i++) { 4515 opts = &tracer_flags->opts[i]; 4516 4517 if (strcmp(cmp, opts->name) == 0) 4518 return __set_tracer_option(tr, trace->flags, opts, neg); 4519 } 4520 4521 return -EINVAL; 4522 } 4523 4524 /* Some tracers require overwrite to stay enabled */ 4525 int trace_keep_overwrite(struct tracer *tracer, u32 
mask, int set) 4526 { 4527 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) 4528 return -1; 4529 4530 return 0; 4531 } 4532 4533 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) 4534 { 4535 /* do nothing if flag is already set */ 4536 if (!!(tr->trace_flags & mask) == !!enabled) 4537 return 0; 4538 4539 /* Give the tracer a chance to approve the change */ 4540 if (tr->current_trace->flag_changed) 4541 if (tr->current_trace->flag_changed(tr, mask, !!enabled)) 4542 return -EINVAL; 4543 4544 if (enabled) 4545 tr->trace_flags |= mask; 4546 else 4547 tr->trace_flags &= ~mask; 4548 4549 if (mask == TRACE_ITER_RECORD_CMD) 4550 trace_event_enable_cmd_record(enabled); 4551 4552 if (mask == TRACE_ITER_RECORD_TGID) { 4553 if (!tgid_map) 4554 tgid_map = kcalloc(PID_MAX_DEFAULT + 1, 4555 sizeof(*tgid_map), 4556 GFP_KERNEL); 4557 if (!tgid_map) { 4558 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; 4559 return -ENOMEM; 4560 } 4561 4562 trace_event_enable_tgid_record(enabled); 4563 } 4564 4565 if (mask == TRACE_ITER_EVENT_FORK) 4566 trace_event_follow_fork(tr, enabled); 4567 4568 if (mask == TRACE_ITER_FUNC_FORK) 4569 ftrace_pid_follow_fork(tr, enabled); 4570 4571 if (mask == TRACE_ITER_OVERWRITE) { 4572 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled); 4573 #ifdef CONFIG_TRACER_MAX_TRACE 4574 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); 4575 #endif 4576 } 4577 4578 if (mask == TRACE_ITER_PRINTK) { 4579 trace_printk_start_stop_comm(enabled); 4580 trace_printk_control(enabled); 4581 } 4582 4583 return 0; 4584 } 4585 4586 static int trace_set_options(struct trace_array *tr, char *option) 4587 { 4588 char *cmp; 4589 int neg = 0; 4590 int ret; 4591 size_t orig_len = strlen(option); 4592 int len; 4593 4594 cmp = strstrip(option); 4595 4596 len = str_has_prefix(cmp, "no"); 4597 if (len) 4598 neg = 1; 4599 4600 cmp += len; 4601 4602 mutex_lock(&trace_types_lock); 4603 4604 ret = match_string(trace_options, -1, cmp); 4605 /* If no option could be set, test the specific tracer options */ 4606 if (ret < 0) 4607 ret = set_tracer_option(tr, cmp, neg); 4608 else 4609 ret = set_tracer_flag(tr, 1 << ret, !neg); 4610 4611 mutex_unlock(&trace_types_lock); 4612 4613 /* 4614 * If the first trailing whitespace is replaced with '\0' by strstrip, 4615 * turn it back into a space. 
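 * For example (illustrative input), "echo trace_printk > trace_options"
 * hands us "trace_printk\n"; strstrip() turns that trailing whitespace into
 * '\0', and writing a ' ' back keeps the caller's buffer at its original
 * length, presumably so a buffer that is parsed again later (compare how
 * apply_trace_boot_options() below puts its ',' separators back) is not
 * silently truncated.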
4616 */ 4617 if (orig_len > strlen(option)) 4618 option[strlen(option)] = ' '; 4619 4620 return ret; 4621 } 4622 4623 static void __init apply_trace_boot_options(void) 4624 { 4625 char *buf = trace_boot_options_buf; 4626 char *option; 4627 4628 while (true) { 4629 option = strsep(&buf, ","); 4630 4631 if (!option) 4632 break; 4633 4634 if (*option) 4635 trace_set_options(&global_trace, option); 4636 4637 /* Put back the comma to allow this to be called again */ 4638 if (buf) 4639 *(buf - 1) = ','; 4640 } 4641 } 4642 4643 static ssize_t 4644 tracing_trace_options_write(struct file *filp, const char __user *ubuf, 4645 size_t cnt, loff_t *ppos) 4646 { 4647 struct seq_file *m = filp->private_data; 4648 struct trace_array *tr = m->private; 4649 char buf[64]; 4650 int ret; 4651 4652 if (cnt >= sizeof(buf)) 4653 return -EINVAL; 4654 4655 if (copy_from_user(buf, ubuf, cnt)) 4656 return -EFAULT; 4657 4658 buf[cnt] = 0; 4659 4660 ret = trace_set_options(tr, buf); 4661 if (ret < 0) 4662 return ret; 4663 4664 *ppos += cnt; 4665 4666 return cnt; 4667 } 4668 4669 static int tracing_trace_options_open(struct inode *inode, struct file *file) 4670 { 4671 struct trace_array *tr = inode->i_private; 4672 int ret; 4673 4674 if (tracing_disabled) 4675 return -ENODEV; 4676 4677 if (trace_array_get(tr) < 0) 4678 return -ENODEV; 4679 4680 ret = single_open(file, tracing_trace_options_show, inode->i_private); 4681 if (ret < 0) 4682 trace_array_put(tr); 4683 4684 return ret; 4685 } 4686 4687 static const struct file_operations tracing_iter_fops = { 4688 .open = tracing_trace_options_open, 4689 .read = seq_read, 4690 .llseek = seq_lseek, 4691 .release = tracing_single_release_tr, 4692 .write = tracing_trace_options_write, 4693 }; 4694 4695 static const char readme_msg[] = 4696 "tracing mini-HOWTO:\n\n" 4697 "# echo 0 > tracing_on : quick way to disable tracing\n" 4698 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n" 4699 " Important files:\n" 4700 " trace\t\t\t- The static contents of the buffer\n" 4701 "\t\t\t To clear the buffer write into this file: echo > trace\n" 4702 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n" 4703 " current_tracer\t- function and latency tracers\n" 4704 " available_tracers\t- list of configured tracers for current_tracer\n" 4705 " buffer_size_kb\t- view and modify size of per cpu buffer\n" 4706 " buffer_total_size_kb - view total size of all cpu buffers\n\n" 4707 " trace_clock\t\t-change the clock used to order events\n" 4708 " local: Per cpu clock but may not be synced across CPUs\n" 4709 " global: Synced across CPUs but slows tracing down.\n" 4710 " counter: Not a clock, but just an increment\n" 4711 " uptime: Jiffy counter from time of boot\n" 4712 " perf: Same clock that perf events use\n" 4713 #ifdef CONFIG_X86_64 4714 " x86-tsc: TSC cycle counter\n" 4715 #endif 4716 "\n timestamp_mode\t-view the mode used to timestamp events\n" 4717 " delta: Delta difference against a buffer-wide timestamp\n" 4718 " absolute: Absolute (standalone) timestamp\n" 4719 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n" 4720 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n" 4721 " tracing_cpumask\t- Limit which CPUs to trace\n" 4722 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" 4723 "\t\t\t Remove sub-buffer with rmdir\n" 4724 " trace_options\t\t- Set format or modify how tracing happens\n" 4725 "\t\t\t Disable an option by adding a suffix 'no' to the\n" 4726 "\t\t\t option name\n" 
4727 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" 4728 #ifdef CONFIG_DYNAMIC_FTRACE 4729 "\n available_filter_functions - list of functions that can be filtered on\n" 4730 " set_ftrace_filter\t- echo function name in here to only trace these\n" 4731 "\t\t\t functions\n" 4732 "\t accepts: func_full_name or glob-matching-pattern\n" 4733 "\t modules: Can select a group via module\n" 4734 "\t Format: :mod:<module-name>\n" 4735 "\t example: echo :mod:ext3 > set_ftrace_filter\n" 4736 "\t triggers: a command to perform when function is hit\n" 4737 "\t Format: <function>:<trigger>[:count]\n" 4738 "\t trigger: traceon, traceoff\n" 4739 "\t\t enable_event:<system>:<event>\n" 4740 "\t\t disable_event:<system>:<event>\n" 4741 #ifdef CONFIG_STACKTRACE 4742 "\t\t stacktrace\n" 4743 #endif 4744 #ifdef CONFIG_TRACER_SNAPSHOT 4745 "\t\t snapshot\n" 4746 #endif 4747 "\t\t dump\n" 4748 "\t\t cpudump\n" 4749 "\t example: echo do_fault:traceoff > set_ftrace_filter\n" 4750 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" 4751 "\t The first one will disable tracing every time do_fault is hit\n" 4752 "\t The second will disable tracing at most 3 times when do_trap is hit\n" 4753 "\t The first time do trap is hit and it disables tracing, the\n" 4754 "\t counter will decrement to 2. If tracing is already disabled,\n" 4755 "\t the counter will not decrement. It only decrements when the\n" 4756 "\t trigger did work\n" 4757 "\t To remove trigger without count:\n" 4758 "\t echo '!<function>:<trigger> > set_ftrace_filter\n" 4759 "\t To remove trigger with a count:\n" 4760 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n" 4761 " set_ftrace_notrace\t- echo function name in here to never trace.\n" 4762 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" 4763 "\t modules: Can select a group via module command :mod:\n" 4764 "\t Does not accept triggers\n" 4765 #endif /* CONFIG_DYNAMIC_FTRACE */ 4766 #ifdef CONFIG_FUNCTION_TRACER 4767 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" 4768 "\t\t (function)\n" 4769 #endif 4770 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 4771 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" 4772 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n" 4773 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" 4774 #endif 4775 #ifdef CONFIG_TRACER_SNAPSHOT 4776 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n" 4777 "\t\t\t snapshot buffer. 
Read the contents for more\n" 4778 "\t\t\t information\n" 4779 #endif 4780 #ifdef CONFIG_STACK_TRACER 4781 " stack_trace\t\t- Shows the max stack trace when active\n" 4782 " stack_max_size\t- Shows current max stack size that was traced\n" 4783 "\t\t\t Write into this file to reset the max size (trigger a\n" 4784 "\t\t\t new trace)\n" 4785 #ifdef CONFIG_DYNAMIC_FTRACE 4786 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n" 4787 "\t\t\t traces\n" 4788 #endif 4789 #endif /* CONFIG_STACK_TRACER */ 4790 #ifdef CONFIG_DYNAMIC_EVENTS 4791 " dynamic_events\t\t- Add/remove/show the generic dynamic events\n" 4792 "\t\t\t Write into this file to define/undefine new trace events.\n" 4793 #endif 4794 #ifdef CONFIG_KPROBE_EVENTS 4795 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n" 4796 "\t\t\t Write into this file to define/undefine new trace events.\n" 4797 #endif 4798 #ifdef CONFIG_UPROBE_EVENTS 4799 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n" 4800 "\t\t\t Write into this file to define/undefine new trace events.\n" 4801 #endif 4802 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) 4803 "\t accepts: event-definitions (one definition per line)\n" 4804 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n" 4805 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n" 4806 #ifdef CONFIG_HIST_TRIGGERS 4807 "\t s:[synthetic/]<event> <field> [<field>]\n" 4808 #endif 4809 "\t -:[<group>/]<event>\n" 4810 #ifdef CONFIG_KPROBE_EVENTS 4811 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" 4812 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n" 4813 #endif 4814 #ifdef CONFIG_UPROBE_EVENTS 4815 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n" 4816 #endif 4817 "\t args: <name>=fetcharg[:type]\n" 4818 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n" 4819 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API 4820 "\t $stack<index>, $stack, $retval, $comm, $arg<N>\n" 4821 #else 4822 "\t $stack<index>, $stack, $retval, $comm\n" 4823 #endif 4824 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n" 4825 "\t b<bit-width>@<bit-offset>/<container-size>,\n" 4826 "\t <type>\\[<array-size>\\]\n" 4827 #ifdef CONFIG_HIST_TRIGGERS 4828 "\t field: <stype> <name>;\n" 4829 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n" 4830 "\t [unsigned] char/int/long\n" 4831 #endif 4832 #endif 4833 " events/\t\t- Directory containing all trace event subsystems:\n" 4834 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" 4835 " events/<system>/\t- Directory containing all trace events for <system>:\n" 4836 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n" 4837 "\t\t\t events\n" 4838 " filter\t\t- If set, only events passing filter are traced\n" 4839 " events/<system>/<event>/\t- Directory containing control files for\n" 4840 "\t\t\t <event>:\n" 4841 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n" 4842 " filter\t\t- If set, only events passing filter are traced\n" 4843 " trigger\t\t- If set, a command to perform when event is hit\n" 4844 "\t Format: <trigger>[:count][if <filter>]\n" 4845 "\t trigger: traceon, traceoff\n" 4846 "\t enable_event:<system>:<event>\n" 4847 "\t disable_event:<system>:<event>\n" 4848 #ifdef CONFIG_HIST_TRIGGERS 4849 "\t enable_hist:<system>:<event>\n" 4850 "\t disable_hist:<system>:<event>\n" 4851 #endif 4852 #ifdef CONFIG_STACKTRACE 4853 "\t\t stacktrace\n" 4854 #endif 4855 #ifdef CONFIG_TRACER_SNAPSHOT 4856 "\t\t snapshot\n" 4857 #endif 
4858 #ifdef CONFIG_HIST_TRIGGERS 4859 "\t\t hist (see below)\n" 4860 #endif 4861 "\t example: echo traceoff > events/block/block_unplug/trigger\n" 4862 "\t echo traceoff:3 > events/block/block_unplug/trigger\n" 4863 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n" 4864 "\t events/block/block_unplug/trigger\n" 4865 "\t The first disables tracing every time block_unplug is hit.\n" 4866 "\t The second disables tracing the first 3 times block_unplug is hit.\n" 4867 "\t The third enables the kmalloc event the first 3 times block_unplug\n" 4868 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n" 4869 "\t Like function triggers, the counter is only decremented if it\n" 4870 "\t enabled or disabled tracing.\n" 4871 "\t To remove a trigger without a count:\n" 4872 "\t echo '!<trigger> > <system>/<event>/trigger\n" 4873 "\t To remove a trigger with a count:\n" 4874 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n" 4875 "\t Filters can be ignored when removing a trigger.\n" 4876 #ifdef CONFIG_HIST_TRIGGERS 4877 " hist trigger\t- If set, event hits are aggregated into a hash table\n" 4878 "\t Format: hist:keys=<field1[,field2,...]>\n" 4879 "\t [:values=<field1[,field2,...]>]\n" 4880 "\t [:sort=<field1[,field2,...]>]\n" 4881 "\t [:size=#entries]\n" 4882 "\t [:pause][:continue][:clear]\n" 4883 "\t [:name=histname1]\n" 4884 "\t [:<handler>.<action>]\n" 4885 "\t [if <filter>]\n\n" 4886 "\t When a matching event is hit, an entry is added to a hash\n" 4887 "\t table using the key(s) and value(s) named, and the value of a\n" 4888 "\t sum called 'hitcount' is incremented. Keys and values\n" 4889 "\t correspond to fields in the event's format description. Keys\n" 4890 "\t can be any field, or the special string 'stacktrace'.\n" 4891 "\t Compound keys consisting of up to two fields can be specified\n" 4892 "\t by the 'keys' keyword. Values must correspond to numeric\n" 4893 "\t fields. Sort keys consisting of up to two fields can be\n" 4894 "\t specified using the 'sort' keyword. The sort direction can\n" 4895 "\t be modified by appending '.descending' or '.ascending' to a\n" 4896 "\t sort field. The 'size' parameter can be used to specify more\n" 4897 "\t or fewer than the default 2048 entries for the hashtable size.\n" 4898 "\t If a hist trigger is given a name using the 'name' parameter,\n" 4899 "\t its histogram data will be shared with other triggers of the\n" 4900 "\t same name, and trigger hits will update this common data.\n\n" 4901 "\t Reading the 'hist' file for the event will dump the hash\n" 4902 "\t table in its entirety to stdout. If there are multiple hist\n" 4903 "\t triggers attached to an event, there will be a table for each\n" 4904 "\t trigger in the output. The table displayed for a named\n" 4905 "\t trigger will be the same as any other instance having the\n" 4906 "\t same name. 
The default format used to display a given field\n" 4907 "\t can be modified by appending any of the following modifiers\n" 4908 "\t to the field name, as applicable:\n\n" 4909 "\t .hex display a number as a hex value\n" 4910 "\t .sym display an address as a symbol\n" 4911 "\t .sym-offset display an address as a symbol and offset\n" 4912 "\t .execname display a common_pid as a program name\n" 4913 "\t .syscall display a syscall id as a syscall name\n" 4914 "\t .log2 display log2 value rather than raw number\n" 4915 "\t .usecs display a common_timestamp in microseconds\n\n" 4916 "\t The 'pause' parameter can be used to pause an existing hist\n" 4917 "\t trigger or to start a hist trigger but not log any events\n" 4918 "\t until told to do so. 'continue' can be used to start or\n" 4919 "\t restart a paused hist trigger.\n\n" 4920 "\t The 'clear' parameter will clear the contents of a running\n" 4921 "\t hist trigger and leave its current paused/active state\n" 4922 "\t unchanged.\n\n" 4923 "\t The enable_hist and disable_hist triggers can be used to\n" 4924 "\t have one event conditionally start and stop another event's\n" 4925 "\t already-attached hist trigger. The syntax is analogous to\n" 4926 "\t the enable_event and disable_event triggers.\n\n" 4927 "\t Hist trigger handlers and actions are executed whenever\n" 4928 "\t a histogram entry is added or updated. They take the form:\n\n" 4929 "\t <handler>.<action>\n\n" 4930 "\t The available handlers are:\n\n" 4931 "\t onmatch(matching.event) - invoke on addition or update\n" 4932 "\t onmax(var) - invoke if var exceeds current max\n" 4933 "\t onchange(var) - invoke action if var changes\n\n" 4934 "\t The available actions are:\n\n" 4935 "\t trace(<synthetic_event>,param list) - generate synthetic event\n" 4936 "\t save(field,...)
- save current event fields\n" 4937 #ifdef CONFIG_TRACER_SNAPSHOT 4938 "\t snapshot() - snapshot the trace buffer\n" 4939 #endif 4940 #endif 4941 ; 4942 4943 static ssize_t 4944 tracing_readme_read(struct file *filp, char __user *ubuf, 4945 size_t cnt, loff_t *ppos) 4946 { 4947 return simple_read_from_buffer(ubuf, cnt, ppos, 4948 readme_msg, strlen(readme_msg)); 4949 } 4950 4951 static const struct file_operations tracing_readme_fops = { 4952 .open = tracing_open_generic, 4953 .read = tracing_readme_read, 4954 .llseek = generic_file_llseek, 4955 }; 4956 4957 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos) 4958 { 4959 int *ptr = v; 4960 4961 if (*pos || m->count) 4962 ptr++; 4963 4964 (*pos)++; 4965 4966 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) { 4967 if (trace_find_tgid(*ptr)) 4968 return ptr; 4969 } 4970 4971 return NULL; 4972 } 4973 4974 static void *saved_tgids_start(struct seq_file *m, loff_t *pos) 4975 { 4976 void *v; 4977 loff_t l = 0; 4978 4979 if (!tgid_map) 4980 return NULL; 4981 4982 v = &tgid_map[0]; 4983 while (l <= *pos) { 4984 v = saved_tgids_next(m, v, &l); 4985 if (!v) 4986 return NULL; 4987 } 4988 4989 return v; 4990 } 4991 4992 static void saved_tgids_stop(struct seq_file *m, void *v) 4993 { 4994 } 4995 4996 static int saved_tgids_show(struct seq_file *m, void *v) 4997 { 4998 int pid = (int *)v - tgid_map; 4999 5000 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid)); 5001 return 0; 5002 } 5003 5004 static const struct seq_operations tracing_saved_tgids_seq_ops = { 5005 .start = saved_tgids_start, 5006 .stop = saved_tgids_stop, 5007 .next = saved_tgids_next, 5008 .show = saved_tgids_show, 5009 }; 5010 5011 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp) 5012 { 5013 if (tracing_disabled) 5014 return -ENODEV; 5015 5016 return seq_open(filp, &tracing_saved_tgids_seq_ops); 5017 } 5018 5019 5020 static const struct file_operations tracing_saved_tgids_fops = { 5021 .open = tracing_saved_tgids_open, 5022 .read = seq_read, 5023 .llseek = seq_lseek, 5024 .release = seq_release, 5025 }; 5026 5027 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos) 5028 { 5029 unsigned int *ptr = v; 5030 5031 if (*pos || m->count) 5032 ptr++; 5033 5034 (*pos)++; 5035 5036 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num]; 5037 ptr++) { 5038 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP) 5039 continue; 5040 5041 return ptr; 5042 } 5043 5044 return NULL; 5045 } 5046 5047 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos) 5048 { 5049 void *v; 5050 loff_t l = 0; 5051 5052 preempt_disable(); 5053 arch_spin_lock(&trace_cmdline_lock); 5054 5055 v = &savedcmd->map_cmdline_to_pid[0]; 5056 while (l <= *pos) { 5057 v = saved_cmdlines_next(m, v, &l); 5058 if (!v) 5059 return NULL; 5060 } 5061 5062 return v; 5063 } 5064 5065 static void saved_cmdlines_stop(struct seq_file *m, void *v) 5066 { 5067 arch_spin_unlock(&trace_cmdline_lock); 5068 preempt_enable(); 5069 } 5070 5071 static int saved_cmdlines_show(struct seq_file *m, void *v) 5072 { 5073 char buf[TASK_COMM_LEN]; 5074 unsigned int *pid = v; 5075 5076 __trace_find_cmdline(*pid, buf); 5077 seq_printf(m, "%d %s\n", *pid, buf); 5078 return 0; 5079 } 5080 5081 static const struct seq_operations tracing_saved_cmdlines_seq_ops = { 5082 .start = saved_cmdlines_start, 5083 .next = saved_cmdlines_next, 5084 .stop = saved_cmdlines_stop, 5085 .show = saved_cmdlines_show, 5086 }; 5087 5088 static int tracing_saved_cmdlines_open(struct inode *inode, struct file 
*filp) 5089 { 5090 if (tracing_disabled) 5091 return -ENODEV; 5092 5093 return seq_open(filp, &tracing_saved_cmdlines_seq_ops); 5094 } 5095 5096 static const struct file_operations tracing_saved_cmdlines_fops = { 5097 .open = tracing_saved_cmdlines_open, 5098 .read = seq_read, 5099 .llseek = seq_lseek, 5100 .release = seq_release, 5101 }; 5102 5103 static ssize_t 5104 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf, 5105 size_t cnt, loff_t *ppos) 5106 { 5107 char buf[64]; 5108 int r; 5109 5110 arch_spin_lock(&trace_cmdline_lock); 5111 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num); 5112 arch_spin_unlock(&trace_cmdline_lock); 5113 5114 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 5115 } 5116 5117 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s) 5118 { 5119 kfree(s->saved_cmdlines); 5120 kfree(s->map_cmdline_to_pid); 5121 kfree(s); 5122 } 5123 5124 static int tracing_resize_saved_cmdlines(unsigned int val) 5125 { 5126 struct saved_cmdlines_buffer *s, *savedcmd_temp; 5127 5128 s = kmalloc(sizeof(*s), GFP_KERNEL); 5129 if (!s) 5130 return -ENOMEM; 5131 5132 if (allocate_cmdlines_buffer(val, s) < 0) { 5133 kfree(s); 5134 return -ENOMEM; 5135 } 5136 5137 arch_spin_lock(&trace_cmdline_lock); 5138 savedcmd_temp = savedcmd; 5139 savedcmd = s; 5140 arch_spin_unlock(&trace_cmdline_lock); 5141 free_saved_cmdlines_buffer(savedcmd_temp); 5142 5143 return 0; 5144 } 5145 5146 static ssize_t 5147 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf, 5148 size_t cnt, loff_t *ppos) 5149 { 5150 unsigned long val; 5151 int ret; 5152 5153 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 5154 if (ret) 5155 return ret; 5156 5157 /* must have at least 1 entry or less than PID_MAX_DEFAULT */ 5158 if (!val || val > PID_MAX_DEFAULT) 5159 return -EINVAL; 5160 5161 ret = tracing_resize_saved_cmdlines((unsigned int)val); 5162 if (ret < 0) 5163 return ret; 5164 5165 *ppos += cnt; 5166 5167 return cnt; 5168 } 5169 5170 static const struct file_operations tracing_saved_cmdlines_size_fops = { 5171 .open = tracing_open_generic, 5172 .read = tracing_saved_cmdlines_size_read, 5173 .write = tracing_saved_cmdlines_size_write, 5174 }; 5175 5176 #ifdef CONFIG_TRACE_EVAL_MAP_FILE 5177 static union trace_eval_map_item * 5178 update_eval_map(union trace_eval_map_item *ptr) 5179 { 5180 if (!ptr->map.eval_string) { 5181 if (ptr->tail.next) { 5182 ptr = ptr->tail.next; 5183 /* Set ptr to the next real item (skip head) */ 5184 ptr++; 5185 } else 5186 return NULL; 5187 } 5188 return ptr; 5189 } 5190 5191 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos) 5192 { 5193 union trace_eval_map_item *ptr = v; 5194 5195 /* 5196 * Paranoid! If ptr points to end, we don't want to increment past it. 5197 * This really should never happen. 
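 * (Per the layout described at trace_insert_eval_map_file(): a tail element
 * has a NULL map.eval_string, so update_eval_map() follows tail.next to the
 * next saved block and steps one slot past its head before the walk resumes;
 * a NULL tail.next ends the walk.)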
5198 */ 5199 ptr = update_eval_map(ptr); 5200 if (WARN_ON_ONCE(!ptr)) 5201 return NULL; 5202 5203 ptr++; 5204 5205 (*pos)++; 5206 5207 ptr = update_eval_map(ptr); 5208 5209 return ptr; 5210 } 5211 5212 static void *eval_map_start(struct seq_file *m, loff_t *pos) 5213 { 5214 union trace_eval_map_item *v; 5215 loff_t l = 0; 5216 5217 mutex_lock(&trace_eval_mutex); 5218 5219 v = trace_eval_maps; 5220 if (v) 5221 v++; 5222 5223 while (v && l < *pos) { 5224 v = eval_map_next(m, v, &l); 5225 } 5226 5227 return v; 5228 } 5229 5230 static void eval_map_stop(struct seq_file *m, void *v) 5231 { 5232 mutex_unlock(&trace_eval_mutex); 5233 } 5234 5235 static int eval_map_show(struct seq_file *m, void *v) 5236 { 5237 union trace_eval_map_item *ptr = v; 5238 5239 seq_printf(m, "%s %ld (%s)\n", 5240 ptr->map.eval_string, ptr->map.eval_value, 5241 ptr->map.system); 5242 5243 return 0; 5244 } 5245 5246 static const struct seq_operations tracing_eval_map_seq_ops = { 5247 .start = eval_map_start, 5248 .next = eval_map_next, 5249 .stop = eval_map_stop, 5250 .show = eval_map_show, 5251 }; 5252 5253 static int tracing_eval_map_open(struct inode *inode, struct file *filp) 5254 { 5255 if (tracing_disabled) 5256 return -ENODEV; 5257 5258 return seq_open(filp, &tracing_eval_map_seq_ops); 5259 } 5260 5261 static const struct file_operations tracing_eval_map_fops = { 5262 .open = tracing_eval_map_open, 5263 .read = seq_read, 5264 .llseek = seq_lseek, 5265 .release = seq_release, 5266 }; 5267 5268 static inline union trace_eval_map_item * 5269 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr) 5270 { 5271 /* Return tail of array given the head */ 5272 return ptr + ptr->head.length + 1; 5273 } 5274 5275 static void 5276 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, 5277 int len) 5278 { 5279 struct trace_eval_map **stop; 5280 struct trace_eval_map **map; 5281 union trace_eval_map_item *map_array; 5282 union trace_eval_map_item *ptr; 5283 5284 stop = start + len; 5285 5286 /* 5287 * The trace_eval_maps contains the map plus a head and tail item, 5288 * where the head holds the module and length of array, and the 5289 * tail holds a pointer to the next list. 
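 * An illustrative picture for len == 3:
 *
 *	[0] head  (head.mod, head.length = 3)
 *	[1] map   (copy of *start[0])
 *	[2] map   (copy of *start[1])
 *	[3] map   (copy of *start[2])
 *	[4] tail  (zeroed, so tail.next stays NULL until another module chains on)
 *
 * which is why len + 2 elements are allocated below and the element after
 * the last map is memset() to zero.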
5290 */ 5291 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL); 5292 if (!map_array) { 5293 pr_warn("Unable to allocate trace eval mapping\n"); 5294 return; 5295 } 5296 5297 mutex_lock(&trace_eval_mutex); 5298 5299 if (!trace_eval_maps) 5300 trace_eval_maps = map_array; 5301 else { 5302 ptr = trace_eval_maps; 5303 for (;;) { 5304 ptr = trace_eval_jmp_to_tail(ptr); 5305 if (!ptr->tail.next) 5306 break; 5307 ptr = ptr->tail.next; 5308 5309 } 5310 ptr->tail.next = map_array; 5311 } 5312 map_array->head.mod = mod; 5313 map_array->head.length = len; 5314 map_array++; 5315 5316 for (map = start; (unsigned long)map < (unsigned long)stop; map++) { 5317 map_array->map = **map; 5318 map_array++; 5319 } 5320 memset(map_array, 0, sizeof(*map_array)); 5321 5322 mutex_unlock(&trace_eval_mutex); 5323 } 5324 5325 static void trace_create_eval_file(struct dentry *d_tracer) 5326 { 5327 trace_create_file("eval_map", 0444, d_tracer, 5328 NULL, &tracing_eval_map_fops); 5329 } 5330 5331 #else /* CONFIG_TRACE_EVAL_MAP_FILE */ 5332 static inline void trace_create_eval_file(struct dentry *d_tracer) { } 5333 static inline void trace_insert_eval_map_file(struct module *mod, 5334 struct trace_eval_map **start, int len) { } 5335 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */ 5336 5337 static void trace_insert_eval_map(struct module *mod, 5338 struct trace_eval_map **start, int len) 5339 { 5340 struct trace_eval_map **map; 5341 5342 if (len <= 0) 5343 return; 5344 5345 map = start; 5346 5347 trace_event_eval_update(map, len); 5348 5349 trace_insert_eval_map_file(mod, start, len); 5350 } 5351 5352 static ssize_t 5353 tracing_set_trace_read(struct file *filp, char __user *ubuf, 5354 size_t cnt, loff_t *ppos) 5355 { 5356 struct trace_array *tr = filp->private_data; 5357 char buf[MAX_TRACER_SIZE+2]; 5358 int r; 5359 5360 mutex_lock(&trace_types_lock); 5361 r = sprintf(buf, "%s\n", tr->current_trace->name); 5362 mutex_unlock(&trace_types_lock); 5363 5364 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 5365 } 5366 5367 int tracer_init(struct tracer *t, struct trace_array *tr) 5368 { 5369 tracing_reset_online_cpus(&tr->trace_buffer); 5370 return t->init(tr); 5371 } 5372 5373 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val) 5374 { 5375 int cpu; 5376 5377 for_each_tracing_cpu(cpu) 5378 per_cpu_ptr(buf->data, cpu)->entries = val; 5379 } 5380 5381 #ifdef CONFIG_TRACER_MAX_TRACE 5382 /* resize @tr's buffer to the size of @size_tr's entries */ 5383 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, 5384 struct trace_buffer *size_buf, int cpu_id) 5385 { 5386 int cpu, ret = 0; 5387 5388 if (cpu_id == RING_BUFFER_ALL_CPUS) { 5389 for_each_tracing_cpu(cpu) { 5390 ret = ring_buffer_resize(trace_buf->buffer, 5391 per_cpu_ptr(size_buf->data, cpu)->entries, cpu); 5392 if (ret < 0) 5393 break; 5394 per_cpu_ptr(trace_buf->data, cpu)->entries = 5395 per_cpu_ptr(size_buf->data, cpu)->entries; 5396 } 5397 } else { 5398 ret = ring_buffer_resize(trace_buf->buffer, 5399 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); 5400 if (ret == 0) 5401 per_cpu_ptr(trace_buf->data, cpu_id)->entries = 5402 per_cpu_ptr(size_buf->data, cpu_id)->entries; 5403 } 5404 5405 return ret; 5406 } 5407 #endif /* CONFIG_TRACER_MAX_TRACE */ 5408 5409 static int __tracing_resize_ring_buffer(struct trace_array *tr, 5410 unsigned long size, int cpu) 5411 { 5412 int ret; 5413 5414 /* 5415 * If kernel or user changes the size of the ring buffer 5416 * we use the size that was given, and we can forget 
about 5417 * expanding it later. 5418 */ 5419 ring_buffer_expanded = true; 5420 5421 /* May be called before buffers are initialized */ 5422 if (!tr->trace_buffer.buffer) 5423 return 0; 5424 5425 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu); 5426 if (ret < 0) 5427 return ret; 5428 5429 #ifdef CONFIG_TRACER_MAX_TRACE 5430 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) || 5431 !tr->current_trace->use_max_tr) 5432 goto out; 5433 5434 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); 5435 if (ret < 0) { 5436 int r = resize_buffer_duplicate_size(&tr->trace_buffer, 5437 &tr->trace_buffer, cpu); 5438 if (r < 0) { 5439 /* 5440 * AARGH! We are left with different 5441 * size max buffer!!!! 5442 * The max buffer is our "snapshot" buffer. 5443 * When a tracer needs a snapshot (one of the 5444 * latency tracers), it swaps the max buffer 5445 * with the saved snap shot. We succeeded to 5446 * update the size of the main buffer, but failed to 5447 * update the size of the max buffer. But when we tried 5448 * to reset the main buffer to the original size, we 5449 * failed there too. This is very unlikely to 5450 * happen, but if it does, warn and kill all 5451 * tracing. 5452 */ 5453 WARN_ON(1); 5454 tracing_disabled = 1; 5455 } 5456 return ret; 5457 } 5458 5459 if (cpu == RING_BUFFER_ALL_CPUS) 5460 set_buffer_entries(&tr->max_buffer, size); 5461 else 5462 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size; 5463 5464 out: 5465 #endif /* CONFIG_TRACER_MAX_TRACE */ 5466 5467 if (cpu == RING_BUFFER_ALL_CPUS) 5468 set_buffer_entries(&tr->trace_buffer, size); 5469 else 5470 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size; 5471 5472 return ret; 5473 } 5474 5475 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr, 5476 unsigned long size, int cpu_id) 5477 { 5478 int ret = size; 5479 5480 mutex_lock(&trace_types_lock); 5481 5482 if (cpu_id != RING_BUFFER_ALL_CPUS) { 5483 /* make sure, this cpu is enabled in the mask */ 5484 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) { 5485 ret = -EINVAL; 5486 goto out; 5487 } 5488 } 5489 5490 ret = __tracing_resize_ring_buffer(tr, size, cpu_id); 5491 if (ret < 0) 5492 ret = -ENOMEM; 5493 5494 out: 5495 mutex_unlock(&trace_types_lock); 5496 5497 return ret; 5498 } 5499 5500 5501 /** 5502 * tracing_update_buffers - used by tracing facility to expand ring buffers 5503 * 5504 * To save on memory when the tracing is never used on a system with it 5505 * configured in. The ring buffers are set to a minimum size. But once 5506 * a user starts to use the tracing facility, then they need to grow 5507 * to their default size. 5508 * 5509 * This function is to be called when a tracer is about to be used. 5510 */ 5511 int tracing_update_buffers(void) 5512 { 5513 int ret = 0; 5514 5515 mutex_lock(&trace_types_lock); 5516 if (!ring_buffer_expanded) 5517 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size, 5518 RING_BUFFER_ALL_CPUS); 5519 mutex_unlock(&trace_types_lock); 5520 5521 return ret; 5522 } 5523 5524 struct trace_option_dentry; 5525 5526 static void 5527 create_trace_option_files(struct trace_array *tr, struct tracer *tracer); 5528 5529 /* 5530 * Used to clear out the tracer before deletion of an instance. 5531 * Must have trace_types_lock held. 
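 * A rough sketch of the expected call site (instance teardown, details
 * assumed):
 *
 *	mutex_lock(&trace_types_lock);
 *	tracing_set_nop(tr);
 *	...	tear down events, free the instance buffers
 *	mutex_unlock(&trace_types_lock);
 *
 * so that tr->current_trace already points at nop_trace by the time the
 * per-instance buffers go away.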
5532 */ 5533 static void tracing_set_nop(struct trace_array *tr) 5534 { 5535 if (tr->current_trace == &nop_trace) 5536 return; 5537 5538 tr->current_trace->enabled--; 5539 5540 if (tr->current_trace->reset) 5541 tr->current_trace->reset(tr); 5542 5543 tr->current_trace = &nop_trace; 5544 } 5545 5546 static void add_tracer_options(struct trace_array *tr, struct tracer *t) 5547 { 5548 /* Only enable if the directory has been created already. */ 5549 if (!tr->dir) 5550 return; 5551 5552 create_trace_option_files(tr, t); 5553 } 5554 5555 static int tracing_set_tracer(struct trace_array *tr, const char *buf) 5556 { 5557 struct tracer *t; 5558 #ifdef CONFIG_TRACER_MAX_TRACE 5559 bool had_max_tr; 5560 #endif 5561 int ret = 0; 5562 5563 mutex_lock(&trace_types_lock); 5564 5565 if (!ring_buffer_expanded) { 5566 ret = __tracing_resize_ring_buffer(tr, trace_buf_size, 5567 RING_BUFFER_ALL_CPUS); 5568 if (ret < 0) 5569 goto out; 5570 ret = 0; 5571 } 5572 5573 for (t = trace_types; t; t = t->next) { 5574 if (strcmp(t->name, buf) == 0) 5575 break; 5576 } 5577 if (!t) { 5578 ret = -EINVAL; 5579 goto out; 5580 } 5581 if (t == tr->current_trace) 5582 goto out; 5583 5584 #ifdef CONFIG_TRACER_SNAPSHOT 5585 if (t->use_max_tr) { 5586 arch_spin_lock(&tr->max_lock); 5587 if (tr->cond_snapshot) 5588 ret = -EBUSY; 5589 arch_spin_unlock(&tr->max_lock); 5590 if (ret) 5591 goto out; 5592 } 5593 #endif 5594 /* Some tracers won't work on kernel command line */ 5595 if (system_state < SYSTEM_RUNNING && t->noboot) { 5596 pr_warn("Tracer '%s' is not allowed on command line, ignored\n", 5597 t->name); 5598 goto out; 5599 } 5600 5601 /* Some tracers are only allowed for the top level buffer */ 5602 if (!trace_ok_for_array(t, tr)) { 5603 ret = -EINVAL; 5604 goto out; 5605 } 5606 5607 /* If trace pipe files are being read, we can't change the tracer */ 5608 if (tr->current_trace->ref) { 5609 ret = -EBUSY; 5610 goto out; 5611 } 5612 5613 trace_branch_disable(); 5614 5615 tr->current_trace->enabled--; 5616 5617 if (tr->current_trace->reset) 5618 tr->current_trace->reset(tr); 5619 5620 /* Current trace needs to be nop_trace before synchronize_rcu */ 5621 tr->current_trace = &nop_trace; 5622 5623 #ifdef CONFIG_TRACER_MAX_TRACE 5624 had_max_tr = tr->allocated_snapshot; 5625 5626 if (had_max_tr && !t->use_max_tr) { 5627 /* 5628 * We need to make sure that the update_max_tr sees that 5629 * current_trace changed to nop_trace to keep it from 5630 * swapping the buffers after we resize it. 5631 * The update_max_tr is called from interrupts disabled 5632 * so a synchronized_sched() is sufficient. 
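 * (Put another way: update_max_tr() runs with interrupts disabled, so once
 * every CPU has passed through a quiescent state nothing can still observe
 * the old tracer (current_trace is already nop_trace above), and the
 * snapshot buffer can be freed below without racing a buffer swap.)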
5633 */ 5634 synchronize_rcu(); 5635 free_snapshot(tr); 5636 } 5637 #endif 5638 5639 #ifdef CONFIG_TRACER_MAX_TRACE 5640 if (t->use_max_tr && !had_max_tr) { 5641 ret = tracing_alloc_snapshot_instance(tr); 5642 if (ret < 0) 5643 goto out; 5644 } 5645 #endif 5646 5647 if (t->init) { 5648 ret = tracer_init(t, tr); 5649 if (ret) 5650 goto out; 5651 } 5652 5653 tr->current_trace = t; 5654 tr->current_trace->enabled++; 5655 trace_branch_enable(tr); 5656 out: 5657 mutex_unlock(&trace_types_lock); 5658 5659 return ret; 5660 } 5661 5662 static ssize_t 5663 tracing_set_trace_write(struct file *filp, const char __user *ubuf, 5664 size_t cnt, loff_t *ppos) 5665 { 5666 struct trace_array *tr = filp->private_data; 5667 char buf[MAX_TRACER_SIZE+1]; 5668 int i; 5669 size_t ret; 5670 int err; 5671 5672 ret = cnt; 5673 5674 if (cnt > MAX_TRACER_SIZE) 5675 cnt = MAX_TRACER_SIZE; 5676 5677 if (copy_from_user(buf, ubuf, cnt)) 5678 return -EFAULT; 5679 5680 buf[cnt] = 0; 5681 5682 /* strip ending whitespace. */ 5683 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) 5684 buf[i] = 0; 5685 5686 err = tracing_set_tracer(tr, buf); 5687 if (err) 5688 return err; 5689 5690 *ppos += ret; 5691 5692 return ret; 5693 } 5694 5695 static ssize_t 5696 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf, 5697 size_t cnt, loff_t *ppos) 5698 { 5699 char buf[64]; 5700 int r; 5701 5702 r = snprintf(buf, sizeof(buf), "%ld\n", 5703 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); 5704 if (r > sizeof(buf)) 5705 r = sizeof(buf); 5706 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 5707 } 5708 5709 static ssize_t 5710 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, 5711 size_t cnt, loff_t *ppos) 5712 { 5713 unsigned long val; 5714 int ret; 5715 5716 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 5717 if (ret) 5718 return ret; 5719 5720 *ptr = val * 1000; 5721 5722 return cnt; 5723 } 5724 5725 static ssize_t 5726 tracing_thresh_read(struct file *filp, char __user *ubuf, 5727 size_t cnt, loff_t *ppos) 5728 { 5729 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos); 5730 } 5731 5732 static ssize_t 5733 tracing_thresh_write(struct file *filp, const char __user *ubuf, 5734 size_t cnt, loff_t *ppos) 5735 { 5736 struct trace_array *tr = filp->private_data; 5737 int ret; 5738 5739 mutex_lock(&trace_types_lock); 5740 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos); 5741 if (ret < 0) 5742 goto out; 5743 5744 if (tr->current_trace->update_thresh) { 5745 ret = tr->current_trace->update_thresh(tr); 5746 if (ret < 0) 5747 goto out; 5748 } 5749 5750 ret = cnt; 5751 out: 5752 mutex_unlock(&trace_types_lock); 5753 5754 return ret; 5755 } 5756 5757 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) 5758 5759 static ssize_t 5760 tracing_max_lat_read(struct file *filp, char __user *ubuf, 5761 size_t cnt, loff_t *ppos) 5762 { 5763 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos); 5764 } 5765 5766 static ssize_t 5767 tracing_max_lat_write(struct file *filp, const char __user *ubuf, 5768 size_t cnt, loff_t *ppos) 5769 { 5770 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos); 5771 } 5772 5773 #endif 5774 5775 static int tracing_open_pipe(struct inode *inode, struct file *filp) 5776 { 5777 struct trace_array *tr = inode->i_private; 5778 struct trace_iterator *iter; 5779 int ret = 0; 5780 5781 if (tracing_disabled) 5782 return -ENODEV; 5783 5784 if (trace_array_get(tr) < 0) 5785 return -ENODEV; 5786 5787 mutex_lock(&trace_types_lock); 5788 5789 /* create a 
buffer to store the information to pass to userspace */ 5790 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 5791 if (!iter) { 5792 ret = -ENOMEM; 5793 __trace_array_put(tr); 5794 goto out; 5795 } 5796 5797 trace_seq_init(&iter->seq); 5798 iter->trace = tr->current_trace; 5799 5800 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 5801 ret = -ENOMEM; 5802 goto fail; 5803 } 5804 5805 /* trace pipe does not show start of buffer */ 5806 cpumask_setall(iter->started); 5807 5808 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 5809 iter->iter_flags |= TRACE_FILE_LAT_FMT; 5810 5811 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 5812 if (trace_clocks[tr->clock_id].in_ns) 5813 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 5814 5815 iter->tr = tr; 5816 iter->trace_buffer = &tr->trace_buffer; 5817 iter->cpu_file = tracing_get_cpu(inode); 5818 mutex_init(&iter->mutex); 5819 filp->private_data = iter; 5820 5821 if (iter->trace->pipe_open) 5822 iter->trace->pipe_open(iter); 5823 5824 nonseekable_open(inode, filp); 5825 5826 tr->current_trace->ref++; 5827 out: 5828 mutex_unlock(&trace_types_lock); 5829 return ret; 5830 5831 fail: 5832 kfree(iter); 5833 __trace_array_put(tr); 5834 mutex_unlock(&trace_types_lock); 5835 return ret; 5836 } 5837 5838 static int tracing_release_pipe(struct inode *inode, struct file *file) 5839 { 5840 struct trace_iterator *iter = file->private_data; 5841 struct trace_array *tr = inode->i_private; 5842 5843 mutex_lock(&trace_types_lock); 5844 5845 tr->current_trace->ref--; 5846 5847 if (iter->trace->pipe_close) 5848 iter->trace->pipe_close(iter); 5849 5850 mutex_unlock(&trace_types_lock); 5851 5852 free_cpumask_var(iter->started); 5853 mutex_destroy(&iter->mutex); 5854 kfree(iter); 5855 5856 trace_array_put(tr); 5857 5858 return 0; 5859 } 5860 5861 static __poll_t 5862 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) 5863 { 5864 struct trace_array *tr = iter->tr; 5865 5866 /* Iterators are static, they should be filled or empty */ 5867 if (trace_buffer_iter(iter, iter->cpu_file)) 5868 return EPOLLIN | EPOLLRDNORM; 5869 5870 if (tr->trace_flags & TRACE_ITER_BLOCK) 5871 /* 5872 * Always select as readable when in blocking mode 5873 */ 5874 return EPOLLIN | EPOLLRDNORM; 5875 else 5876 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file, 5877 filp, poll_table); 5878 } 5879 5880 static __poll_t 5881 tracing_poll_pipe(struct file *filp, poll_table *poll_table) 5882 { 5883 struct trace_iterator *iter = filp->private_data; 5884 5885 return trace_poll(iter, filp, poll_table); 5886 } 5887 5888 /* Must be called with iter->mutex held. */ 5889 static int tracing_wait_pipe(struct file *filp) 5890 { 5891 struct trace_iterator *iter = filp->private_data; 5892 int ret; 5893 5894 while (trace_empty(iter)) { 5895 5896 if ((filp->f_flags & O_NONBLOCK)) { 5897 return -EAGAIN; 5898 } 5899 5900 /* 5901 * We block until we read something and tracing is disabled. 5902 * We still block if tracing is disabled, but we have never 5903 * read anything. This allows a user to cat this file, and 5904 * then enable tracing. But after we have read something, 5905 * we give an EOF when tracing is again disabled. 5906 * 5907 * iter->pos will be 0 if we haven't read anything. 
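 * For example: "cat trace_pipe" with tracing_on set to 0 simply blocks in
 * wait_on_pipe() below, but once at least one entry has been handed out
 * (iter->pos != 0) a later "echo 0 > tracing_on" makes the check below
 * break out and the read returns EOF.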
5908 */ 5909 if (!tracer_tracing_is_on(iter->tr) && iter->pos) 5910 break; 5911 5912 mutex_unlock(&iter->mutex); 5913 5914 ret = wait_on_pipe(iter, 0); 5915 5916 mutex_lock(&iter->mutex); 5917 5918 if (ret) 5919 return ret; 5920 } 5921 5922 return 1; 5923 } 5924 5925 /* 5926 * Consumer reader. 5927 */ 5928 static ssize_t 5929 tracing_read_pipe(struct file *filp, char __user *ubuf, 5930 size_t cnt, loff_t *ppos) 5931 { 5932 struct trace_iterator *iter = filp->private_data; 5933 ssize_t sret; 5934 5935 /* 5936 * Avoid more than one consumer on a single file descriptor 5937 * This is just a matter of traces coherency, the ring buffer itself 5938 * is protected. 5939 */ 5940 mutex_lock(&iter->mutex); 5941 5942 /* return any leftover data */ 5943 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 5944 if (sret != -EBUSY) 5945 goto out; 5946 5947 trace_seq_init(&iter->seq); 5948 5949 if (iter->trace->read) { 5950 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); 5951 if (sret) 5952 goto out; 5953 } 5954 5955 waitagain: 5956 sret = tracing_wait_pipe(filp); 5957 if (sret <= 0) 5958 goto out; 5959 5960 /* stop when tracing is finished */ 5961 if (trace_empty(iter)) { 5962 sret = 0; 5963 goto out; 5964 } 5965 5966 if (cnt >= PAGE_SIZE) 5967 cnt = PAGE_SIZE - 1; 5968 5969 /* reset all but tr, trace, and overruns */ 5970 memset(&iter->seq, 0, 5971 sizeof(struct trace_iterator) - 5972 offsetof(struct trace_iterator, seq)); 5973 cpumask_clear(iter->started); 5974 iter->pos = -1; 5975 5976 trace_event_read_lock(); 5977 trace_access_lock(iter->cpu_file); 5978 while (trace_find_next_entry_inc(iter) != NULL) { 5979 enum print_line_t ret; 5980 int save_len = iter->seq.seq.len; 5981 5982 ret = print_trace_line(iter); 5983 if (ret == TRACE_TYPE_PARTIAL_LINE) { 5984 /* don't print partial lines */ 5985 iter->seq.seq.len = save_len; 5986 break; 5987 } 5988 if (ret != TRACE_TYPE_NO_CONSUME) 5989 trace_consume(iter); 5990 5991 if (trace_seq_used(&iter->seq) >= cnt) 5992 break; 5993 5994 /* 5995 * Setting the full flag means we reached the trace_seq buffer 5996 * size and we should leave by partial output condition above. 5997 * One of the trace_seq_* functions is not used properly. 5998 */ 5999 WARN_ONCE(iter->seq.full, "full flag set for trace type %d", 6000 iter->ent->type); 6001 } 6002 trace_access_unlock(iter->cpu_file); 6003 trace_event_read_unlock(); 6004 6005 /* Now copy what we have to the user */ 6006 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 6007 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq)) 6008 trace_seq_init(&iter->seq); 6009 6010 /* 6011 * If there was nothing to send to user, in spite of consuming trace 6012 * entries, go back to wait for more entries. 6013 */ 6014 if (sret == -EBUSY) 6015 goto waitagain; 6016 6017 out: 6018 mutex_unlock(&iter->mutex); 6019 6020 return sret; 6021 } 6022 6023 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, 6024 unsigned int idx) 6025 { 6026 __free_page(spd->pages[idx]); 6027 } 6028 6029 static const struct pipe_buf_operations tracing_pipe_buf_ops = { 6030 .confirm = generic_pipe_buf_confirm, 6031 .release = generic_pipe_buf_release, 6032 .steal = generic_pipe_buf_steal, 6033 .get = generic_pipe_buf_get, 6034 }; 6035 6036 static size_t 6037 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) 6038 { 6039 size_t count; 6040 int save_len; 6041 int ret; 6042 6043 /* Seq buffer is page-sized, exactly what we need. 
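 * That is, each pass of the loop below formats at most one page worth of
 * text, which tracing_splice_read_pipe() then copies verbatim into a single
 * splice page via trace_seq_to_buffer().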
*/ 6044 for (;;) { 6045 save_len = iter->seq.seq.len; 6046 ret = print_trace_line(iter); 6047 6048 if (trace_seq_has_overflowed(&iter->seq)) { 6049 iter->seq.seq.len = save_len; 6050 break; 6051 } 6052 6053 /* 6054 * This should not be hit, because it should only 6055 * be set if the iter->seq overflowed. But check it 6056 * anyway to be safe. 6057 */ 6058 if (ret == TRACE_TYPE_PARTIAL_LINE) { 6059 iter->seq.seq.len = save_len; 6060 break; 6061 } 6062 6063 count = trace_seq_used(&iter->seq) - save_len; 6064 if (rem < count) { 6065 rem = 0; 6066 iter->seq.seq.len = save_len; 6067 break; 6068 } 6069 6070 if (ret != TRACE_TYPE_NO_CONSUME) 6071 trace_consume(iter); 6072 rem -= count; 6073 if (!trace_find_next_entry_inc(iter)) { 6074 rem = 0; 6075 iter->ent = NULL; 6076 break; 6077 } 6078 } 6079 6080 return rem; 6081 } 6082 6083 static ssize_t tracing_splice_read_pipe(struct file *filp, 6084 loff_t *ppos, 6085 struct pipe_inode_info *pipe, 6086 size_t len, 6087 unsigned int flags) 6088 { 6089 struct page *pages_def[PIPE_DEF_BUFFERS]; 6090 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 6091 struct trace_iterator *iter = filp->private_data; 6092 struct splice_pipe_desc spd = { 6093 .pages = pages_def, 6094 .partial = partial_def, 6095 .nr_pages = 0, /* This gets updated below. */ 6096 .nr_pages_max = PIPE_DEF_BUFFERS, 6097 .ops = &tracing_pipe_buf_ops, 6098 .spd_release = tracing_spd_release_pipe, 6099 }; 6100 ssize_t ret; 6101 size_t rem; 6102 unsigned int i; 6103 6104 if (splice_grow_spd(pipe, &spd)) 6105 return -ENOMEM; 6106 6107 mutex_lock(&iter->mutex); 6108 6109 if (iter->trace->splice_read) { 6110 ret = iter->trace->splice_read(iter, filp, 6111 ppos, pipe, len, flags); 6112 if (ret) 6113 goto out_err; 6114 } 6115 6116 ret = tracing_wait_pipe(filp); 6117 if (ret <= 0) 6118 goto out_err; 6119 6120 if (!iter->ent && !trace_find_next_entry_inc(iter)) { 6121 ret = -EFAULT; 6122 goto out_err; 6123 } 6124 6125 trace_event_read_lock(); 6126 trace_access_lock(iter->cpu_file); 6127 6128 /* Fill as many pages as possible. */ 6129 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) { 6130 spd.pages[i] = alloc_page(GFP_KERNEL); 6131 if (!spd.pages[i]) 6132 break; 6133 6134 rem = tracing_fill_pipe_page(rem, iter); 6135 6136 /* Copy the data into the page, so we can start over. 
*/ 6137 ret = trace_seq_to_buffer(&iter->seq, 6138 page_address(spd.pages[i]), 6139 trace_seq_used(&iter->seq)); 6140 if (ret < 0) { 6141 __free_page(spd.pages[i]); 6142 break; 6143 } 6144 spd.partial[i].offset = 0; 6145 spd.partial[i].len = trace_seq_used(&iter->seq); 6146 6147 trace_seq_init(&iter->seq); 6148 } 6149 6150 trace_access_unlock(iter->cpu_file); 6151 trace_event_read_unlock(); 6152 mutex_unlock(&iter->mutex); 6153 6154 spd.nr_pages = i; 6155 6156 if (i) 6157 ret = splice_to_pipe(pipe, &spd); 6158 else 6159 ret = 0; 6160 out: 6161 splice_shrink_spd(&spd); 6162 return ret; 6163 6164 out_err: 6165 mutex_unlock(&iter->mutex); 6166 goto out; 6167 } 6168 6169 static ssize_t 6170 tracing_entries_read(struct file *filp, char __user *ubuf, 6171 size_t cnt, loff_t *ppos) 6172 { 6173 struct inode *inode = file_inode(filp); 6174 struct trace_array *tr = inode->i_private; 6175 int cpu = tracing_get_cpu(inode); 6176 char buf[64]; 6177 int r = 0; 6178 ssize_t ret; 6179 6180 mutex_lock(&trace_types_lock); 6181 6182 if (cpu == RING_BUFFER_ALL_CPUS) { 6183 int cpu, buf_size_same; 6184 unsigned long size; 6185 6186 size = 0; 6187 buf_size_same = 1; 6188 /* check if all cpu sizes are same */ 6189 for_each_tracing_cpu(cpu) { 6190 /* fill in the size from first enabled cpu */ 6191 if (size == 0) 6192 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries; 6193 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) { 6194 buf_size_same = 0; 6195 break; 6196 } 6197 } 6198 6199 if (buf_size_same) { 6200 if (!ring_buffer_expanded) 6201 r = sprintf(buf, "%lu (expanded: %lu)\n", 6202 size >> 10, 6203 trace_buf_size >> 10); 6204 else 6205 r = sprintf(buf, "%lu\n", size >> 10); 6206 } else 6207 r = sprintf(buf, "X\n"); 6208 } else 6209 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); 6210 6211 mutex_unlock(&trace_types_lock); 6212 6213 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6214 return ret; 6215 } 6216 6217 static ssize_t 6218 tracing_entries_write(struct file *filp, const char __user *ubuf, 6219 size_t cnt, loff_t *ppos) 6220 { 6221 struct inode *inode = file_inode(filp); 6222 struct trace_array *tr = inode->i_private; 6223 unsigned long val; 6224 int ret; 6225 6226 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 6227 if (ret) 6228 return ret; 6229 6230 /* must have at least 1 entry */ 6231 if (!val) 6232 return -EINVAL; 6233 6234 /* value is in KB */ 6235 val <<= 10; 6236 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); 6237 if (ret < 0) 6238 return ret; 6239 6240 *ppos += cnt; 6241 6242 return cnt; 6243 } 6244 6245 static ssize_t 6246 tracing_total_entries_read(struct file *filp, char __user *ubuf, 6247 size_t cnt, loff_t *ppos) 6248 { 6249 struct trace_array *tr = filp->private_data; 6250 char buf[64]; 6251 int r, cpu; 6252 unsigned long size = 0, expanded_size = 0; 6253 6254 mutex_lock(&trace_types_lock); 6255 for_each_tracing_cpu(cpu) { 6256 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10; 6257 if (!ring_buffer_expanded) 6258 expanded_size += trace_buf_size >> 10; 6259 } 6260 if (ring_buffer_expanded) 6261 r = sprintf(buf, "%lu\n", size); 6262 else 6263 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); 6264 mutex_unlock(&trace_types_lock); 6265 6266 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6267 } 6268 6269 static ssize_t 6270 tracing_free_buffer_write(struct file *filp, const char __user *ubuf, 6271 size_t cnt, loff_t *ppos) 6272 { 6273 /* 6274 * There is no need to read what the 
user has written, this function 6275 * is just to make sure that there is no error when "echo" is used 6276 */ 6277 6278 *ppos += cnt; 6279 6280 return cnt; 6281 } 6282 6283 static int 6284 tracing_free_buffer_release(struct inode *inode, struct file *filp) 6285 { 6286 struct trace_array *tr = inode->i_private; 6287 6288 /* disable tracing ? */ 6289 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) 6290 tracer_tracing_off(tr); 6291 /* resize the ring buffer to 0 */ 6292 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); 6293 6294 trace_array_put(tr); 6295 6296 return 0; 6297 } 6298 6299 static ssize_t 6300 tracing_mark_write(struct file *filp, const char __user *ubuf, 6301 size_t cnt, loff_t *fpos) 6302 { 6303 struct trace_array *tr = filp->private_data; 6304 struct ring_buffer_event *event; 6305 enum event_trigger_type tt = ETT_NONE; 6306 struct ring_buffer *buffer; 6307 struct print_entry *entry; 6308 unsigned long irq_flags; 6309 const char faulted[] = "<faulted>"; 6310 ssize_t written; 6311 int size; 6312 int len; 6313 6314 /* Used in tracing_mark_raw_write() as well */ 6315 #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */ 6316 6317 if (tracing_disabled) 6318 return -EINVAL; 6319 6320 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) 6321 return -EINVAL; 6322 6323 if (cnt > TRACE_BUF_SIZE) 6324 cnt = TRACE_BUF_SIZE; 6325 6326 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); 6327 6328 local_save_flags(irq_flags); 6329 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */ 6330 6331 /* If less than "<faulted>", then make sure we can still add that */ 6332 if (cnt < FAULTED_SIZE) 6333 size += FAULTED_SIZE - cnt; 6334 6335 buffer = tr->trace_buffer.buffer; 6336 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 6337 irq_flags, preempt_count()); 6338 if (unlikely(!event)) 6339 /* Ring buffer disabled, return as if not open for write */ 6340 return -EBADF; 6341 6342 entry = ring_buffer_event_data(event); 6343 entry->ip = _THIS_IP_; 6344 6345 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt); 6346 if (len) { 6347 memcpy(&entry->buf, faulted, FAULTED_SIZE); 6348 cnt = FAULTED_SIZE; 6349 written = -EFAULT; 6350 } else 6351 written = cnt; 6352 len = cnt; 6353 6354 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { 6355 /* do not add \n before testing triggers, but add \0 */ 6356 entry->buf[cnt] = '\0'; 6357 tt = event_triggers_call(tr->trace_marker_file, entry, event); 6358 } 6359 6360 if (entry->buf[cnt - 1] != '\n') { 6361 entry->buf[cnt] = '\n'; 6362 entry->buf[cnt + 1] = '\0'; 6363 } else 6364 entry->buf[cnt] = '\0'; 6365 6366 __buffer_unlock_commit(buffer, event); 6367 6368 if (tt) 6369 event_triggers_post_call(tr->trace_marker_file, tt); 6370 6371 if (written > 0) 6372 *fpos += written; 6373 6374 return written; 6375 } 6376 6377 /* Limit it for now to 3K (including tag) */ 6378 #define RAW_DATA_MAX_SIZE (1024*3) 6379 6380 static ssize_t 6381 tracing_mark_raw_write(struct file *filp, const char __user *ubuf, 6382 size_t cnt, loff_t *fpos) 6383 { 6384 struct trace_array *tr = filp->private_data; 6385 struct ring_buffer_event *event; 6386 struct ring_buffer *buffer; 6387 struct raw_data_entry *entry; 6388 const char faulted[] = "<faulted>"; 6389 unsigned long irq_flags; 6390 ssize_t written; 6391 int size; 6392 int len; 6393 6394 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int)) 6395 6396 if (tracing_disabled) 6397 return -EINVAL; 6398 6399 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) 6400 return -EINVAL; 6401 6402 
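	/*
	 * Illustrative only (not part of the original source): a raw marker
	 * write is expected to start with an 'unsigned int' tag id followed
	 * by the payload bytes, e.g. from user space (assuming tracefs is
	 * mounted at /sys/kernel/tracing):
	 *
	 *	struct { unsigned int id; char data[8]; } rec = { 42, "payload" };
	 *	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
	 *
	 *	write(fd, &rec, sizeof(rec));
	 */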
/* The marker must at least have a tag id */ 6403 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE) 6404 return -EINVAL; 6405 6406 if (cnt > TRACE_BUF_SIZE) 6407 cnt = TRACE_BUF_SIZE; 6408 6409 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); 6410 6411 local_save_flags(irq_flags); 6412 size = sizeof(*entry) + cnt; 6413 if (cnt < FAULT_SIZE_ID) 6414 size += FAULT_SIZE_ID - cnt; 6415 6416 buffer = tr->trace_buffer.buffer; 6417 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, 6418 irq_flags, preempt_count()); 6419 if (!event) 6420 /* Ring buffer disabled, return as if not open for write */ 6421 return -EBADF; 6422 6423 entry = ring_buffer_event_data(event); 6424 6425 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); 6426 if (len) { 6427 entry->id = -1; 6428 memcpy(&entry->buf, faulted, FAULTED_SIZE); 6429 written = -EFAULT; 6430 } else 6431 written = cnt; 6432 6433 __buffer_unlock_commit(buffer, event); 6434 6435 if (written > 0) 6436 *fpos += written; 6437 6438 return written; 6439 } 6440 6441 static int tracing_clock_show(struct seq_file *m, void *v) 6442 { 6443 struct trace_array *tr = m->private; 6444 int i; 6445 6446 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) 6447 seq_printf(m, 6448 "%s%s%s%s", i ? " " : "", 6449 i == tr->clock_id ? "[" : "", trace_clocks[i].name, 6450 i == tr->clock_id ? "]" : ""); 6451 seq_putc(m, '\n'); 6452 6453 return 0; 6454 } 6455 6456 int tracing_set_clock(struct trace_array *tr, const char *clockstr) 6457 { 6458 int i; 6459 6460 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { 6461 if (strcmp(trace_clocks[i].name, clockstr) == 0) 6462 break; 6463 } 6464 if (i == ARRAY_SIZE(trace_clocks)) 6465 return -EINVAL; 6466 6467 mutex_lock(&trace_types_lock); 6468 6469 tr->clock_id = i; 6470 6471 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func); 6472 6473 /* 6474 * New clock may not be consistent with the previous clock. 6475 * Reset the buffer so that it doesn't have incomparable timestamps. 
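 *
 * Illustrative usage (assuming tracefs is mounted at
 * /sys/kernel/tracing): reading trace_clock lists the available clocks
 * with the current one in brackets, and
 *
 *	# echo global > /sys/kernel/tracing/trace_clock
 *
 * switches to the "global" clock, discarding the events already
 * recorded with the old clock.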
6476 */ 6477 tracing_reset_online_cpus(&tr->trace_buffer); 6478 6479 #ifdef CONFIG_TRACER_MAX_TRACE 6480 if (tr->max_buffer.buffer) 6481 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); 6482 tracing_reset_online_cpus(&tr->max_buffer); 6483 #endif 6484 6485 mutex_unlock(&trace_types_lock); 6486 6487 return 0; 6488 } 6489 6490 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, 6491 size_t cnt, loff_t *fpos) 6492 { 6493 struct seq_file *m = filp->private_data; 6494 struct trace_array *tr = m->private; 6495 char buf[64]; 6496 const char *clockstr; 6497 int ret; 6498 6499 if (cnt >= sizeof(buf)) 6500 return -EINVAL; 6501 6502 if (copy_from_user(buf, ubuf, cnt)) 6503 return -EFAULT; 6504 6505 buf[cnt] = 0; 6506 6507 clockstr = strstrip(buf); 6508 6509 ret = tracing_set_clock(tr, clockstr); 6510 if (ret) 6511 return ret; 6512 6513 *fpos += cnt; 6514 6515 return cnt; 6516 } 6517 6518 static int tracing_clock_open(struct inode *inode, struct file *file) 6519 { 6520 struct trace_array *tr = inode->i_private; 6521 int ret; 6522 6523 if (tracing_disabled) 6524 return -ENODEV; 6525 6526 if (trace_array_get(tr)) 6527 return -ENODEV; 6528 6529 ret = single_open(file, tracing_clock_show, inode->i_private); 6530 if (ret < 0) 6531 trace_array_put(tr); 6532 6533 return ret; 6534 } 6535 6536 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v) 6537 { 6538 struct trace_array *tr = m->private; 6539 6540 mutex_lock(&trace_types_lock); 6541 6542 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer)) 6543 seq_puts(m, "delta [absolute]\n"); 6544 else 6545 seq_puts(m, "[delta] absolute\n"); 6546 6547 mutex_unlock(&trace_types_lock); 6548 6549 return 0; 6550 } 6551 6552 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file) 6553 { 6554 struct trace_array *tr = inode->i_private; 6555 int ret; 6556 6557 if (tracing_disabled) 6558 return -ENODEV; 6559 6560 if (trace_array_get(tr)) 6561 return -ENODEV; 6562 6563 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); 6564 if (ret < 0) 6565 trace_array_put(tr); 6566 6567 return ret; 6568 } 6569 6570 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs) 6571 { 6572 int ret = 0; 6573 6574 mutex_lock(&trace_types_lock); 6575 6576 if (abs && tr->time_stamp_abs_ref++) 6577 goto out; 6578 6579 if (!abs) { 6580 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) { 6581 ret = -EINVAL; 6582 goto out; 6583 } 6584 6585 if (--tr->time_stamp_abs_ref) 6586 goto out; 6587 } 6588 6589 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs); 6590 6591 #ifdef CONFIG_TRACER_MAX_TRACE 6592 if (tr->max_buffer.buffer) 6593 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs); 6594 #endif 6595 out: 6596 mutex_unlock(&trace_types_lock); 6597 6598 return ret; 6599 } 6600 6601 struct ftrace_buffer_info { 6602 struct trace_iterator iter; 6603 void *spare; 6604 unsigned int spare_cpu; 6605 unsigned int read; 6606 }; 6607 6608 #ifdef CONFIG_TRACER_SNAPSHOT 6609 static int tracing_snapshot_open(struct inode *inode, struct file *file) 6610 { 6611 struct trace_array *tr = inode->i_private; 6612 struct trace_iterator *iter; 6613 struct seq_file *m; 6614 int ret = 0; 6615 6616 if (trace_array_get(tr) < 0) 6617 return -ENODEV; 6618 6619 if (file->f_mode & FMODE_READ) { 6620 iter = __tracing_open(inode, file, true); 6621 if (IS_ERR(iter)) 6622 ret = PTR_ERR(iter); 6623 } else { 6624 /* Writes still need the seq_file to hold the private data */ 6625 ret = -ENOMEM; 6626 m = 
kzalloc(sizeof(*m), GFP_KERNEL); 6627 if (!m) 6628 goto out; 6629 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 6630 if (!iter) { 6631 kfree(m); 6632 goto out; 6633 } 6634 ret = 0; 6635 6636 iter->tr = tr; 6637 iter->trace_buffer = &tr->max_buffer; 6638 iter->cpu_file = tracing_get_cpu(inode); 6639 m->private = iter; 6640 file->private_data = m; 6641 } 6642 out: 6643 if (ret < 0) 6644 trace_array_put(tr); 6645 6646 return ret; 6647 } 6648 6649 static ssize_t 6650 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, 6651 loff_t *ppos) 6652 { 6653 struct seq_file *m = filp->private_data; 6654 struct trace_iterator *iter = m->private; 6655 struct trace_array *tr = iter->tr; 6656 unsigned long val; 6657 int ret; 6658 6659 ret = tracing_update_buffers(); 6660 if (ret < 0) 6661 return ret; 6662 6663 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 6664 if (ret) 6665 return ret; 6666 6667 mutex_lock(&trace_types_lock); 6668 6669 if (tr->current_trace->use_max_tr) { 6670 ret = -EBUSY; 6671 goto out; 6672 } 6673 6674 arch_spin_lock(&tr->max_lock); 6675 if (tr->cond_snapshot) 6676 ret = -EBUSY; 6677 arch_spin_unlock(&tr->max_lock); 6678 if (ret) 6679 goto out; 6680 6681 switch (val) { 6682 case 0: 6683 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 6684 ret = -EINVAL; 6685 break; 6686 } 6687 if (tr->allocated_snapshot) 6688 free_snapshot(tr); 6689 break; 6690 case 1: 6691 /* Only allow per-cpu swap if the ring buffer supports it */ 6692 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP 6693 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 6694 ret = -EINVAL; 6695 break; 6696 } 6697 #endif 6698 if (!tr->allocated_snapshot) { 6699 ret = tracing_alloc_snapshot_instance(tr); 6700 if (ret < 0) 6701 break; 6702 } 6703 local_irq_disable(); 6704 /* Now, we're going to swap */ 6705 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 6706 update_max_tr(tr, current, smp_processor_id(), NULL); 6707 else 6708 update_max_tr_single(tr, current, iter->cpu_file); 6709 local_irq_enable(); 6710 break; 6711 default: 6712 if (tr->allocated_snapshot) { 6713 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 6714 tracing_reset_online_cpus(&tr->max_buffer); 6715 else 6716 tracing_reset(&tr->max_buffer, iter->cpu_file); 6717 } 6718 break; 6719 } 6720 6721 if (ret >= 0) { 6722 *ppos += cnt; 6723 ret = cnt; 6724 } 6725 out: 6726 mutex_unlock(&trace_types_lock); 6727 return ret; 6728 } 6729 6730 static int tracing_snapshot_release(struct inode *inode, struct file *file) 6731 { 6732 struct seq_file *m = file->private_data; 6733 int ret; 6734 6735 ret = tracing_release(inode, file); 6736 6737 if (file->f_mode & FMODE_READ) 6738 return ret; 6739 6740 /* If write only, the seq_file is just a stub */ 6741 if (m) 6742 kfree(m->private); 6743 kfree(m); 6744 6745 return 0; 6746 } 6747 6748 static int tracing_buffers_open(struct inode *inode, struct file *filp); 6749 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, 6750 size_t count, loff_t *ppos); 6751 static int tracing_buffers_release(struct inode *inode, struct file *file); 6752 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, 6753 struct pipe_inode_info *pipe, size_t len, unsigned int flags); 6754 6755 static int snapshot_raw_open(struct inode *inode, struct file *filp) 6756 { 6757 struct ftrace_buffer_info *info; 6758 int ret; 6759 6760 ret = tracing_buffers_open(inode, filp); 6761 if (ret < 0) 6762 return ret; 6763 6764 info = filp->private_data; 6765 6766 if (info->iter.trace->use_max_tr) { 6767 tracing_buffers_release(inode, filp); 
6768 return -EBUSY; 6769 } 6770 6771 info->iter.snapshot = true; 6772 info->iter.trace_buffer = &info->iter.tr->max_buffer; 6773 6774 return ret; 6775 } 6776 6777 #endif /* CONFIG_TRACER_SNAPSHOT */ 6778 6779 6780 static const struct file_operations tracing_thresh_fops = { 6781 .open = tracing_open_generic, 6782 .read = tracing_thresh_read, 6783 .write = tracing_thresh_write, 6784 .llseek = generic_file_llseek, 6785 }; 6786 6787 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) 6788 static const struct file_operations tracing_max_lat_fops = { 6789 .open = tracing_open_generic, 6790 .read = tracing_max_lat_read, 6791 .write = tracing_max_lat_write, 6792 .llseek = generic_file_llseek, 6793 }; 6794 #endif 6795 6796 static const struct file_operations set_tracer_fops = { 6797 .open = tracing_open_generic, 6798 .read = tracing_set_trace_read, 6799 .write = tracing_set_trace_write, 6800 .llseek = generic_file_llseek, 6801 }; 6802 6803 static const struct file_operations tracing_pipe_fops = { 6804 .open = tracing_open_pipe, 6805 .poll = tracing_poll_pipe, 6806 .read = tracing_read_pipe, 6807 .splice_read = tracing_splice_read_pipe, 6808 .release = tracing_release_pipe, 6809 .llseek = no_llseek, 6810 }; 6811 6812 static const struct file_operations tracing_entries_fops = { 6813 .open = tracing_open_generic_tr, 6814 .read = tracing_entries_read, 6815 .write = tracing_entries_write, 6816 .llseek = generic_file_llseek, 6817 .release = tracing_release_generic_tr, 6818 }; 6819 6820 static const struct file_operations tracing_total_entries_fops = { 6821 .open = tracing_open_generic_tr, 6822 .read = tracing_total_entries_read, 6823 .llseek = generic_file_llseek, 6824 .release = tracing_release_generic_tr, 6825 }; 6826 6827 static const struct file_operations tracing_free_buffer_fops = { 6828 .open = tracing_open_generic_tr, 6829 .write = tracing_free_buffer_write, 6830 .release = tracing_free_buffer_release, 6831 }; 6832 6833 static const struct file_operations tracing_mark_fops = { 6834 .open = tracing_open_generic_tr, 6835 .write = tracing_mark_write, 6836 .llseek = generic_file_llseek, 6837 .release = tracing_release_generic_tr, 6838 }; 6839 6840 static const struct file_operations tracing_mark_raw_fops = { 6841 .open = tracing_open_generic_tr, 6842 .write = tracing_mark_raw_write, 6843 .llseek = generic_file_llseek, 6844 .release = tracing_release_generic_tr, 6845 }; 6846 6847 static const struct file_operations trace_clock_fops = { 6848 .open = tracing_clock_open, 6849 .read = seq_read, 6850 .llseek = seq_lseek, 6851 .release = tracing_single_release_tr, 6852 .write = tracing_clock_write, 6853 }; 6854 6855 static const struct file_operations trace_time_stamp_mode_fops = { 6856 .open = tracing_time_stamp_mode_open, 6857 .read = seq_read, 6858 .llseek = seq_lseek, 6859 .release = tracing_single_release_tr, 6860 }; 6861 6862 #ifdef CONFIG_TRACER_SNAPSHOT 6863 static const struct file_operations snapshot_fops = { 6864 .open = tracing_snapshot_open, 6865 .read = seq_read, 6866 .write = tracing_snapshot_write, 6867 .llseek = tracing_lseek, 6868 .release = tracing_snapshot_release, 6869 }; 6870 6871 static const struct file_operations snapshot_raw_fops = { 6872 .open = snapshot_raw_open, 6873 .read = tracing_buffers_read, 6874 .release = tracing_buffers_release, 6875 .splice_read = tracing_buffers_splice_read, 6876 .llseek = no_llseek, 6877 }; 6878 6879 #endif /* CONFIG_TRACER_SNAPSHOT */ 6880 6881 #define TRACING_LOG_ERRS_MAX 8 6882 #define TRACING_LOG_LOC_MAX 128 6883 6884 #define 
CMD_PREFIX " Command: " 6885 6886 struct err_info { 6887 const char **errs; /* ptr to loc-specific array of err strings */ 6888 u8 type; /* index into errs -> specific err string */ 6889 u8 pos; /* MAX_FILTER_STR_VAL = 256 */ 6890 u64 ts; 6891 }; 6892 6893 struct tracing_log_err { 6894 struct list_head list; 6895 struct err_info info; 6896 char loc[TRACING_LOG_LOC_MAX]; /* err location */ 6897 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */ 6898 }; 6899 6900 static DEFINE_MUTEX(tracing_err_log_lock); 6901 6902 struct tracing_log_err *get_tracing_log_err(struct trace_array *tr) 6903 { 6904 struct tracing_log_err *err; 6905 6906 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) { 6907 err = kzalloc(sizeof(*err), GFP_KERNEL); 6908 if (!err) 6909 err = ERR_PTR(-ENOMEM); 6910 tr->n_err_log_entries++; 6911 6912 return err; 6913 } 6914 6915 err = list_first_entry(&tr->err_log, struct tracing_log_err, list); 6916 list_del(&err->list); 6917 6918 return err; 6919 } 6920 6921 /** 6922 * err_pos - find the position of a string within a command for error careting 6923 * @cmd: The tracing command that caused the error 6924 * @str: The string to position the caret at within @cmd 6925 * 6926 * Finds the position of the first occurence of @str within @cmd. The 6927 * return value can be passed to tracing_log_err() for caret placement 6928 * within @cmd. 6929 * 6930 * Returns the index within @cmd of the first occurence of @str or 0 6931 * if @str was not found. 6932 */ 6933 unsigned int err_pos(char *cmd, const char *str) 6934 { 6935 char *found; 6936 6937 if (WARN_ON(!strlen(cmd))) 6938 return 0; 6939 6940 found = strstr(cmd, str); 6941 if (found) 6942 return found - cmd; 6943 6944 return 0; 6945 } 6946 6947 /** 6948 * tracing_log_err - write an error to the tracing error log 6949 * @tr: The associated trace array for the error (NULL for top level array) 6950 * @loc: A string describing where the error occurred 6951 * @cmd: The tracing command that caused the error 6952 * @errs: The array of loc-specific static error strings 6953 * @type: The index into errs[], which produces the specific static err string 6954 * @pos: The position the caret should be placed in the cmd 6955 * 6956 * Writes an error into tracing/error_log of the form: 6957 * 6958 * <loc>: error: <text> 6959 * Command: <cmd> 6960 * ^ 6961 * 6962 * tracing/error_log is a small log file containing the last 6963 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated 6964 * unless there has been a tracing error, and the error log can be 6965 * cleared and have its memory freed by writing the empty string in 6966 * truncation mode to it i.e. echo > tracing/error_log. 6967 * 6968 * NOTE: the @errs array along with the @type param are used to 6969 * produce a static error string - this string is not copied and saved 6970 * when the error is logged - only a pointer to it is saved. See 6971 * existing callers for examples of how static strings are typically 6972 * defined for use with tracing_log_err(). 
6973 */ 6974 void tracing_log_err(struct trace_array *tr, 6975 const char *loc, const char *cmd, 6976 const char **errs, u8 type, u8 pos) 6977 { 6978 struct tracing_log_err *err; 6979 6980 if (!tr) 6981 tr = &global_trace; 6982 6983 mutex_lock(&tracing_err_log_lock); 6984 err = get_tracing_log_err(tr); 6985 if (PTR_ERR(err) == -ENOMEM) { 6986 mutex_unlock(&tracing_err_log_lock); 6987 return; 6988 } 6989 6990 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc); 6991 snprintf(err->cmd, MAX_FILTER_STR_VAL,"\n" CMD_PREFIX "%s\n", cmd); 6992 6993 err->info.errs = errs; 6994 err->info.type = type; 6995 err->info.pos = pos; 6996 err->info.ts = local_clock(); 6997 6998 list_add_tail(&err->list, &tr->err_log); 6999 mutex_unlock(&tracing_err_log_lock); 7000 } 7001 7002 static void clear_tracing_err_log(struct trace_array *tr) 7003 { 7004 struct tracing_log_err *err, *next; 7005 7006 mutex_lock(&tracing_err_log_lock); 7007 list_for_each_entry_safe(err, next, &tr->err_log, list) { 7008 list_del(&err->list); 7009 kfree(err); 7010 } 7011 7012 tr->n_err_log_entries = 0; 7013 mutex_unlock(&tracing_err_log_lock); 7014 } 7015 7016 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos) 7017 { 7018 struct trace_array *tr = m->private; 7019 7020 mutex_lock(&tracing_err_log_lock); 7021 7022 return seq_list_start(&tr->err_log, *pos); 7023 } 7024 7025 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos) 7026 { 7027 struct trace_array *tr = m->private; 7028 7029 return seq_list_next(v, &tr->err_log, pos); 7030 } 7031 7032 static void tracing_err_log_seq_stop(struct seq_file *m, void *v) 7033 { 7034 mutex_unlock(&tracing_err_log_lock); 7035 } 7036 7037 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos) 7038 { 7039 u8 i; 7040 7041 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++) 7042 seq_putc(m, ' '); 7043 for (i = 0; i < pos; i++) 7044 seq_putc(m, ' '); 7045 seq_puts(m, "^\n"); 7046 } 7047 7048 static int tracing_err_log_seq_show(struct seq_file *m, void *v) 7049 { 7050 struct tracing_log_err *err = v; 7051 7052 if (err) { 7053 const char *err_text = err->info.errs[err->info.type]; 7054 u64 sec = err->info.ts; 7055 u32 nsec; 7056 7057 nsec = do_div(sec, NSEC_PER_SEC); 7058 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000, 7059 err->loc, err_text); 7060 seq_printf(m, "%s", err->cmd); 7061 tracing_err_log_show_pos(m, err->info.pos); 7062 } 7063 7064 return 0; 7065 } 7066 7067 static const struct seq_operations tracing_err_log_seq_ops = { 7068 .start = tracing_err_log_seq_start, 7069 .next = tracing_err_log_seq_next, 7070 .stop = tracing_err_log_seq_stop, 7071 .show = tracing_err_log_seq_show 7072 }; 7073 7074 static int tracing_err_log_open(struct inode *inode, struct file *file) 7075 { 7076 struct trace_array *tr = inode->i_private; 7077 int ret = 0; 7078 7079 if (trace_array_get(tr) < 0) 7080 return -ENODEV; 7081 7082 /* If this file was opened for write, then erase contents */ 7083 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) 7084 clear_tracing_err_log(tr); 7085 7086 if (file->f_mode & FMODE_READ) { 7087 ret = seq_open(file, &tracing_err_log_seq_ops); 7088 if (!ret) { 7089 struct seq_file *m = file->private_data; 7090 m->private = tr; 7091 } else { 7092 trace_array_put(tr); 7093 } 7094 } 7095 return ret; 7096 } 7097 7098 static ssize_t tracing_err_log_write(struct file *file, 7099 const char __user *buffer, 7100 size_t count, loff_t *ppos) 7101 { 7102 return count; 7103 } 7104 7105 static const struct file_operations 
tracing_err_log_fops = { 7106 .open = tracing_err_log_open, 7107 .write = tracing_err_log_write, 7108 .read = seq_read, 7109 .llseek = seq_lseek, 7110 .release = tracing_release_generic_tr, 7111 }; 7112 7113 static int tracing_buffers_open(struct inode *inode, struct file *filp) 7114 { 7115 struct trace_array *tr = inode->i_private; 7116 struct ftrace_buffer_info *info; 7117 int ret; 7118 7119 if (tracing_disabled) 7120 return -ENODEV; 7121 7122 if (trace_array_get(tr) < 0) 7123 return -ENODEV; 7124 7125 info = kzalloc(sizeof(*info), GFP_KERNEL); 7126 if (!info) { 7127 trace_array_put(tr); 7128 return -ENOMEM; 7129 } 7130 7131 mutex_lock(&trace_types_lock); 7132 7133 info->iter.tr = tr; 7134 info->iter.cpu_file = tracing_get_cpu(inode); 7135 info->iter.trace = tr->current_trace; 7136 info->iter.trace_buffer = &tr->trace_buffer; 7137 info->spare = NULL; 7138 /* Force reading ring buffer for first read */ 7139 info->read = (unsigned int)-1; 7140 7141 filp->private_data = info; 7142 7143 tr->current_trace->ref++; 7144 7145 mutex_unlock(&trace_types_lock); 7146 7147 ret = nonseekable_open(inode, filp); 7148 if (ret < 0) 7149 trace_array_put(tr); 7150 7151 return ret; 7152 } 7153 7154 static __poll_t 7155 tracing_buffers_poll(struct file *filp, poll_table *poll_table) 7156 { 7157 struct ftrace_buffer_info *info = filp->private_data; 7158 struct trace_iterator *iter = &info->iter; 7159 7160 return trace_poll(iter, filp, poll_table); 7161 } 7162 7163 static ssize_t 7164 tracing_buffers_read(struct file *filp, char __user *ubuf, 7165 size_t count, loff_t *ppos) 7166 { 7167 struct ftrace_buffer_info *info = filp->private_data; 7168 struct trace_iterator *iter = &info->iter; 7169 ssize_t ret = 0; 7170 ssize_t size; 7171 7172 if (!count) 7173 return 0; 7174 7175 #ifdef CONFIG_TRACER_MAX_TRACE 7176 if (iter->snapshot && iter->tr->current_trace->use_max_tr) 7177 return -EBUSY; 7178 #endif 7179 7180 if (!info->spare) { 7181 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, 7182 iter->cpu_file); 7183 if (IS_ERR(info->spare)) { 7184 ret = PTR_ERR(info->spare); 7185 info->spare = NULL; 7186 } else { 7187 info->spare_cpu = iter->cpu_file; 7188 } 7189 } 7190 if (!info->spare) 7191 return ret; 7192 7193 /* Do we have previous read data to read? 
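 *
 * info->read is the read offset into the spare page; it is initialized
 * to (unsigned int)-1 so the very first read refills the page, while a
 * value below PAGE_SIZE means part of the last page is still unread.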
*/ 7194 if (info->read < PAGE_SIZE) 7195 goto read; 7196 7197 again: 7198 trace_access_lock(iter->cpu_file); 7199 ret = ring_buffer_read_page(iter->trace_buffer->buffer, 7200 &info->spare, 7201 count, 7202 iter->cpu_file, 0); 7203 trace_access_unlock(iter->cpu_file); 7204 7205 if (ret < 0) { 7206 if (trace_empty(iter)) { 7207 if ((filp->f_flags & O_NONBLOCK)) 7208 return -EAGAIN; 7209 7210 ret = wait_on_pipe(iter, 0); 7211 if (ret) 7212 return ret; 7213 7214 goto again; 7215 } 7216 return 0; 7217 } 7218 7219 info->read = 0; 7220 read: 7221 size = PAGE_SIZE - info->read; 7222 if (size > count) 7223 size = count; 7224 7225 ret = copy_to_user(ubuf, info->spare + info->read, size); 7226 if (ret == size) 7227 return -EFAULT; 7228 7229 size -= ret; 7230 7231 *ppos += size; 7232 info->read += size; 7233 7234 return size; 7235 } 7236 7237 static int tracing_buffers_release(struct inode *inode, struct file *file) 7238 { 7239 struct ftrace_buffer_info *info = file->private_data; 7240 struct trace_iterator *iter = &info->iter; 7241 7242 mutex_lock(&trace_types_lock); 7243 7244 iter->tr->current_trace->ref--; 7245 7246 __trace_array_put(iter->tr); 7247 7248 if (info->spare) 7249 ring_buffer_free_read_page(iter->trace_buffer->buffer, 7250 info->spare_cpu, info->spare); 7251 kfree(info); 7252 7253 mutex_unlock(&trace_types_lock); 7254 7255 return 0; 7256 } 7257 7258 struct buffer_ref { 7259 struct ring_buffer *buffer; 7260 void *page; 7261 int cpu; 7262 int ref; 7263 }; 7264 7265 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, 7266 struct pipe_buffer *buf) 7267 { 7268 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 7269 7270 if (--ref->ref) 7271 return; 7272 7273 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); 7274 kfree(ref); 7275 buf->private = 0; 7276 } 7277 7278 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, 7279 struct pipe_buffer *buf) 7280 { 7281 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 7282 7283 ref->ref++; 7284 } 7285 7286 /* Pipe buffer operations for a buffer. */ 7287 static const struct pipe_buf_operations buffer_pipe_buf_ops = { 7288 .confirm = generic_pipe_buf_confirm, 7289 .release = buffer_pipe_buf_release, 7290 .steal = generic_pipe_buf_steal, 7291 .get = buffer_pipe_buf_get, 7292 }; 7293 7294 /* 7295 * Callback from splice_to_pipe(), if we need to release some pages 7296 * at the end of the spd in case we error'ed out in filling the pipe. 
7297 */ 7298 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) 7299 { 7300 struct buffer_ref *ref = 7301 (struct buffer_ref *)spd->partial[i].private; 7302 7303 if (--ref->ref) 7304 return; 7305 7306 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); 7307 kfree(ref); 7308 spd->partial[i].private = 0; 7309 } 7310 7311 static ssize_t 7312 tracing_buffers_splice_read(struct file *file, loff_t *ppos, 7313 struct pipe_inode_info *pipe, size_t len, 7314 unsigned int flags) 7315 { 7316 struct ftrace_buffer_info *info = file->private_data; 7317 struct trace_iterator *iter = &info->iter; 7318 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 7319 struct page *pages_def[PIPE_DEF_BUFFERS]; 7320 struct splice_pipe_desc spd = { 7321 .pages = pages_def, 7322 .partial = partial_def, 7323 .nr_pages_max = PIPE_DEF_BUFFERS, 7324 .ops = &buffer_pipe_buf_ops, 7325 .spd_release = buffer_spd_release, 7326 }; 7327 struct buffer_ref *ref; 7328 int entries, i; 7329 ssize_t ret = 0; 7330 7331 #ifdef CONFIG_TRACER_MAX_TRACE 7332 if (iter->snapshot && iter->tr->current_trace->use_max_tr) 7333 return -EBUSY; 7334 #endif 7335 7336 if (*ppos & (PAGE_SIZE - 1)) 7337 return -EINVAL; 7338 7339 if (len & (PAGE_SIZE - 1)) { 7340 if (len < PAGE_SIZE) 7341 return -EINVAL; 7342 len &= PAGE_MASK; 7343 } 7344 7345 if (splice_grow_spd(pipe, &spd)) 7346 return -ENOMEM; 7347 7348 again: 7349 trace_access_lock(iter->cpu_file); 7350 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); 7351 7352 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) { 7353 struct page *page; 7354 int r; 7355 7356 ref = kzalloc(sizeof(*ref), GFP_KERNEL); 7357 if (!ref) { 7358 ret = -ENOMEM; 7359 break; 7360 } 7361 7362 ref->ref = 1; 7363 ref->buffer = iter->trace_buffer->buffer; 7364 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); 7365 if (IS_ERR(ref->page)) { 7366 ret = PTR_ERR(ref->page); 7367 ref->page = NULL; 7368 kfree(ref); 7369 break; 7370 } 7371 ref->cpu = iter->cpu_file; 7372 7373 r = ring_buffer_read_page(ref->buffer, &ref->page, 7374 len, iter->cpu_file, 1); 7375 if (r < 0) { 7376 ring_buffer_free_read_page(ref->buffer, ref->cpu, 7377 ref->page); 7378 kfree(ref); 7379 break; 7380 } 7381 7382 page = virt_to_page(ref->page); 7383 7384 spd.pages[i] = page; 7385 spd.partial[i].len = PAGE_SIZE; 7386 spd.partial[i].offset = 0; 7387 spd.partial[i].private = (unsigned long)ref; 7388 spd.nr_pages++; 7389 *ppos += PAGE_SIZE; 7390 7391 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); 7392 } 7393 7394 trace_access_unlock(iter->cpu_file); 7395 spd.nr_pages = i; 7396 7397 /* did we read anything? 
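 *
 * If no page was filled and the caller did not ask for non-blocking
 * I/O, wait until the ring buffer holds at least tr->buffer_percent
 * percent of data and then try again.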
*/ 7398 if (!spd.nr_pages) { 7399 if (ret) 7400 goto out; 7401 7402 ret = -EAGAIN; 7403 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) 7404 goto out; 7405 7406 ret = wait_on_pipe(iter, iter->tr->buffer_percent); 7407 if (ret) 7408 goto out; 7409 7410 goto again; 7411 } 7412 7413 ret = splice_to_pipe(pipe, &spd); 7414 out: 7415 splice_shrink_spd(&spd); 7416 7417 return ret; 7418 } 7419 7420 static const struct file_operations tracing_buffers_fops = { 7421 .open = tracing_buffers_open, 7422 .read = tracing_buffers_read, 7423 .poll = tracing_buffers_poll, 7424 .release = tracing_buffers_release, 7425 .splice_read = tracing_buffers_splice_read, 7426 .llseek = no_llseek, 7427 }; 7428 7429 static ssize_t 7430 tracing_stats_read(struct file *filp, char __user *ubuf, 7431 size_t count, loff_t *ppos) 7432 { 7433 struct inode *inode = file_inode(filp); 7434 struct trace_array *tr = inode->i_private; 7435 struct trace_buffer *trace_buf = &tr->trace_buffer; 7436 int cpu = tracing_get_cpu(inode); 7437 struct trace_seq *s; 7438 unsigned long cnt; 7439 unsigned long long t; 7440 unsigned long usec_rem; 7441 7442 s = kmalloc(sizeof(*s), GFP_KERNEL); 7443 if (!s) 7444 return -ENOMEM; 7445 7446 trace_seq_init(s); 7447 7448 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu); 7449 trace_seq_printf(s, "entries: %ld\n", cnt); 7450 7451 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu); 7452 trace_seq_printf(s, "overrun: %ld\n", cnt); 7453 7454 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); 7455 trace_seq_printf(s, "commit overrun: %ld\n", cnt); 7456 7457 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); 7458 trace_seq_printf(s, "bytes: %ld\n", cnt); 7459 7460 if (trace_clocks[tr->clock_id].in_ns) { 7461 /* local or global for trace_clock */ 7462 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 7463 usec_rem = do_div(t, USEC_PER_SEC); 7464 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", 7465 t, usec_rem); 7466 7467 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu)); 7468 usec_rem = do_div(t, USEC_PER_SEC); 7469 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); 7470 } else { 7471 /* counter or tsc mode for trace_clock */ 7472 trace_seq_printf(s, "oldest event ts: %llu\n", 7473 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 7474 7475 trace_seq_printf(s, "now ts: %llu\n", 7476 ring_buffer_time_stamp(trace_buf->buffer, cpu)); 7477 } 7478 7479 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); 7480 trace_seq_printf(s, "dropped events: %ld\n", cnt); 7481 7482 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); 7483 trace_seq_printf(s, "read events: %ld\n", cnt); 7484 7485 count = simple_read_from_buffer(ubuf, count, ppos, 7486 s->buffer, trace_seq_used(s)); 7487 7488 kfree(s); 7489 7490 return count; 7491 } 7492 7493 static const struct file_operations tracing_stats_fops = { 7494 .open = tracing_open_generic_tr, 7495 .read = tracing_stats_read, 7496 .llseek = generic_file_llseek, 7497 .release = tracing_release_generic_tr, 7498 }; 7499 7500 #ifdef CONFIG_DYNAMIC_FTRACE 7501 7502 static ssize_t 7503 tracing_read_dyn_info(struct file *filp, char __user *ubuf, 7504 size_t cnt, loff_t *ppos) 7505 { 7506 unsigned long *p = filp->private_data; 7507 char buf[64]; /* Not too big for a shallow stack */ 7508 int r; 7509 7510 r = scnprintf(buf, 63, "%ld", *p); 7511 buf[r++] = '\n'; 7512 7513 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 7514 } 7515 7516 static const struct file_operations 
tracing_dyn_info_fops = { 7517 .open = tracing_open_generic, 7518 .read = tracing_read_dyn_info, 7519 .llseek = generic_file_llseek, 7520 }; 7521 #endif /* CONFIG_DYNAMIC_FTRACE */ 7522 7523 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) 7524 static void 7525 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, 7526 struct trace_array *tr, struct ftrace_probe_ops *ops, 7527 void *data) 7528 { 7529 tracing_snapshot_instance(tr); 7530 } 7531 7532 static void 7533 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, 7534 struct trace_array *tr, struct ftrace_probe_ops *ops, 7535 void *data) 7536 { 7537 struct ftrace_func_mapper *mapper = data; 7538 long *count = NULL; 7539 7540 if (mapper) 7541 count = (long *)ftrace_func_mapper_find_ip(mapper, ip); 7542 7543 if (count) { 7544 7545 if (*count <= 0) 7546 return; 7547 7548 (*count)--; 7549 } 7550 7551 tracing_snapshot_instance(tr); 7552 } 7553 7554 static int 7555 ftrace_snapshot_print(struct seq_file *m, unsigned long ip, 7556 struct ftrace_probe_ops *ops, void *data) 7557 { 7558 struct ftrace_func_mapper *mapper = data; 7559 long *count = NULL; 7560 7561 seq_printf(m, "%ps:", (void *)ip); 7562 7563 seq_puts(m, "snapshot"); 7564 7565 if (mapper) 7566 count = (long *)ftrace_func_mapper_find_ip(mapper, ip); 7567 7568 if (count) 7569 seq_printf(m, ":count=%ld\n", *count); 7570 else 7571 seq_puts(m, ":unlimited\n"); 7572 7573 return 0; 7574 } 7575 7576 static int 7577 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, 7578 unsigned long ip, void *init_data, void **data) 7579 { 7580 struct ftrace_func_mapper *mapper = *data; 7581 7582 if (!mapper) { 7583 mapper = allocate_ftrace_func_mapper(); 7584 if (!mapper) 7585 return -ENOMEM; 7586 *data = mapper; 7587 } 7588 7589 return ftrace_func_mapper_add_ip(mapper, ip, init_data); 7590 } 7591 7592 static void 7593 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, 7594 unsigned long ip, void *data) 7595 { 7596 struct ftrace_func_mapper *mapper = data; 7597 7598 if (!ip) { 7599 if (!mapper) 7600 return; 7601 free_ftrace_func_mapper(mapper, NULL); 7602 return; 7603 } 7604 7605 ftrace_func_mapper_remove_ip(mapper, ip); 7606 } 7607 7608 static struct ftrace_probe_ops snapshot_probe_ops = { 7609 .func = ftrace_snapshot, 7610 .print = ftrace_snapshot_print, 7611 }; 7612 7613 static struct ftrace_probe_ops snapshot_count_probe_ops = { 7614 .func = ftrace_count_snapshot, 7615 .print = ftrace_snapshot_print, 7616 .init = ftrace_snapshot_init, 7617 .free = ftrace_snapshot_free, 7618 }; 7619 7620 static int 7621 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, 7622 char *glob, char *cmd, char *param, int enable) 7623 { 7624 struct ftrace_probe_ops *ops; 7625 void *count = (void *)-1; 7626 char *number; 7627 int ret; 7628 7629 if (!tr) 7630 return -ENODEV; 7631 7632 /* hash funcs only work with set_ftrace_filter */ 7633 if (!enable) 7634 return -EINVAL; 7635 7636 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; 7637 7638 if (glob[0] == '!') 7639 return unregister_ftrace_function_probe_func(glob+1, tr, ops); 7640 7641 if (!param) 7642 goto out_reg; 7643 7644 number = strsep(¶m, ":"); 7645 7646 if (!strlen(number)) 7647 goto out_reg; 7648 7649 /* 7650 * We use the callback data field (which is a pointer) 7651 * as our counter. 
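 *
 * Illustrative usage (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *	# echo 'schedule:snapshot:5' > /sys/kernel/tracing/set_ftrace_filter
 *
 * arms the probe so that only the first five hits of schedule() take a
 * snapshot, while
 *
 *	# echo '!schedule:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 *
 * removes it again.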
7652 */ 7653 ret = kstrtoul(number, 0, (unsigned long *)&count); 7654 if (ret) 7655 return ret; 7656 7657 out_reg: 7658 ret = tracing_alloc_snapshot_instance(tr); 7659 if (ret < 0) 7660 goto out; 7661 7662 ret = register_ftrace_function_probe(glob, tr, ops, count); 7663 7664 out: 7665 return ret < 0 ? ret : 0; 7666 } 7667 7668 static struct ftrace_func_command ftrace_snapshot_cmd = { 7669 .name = "snapshot", 7670 .func = ftrace_trace_snapshot_callback, 7671 }; 7672 7673 static __init int register_snapshot_cmd(void) 7674 { 7675 return register_ftrace_command(&ftrace_snapshot_cmd); 7676 } 7677 #else 7678 static inline __init int register_snapshot_cmd(void) { return 0; } 7679 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ 7680 7681 static struct dentry *tracing_get_dentry(struct trace_array *tr) 7682 { 7683 if (WARN_ON(!tr->dir)) 7684 return ERR_PTR(-ENODEV); 7685 7686 /* Top directory uses NULL as the parent */ 7687 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) 7688 return NULL; 7689 7690 /* All sub buffers have a descriptor */ 7691 return tr->dir; 7692 } 7693 7694 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) 7695 { 7696 struct dentry *d_tracer; 7697 7698 if (tr->percpu_dir) 7699 return tr->percpu_dir; 7700 7701 d_tracer = tracing_get_dentry(tr); 7702 if (IS_ERR(d_tracer)) 7703 return NULL; 7704 7705 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); 7706 7707 WARN_ONCE(!tr->percpu_dir, 7708 "Could not create tracefs directory 'per_cpu/%d'\n", cpu); 7709 7710 return tr->percpu_dir; 7711 } 7712 7713 static struct dentry * 7714 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, 7715 void *data, long cpu, const struct file_operations *fops) 7716 { 7717 struct dentry *ret = trace_create_file(name, mode, parent, data, fops); 7718 7719 if (ret) /* See tracing_get_cpu() */ 7720 d_inode(ret)->i_cdev = (void *)(cpu + 1); 7721 return ret; 7722 } 7723 7724 static void 7725 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) 7726 { 7727 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); 7728 struct dentry *d_cpu; 7729 char cpu_dir[30]; /* 30 characters should be more than enough */ 7730 7731 if (!d_percpu) 7732 return; 7733 7734 snprintf(cpu_dir, 30, "cpu%ld", cpu); 7735 d_cpu = tracefs_create_dir(cpu_dir, d_percpu); 7736 if (!d_cpu) { 7737 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir); 7738 return; 7739 } 7740 7741 /* per cpu trace_pipe */ 7742 trace_create_cpu_file("trace_pipe", 0444, d_cpu, 7743 tr, cpu, &tracing_pipe_fops); 7744 7745 /* per cpu trace */ 7746 trace_create_cpu_file("trace", 0644, d_cpu, 7747 tr, cpu, &tracing_fops); 7748 7749 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu, 7750 tr, cpu, &tracing_buffers_fops); 7751 7752 trace_create_cpu_file("stats", 0444, d_cpu, 7753 tr, cpu, &tracing_stats_fops); 7754 7755 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu, 7756 tr, cpu, &tracing_entries_fops); 7757 7758 #ifdef CONFIG_TRACER_SNAPSHOT 7759 trace_create_cpu_file("snapshot", 0644, d_cpu, 7760 tr, cpu, &snapshot_fops); 7761 7762 trace_create_cpu_file("snapshot_raw", 0444, d_cpu, 7763 tr, cpu, &snapshot_raw_fops); 7764 #endif 7765 } 7766 7767 #ifdef CONFIG_FTRACE_SELFTEST 7768 /* Let selftest have access to static functions in this file */ 7769 #include "trace_selftest.c" 7770 #endif 7771 7772 static ssize_t 7773 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, 7774 loff_t *ppos) 7775 { 7776 struct trace_option_dentry *topt = filp->private_data; 
7777 char *buf; 7778 7779 if (topt->flags->val & topt->opt->bit) 7780 buf = "1\n"; 7781 else 7782 buf = "0\n"; 7783 7784 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 7785 } 7786 7787 static ssize_t 7788 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, 7789 loff_t *ppos) 7790 { 7791 struct trace_option_dentry *topt = filp->private_data; 7792 unsigned long val; 7793 int ret; 7794 7795 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 7796 if (ret) 7797 return ret; 7798 7799 if (val != 0 && val != 1) 7800 return -EINVAL; 7801 7802 if (!!(topt->flags->val & topt->opt->bit) != val) { 7803 mutex_lock(&trace_types_lock); 7804 ret = __set_tracer_option(topt->tr, topt->flags, 7805 topt->opt, !val); 7806 mutex_unlock(&trace_types_lock); 7807 if (ret) 7808 return ret; 7809 } 7810 7811 *ppos += cnt; 7812 7813 return cnt; 7814 } 7815 7816 7817 static const struct file_operations trace_options_fops = { 7818 .open = tracing_open_generic, 7819 .read = trace_options_read, 7820 .write = trace_options_write, 7821 .llseek = generic_file_llseek, 7822 }; 7823 7824 /* 7825 * In order to pass in both the trace_array descriptor as well as the index 7826 * to the flag that the trace option file represents, the trace_array 7827 * has a character array of trace_flags_index[], which holds the index 7828 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc. 7829 * The address of this character array is passed to the flag option file 7830 * read/write callbacks. 7831 * 7832 * In order to extract both the index and the trace_array descriptor, 7833 * get_tr_index() uses the following algorithm. 7834 * 7835 * idx = *ptr; 7836 * 7837 * As the pointer itself contains the address of the index (remember 7838 * index[1] == 1). 7839 * 7840 * Then to get the trace_array descriptor, by subtracting that index 7841 * from the ptr, we get to the start of the index itself. 7842 * 7843 * ptr - idx == &index[0] 7844 * 7845 * Then a simple container_of() from that pointer gets us to the 7846 * trace_array descriptor. 
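 *
 * Worked example (sketch): if data points at trace_flags_index[3],
 * then idx == *(unsigned char *)data == 3, data - idx is
 * &trace_flags_index[0], and container_of() on that address yields
 * the enclosing trace_array.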
7847 */ 7848 static void get_tr_index(void *data, struct trace_array **ptr, 7849 unsigned int *pindex) 7850 { 7851 *pindex = *(unsigned char *)data; 7852 7853 *ptr = container_of(data - *pindex, struct trace_array, 7854 trace_flags_index); 7855 } 7856 7857 static ssize_t 7858 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, 7859 loff_t *ppos) 7860 { 7861 void *tr_index = filp->private_data; 7862 struct trace_array *tr; 7863 unsigned int index; 7864 char *buf; 7865 7866 get_tr_index(tr_index, &tr, &index); 7867 7868 if (tr->trace_flags & (1 << index)) 7869 buf = "1\n"; 7870 else 7871 buf = "0\n"; 7872 7873 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 7874 } 7875 7876 static ssize_t 7877 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, 7878 loff_t *ppos) 7879 { 7880 void *tr_index = filp->private_data; 7881 struct trace_array *tr; 7882 unsigned int index; 7883 unsigned long val; 7884 int ret; 7885 7886 get_tr_index(tr_index, &tr, &index); 7887 7888 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 7889 if (ret) 7890 return ret; 7891 7892 if (val != 0 && val != 1) 7893 return -EINVAL; 7894 7895 mutex_lock(&trace_types_lock); 7896 ret = set_tracer_flag(tr, 1 << index, val); 7897 mutex_unlock(&trace_types_lock); 7898 7899 if (ret < 0) 7900 return ret; 7901 7902 *ppos += cnt; 7903 7904 return cnt; 7905 } 7906 7907 static const struct file_operations trace_options_core_fops = { 7908 .open = tracing_open_generic, 7909 .read = trace_options_core_read, 7910 .write = trace_options_core_write, 7911 .llseek = generic_file_llseek, 7912 }; 7913 7914 struct dentry *trace_create_file(const char *name, 7915 umode_t mode, 7916 struct dentry *parent, 7917 void *data, 7918 const struct file_operations *fops) 7919 { 7920 struct dentry *ret; 7921 7922 ret = tracefs_create_file(name, mode, parent, data, fops); 7923 if (!ret) 7924 pr_warn("Could not create tracefs '%s' entry\n", name); 7925 7926 return ret; 7927 } 7928 7929 7930 static struct dentry *trace_options_init_dentry(struct trace_array *tr) 7931 { 7932 struct dentry *d_tracer; 7933 7934 if (tr->options) 7935 return tr->options; 7936 7937 d_tracer = tracing_get_dentry(tr); 7938 if (IS_ERR(d_tracer)) 7939 return NULL; 7940 7941 tr->options = tracefs_create_dir("options", d_tracer); 7942 if (!tr->options) { 7943 pr_warn("Could not create tracefs directory 'options'\n"); 7944 return NULL; 7945 } 7946 7947 return tr->options; 7948 } 7949 7950 static void 7951 create_trace_option_file(struct trace_array *tr, 7952 struct trace_option_dentry *topt, 7953 struct tracer_flags *flags, 7954 struct tracer_opt *opt) 7955 { 7956 struct dentry *t_options; 7957 7958 t_options = trace_options_init_dentry(tr); 7959 if (!t_options) 7960 return; 7961 7962 topt->flags = flags; 7963 topt->opt = opt; 7964 topt->tr = tr; 7965 7966 topt->entry = trace_create_file(opt->name, 0644, t_options, topt, 7967 &trace_options_fops); 7968 7969 } 7970 7971 static void 7972 create_trace_option_files(struct trace_array *tr, struct tracer *tracer) 7973 { 7974 struct trace_option_dentry *topts; 7975 struct trace_options *tr_topts; 7976 struct tracer_flags *flags; 7977 struct tracer_opt *opts; 7978 int cnt; 7979 int i; 7980 7981 if (!tracer) 7982 return; 7983 7984 flags = tracer->flags; 7985 7986 if (!flags || !flags->opts) 7987 return; 7988 7989 /* 7990 * If this is an instance, only create flags for tracers 7991 * the instance may have. 
7992 */ 7993 if (!trace_ok_for_array(tracer, tr)) 7994 return; 7995 7996 for (i = 0; i < tr->nr_topts; i++) { 7997 /* Make sure there's no duplicate flags. */ 7998 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) 7999 return; 8000 } 8001 8002 opts = flags->opts; 8003 8004 for (cnt = 0; opts[cnt].name; cnt++) 8005 ; 8006 8007 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); 8008 if (!topts) 8009 return; 8010 8011 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), 8012 GFP_KERNEL); 8013 if (!tr_topts) { 8014 kfree(topts); 8015 return; 8016 } 8017 8018 tr->topts = tr_topts; 8019 tr->topts[tr->nr_topts].tracer = tracer; 8020 tr->topts[tr->nr_topts].topts = topts; 8021 tr->nr_topts++; 8022 8023 for (cnt = 0; opts[cnt].name; cnt++) { 8024 create_trace_option_file(tr, &topts[cnt], flags, 8025 &opts[cnt]); 8026 WARN_ONCE(topts[cnt].entry == NULL, 8027 "Failed to create trace option: %s", 8028 opts[cnt].name); 8029 } 8030 } 8031 8032 static struct dentry * 8033 create_trace_option_core_file(struct trace_array *tr, 8034 const char *option, long index) 8035 { 8036 struct dentry *t_options; 8037 8038 t_options = trace_options_init_dentry(tr); 8039 if (!t_options) 8040 return NULL; 8041 8042 return trace_create_file(option, 0644, t_options, 8043 (void *)&tr->trace_flags_index[index], 8044 &trace_options_core_fops); 8045 } 8046 8047 static void create_trace_options_dir(struct trace_array *tr) 8048 { 8049 struct dentry *t_options; 8050 bool top_level = tr == &global_trace; 8051 int i; 8052 8053 t_options = trace_options_init_dentry(tr); 8054 if (!t_options) 8055 return; 8056 8057 for (i = 0; trace_options[i]; i++) { 8058 if (top_level || 8059 !((1 << i) & TOP_LEVEL_TRACE_FLAGS)) 8060 create_trace_option_core_file(tr, trace_options[i], i); 8061 } 8062 } 8063 8064 static ssize_t 8065 rb_simple_read(struct file *filp, char __user *ubuf, 8066 size_t cnt, loff_t *ppos) 8067 { 8068 struct trace_array *tr = filp->private_data; 8069 char buf[64]; 8070 int r; 8071 8072 r = tracer_tracing_is_on(tr); 8073 r = sprintf(buf, "%d\n", r); 8074 8075 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 8076 } 8077 8078 static ssize_t 8079 rb_simple_write(struct file *filp, const char __user *ubuf, 8080 size_t cnt, loff_t *ppos) 8081 { 8082 struct trace_array *tr = filp->private_data; 8083 struct ring_buffer *buffer = tr->trace_buffer.buffer; 8084 unsigned long val; 8085 int ret; 8086 8087 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 8088 if (ret) 8089 return ret; 8090 8091 if (buffer) { 8092 mutex_lock(&trace_types_lock); 8093 if (!!val == tracer_tracing_is_on(tr)) { 8094 val = 0; /* do nothing */ 8095 } else if (val) { 8096 tracer_tracing_on(tr); 8097 if (tr->current_trace->start) 8098 tr->current_trace->start(tr); 8099 } else { 8100 tracer_tracing_off(tr); 8101 if (tr->current_trace->stop) 8102 tr->current_trace->stop(tr); 8103 } 8104 mutex_unlock(&trace_types_lock); 8105 } 8106 8107 (*ppos)++; 8108 8109 return cnt; 8110 } 8111 8112 static const struct file_operations rb_simple_fops = { 8113 .open = tracing_open_generic_tr, 8114 .read = rb_simple_read, 8115 .write = rb_simple_write, 8116 .release = tracing_release_generic_tr, 8117 .llseek = default_llseek, 8118 }; 8119 8120 static ssize_t 8121 buffer_percent_read(struct file *filp, char __user *ubuf, 8122 size_t cnt, loff_t *ppos) 8123 { 8124 struct trace_array *tr = filp->private_data; 8125 char buf[64]; 8126 int r; 8127 8128 r = tr->buffer_percent; 8129 r = sprintf(buf, "%d\n", r); 8130 8131 return 
simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 8132 } 8133 8134 static ssize_t 8135 buffer_percent_write(struct file *filp, const char __user *ubuf, 8136 size_t cnt, loff_t *ppos) 8137 { 8138 struct trace_array *tr = filp->private_data; 8139 unsigned long val; 8140 int ret; 8141 8142 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 8143 if (ret) 8144 return ret; 8145 8146 if (val > 100) 8147 return -EINVAL; 8148 8149 if (!val) 8150 val = 1; 8151 8152 tr->buffer_percent = val; 8153 8154 (*ppos)++; 8155 8156 return cnt; 8157 } 8158 8159 static const struct file_operations buffer_percent_fops = { 8160 .open = tracing_open_generic_tr, 8161 .read = buffer_percent_read, 8162 .write = buffer_percent_write, 8163 .release = tracing_release_generic_tr, 8164 .llseek = default_llseek, 8165 }; 8166 8167 struct dentry *trace_instance_dir; 8168 8169 static void 8170 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); 8171 8172 static int 8173 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) 8174 { 8175 enum ring_buffer_flags rb_flags; 8176 8177 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; 8178 8179 buf->tr = tr; 8180 8181 buf->buffer = ring_buffer_alloc(size, rb_flags); 8182 if (!buf->buffer) 8183 return -ENOMEM; 8184 8185 buf->data = alloc_percpu(struct trace_array_cpu); 8186 if (!buf->data) { 8187 ring_buffer_free(buf->buffer); 8188 buf->buffer = NULL; 8189 return -ENOMEM; 8190 } 8191 8192 /* Allocate the first page for all buffers */ 8193 set_buffer_entries(&tr->trace_buffer, 8194 ring_buffer_size(tr->trace_buffer.buffer, 0)); 8195 8196 return 0; 8197 } 8198 8199 static int allocate_trace_buffers(struct trace_array *tr, int size) 8200 { 8201 int ret; 8202 8203 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size); 8204 if (ret) 8205 return ret; 8206 8207 #ifdef CONFIG_TRACER_MAX_TRACE 8208 ret = allocate_trace_buffer(tr, &tr->max_buffer, 8209 allocate_snapshot ? size : 1); 8210 if (WARN_ON(ret)) { 8211 ring_buffer_free(tr->trace_buffer.buffer); 8212 tr->trace_buffer.buffer = NULL; 8213 free_percpu(tr->trace_buffer.data); 8214 tr->trace_buffer.data = NULL; 8215 return -ENOMEM; 8216 } 8217 tr->allocated_snapshot = allocate_snapshot; 8218 8219 /* 8220 * Only the top level trace array gets its snapshot allocated 8221 * from the kernel command line. 
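 * (That is, a snapshot requested on the kernel command line is only
 * pre-allocated for global_trace; instances created later allocate
 * theirs lazily, e.g. via tracing_alloc_snapshot_instance() when their
 * snapshot file is first written.)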
8222 */ 8223 allocate_snapshot = false; 8224 #endif 8225 return 0; 8226 } 8227 8228 static void free_trace_buffer(struct trace_buffer *buf) 8229 { 8230 if (buf->buffer) { 8231 ring_buffer_free(buf->buffer); 8232 buf->buffer = NULL; 8233 free_percpu(buf->data); 8234 buf->data = NULL; 8235 } 8236 } 8237 8238 static void free_trace_buffers(struct trace_array *tr) 8239 { 8240 if (!tr) 8241 return; 8242 8243 free_trace_buffer(&tr->trace_buffer); 8244 8245 #ifdef CONFIG_TRACER_MAX_TRACE 8246 free_trace_buffer(&tr->max_buffer); 8247 #endif 8248 } 8249 8250 static void init_trace_flags_index(struct trace_array *tr) 8251 { 8252 int i; 8253 8254 /* Used by the trace options files */ 8255 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) 8256 tr->trace_flags_index[i] = i; 8257 } 8258 8259 static void __update_tracer_options(struct trace_array *tr) 8260 { 8261 struct tracer *t; 8262 8263 for (t = trace_types; t; t = t->next) 8264 add_tracer_options(tr, t); 8265 } 8266 8267 static void update_tracer_options(struct trace_array *tr) 8268 { 8269 mutex_lock(&trace_types_lock); 8270 __update_tracer_options(tr); 8271 mutex_unlock(&trace_types_lock); 8272 } 8273 8274 struct trace_array *trace_array_create(const char *name) 8275 { 8276 struct trace_array *tr; 8277 int ret; 8278 8279 mutex_lock(&event_mutex); 8280 mutex_lock(&trace_types_lock); 8281 8282 ret = -EEXIST; 8283 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 8284 if (tr->name && strcmp(tr->name, name) == 0) 8285 goto out_unlock; 8286 } 8287 8288 ret = -ENOMEM; 8289 tr = kzalloc(sizeof(*tr), GFP_KERNEL); 8290 if (!tr) 8291 goto out_unlock; 8292 8293 tr->name = kstrdup(name, GFP_KERNEL); 8294 if (!tr->name) 8295 goto out_free_tr; 8296 8297 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) 8298 goto out_free_tr; 8299 8300 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; 8301 8302 cpumask_copy(tr->tracing_cpumask, cpu_all_mask); 8303 8304 raw_spin_lock_init(&tr->start_lock); 8305 8306 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 8307 8308 tr->current_trace = &nop_trace; 8309 8310 INIT_LIST_HEAD(&tr->systems); 8311 INIT_LIST_HEAD(&tr->events); 8312 INIT_LIST_HEAD(&tr->hist_vars); 8313 INIT_LIST_HEAD(&tr->err_log); 8314 8315 if (allocate_trace_buffers(tr, trace_buf_size) < 0) 8316 goto out_free_tr; 8317 8318 tr->dir = tracefs_create_dir(name, trace_instance_dir); 8319 if (!tr->dir) 8320 goto out_free_tr; 8321 8322 ret = event_trace_add_tracer(tr->dir, tr); 8323 if (ret) { 8324 tracefs_remove_recursive(tr->dir); 8325 goto out_free_tr; 8326 } 8327 8328 ftrace_init_trace_array(tr); 8329 8330 init_tracer_tracefs(tr, tr->dir); 8331 init_trace_flags_index(tr); 8332 __update_tracer_options(tr); 8333 8334 list_add(&tr->list, &ftrace_trace_arrays); 8335 8336 mutex_unlock(&trace_types_lock); 8337 mutex_unlock(&event_mutex); 8338 8339 return tr; 8340 8341 out_free_tr: 8342 free_trace_buffers(tr); 8343 free_cpumask_var(tr->tracing_cpumask); 8344 kfree(tr->name); 8345 kfree(tr); 8346 8347 out_unlock: 8348 mutex_unlock(&trace_types_lock); 8349 mutex_unlock(&event_mutex); 8350 8351 return ERR_PTR(ret); 8352 } 8353 EXPORT_SYMBOL_GPL(trace_array_create); 8354 8355 static int instance_mkdir(const char *name) 8356 { 8357 return PTR_ERR_OR_ZERO(trace_array_create(name)); 8358 } 8359 8360 static int __remove_instance(struct trace_array *tr) 8361 { 8362 int i; 8363 8364 if (tr->ref || (tr->current_trace && tr->current_trace->ref)) 8365 return -EBUSY; 8366 8367 list_del(&tr->list); 8368 8369 /* Disable all the flags that were enabled 
struct trace_array *trace_array_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);
	INIT_LIST_HEAD(&tr->hist_vars);
	INIT_LIST_HEAD(&tr->err_log);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	ftrace_init_trace_array(tr);

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return tr;

out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(trace_array_create);

static int instance_mkdir(const char *name)
{
	return PTR_ERR_OR_ZERO(trace_array_create(name));
}

static int __remove_instance(struct trace_array *tr)
{
	int i;

	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		return -EBUSY;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	clear_ftrace_function_probes(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);
	tr = NULL;

	return 0;
}

int trace_array_destroy(struct trace_array *tr)
{
	int ret;

	if (!tr)
		return -EINVAL;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = __remove_instance(tr);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_array_destroy);

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			ret = __remove_instance(tr);
			break;
		}
	}

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}
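/*
 * Create the per-instance control files under @d_tracer: tracer
 * selection, the trace/trace_pipe readers, buffer sizing, trace_marker,
 * clock/timestamp selection, per-CPU directories and so on.  Used for
 * both the top level tracing directory and for instances.
 */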
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	struct trace_event_file *file;
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	file = __find_event_file(tr, "ftrace", "print");
	if (file && file->dir)
		trace_create_file("trigger", 0644, file->dir, file,
				  &event_trigger_fops);
	tr->trace_marker_file = file;

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	trace_create_file("timestamp_mode", 0444, d_tracer, tr,
			  &trace_time_stamp_mode_fops);

	tr->buffer_percent = 50;

	trace_create_file("buffer_percent", 0444, d_tracer,
			  tr, &buffer_percent_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	trace_create_file("error_log", 0644, d_tracer,
			  tr, &tracing_err_log_fops);

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}

static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
	    (IS_ENABLED(CONFIG_DEBUG_FS) &&
	     WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}

extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static void __init trace_eval_init(void)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}
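/*
 * With CONFIG_MODULES, eval maps provided by a module are inserted when
 * the module loads and, if CONFIG_TRACE_EVAL_MAP_FILE is enabled,
 * removed again when it unloads (see trace_module_notify() below).
 */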
#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	event_trace_init();

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			  NULL, &tracing_saved_tgids_fops);

	trace_eval_init();

	trace_create_eval_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}
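/*
 * When ftrace_dump_on_oops is set, the panic and die notifiers below
 * call ftrace_dump() so the ring buffer contents end up on the console.
 */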
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* Should be zero terminated, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
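/*
 * ftrace_dump - dump the ring buffer contents to the console.
 *
 * Called from the panic/die notifiers above (and from sysrq-z); tracing
 * is switched off for the dump and only one dumper is allowed to run at
 * a time (see dump_running).
 */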
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);
	printk_nmi_direct_enter();

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	printk_nmi_direct_exit();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
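/*
 * trace_run_command() and trace_parse_run_command() are helpers for
 * interfaces that accept textual commands (the dynamic event files, for
 * example).  The user buffer is consumed line by line, '#' comments are
 * stripped, and each line is split into an argv[] array that is handed
 * to the caller supplied createfn().  A rough sketch of the behaviour,
 * with a hypothetical command file:
 *
 *	echo 'foo bar   # ignored' > some_command_file
 *
 * ends up invoking createfn(2, { "foo", "bar" }).
 */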
int trace_run_command(const char *buf, int (*createfn)(int, char **))
{
	char **argv;
	int argc, ret;

	argc = 0;
	ret = 0;
	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = createfn(argc, argv);

	argv_free(argv);

	return ret;
}

#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(int, char **))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = trace_run_command(buf, createfn);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocate some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:preapre", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
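/*
 * early_trace_init() runs earlier in boot than trace_init(): it sets up
 * the tracepoint-to-printk iterator and allocates the ring buffers,
 * while trace_init() initializes the trace events.
 */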
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name lives in an init section that
	 * is freed after boot. This function is called as a late initcall;
	 * if the boot tracer was never registered by then, clear the
	 * pointer so that a later registration does not access memory
	 * that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static int tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       " \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}

	return 0;
}
late_initcall_sync(tracing_set_default_clock);
#endif