/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
 * Copyright (C) 2008 Ingo Molnar <[email protected]>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <[email protected]>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/irq_work.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
int ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * When a reader is waiting for data, then this variable is
 * set to true.
 */
static bool trace_wakeup_needed;

static struct irq_work trace_work_wakeup;

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static int __init set_cmdline_ftrace(char *str)
{
	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = 1;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

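/*
 * Illustrative example (not part of the build): the boot handlers above
 * let the kernel command line pre-configure tracing, e.g.:
 *
 *	ftrace=function trace_buf_size=1M trace_options=sym-addr \
 *		ftrace_dump_on_oops=orig_cpu
 *
 * which selects the function tracer, sizes the per-cpu buffers, sets a
 * trace option, and dumps only the oops'ing CPU's buffer on a crash.
 */
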
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in memory is used to hold the
 * linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);

	return ts;
}

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset for
 * the global_trace so the tracing can continue.
 */
static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);

int tracing_is_enabled(void)
{
	return tracing_is_on();
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer		*current_trace __read_mostly = &nop_trace;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
static DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (which are returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == TRACE_PIPE_ALL_CPU) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == TRACE_PIPE_ALL_CPU) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

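/*
 * Illustrative sketch (not part of this file): a consuming reader of a
 * single cpu buffer is expected to bracket its reads with the primitives
 * above, roughly:
 *
 *	trace_access_lock(cpu);
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		handle(event);		// hypothetical consumer callback
 *	trace_access_unlock(cpu);
 *
 * Passing TRACE_PIPE_ALL_CPU instead takes the write side of
 * all_cpu_access_lock and so excludes every per-cpu reader.
 */
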
/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS;

static int trace_stop_count;
static DEFINE_RAW_SPINLOCK(tracing_start_lock);

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Wakes up any task that is blocked on the trace_wait queue.
 * This is used with trace_poll for tasks polling the trace.
 */
static void trace_wake_up(struct irq_work *work)
{
	wake_up_all(&trace_wait);
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	if (global_trace.buffer)
		ring_buffer_record_on(global_trace.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * on the buffer.
	 */
	global_trace.buffer_disabled = 0;
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	if (global_trace.buffer)
		ring_buffer_record_off(global_trace.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * on the buffer.
	 */
	global_trace.buffer_disabled = 1;
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	if (global_trace.buffer)
		return ring_buffer_record_is_on(global_trace.buffer);
	return !global_trace.buffer_disabled;
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	ARCH_TRACE_CLOCKS
};

int trace_clock_id;

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

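/*
 * Sketch of the expected calling pattern (illustration only): a debugfs
 * write handler that accepts a space separated list of names would
 * typically drive the parser like this:
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		do_something(parser.buffer);	// hypothetical consumer
 *	trace_parser_put(&parser);
 */
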
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in places outside of update_max_tr, so it
 * needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	struct trace_array_cpu *max_data;

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	max_data = max_tr.data[cpu];
	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	max_data->uid = task_uid(tsk);
	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	if (trace_stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!current_trace->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (trace_stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
		return;

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk(&max_tr, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

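/*
 * Illustrative sketch (not from this file): a latency tracer that has just
 * measured a new worst case is expected to call the helpers above with
 * interrupts disabled, roughly:
 *
 *	if (delta > tracing_max_latency) {
 *		tracing_max_latency = delta;
 *		update_max_tr(tr, current, cpu);  // swap live and max buffers
 *	}
 *
 * update_max_tr_single() is the variant used when only @cpu's buffer is
 * interesting (e.g. by the wakeup tracer).
 */
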
static void default_wait_pipe(struct trace_iterator *iter)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);

	/*
	 * The events can happen in critical sections where
	 * checking a work queue can cause deadlocks.
	 * After adding a task to the queue, this flag is set
	 * only to notify events to try to wake up the queue
	 * using irq_work.
	 *
	 * We don't clear it even if the buffer is no longer
	 * empty. The flag only causes the next event to run
	 * irq_work to do the work queue wake up. The worst
	 * that can happen if we race with !trace_empty() is that
	 * an event will cause an irq_work to try to wake up
	 * an empty queue.
	 *
	 * There's no reason to protect this flag either, as
	 * the work queue and irq_work logic will do the necessary
	 * synchronization for the wake ups. The only thing
	 * that is necessary is that the wake up happens after
	 * a task has been queued. Spurious wake ups are OK.
	 */
	trace_wakeup_needed = true;

	if (trace_empty(iter))
		schedule();

	finish_wait(&trace_wait, &wait);
}

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest && !tracing_selftest_disabled) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;

		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		tracing_reset_online_cpus(tr);

		current_trace = type;

		if (type->use_max_tr) {
			/* If we expanded the buffers, make sure the max is expanded too */
			if (ring_buffer_expanded)
				ring_buffer_resize(max_tr.buffer, trace_buf_size,
						   RING_BUFFER_ALL_CPUS);
			type->allocated_snapshot = true;
		}

		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			/* Add the warning after printing 'FAILED' */
			WARN_ON(1);
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		tracing_reset_online_cpus(tr);

		if (type->use_max_tr) {
			type->allocated_snapshot = false;

			/* Shrink the max buffer again */
			if (ring_buffer_expanded)
				ring_buffer_resize(max_tr.buffer, 1,
						   RING_BUFFER_ALL_CPUS);
		}

		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;

out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

out_unlock:
	return ret;
}

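/*
 * Minimal registration sketch (hypothetical tracer, illustration only):
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_tracer_init,	// hypothetical callbacks
 *		.reset	= example_tracer_reset,
 *	};
 *
 *	static __init int init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	device_initcall(init_example_tracer);
 *
 * register_tracer() fills in any missing optional callbacks (set_flag,
 * flags, wait_pipe) with the defaults defined earlier in this file.
 */
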
void tracing_reset(struct trace_array *tr, int cpu)
{
	struct ring_buffer *buffer = tr->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_array *tr)
{
	struct ring_buffer *buffer = tr->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
	tracing_reset(&global_trace, cpu);
}

void tracing_reset_current_online_cpus(void)
{
	tracing_reset_online_cpus(&global_trace);
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return trace_stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&tracing_start_lock, flags);
	if (--trace_stop_count) {
		if (trace_stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			trace_stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	arch_spin_unlock(&ftrace_max_lock);

	ftrace_start();
out:
	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	raw_spin_lock_irqsave(&tracing_start_lock, flags);
	if (trace_stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	arch_spin_unlock(&ftrace_max_lock);

out:
	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	arch_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	__this_cpu_write(trace_cmdline_save, false);

	trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count	= pc & 0xff;
	entry->pid		= (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	if (trace_wakeup_needed) {
		trace_wakeup_needed = false;
		/* irq_work_queue() supplies its own memory barriers */
		irq_work_queue(&trace_work_wakeup);
	}
	ring_buffer_unlock_commit(buffer, event);
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

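/*
 * Sketch of the reserve/fill/commit pattern the helpers above implement
 * (illustration only; trace_function() below is a real in-file user):
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 *					  flags, pc);
 *	if (!event)
 *		return;			// ring buffer full or recording off
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;			// fill in the type-specific fields
 *	__buffer_unlock_commit(buffer, event);
 */
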
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, ip, parent_ip, flags, pc);
}

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;
	trace.skip		= skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries		= &__get_cpu_var(ftrace_stack).calls[0];
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 */
void trace_dump_stack(void)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/* skipping 3 traces, seems to get us at the caller of this function */
	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
}

static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fixups.
	 * Saving the user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

out_drop_count:
	__this_cpu_dec(user_stack_count);
out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */

/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}

static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

err_nmi:
	free_percpu(irq_buffers);
err_irq:
	free_percpu(sirq_buffers);
err_sirq:
	free_percpu(buffers);
err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	pr_info("ftrace: Allocated trace_printk buffers\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.buffer)
		tracing_start_cmdline_record();
}

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}

/**
 * trace_vbprintk - write binary msg to tracing buffer
 *
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);

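/*
 * Usage sketch (illustration only): trace_vbprintk() is normally reached
 * through the trace_printk() macro rather than called directly, e.g.:
 *
 *	trace_printk("queue depth %d on cpu %d\n", depth, cpu);
 *
 * The format string is recorded by pointer and the arguments in binary
 * form, which is why the per-context buffers from get_trace_buf() can be
 * small and lockless.
 */
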
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
	if (len > TRACE_BUF_SIZE)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len);
	entry->buf[len] = '\0';
	if (!filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}
out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);

static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->tr->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother iterating over
	 * all the cpus; peek at that cpu directly.
	 */
	if (cpu_file > TRACE_PIPE_ALL_CPU) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}

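/*
 * Illustrative reader loop (not from this file): consumers such as the
 * trace_pipe read path walk the merged, timestamp-ordered stream with the
 * helpers above, roughly:
 *
 *	while (trace_find_next_entry_inc(iter) != NULL) {
 *		ret = print_trace_line(iter);
 *		if (ret == TRACE_TYPE_PARTIAL_LINE)
 *			break;
 *		trace_consume(iter);
 *	}
 */
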
1962 * iter->trace is a copy of current_trace, the pointer to the 1963 * name may be used instead of a strcmp(), as iter->trace->name 1964 * will point to the same string as current_trace->name. 1965 */ 1966 mutex_lock(&trace_types_lock); 1967 if (unlikely(current_trace && iter->trace->name != current_trace->name)) 1968 *iter->trace = *current_trace; 1969 mutex_unlock(&trace_types_lock); 1970 1971 if (iter->snapshot && iter->trace->use_max_tr) 1972 return ERR_PTR(-EBUSY); 1973 1974 if (!iter->snapshot) 1975 atomic_inc(&trace_record_cmdline_disabled); 1976 1977 if (*pos != iter->pos) { 1978 iter->ent = NULL; 1979 iter->cpu = 0; 1980 iter->idx = -1; 1981 1982 if (cpu_file == TRACE_PIPE_ALL_CPU) { 1983 for_each_tracing_cpu(cpu) 1984 tracing_iter_reset(iter, cpu); 1985 } else 1986 tracing_iter_reset(iter, cpu_file); 1987 1988 iter->leftover = 0; 1989 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) 1990 ; 1991 1992 } else { 1993 /* 1994 * If we overflowed the seq_file before, then we want 1995 * to just reuse the trace_seq buffer again. 1996 */ 1997 if (iter->leftover) 1998 p = iter; 1999 else { 2000 l = *pos - 1; 2001 p = s_next(m, p, &l); 2002 } 2003 } 2004 2005 trace_event_read_lock(); 2006 trace_access_lock(cpu_file); 2007 return p; 2008 } 2009 2010 static void s_stop(struct seq_file *m, void *p) 2011 { 2012 struct trace_iterator *iter = m->private; 2013 2014 if (iter->snapshot && iter->trace->use_max_tr) 2015 return; 2016 2017 if (!iter->snapshot) 2018 atomic_dec(&trace_record_cmdline_disabled); 2019 trace_access_unlock(iter->cpu_file); 2020 trace_event_read_unlock(); 2021 } 2022 2023 static void 2024 get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries) 2025 { 2026 unsigned long count; 2027 int cpu; 2028 2029 *total = 0; 2030 *entries = 0; 2031 2032 for_each_tracing_cpu(cpu) { 2033 count = ring_buffer_entries_cpu(tr->buffer, cpu); 2034 /* 2035 * If this buffer has skipped entries, then we hold all 2036 * entries for the trace and we need to ignore the 2037 * ones before the time stamp. 
2038 */ 2039 if (tr->data[cpu]->skipped_entries) { 2040 count -= tr->data[cpu]->skipped_entries; 2041 /* total is the same as the entries */ 2042 *total += count; 2043 } else 2044 *total += count + 2045 ring_buffer_overrun_cpu(tr->buffer, cpu); 2046 *entries += count; 2047 } 2048 } 2049 2050 static void print_lat_help_header(struct seq_file *m) 2051 { 2052 seq_puts(m, "# _------=> CPU# \n"); 2053 seq_puts(m, "# / _-----=> irqs-off \n"); 2054 seq_puts(m, "# | / _----=> need-resched \n"); 2055 seq_puts(m, "# || / _---=> hardirq/softirq \n"); 2056 seq_puts(m, "# ||| / _--=> preempt-depth \n"); 2057 seq_puts(m, "# |||| / delay \n"); 2058 seq_puts(m, "# cmd pid ||||| time | caller \n"); 2059 seq_puts(m, "# \\ / ||||| \\ | / \n"); 2060 } 2061 2062 static void print_event_info(struct trace_array *tr, struct seq_file *m) 2063 { 2064 unsigned long total; 2065 unsigned long entries; 2066 2067 get_total_entries(tr, &total, &entries); 2068 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", 2069 entries, total, num_online_cpus()); 2070 seq_puts(m, "#\n"); 2071 } 2072 2073 static void print_func_help_header(struct trace_array *tr, struct seq_file *m) 2074 { 2075 print_event_info(tr, m); 2076 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); 2077 seq_puts(m, "# | | | | |\n"); 2078 } 2079 2080 static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m) 2081 { 2082 print_event_info(tr, m); 2083 seq_puts(m, "# _-----=> irqs-off\n"); 2084 seq_puts(m, "# / _----=> need-resched\n"); 2085 seq_puts(m, "# | / _---=> hardirq/softirq\n"); 2086 seq_puts(m, "# || / _--=> preempt-depth\n"); 2087 seq_puts(m, "# ||| / delay\n"); 2088 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"); 2089 seq_puts(m, "# | | | |||| | |\n"); 2090 } 2091 2092 void 2093 print_trace_header(struct seq_file *m, struct trace_iterator *iter) 2094 { 2095 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 2096 struct trace_array *tr = iter->tr; 2097 struct trace_array_cpu *data = tr->data[tr->cpu]; 2098 struct tracer *type = current_trace; 2099 unsigned long entries; 2100 unsigned long total; 2101 const char *name = "preemption"; 2102 2103 name = type->name; 2104 2105 get_total_entries(tr, &total, &entries); 2106 2107 seq_printf(m, "# %s latency trace v1.1.5 on %s\n", 2108 name, UTS_RELEASE); 2109 seq_puts(m, "# -----------------------------------" 2110 "---------------------------------\n"); 2111 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" 2112 " (M:%s VP:%d, KP:%d, SP:%d HP:%d", 2113 nsecs_to_usecs(data->saved_latency), 2114 entries, 2115 total, 2116 tr->cpu, 2117 #if defined(CONFIG_PREEMPT_NONE) 2118 "server", 2119 #elif defined(CONFIG_PREEMPT_VOLUNTARY) 2120 "desktop", 2121 #elif defined(CONFIG_PREEMPT) 2122 "preempt", 2123 #else 2124 "unknown", 2125 #endif 2126 /* These are reserved for later use */ 2127 0, 0, 0, 0); 2128 #ifdef CONFIG_SMP 2129 seq_printf(m, " #P:%d)\n", num_online_cpus()); 2130 #else 2131 seq_puts(m, ")\n"); 2132 #endif 2133 seq_puts(m, "# -----------------\n"); 2134 seq_printf(m, "# | task: %.16s-%d " 2135 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", 2136 data->comm, data->pid, 2137 from_kuid_munged(seq_user_ns(m), data->uid), data->nice, 2138 data->policy, data->rt_priority); 2139 seq_puts(m, "# -----------------\n"); 2140 2141 if (data->critical_start) { 2142 seq_puts(m, "# => started at: "); 2143 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); 2144 trace_print_seq(m, &iter->seq); 2145 seq_puts(m, "\n# => ended at: "); 2146 
seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); 2147 trace_print_seq(m, &iter->seq); 2148 seq_puts(m, "\n#\n"); 2149 } 2150 2151 seq_puts(m, "#\n"); 2152 } 2153 2154 static void test_cpu_buff_start(struct trace_iterator *iter) 2155 { 2156 struct trace_seq *s = &iter->seq; 2157 2158 if (!(trace_flags & TRACE_ITER_ANNOTATE)) 2159 return; 2160 2161 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 2162 return; 2163 2164 if (cpumask_test_cpu(iter->cpu, iter->started)) 2165 return; 2166 2167 if (iter->tr->data[iter->cpu]->skipped_entries) 2168 return; 2169 2170 cpumask_set_cpu(iter->cpu, iter->started); 2171 2172 /* Don't print started cpu buffer for the first entry of the trace */ 2173 if (iter->idx > 1) 2174 trace_seq_printf(s, "##### CPU %u buffer started ####\n", 2175 iter->cpu); 2176 } 2177 2178 static enum print_line_t print_trace_fmt(struct trace_iterator *iter) 2179 { 2180 struct trace_seq *s = &iter->seq; 2181 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 2182 struct trace_entry *entry; 2183 struct trace_event *event; 2184 2185 entry = iter->ent; 2186 2187 test_cpu_buff_start(iter); 2188 2189 event = ftrace_find_event(entry->type); 2190 2191 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2192 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 2193 if (!trace_print_lat_context(iter)) 2194 goto partial; 2195 } else { 2196 if (!trace_print_context(iter)) 2197 goto partial; 2198 } 2199 } 2200 2201 if (event) 2202 return event->funcs->trace(iter, sym_flags, event); 2203 2204 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) 2205 goto partial; 2206 2207 return TRACE_TYPE_HANDLED; 2208 partial: 2209 return TRACE_TYPE_PARTIAL_LINE; 2210 } 2211 2212 static enum print_line_t print_raw_fmt(struct trace_iterator *iter) 2213 { 2214 struct trace_seq *s = &iter->seq; 2215 struct trace_entry *entry; 2216 struct trace_event *event; 2217 2218 entry = iter->ent; 2219 2220 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2221 if (!trace_seq_printf(s, "%d %d %llu ", 2222 entry->pid, iter->cpu, iter->ts)) 2223 goto partial; 2224 } 2225 2226 event = ftrace_find_event(entry->type); 2227 if (event) 2228 return event->funcs->raw(iter, 0, event); 2229 2230 if (!trace_seq_printf(s, "%d ?\n", entry->type)) 2231 goto partial; 2232 2233 return TRACE_TYPE_HANDLED; 2234 partial: 2235 return TRACE_TYPE_PARTIAL_LINE; 2236 } 2237 2238 static enum print_line_t print_hex_fmt(struct trace_iterator *iter) 2239 { 2240 struct trace_seq *s = &iter->seq; 2241 unsigned char newline = '\n'; 2242 struct trace_entry *entry; 2243 struct trace_event *event; 2244 2245 entry = iter->ent; 2246 2247 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2248 SEQ_PUT_HEX_FIELD_RET(s, entry->pid); 2249 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); 2250 SEQ_PUT_HEX_FIELD_RET(s, iter->ts); 2251 } 2252 2253 event = ftrace_find_event(entry->type); 2254 if (event) { 2255 enum print_line_t ret = event->funcs->hex(iter, 0, event); 2256 if (ret != TRACE_TYPE_HANDLED) 2257 return ret; 2258 } 2259 2260 SEQ_PUT_FIELD_RET(s, newline); 2261 2262 return TRACE_TYPE_HANDLED; 2263 } 2264 2265 static enum print_line_t print_bin_fmt(struct trace_iterator *iter) 2266 { 2267 struct trace_seq *s = &iter->seq; 2268 struct trace_entry *entry; 2269 struct trace_event *event; 2270 2271 entry = iter->ent; 2272 2273 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2274 SEQ_PUT_FIELD_RET(s, entry->pid); 2275 SEQ_PUT_FIELD_RET(s, iter->cpu); 2276 SEQ_PUT_FIELD_RET(s, iter->ts); 2277 } 2278 2279 event = ftrace_find_event(entry->type); 2280 return event ? 
		event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events &&
	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
			      iter->cpu, iter->lost_events))
		return TRACE_TYPE_PARTIAL_LINE;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->tr, m);
			else
				print_func_help_header(iter->tr, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_printf(m, "# WARNING: FUNCTION TRACING IS DISABLED\n");
	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
}

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
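			/*
			 * Warn in the output if function tracing has shut
			 * itself down (ftrace_is_dead()), since function
			 * events may then be missing from the trace.
			 */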
test_ftrace_alive(m); 2412 } 2413 if (iter->trace && iter->trace->print_header) 2414 iter->trace->print_header(m); 2415 else 2416 trace_default_header(m); 2417 2418 } else if (iter->leftover) { 2419 /* 2420 * If we filled the seq_file buffer earlier, we 2421 * want to just show it now. 2422 */ 2423 ret = trace_print_seq(m, &iter->seq); 2424 2425 /* ret should this time be zero, but you never know */ 2426 iter->leftover = ret; 2427 2428 } else { 2429 print_trace_line(iter); 2430 ret = trace_print_seq(m, &iter->seq); 2431 /* 2432 * If we overflow the seq_file buffer, then it will 2433 * ask us for this data again at start up. 2434 * Use that instead. 2435 * ret is 0 if seq_file write succeeded. 2436 * -1 otherwise. 2437 */ 2438 iter->leftover = ret; 2439 } 2440 2441 return 0; 2442 } 2443 2444 static const struct seq_operations tracer_seq_ops = { 2445 .start = s_start, 2446 .next = s_next, 2447 .stop = s_stop, 2448 .show = s_show, 2449 }; 2450 2451 static struct trace_iterator * 2452 __tracing_open(struct inode *inode, struct file *file, bool snapshot) 2453 { 2454 long cpu_file = (long) inode->i_private; 2455 struct trace_iterator *iter; 2456 int cpu; 2457 2458 if (tracing_disabled) 2459 return ERR_PTR(-ENODEV); 2460 2461 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); 2462 if (!iter) 2463 return ERR_PTR(-ENOMEM); 2464 2465 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(), 2466 GFP_KERNEL); 2467 if (!iter->buffer_iter) 2468 goto release; 2469 2470 /* 2471 * We make a copy of the current tracer to avoid concurrent 2472 * changes on it while we are reading. 2473 */ 2474 mutex_lock(&trace_types_lock); 2475 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); 2476 if (!iter->trace) 2477 goto fail; 2478 2479 *iter->trace = *current_trace; 2480 2481 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) 2482 goto fail; 2483 2484 if (current_trace->print_max || snapshot) 2485 iter->tr = &max_tr; 2486 else 2487 iter->tr = &global_trace; 2488 iter->snapshot = snapshot; 2489 iter->pos = -1; 2490 mutex_init(&iter->mutex); 2491 iter->cpu_file = cpu_file; 2492 2493 /* Notify the tracer early; before we stop tracing. */ 2494 if (iter->trace && iter->trace->open) 2495 iter->trace->open(iter); 2496 2497 /* Annotate start of buffers if we had overruns */ 2498 if (ring_buffer_overruns(iter->tr->buffer)) 2499 iter->iter_flags |= TRACE_FILE_ANNOTATE; 2500 2501 /* Output in nanoseconds only if we are using a clock in nanoseconds. 
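 * (A clock such as "counter" does not count in nanoseconds; for those
 * clocks the raw clock value is shown instead.)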
*/ 2502 if (trace_clocks[trace_clock_id].in_ns) 2503 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 2504 2505 /* stop the trace while dumping if we are not opening "snapshot" */ 2506 if (!iter->snapshot) 2507 tracing_stop(); 2508 2509 if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { 2510 for_each_tracing_cpu(cpu) { 2511 iter->buffer_iter[cpu] = 2512 ring_buffer_read_prepare(iter->tr->buffer, cpu); 2513 } 2514 ring_buffer_read_prepare_sync(); 2515 for_each_tracing_cpu(cpu) { 2516 ring_buffer_read_start(iter->buffer_iter[cpu]); 2517 tracing_iter_reset(iter, cpu); 2518 } 2519 } else { 2520 cpu = iter->cpu_file; 2521 iter->buffer_iter[cpu] = 2522 ring_buffer_read_prepare(iter->tr->buffer, cpu); 2523 ring_buffer_read_prepare_sync(); 2524 ring_buffer_read_start(iter->buffer_iter[cpu]); 2525 tracing_iter_reset(iter, cpu); 2526 } 2527 2528 mutex_unlock(&trace_types_lock); 2529 2530 return iter; 2531 2532 fail: 2533 mutex_unlock(&trace_types_lock); 2534 kfree(iter->trace); 2535 kfree(iter->buffer_iter); 2536 release: 2537 seq_release_private(inode, file); 2538 return ERR_PTR(-ENOMEM); 2539 } 2540 2541 int tracing_open_generic(struct inode *inode, struct file *filp) 2542 { 2543 if (tracing_disabled) 2544 return -ENODEV; 2545 2546 filp->private_data = inode->i_private; 2547 return 0; 2548 } 2549 2550 static int tracing_release(struct inode *inode, struct file *file) 2551 { 2552 struct seq_file *m = file->private_data; 2553 struct trace_iterator *iter; 2554 int cpu; 2555 2556 if (!(file->f_mode & FMODE_READ)) 2557 return 0; 2558 2559 iter = m->private; 2560 2561 mutex_lock(&trace_types_lock); 2562 for_each_tracing_cpu(cpu) { 2563 if (iter->buffer_iter[cpu]) 2564 ring_buffer_read_finish(iter->buffer_iter[cpu]); 2565 } 2566 2567 if (iter->trace && iter->trace->close) 2568 iter->trace->close(iter); 2569 2570 if (!iter->snapshot) 2571 /* reenable tracing if it was previously enabled */ 2572 tracing_start(); 2573 mutex_unlock(&trace_types_lock); 2574 2575 mutex_destroy(&iter->mutex); 2576 free_cpumask_var(iter->started); 2577 kfree(iter->trace); 2578 kfree(iter->buffer_iter); 2579 seq_release_private(inode, file); 2580 return 0; 2581 } 2582 2583 static int tracing_open(struct inode *inode, struct file *file) 2584 { 2585 struct trace_iterator *iter; 2586 int ret = 0; 2587 2588 /* If this file was open for write, then erase contents */ 2589 if ((file->f_mode & FMODE_WRITE) && 2590 (file->f_flags & O_TRUNC)) { 2591 long cpu = (long) inode->i_private; 2592 2593 if (cpu == TRACE_PIPE_ALL_CPU) 2594 tracing_reset_online_cpus(&global_trace); 2595 else 2596 tracing_reset(&global_trace, cpu); 2597 } 2598 2599 if (file->f_mode & FMODE_READ) { 2600 iter = __tracing_open(inode, file, false); 2601 if (IS_ERR(iter)) 2602 ret = PTR_ERR(iter); 2603 else if (trace_flags & TRACE_ITER_LATENCY_FMT) 2604 iter->iter_flags |= TRACE_FILE_LAT_FMT; 2605 } 2606 return ret; 2607 } 2608 2609 static void * 2610 t_next(struct seq_file *m, void *v, loff_t *pos) 2611 { 2612 struct tracer *t = v; 2613 2614 (*pos)++; 2615 2616 if (t) 2617 t = t->next; 2618 2619 return t; 2620 } 2621 2622 static void *t_start(struct seq_file *m, loff_t *pos) 2623 { 2624 struct tracer *t; 2625 loff_t l = 0; 2626 2627 mutex_lock(&trace_types_lock); 2628 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l)) 2629 ; 2630 2631 return t; 2632 } 2633 2634 static void t_stop(struct seq_file *m, void *p) 2635 { 2636 mutex_unlock(&trace_types_lock); 2637 } 2638 2639 static int t_show(struct seq_file *m, void *v) 2640 { 2641 struct tracer *t = v; 2642 2643 if (!t) 2644 
return 0; 2645 2646 seq_printf(m, "%s", t->name); 2647 if (t->next) 2648 seq_putc(m, ' '); 2649 else 2650 seq_putc(m, '\n'); 2651 2652 return 0; 2653 } 2654 2655 static const struct seq_operations show_traces_seq_ops = { 2656 .start = t_start, 2657 .next = t_next, 2658 .stop = t_stop, 2659 .show = t_show, 2660 }; 2661 2662 static int show_traces_open(struct inode *inode, struct file *file) 2663 { 2664 if (tracing_disabled) 2665 return -ENODEV; 2666 2667 return seq_open(file, &show_traces_seq_ops); 2668 } 2669 2670 static ssize_t 2671 tracing_write_stub(struct file *filp, const char __user *ubuf, 2672 size_t count, loff_t *ppos) 2673 { 2674 return count; 2675 } 2676 2677 static loff_t tracing_seek(struct file *file, loff_t offset, int origin) 2678 { 2679 if (file->f_mode & FMODE_READ) 2680 return seq_lseek(file, offset, origin); 2681 else 2682 return 0; 2683 } 2684 2685 static const struct file_operations tracing_fops = { 2686 .open = tracing_open, 2687 .read = seq_read, 2688 .write = tracing_write_stub, 2689 .llseek = tracing_seek, 2690 .release = tracing_release, 2691 }; 2692 2693 static const struct file_operations show_traces_fops = { 2694 .open = show_traces_open, 2695 .read = seq_read, 2696 .release = seq_release, 2697 .llseek = seq_lseek, 2698 }; 2699 2700 /* 2701 * Only trace on a CPU if the bitmask is set: 2702 */ 2703 static cpumask_var_t tracing_cpumask; 2704 2705 /* 2706 * The tracer itself will not take this lock, but still we want 2707 * to provide a consistent cpumask to user-space: 2708 */ 2709 static DEFINE_MUTEX(tracing_cpumask_update_lock); 2710 2711 /* 2712 * Temporary storage for the character representation of the 2713 * CPU bitmask (and one more byte for the newline): 2714 */ 2715 static char mask_str[NR_CPUS + 1]; 2716 2717 static ssize_t 2718 tracing_cpumask_read(struct file *filp, char __user *ubuf, 2719 size_t count, loff_t *ppos) 2720 { 2721 int len; 2722 2723 mutex_lock(&tracing_cpumask_update_lock); 2724 2725 len = cpumask_scnprintf(mask_str, count, tracing_cpumask); 2726 if (count - len < 2) { 2727 count = -EINVAL; 2728 goto out_err; 2729 } 2730 len += sprintf(mask_str + len, "\n"); 2731 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); 2732 2733 out_err: 2734 mutex_unlock(&tracing_cpumask_update_lock); 2735 2736 return count; 2737 } 2738 2739 static ssize_t 2740 tracing_cpumask_write(struct file *filp, const char __user *ubuf, 2741 size_t count, loff_t *ppos) 2742 { 2743 int err, cpu; 2744 cpumask_var_t tracing_cpumask_new; 2745 2746 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) 2747 return -ENOMEM; 2748 2749 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 2750 if (err) 2751 goto err_unlock; 2752 2753 mutex_lock(&tracing_cpumask_update_lock); 2754 2755 local_irq_disable(); 2756 arch_spin_lock(&ftrace_max_lock); 2757 for_each_tracing_cpu(cpu) { 2758 /* 2759 * Increase/decrease the disabled counter if we are 2760 * about to flip a bit in the cpumask: 2761 */ 2762 if (cpumask_test_cpu(cpu, tracing_cpumask) && 2763 !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 2764 atomic_inc(&global_trace.data[cpu]->disabled); 2765 ring_buffer_record_disable_cpu(global_trace.buffer, cpu); 2766 } 2767 if (!cpumask_test_cpu(cpu, tracing_cpumask) && 2768 cpumask_test_cpu(cpu, tracing_cpumask_new)) { 2769 atomic_dec(&global_trace.data[cpu]->disabled); 2770 ring_buffer_record_enable_cpu(global_trace.buffer, cpu); 2771 } 2772 } 2773 arch_spin_unlock(&ftrace_max_lock); 2774 local_irq_enable(); 2775 2776 cpumask_copy(tracing_cpumask, 
tracing_cpumask_new); 2777 2778 mutex_unlock(&tracing_cpumask_update_lock); 2779 free_cpumask_var(tracing_cpumask_new); 2780 2781 return count; 2782 2783 err_unlock: 2784 free_cpumask_var(tracing_cpumask_new); 2785 2786 return err; 2787 } 2788 2789 static const struct file_operations tracing_cpumask_fops = { 2790 .open = tracing_open_generic, 2791 .read = tracing_cpumask_read, 2792 .write = tracing_cpumask_write, 2793 .llseek = generic_file_llseek, 2794 }; 2795 2796 static int tracing_trace_options_show(struct seq_file *m, void *v) 2797 { 2798 struct tracer_opt *trace_opts; 2799 u32 tracer_flags; 2800 int i; 2801 2802 mutex_lock(&trace_types_lock); 2803 tracer_flags = current_trace->flags->val; 2804 trace_opts = current_trace->flags->opts; 2805 2806 for (i = 0; trace_options[i]; i++) { 2807 if (trace_flags & (1 << i)) 2808 seq_printf(m, "%s\n", trace_options[i]); 2809 else 2810 seq_printf(m, "no%s\n", trace_options[i]); 2811 } 2812 2813 for (i = 0; trace_opts[i].name; i++) { 2814 if (tracer_flags & trace_opts[i].bit) 2815 seq_printf(m, "%s\n", trace_opts[i].name); 2816 else 2817 seq_printf(m, "no%s\n", trace_opts[i].name); 2818 } 2819 mutex_unlock(&trace_types_lock); 2820 2821 return 0; 2822 } 2823 2824 static int __set_tracer_option(struct tracer *trace, 2825 struct tracer_flags *tracer_flags, 2826 struct tracer_opt *opts, int neg) 2827 { 2828 int ret; 2829 2830 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); 2831 if (ret) 2832 return ret; 2833 2834 if (neg) 2835 tracer_flags->val &= ~opts->bit; 2836 else 2837 tracer_flags->val |= opts->bit; 2838 return 0; 2839 } 2840 2841 /* Try to assign a tracer specific option */ 2842 static int set_tracer_option(struct tracer *trace, char *cmp, int neg) 2843 { 2844 struct tracer_flags *tracer_flags = trace->flags; 2845 struct tracer_opt *opts = NULL; 2846 int i; 2847 2848 for (i = 0; tracer_flags->opts[i].name; i++) { 2849 opts = &tracer_flags->opts[i]; 2850 2851 if (strcmp(cmp, opts->name) == 0) 2852 return __set_tracer_option(trace, trace->flags, 2853 opts, neg); 2854 } 2855 2856 return -EINVAL; 2857 } 2858 2859 static void set_tracer_flags(unsigned int mask, int enabled) 2860 { 2861 /* do nothing if flag is already set */ 2862 if (!!(trace_flags & mask) == !!enabled) 2863 return; 2864 2865 if (enabled) 2866 trace_flags |= mask; 2867 else 2868 trace_flags &= ~mask; 2869 2870 if (mask == TRACE_ITER_RECORD_CMD) 2871 trace_event_enable_cmd_record(enabled); 2872 2873 if (mask == TRACE_ITER_OVERWRITE) 2874 ring_buffer_change_overwrite(global_trace.buffer, enabled); 2875 2876 if (mask == TRACE_ITER_PRINTK) 2877 trace_printk_start_stop_comm(enabled); 2878 } 2879 2880 static int trace_set_options(char *option) 2881 { 2882 char *cmp; 2883 int neg = 0; 2884 int ret = 0; 2885 int i; 2886 2887 cmp = strstrip(option); 2888 2889 if (strncmp(cmp, "no", 2) == 0) { 2890 neg = 1; 2891 cmp += 2; 2892 } 2893 2894 for (i = 0; trace_options[i]; i++) { 2895 if (strcmp(cmp, trace_options[i]) == 0) { 2896 set_tracer_flags(1 << i, !neg); 2897 break; 2898 } 2899 } 2900 2901 /* If no option could be set, test the specific tracer options */ 2902 if (!trace_options[i]) { 2903 mutex_lock(&trace_types_lock); 2904 ret = set_tracer_option(current_trace, cmp, neg); 2905 mutex_unlock(&trace_types_lock); 2906 } 2907 2908 return ret; 2909 } 2910 2911 static ssize_t 2912 tracing_trace_options_write(struct file *filp, const char __user *ubuf, 2913 size_t cnt, loff_t *ppos) 2914 { 2915 char buf[64]; 2916 2917 if (cnt >= sizeof(buf)) 2918 return -EINVAL; 2919 2920 if 
(copy_from_user(&buf, ubuf, cnt)) 2921 return -EFAULT; 2922 2923 buf[cnt] = 0; 2924 2925 trace_set_options(buf); 2926 2927 *ppos += cnt; 2928 2929 return cnt; 2930 } 2931 2932 static int tracing_trace_options_open(struct inode *inode, struct file *file) 2933 { 2934 if (tracing_disabled) 2935 return -ENODEV; 2936 return single_open(file, tracing_trace_options_show, NULL); 2937 } 2938 2939 static const struct file_operations tracing_iter_fops = { 2940 .open = tracing_trace_options_open, 2941 .read = seq_read, 2942 .llseek = seq_lseek, 2943 .release = single_release, 2944 .write = tracing_trace_options_write, 2945 }; 2946 2947 static const char readme_msg[] = 2948 "tracing mini-HOWTO:\n\n" 2949 "# mount -t debugfs nodev /sys/kernel/debug\n\n" 2950 "# cat /sys/kernel/debug/tracing/available_tracers\n" 2951 "wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n" 2952 "# cat /sys/kernel/debug/tracing/current_tracer\n" 2953 "nop\n" 2954 "# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n" 2955 "# cat /sys/kernel/debug/tracing/current_tracer\n" 2956 "wakeup\n" 2957 "# cat /sys/kernel/debug/tracing/trace_options\n" 2958 "noprint-parent nosym-offset nosym-addr noverbose\n" 2959 "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n" 2960 "# echo 1 > /sys/kernel/debug/tracing/tracing_on\n" 2961 "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n" 2962 "# echo 0 > /sys/kernel/debug/tracing/tracing_on\n" 2963 ; 2964 2965 static ssize_t 2966 tracing_readme_read(struct file *filp, char __user *ubuf, 2967 size_t cnt, loff_t *ppos) 2968 { 2969 return simple_read_from_buffer(ubuf, cnt, ppos, 2970 readme_msg, strlen(readme_msg)); 2971 } 2972 2973 static const struct file_operations tracing_readme_fops = { 2974 .open = tracing_open_generic, 2975 .read = tracing_readme_read, 2976 .llseek = generic_file_llseek, 2977 }; 2978 2979 static ssize_t 2980 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf, 2981 size_t cnt, loff_t *ppos) 2982 { 2983 char *buf_comm; 2984 char *file_buf; 2985 char *buf; 2986 int len = 0; 2987 int pid; 2988 int i; 2989 2990 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL); 2991 if (!file_buf) 2992 return -ENOMEM; 2993 2994 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL); 2995 if (!buf_comm) { 2996 kfree(file_buf); 2997 return -ENOMEM; 2998 } 2999 3000 buf = file_buf; 3001 3002 for (i = 0; i < SAVED_CMDLINES; i++) { 3003 int r; 3004 3005 pid = map_cmdline_to_pid[i]; 3006 if (pid == -1 || pid == NO_CMDLINE_MAP) 3007 continue; 3008 3009 trace_find_cmdline(pid, buf_comm); 3010 r = sprintf(buf, "%d %s\n", pid, buf_comm); 3011 buf += r; 3012 len += r; 3013 } 3014 3015 len = simple_read_from_buffer(ubuf, cnt, ppos, 3016 file_buf, len); 3017 3018 kfree(file_buf); 3019 kfree(buf_comm); 3020 3021 return len; 3022 } 3023 3024 static const struct file_operations tracing_saved_cmdlines_fops = { 3025 .open = tracing_open_generic, 3026 .read = tracing_saved_cmdlines_read, 3027 .llseek = generic_file_llseek, 3028 }; 3029 3030 static ssize_t 3031 tracing_set_trace_read(struct file *filp, char __user *ubuf, 3032 size_t cnt, loff_t *ppos) 3033 { 3034 char buf[MAX_TRACER_SIZE+2]; 3035 int r; 3036 3037 mutex_lock(&trace_types_lock); 3038 r = sprintf(buf, "%s\n", current_trace->name); 3039 mutex_unlock(&trace_types_lock); 3040 3041 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3042 } 3043 3044 int tracer_init(struct tracer *t, struct trace_array *tr) 3045 { 3046 tracing_reset_online_cpus(tr); 3047 return t->init(tr); 3048 } 3049 3050 
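/*
 * Switching tracers through debugfs exercises this path: writing a name to
 * "current_tracer" lands in tracing_set_tracer(), which expands the ring
 * buffer if it is still at its boot-time minimum and then calls tracer_init()
 * above to reset the buffers and run the tracer's init() callback.  Assuming
 * the usual debugfs mount point (as in the mini-HOWTO above), for example:
 *
 * # echo wakeup > /sys/kernel/debug/tracing/current_tracer
 * # cat /sys/kernel/debug/tracing/trace
 */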
static void set_buffer_entries(struct trace_array *tr, unsigned long val) 3051 { 3052 int cpu; 3053 for_each_tracing_cpu(cpu) 3054 tr->data[cpu]->entries = val; 3055 } 3056 3057 /* resize @tr's buffer to the size of @size_tr's entries */ 3058 static int resize_buffer_duplicate_size(struct trace_array *tr, 3059 struct trace_array *size_tr, int cpu_id) 3060 { 3061 int cpu, ret = 0; 3062 3063 if (cpu_id == RING_BUFFER_ALL_CPUS) { 3064 for_each_tracing_cpu(cpu) { 3065 ret = ring_buffer_resize(tr->buffer, 3066 size_tr->data[cpu]->entries, cpu); 3067 if (ret < 0) 3068 break; 3069 tr->data[cpu]->entries = size_tr->data[cpu]->entries; 3070 } 3071 } else { 3072 ret = ring_buffer_resize(tr->buffer, 3073 size_tr->data[cpu_id]->entries, cpu_id); 3074 if (ret == 0) 3075 tr->data[cpu_id]->entries = 3076 size_tr->data[cpu_id]->entries; 3077 } 3078 3079 return ret; 3080 } 3081 3082 static int __tracing_resize_ring_buffer(unsigned long size, int cpu) 3083 { 3084 int ret; 3085 3086 /* 3087 * If kernel or user changes the size of the ring buffer 3088 * we use the size that was given, and we can forget about 3089 * expanding it later. 3090 */ 3091 ring_buffer_expanded = 1; 3092 3093 /* May be called before buffers are initialized */ 3094 if (!global_trace.buffer) 3095 return 0; 3096 3097 ret = ring_buffer_resize(global_trace.buffer, size, cpu); 3098 if (ret < 0) 3099 return ret; 3100 3101 if (!current_trace->use_max_tr) 3102 goto out; 3103 3104 ret = ring_buffer_resize(max_tr.buffer, size, cpu); 3105 if (ret < 0) { 3106 int r = resize_buffer_duplicate_size(&global_trace, 3107 &global_trace, cpu); 3108 if (r < 0) { 3109 /* 3110 * AARGH! We are left with different 3111 * size max buffer!!!! 3112 * The max buffer is our "snapshot" buffer. 3113 * When a tracer needs a snapshot (one of the 3114 * latency tracers), it swaps the max buffer 3115 * with the saved snap shot. We succeeded to 3116 * update the size of the main buffer, but failed to 3117 * update the size of the max buffer. But when we tried 3118 * to reset the main buffer to the original size, we 3119 * failed there too. This is very unlikely to 3120 * happen, but if it does, warn and kill all 3121 * tracing. 3122 */ 3123 WARN_ON(1); 3124 tracing_disabled = 1; 3125 } 3126 return ret; 3127 } 3128 3129 if (cpu == RING_BUFFER_ALL_CPUS) 3130 set_buffer_entries(&max_tr, size); 3131 else 3132 max_tr.data[cpu]->entries = size; 3133 3134 out: 3135 if (cpu == RING_BUFFER_ALL_CPUS) 3136 set_buffer_entries(&global_trace, size); 3137 else 3138 global_trace.data[cpu]->entries = size; 3139 3140 return ret; 3141 } 3142 3143 static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id) 3144 { 3145 int ret = size; 3146 3147 mutex_lock(&trace_types_lock); 3148 3149 if (cpu_id != RING_BUFFER_ALL_CPUS) { 3150 /* make sure, this cpu is enabled in the mask */ 3151 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) { 3152 ret = -EINVAL; 3153 goto out; 3154 } 3155 } 3156 3157 ret = __tracing_resize_ring_buffer(size, cpu_id); 3158 if (ret < 0) 3159 ret = -ENOMEM; 3160 3161 out: 3162 mutex_unlock(&trace_types_lock); 3163 3164 return ret; 3165 } 3166 3167 3168 /** 3169 * tracing_update_buffers - used by tracing facility to expand ring buffers 3170 * 3171 * To save on memory when the tracing is never used on a system with it 3172 * configured in. The ring buffers are set to a minimum size. But once 3173 * a user starts to use the tracing facility, then they need to grow 3174 * to their default size. 
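 * The expansion happens only once: ring_buffer_expanded is set the first
 * time the buffers are resized, so later calls simply return.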
3175 * 3176 * This function is to be called when a tracer is about to be used. 3177 */ 3178 int tracing_update_buffers(void) 3179 { 3180 int ret = 0; 3181 3182 mutex_lock(&trace_types_lock); 3183 if (!ring_buffer_expanded) 3184 ret = __tracing_resize_ring_buffer(trace_buf_size, 3185 RING_BUFFER_ALL_CPUS); 3186 mutex_unlock(&trace_types_lock); 3187 3188 return ret; 3189 } 3190 3191 struct trace_option_dentry; 3192 3193 static struct trace_option_dentry * 3194 create_trace_option_files(struct tracer *tracer); 3195 3196 static void 3197 destroy_trace_option_files(struct trace_option_dentry *topts); 3198 3199 static int tracing_set_tracer(const char *buf) 3200 { 3201 static struct trace_option_dentry *topts; 3202 struct trace_array *tr = &global_trace; 3203 struct tracer *t; 3204 bool had_max_tr; 3205 int ret = 0; 3206 3207 mutex_lock(&trace_types_lock); 3208 3209 if (!ring_buffer_expanded) { 3210 ret = __tracing_resize_ring_buffer(trace_buf_size, 3211 RING_BUFFER_ALL_CPUS); 3212 if (ret < 0) 3213 goto out; 3214 ret = 0; 3215 } 3216 3217 for (t = trace_types; t; t = t->next) { 3218 if (strcmp(t->name, buf) == 0) 3219 break; 3220 } 3221 if (!t) { 3222 ret = -EINVAL; 3223 goto out; 3224 } 3225 if (t == current_trace) 3226 goto out; 3227 3228 trace_branch_disable(); 3229 if (current_trace->reset) 3230 current_trace->reset(tr); 3231 3232 had_max_tr = current_trace->allocated_snapshot; 3233 current_trace = &nop_trace; 3234 3235 if (had_max_tr && !t->use_max_tr) { 3236 /* 3237 * We need to make sure that the update_max_tr sees that 3238 * current_trace changed to nop_trace to keep it from 3239 * swapping the buffers after we resize it. 3240 * The update_max_tr is called from interrupts disabled 3241 * so a synchronized_sched() is sufficient. 3242 */ 3243 synchronize_sched(); 3244 /* 3245 * We don't free the ring buffer. instead, resize it because 3246 * The max_tr ring buffer has some state (e.g. ring->clock) and 3247 * we want preserve it. 3248 */ 3249 ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS); 3250 set_buffer_entries(&max_tr, 1); 3251 tracing_reset_online_cpus(&max_tr); 3252 current_trace->allocated_snapshot = false; 3253 } 3254 destroy_trace_option_files(topts); 3255 3256 topts = create_trace_option_files(t); 3257 if (t->use_max_tr && !had_max_tr) { 3258 /* we need to make per cpu buffer sizes equivalent */ 3259 ret = resize_buffer_duplicate_size(&max_tr, &global_trace, 3260 RING_BUFFER_ALL_CPUS); 3261 if (ret < 0) 3262 goto out; 3263 t->allocated_snapshot = true; 3264 } 3265 3266 if (t->init) { 3267 ret = tracer_init(t, tr); 3268 if (ret) 3269 goto out; 3270 } 3271 3272 current_trace = t; 3273 trace_branch_enable(tr); 3274 out: 3275 mutex_unlock(&trace_types_lock); 3276 3277 return ret; 3278 } 3279 3280 static ssize_t 3281 tracing_set_trace_write(struct file *filp, const char __user *ubuf, 3282 size_t cnt, loff_t *ppos) 3283 { 3284 char buf[MAX_TRACER_SIZE+1]; 3285 int i; 3286 size_t ret; 3287 int err; 3288 3289 ret = cnt; 3290 3291 if (cnt > MAX_TRACER_SIZE) 3292 cnt = MAX_TRACER_SIZE; 3293 3294 if (copy_from_user(&buf, ubuf, cnt)) 3295 return -EFAULT; 3296 3297 buf[cnt] = 0; 3298 3299 /* strip ending whitespace. 
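 * ("echo" appends a newline that would otherwise be taken as part of the
 * tracer name.)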
*/ 3300 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) 3301 buf[i] = 0; 3302 3303 err = tracing_set_tracer(buf); 3304 if (err) 3305 return err; 3306 3307 *ppos += ret; 3308 3309 return ret; 3310 } 3311 3312 static ssize_t 3313 tracing_max_lat_read(struct file *filp, char __user *ubuf, 3314 size_t cnt, loff_t *ppos) 3315 { 3316 unsigned long *ptr = filp->private_data; 3317 char buf[64]; 3318 int r; 3319 3320 r = snprintf(buf, sizeof(buf), "%ld\n", 3321 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); 3322 if (r > sizeof(buf)) 3323 r = sizeof(buf); 3324 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3325 } 3326 3327 static ssize_t 3328 tracing_max_lat_write(struct file *filp, const char __user *ubuf, 3329 size_t cnt, loff_t *ppos) 3330 { 3331 unsigned long *ptr = filp->private_data; 3332 unsigned long val; 3333 int ret; 3334 3335 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 3336 if (ret) 3337 return ret; 3338 3339 *ptr = val * 1000; 3340 3341 return cnt; 3342 } 3343 3344 static int tracing_open_pipe(struct inode *inode, struct file *filp) 3345 { 3346 long cpu_file = (long) inode->i_private; 3347 struct trace_iterator *iter; 3348 int ret = 0; 3349 3350 if (tracing_disabled) 3351 return -ENODEV; 3352 3353 mutex_lock(&trace_types_lock); 3354 3355 /* create a buffer to store the information to pass to userspace */ 3356 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 3357 if (!iter) { 3358 ret = -ENOMEM; 3359 goto out; 3360 } 3361 3362 /* 3363 * We make a copy of the current tracer to avoid concurrent 3364 * changes on it while we are reading. 3365 */ 3366 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL); 3367 if (!iter->trace) { 3368 ret = -ENOMEM; 3369 goto fail; 3370 } 3371 *iter->trace = *current_trace; 3372 3373 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 3374 ret = -ENOMEM; 3375 goto fail; 3376 } 3377 3378 /* trace pipe does not show start of buffer */ 3379 cpumask_setall(iter->started); 3380 3381 if (trace_flags & TRACE_ITER_LATENCY_FMT) 3382 iter->iter_flags |= TRACE_FILE_LAT_FMT; 3383 3384 /* Output in nanoseconds only if we are using a clock in nanoseconds. 
*/ 3385 if (trace_clocks[trace_clock_id].in_ns) 3386 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 3387 3388 iter->cpu_file = cpu_file; 3389 iter->tr = &global_trace; 3390 mutex_init(&iter->mutex); 3391 filp->private_data = iter; 3392 3393 if (iter->trace->pipe_open) 3394 iter->trace->pipe_open(iter); 3395 3396 nonseekable_open(inode, filp); 3397 out: 3398 mutex_unlock(&trace_types_lock); 3399 return ret; 3400 3401 fail: 3402 kfree(iter->trace); 3403 kfree(iter); 3404 mutex_unlock(&trace_types_lock); 3405 return ret; 3406 } 3407 3408 static int tracing_release_pipe(struct inode *inode, struct file *file) 3409 { 3410 struct trace_iterator *iter = file->private_data; 3411 3412 mutex_lock(&trace_types_lock); 3413 3414 if (iter->trace->pipe_close) 3415 iter->trace->pipe_close(iter); 3416 3417 mutex_unlock(&trace_types_lock); 3418 3419 free_cpumask_var(iter->started); 3420 mutex_destroy(&iter->mutex); 3421 kfree(iter->trace); 3422 kfree(iter); 3423 3424 return 0; 3425 } 3426 3427 static unsigned int 3428 tracing_poll_pipe(struct file *filp, poll_table *poll_table) 3429 { 3430 struct trace_iterator *iter = filp->private_data; 3431 3432 if (trace_flags & TRACE_ITER_BLOCK) { 3433 /* 3434 * Always select as readable when in blocking mode 3435 */ 3436 return POLLIN | POLLRDNORM; 3437 } else { 3438 if (!trace_empty(iter)) 3439 return POLLIN | POLLRDNORM; 3440 poll_wait(filp, &trace_wait, poll_table); 3441 if (!trace_empty(iter)) 3442 return POLLIN | POLLRDNORM; 3443 3444 return 0; 3445 } 3446 } 3447 3448 /* 3449 * This is a make-shift waitqueue. 3450 * A tracer might use this callback on some rare cases: 3451 * 3452 * 1) the current tracer might hold the runqueue lock when it wakes up 3453 * a reader, hence a deadlock (sched, function, and function graph tracers) 3454 * 2) the function tracers, trace all functions, we don't want 3455 * the overhead of calling wake_up and friends 3456 * (and tracing them too) 3457 * 3458 * Anyway, this is really very primitive wakeup. 3459 */ 3460 void poll_wait_pipe(struct trace_iterator *iter) 3461 { 3462 set_current_state(TASK_INTERRUPTIBLE); 3463 /* sleep for 100 msecs, and try again. */ 3464 schedule_timeout(HZ / 10); 3465 } 3466 3467 /* Must be called with trace_types_lock mutex held. */ 3468 static int tracing_wait_pipe(struct file *filp) 3469 { 3470 struct trace_iterator *iter = filp->private_data; 3471 3472 while (trace_empty(iter)) { 3473 3474 if ((filp->f_flags & O_NONBLOCK)) { 3475 return -EAGAIN; 3476 } 3477 3478 mutex_unlock(&iter->mutex); 3479 3480 iter->trace->wait_pipe(iter); 3481 3482 mutex_lock(&iter->mutex); 3483 3484 if (signal_pending(current)) 3485 return -EINTR; 3486 3487 /* 3488 * We block until we read something and tracing is disabled. 3489 * We still block if tracing is disabled, but we have never 3490 * read anything. This allows a user to cat this file, and 3491 * then enable tracing. But after we have read something, 3492 * we give an EOF when tracing is again disabled. 3493 * 3494 * iter->pos will be 0 if we haven't read anything. 3495 */ 3496 if (!tracing_is_enabled() && iter->pos) 3497 break; 3498 } 3499 3500 return 1; 3501 } 3502 3503 /* 3504 * Consumer reader. 
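 * Unlike the "trace" file, a read from trace_pipe consumes the events it
 * returns, so they will not be seen again by a later read.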
3505 */ 3506 static ssize_t 3507 tracing_read_pipe(struct file *filp, char __user *ubuf, 3508 size_t cnt, loff_t *ppos) 3509 { 3510 struct trace_iterator *iter = filp->private_data; 3511 ssize_t sret; 3512 3513 /* return any leftover data */ 3514 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 3515 if (sret != -EBUSY) 3516 return sret; 3517 3518 trace_seq_init(&iter->seq); 3519 3520 /* copy the tracer to avoid using a global lock all around */ 3521 mutex_lock(&trace_types_lock); 3522 if (unlikely(iter->trace->name != current_trace->name)) 3523 *iter->trace = *current_trace; 3524 mutex_unlock(&trace_types_lock); 3525 3526 /* 3527 * Avoid more than one consumer on a single file descriptor 3528 * This is just a matter of traces coherency, the ring buffer itself 3529 * is protected. 3530 */ 3531 mutex_lock(&iter->mutex); 3532 if (iter->trace->read) { 3533 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); 3534 if (sret) 3535 goto out; 3536 } 3537 3538 waitagain: 3539 sret = tracing_wait_pipe(filp); 3540 if (sret <= 0) 3541 goto out; 3542 3543 /* stop when tracing is finished */ 3544 if (trace_empty(iter)) { 3545 sret = 0; 3546 goto out; 3547 } 3548 3549 if (cnt >= PAGE_SIZE) 3550 cnt = PAGE_SIZE - 1; 3551 3552 /* reset all but tr, trace, and overruns */ 3553 memset(&iter->seq, 0, 3554 sizeof(struct trace_iterator) - 3555 offsetof(struct trace_iterator, seq)); 3556 iter->pos = -1; 3557 3558 trace_event_read_lock(); 3559 trace_access_lock(iter->cpu_file); 3560 while (trace_find_next_entry_inc(iter) != NULL) { 3561 enum print_line_t ret; 3562 int len = iter->seq.len; 3563 3564 ret = print_trace_line(iter); 3565 if (ret == TRACE_TYPE_PARTIAL_LINE) { 3566 /* don't print partial lines */ 3567 iter->seq.len = len; 3568 break; 3569 } 3570 if (ret != TRACE_TYPE_NO_CONSUME) 3571 trace_consume(iter); 3572 3573 if (iter->seq.len >= cnt) 3574 break; 3575 3576 /* 3577 * Setting the full flag means we reached the trace_seq buffer 3578 * size and we should leave by partial output condition above. 3579 * One of the trace_seq_* functions is not used properly. 3580 */ 3581 WARN_ONCE(iter->seq.full, "full flag set for trace type %d", 3582 iter->ent->type); 3583 } 3584 trace_access_unlock(iter->cpu_file); 3585 trace_event_read_unlock(); 3586 3587 /* Now copy what we have to the user */ 3588 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 3589 if (iter->seq.readpos >= iter->seq.len) 3590 trace_seq_init(&iter->seq); 3591 3592 /* 3593 * If there was nothing to send to user, in spite of consuming trace 3594 * entries, go back to wait for more entries. 3595 */ 3596 if (sret == -EBUSY) 3597 goto waitagain; 3598 3599 out: 3600 mutex_unlock(&iter->mutex); 3601 3602 return sret; 3603 } 3604 3605 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe, 3606 struct pipe_buffer *buf) 3607 { 3608 __free_page(buf->page); 3609 } 3610 3611 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, 3612 unsigned int idx) 3613 { 3614 __free_page(spd->pages[idx]); 3615 } 3616 3617 static const struct pipe_buf_operations tracing_pipe_buf_ops = { 3618 .can_merge = 0, 3619 .map = generic_pipe_buf_map, 3620 .unmap = generic_pipe_buf_unmap, 3621 .confirm = generic_pipe_buf_confirm, 3622 .release = tracing_pipe_buf_release, 3623 .steal = generic_pipe_buf_steal, 3624 .get = generic_pipe_buf_get, 3625 }; 3626 3627 static size_t 3628 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) 3629 { 3630 size_t count; 3631 int ret; 3632 3633 /* Seq buffer is page-sized, exactly what we need. 
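 * Each pass of the loop below formats one event into iter->seq, stopping once
 * the requested length is consumed or the page-sized seq buffer is full.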
*/ 3634 for (;;) { 3635 count = iter->seq.len; 3636 ret = print_trace_line(iter); 3637 count = iter->seq.len - count; 3638 if (rem < count) { 3639 rem = 0; 3640 iter->seq.len -= count; 3641 break; 3642 } 3643 if (ret == TRACE_TYPE_PARTIAL_LINE) { 3644 iter->seq.len -= count; 3645 break; 3646 } 3647 3648 if (ret != TRACE_TYPE_NO_CONSUME) 3649 trace_consume(iter); 3650 rem -= count; 3651 if (!trace_find_next_entry_inc(iter)) { 3652 rem = 0; 3653 iter->ent = NULL; 3654 break; 3655 } 3656 } 3657 3658 return rem; 3659 } 3660 3661 static ssize_t tracing_splice_read_pipe(struct file *filp, 3662 loff_t *ppos, 3663 struct pipe_inode_info *pipe, 3664 size_t len, 3665 unsigned int flags) 3666 { 3667 struct page *pages_def[PIPE_DEF_BUFFERS]; 3668 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 3669 struct trace_iterator *iter = filp->private_data; 3670 struct splice_pipe_desc spd = { 3671 .pages = pages_def, 3672 .partial = partial_def, 3673 .nr_pages = 0, /* This gets updated below. */ 3674 .nr_pages_max = PIPE_DEF_BUFFERS, 3675 .flags = flags, 3676 .ops = &tracing_pipe_buf_ops, 3677 .spd_release = tracing_spd_release_pipe, 3678 }; 3679 ssize_t ret; 3680 size_t rem; 3681 unsigned int i; 3682 3683 if (splice_grow_spd(pipe, &spd)) 3684 return -ENOMEM; 3685 3686 /* copy the tracer to avoid using a global lock all around */ 3687 mutex_lock(&trace_types_lock); 3688 if (unlikely(iter->trace->name != current_trace->name)) 3689 *iter->trace = *current_trace; 3690 mutex_unlock(&trace_types_lock); 3691 3692 mutex_lock(&iter->mutex); 3693 3694 if (iter->trace->splice_read) { 3695 ret = iter->trace->splice_read(iter, filp, 3696 ppos, pipe, len, flags); 3697 if (ret) 3698 goto out_err; 3699 } 3700 3701 ret = tracing_wait_pipe(filp); 3702 if (ret <= 0) 3703 goto out_err; 3704 3705 if (!iter->ent && !trace_find_next_entry_inc(iter)) { 3706 ret = -EFAULT; 3707 goto out_err; 3708 } 3709 3710 trace_event_read_lock(); 3711 trace_access_lock(iter->cpu_file); 3712 3713 /* Fill as many pages as possible. */ 3714 for (i = 0, rem = len; i < pipe->buffers && rem; i++) { 3715 spd.pages[i] = alloc_page(GFP_KERNEL); 3716 if (!spd.pages[i]) 3717 break; 3718 3719 rem = tracing_fill_pipe_page(rem, iter); 3720 3721 /* Copy the data into the page, so we can start over. 
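 * (trace_seq_init() below resets iter->seq once its contents have been
 * copied into the splice page.)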
*/ 3722 ret = trace_seq_to_buffer(&iter->seq, 3723 page_address(spd.pages[i]), 3724 iter->seq.len); 3725 if (ret < 0) { 3726 __free_page(spd.pages[i]); 3727 break; 3728 } 3729 spd.partial[i].offset = 0; 3730 spd.partial[i].len = iter->seq.len; 3731 3732 trace_seq_init(&iter->seq); 3733 } 3734 3735 trace_access_unlock(iter->cpu_file); 3736 trace_event_read_unlock(); 3737 mutex_unlock(&iter->mutex); 3738 3739 spd.nr_pages = i; 3740 3741 ret = splice_to_pipe(pipe, &spd); 3742 out: 3743 splice_shrink_spd(&spd); 3744 return ret; 3745 3746 out_err: 3747 mutex_unlock(&iter->mutex); 3748 goto out; 3749 } 3750 3751 struct ftrace_entries_info { 3752 struct trace_array *tr; 3753 int cpu; 3754 }; 3755 3756 static int tracing_entries_open(struct inode *inode, struct file *filp) 3757 { 3758 struct ftrace_entries_info *info; 3759 3760 if (tracing_disabled) 3761 return -ENODEV; 3762 3763 info = kzalloc(sizeof(*info), GFP_KERNEL); 3764 if (!info) 3765 return -ENOMEM; 3766 3767 info->tr = &global_trace; 3768 info->cpu = (unsigned long)inode->i_private; 3769 3770 filp->private_data = info; 3771 3772 return 0; 3773 } 3774 3775 static ssize_t 3776 tracing_entries_read(struct file *filp, char __user *ubuf, 3777 size_t cnt, loff_t *ppos) 3778 { 3779 struct ftrace_entries_info *info = filp->private_data; 3780 struct trace_array *tr = info->tr; 3781 char buf[64]; 3782 int r = 0; 3783 ssize_t ret; 3784 3785 mutex_lock(&trace_types_lock); 3786 3787 if (info->cpu == RING_BUFFER_ALL_CPUS) { 3788 int cpu, buf_size_same; 3789 unsigned long size; 3790 3791 size = 0; 3792 buf_size_same = 1; 3793 /* check if all cpu sizes are same */ 3794 for_each_tracing_cpu(cpu) { 3795 /* fill in the size from first enabled cpu */ 3796 if (size == 0) 3797 size = tr->data[cpu]->entries; 3798 if (size != tr->data[cpu]->entries) { 3799 buf_size_same = 0; 3800 break; 3801 } 3802 } 3803 3804 if (buf_size_same) { 3805 if (!ring_buffer_expanded) 3806 r = sprintf(buf, "%lu (expanded: %lu)\n", 3807 size >> 10, 3808 trace_buf_size >> 10); 3809 else 3810 r = sprintf(buf, "%lu\n", size >> 10); 3811 } else 3812 r = sprintf(buf, "X\n"); 3813 } else 3814 r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10); 3815 3816 mutex_unlock(&trace_types_lock); 3817 3818 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3819 return ret; 3820 } 3821 3822 static ssize_t 3823 tracing_entries_write(struct file *filp, const char __user *ubuf, 3824 size_t cnt, loff_t *ppos) 3825 { 3826 struct ftrace_entries_info *info = filp->private_data; 3827 unsigned long val; 3828 int ret; 3829 3830 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 3831 if (ret) 3832 return ret; 3833 3834 /* must have at least 1 entry */ 3835 if (!val) 3836 return -EINVAL; 3837 3838 /* value is in KB */ 3839 val <<= 10; 3840 3841 ret = tracing_resize_ring_buffer(val, info->cpu); 3842 if (ret < 0) 3843 return ret; 3844 3845 *ppos += cnt; 3846 3847 return cnt; 3848 } 3849 3850 static int 3851 tracing_entries_release(struct inode *inode, struct file *filp) 3852 { 3853 struct ftrace_entries_info *info = filp->private_data; 3854 3855 kfree(info); 3856 3857 return 0; 3858 } 3859 3860 static ssize_t 3861 tracing_total_entries_read(struct file *filp, char __user *ubuf, 3862 size_t cnt, loff_t *ppos) 3863 { 3864 struct trace_array *tr = filp->private_data; 3865 char buf[64]; 3866 int r, cpu; 3867 unsigned long size = 0, expanded_size = 0; 3868 3869 mutex_lock(&trace_types_lock); 3870 for_each_tracing_cpu(cpu) { 3871 size += tr->data[cpu]->entries >> 10; 3872 if (!ring_buffer_expanded) 3873 
expanded_size += trace_buf_size >> 10; 3874 } 3875 if (ring_buffer_expanded) 3876 r = sprintf(buf, "%lu\n", size); 3877 else 3878 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); 3879 mutex_unlock(&trace_types_lock); 3880 3881 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3882 } 3883 3884 static ssize_t 3885 tracing_free_buffer_write(struct file *filp, const char __user *ubuf, 3886 size_t cnt, loff_t *ppos) 3887 { 3888 /* 3889 * There is no need to read what the user has written, this function 3890 * is just to make sure that there is no error when "echo" is used 3891 */ 3892 3893 *ppos += cnt; 3894 3895 return cnt; 3896 } 3897 3898 static int 3899 tracing_free_buffer_release(struct inode *inode, struct file *filp) 3900 { 3901 /* disable tracing ? */ 3902 if (trace_flags & TRACE_ITER_STOP_ON_FREE) 3903 tracing_off(); 3904 /* resize the ring buffer to 0 */ 3905 tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS); 3906 3907 return 0; 3908 } 3909 3910 static ssize_t 3911 tracing_mark_write(struct file *filp, const char __user *ubuf, 3912 size_t cnt, loff_t *fpos) 3913 { 3914 unsigned long addr = (unsigned long)ubuf; 3915 struct ring_buffer_event *event; 3916 struct ring_buffer *buffer; 3917 struct print_entry *entry; 3918 unsigned long irq_flags; 3919 struct page *pages[2]; 3920 void *map_page[2]; 3921 int nr_pages = 1; 3922 ssize_t written; 3923 int offset; 3924 int size; 3925 int len; 3926 int ret; 3927 int i; 3928 3929 if (tracing_disabled) 3930 return -EINVAL; 3931 3932 if (!(trace_flags & TRACE_ITER_MARKERS)) 3933 return -EINVAL; 3934 3935 if (cnt > TRACE_BUF_SIZE) 3936 cnt = TRACE_BUF_SIZE; 3937 3938 /* 3939 * Userspace is injecting traces into the kernel trace buffer. 3940 * We want to be as non intrusive as possible. 3941 * To do so, we do not want to allocate any special buffers 3942 * or take any locks, but instead write the userspace data 3943 * straight into the ring buffer. 3944 * 3945 * First we need to pin the userspace buffer into memory, 3946 * which, most likely it is, because it just referenced it. 3947 * But there's no guarantee that it is. By using get_user_pages_fast() 3948 * and kmap_atomic/kunmap_atomic() we can get access to the 3949 * pages directly. We then write the data directly into the 3950 * ring buffer. 
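 * Writes are capped at TRACE_BUF_SIZE, which is smaller than a page, so the
 * user data spans at most two pages and only one or two pages ever need to
 * be pinned and mapped.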
3951 */ 3952 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); 3953 3954 /* check if we cross pages */ 3955 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK)) 3956 nr_pages = 2; 3957 3958 offset = addr & (PAGE_SIZE - 1); 3959 addr &= PAGE_MASK; 3960 3961 ret = get_user_pages_fast(addr, nr_pages, 0, pages); 3962 if (ret < nr_pages) { 3963 while (--ret >= 0) 3964 put_page(pages[ret]); 3965 written = -EFAULT; 3966 goto out; 3967 } 3968 3969 for (i = 0; i < nr_pages; i++) 3970 map_page[i] = kmap_atomic(pages[i]); 3971 3972 local_save_flags(irq_flags); 3973 size = sizeof(*entry) + cnt + 2; /* possible \n added */ 3974 buffer = global_trace.buffer; 3975 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 3976 irq_flags, preempt_count()); 3977 if (!event) { 3978 /* Ring buffer disabled, return as if not open for write */ 3979 written = -EBADF; 3980 goto out_unlock; 3981 } 3982 3983 entry = ring_buffer_event_data(event); 3984 entry->ip = _THIS_IP_; 3985 3986 if (nr_pages == 2) { 3987 len = PAGE_SIZE - offset; 3988 memcpy(&entry->buf, map_page[0] + offset, len); 3989 memcpy(&entry->buf[len], map_page[1], cnt - len); 3990 } else 3991 memcpy(&entry->buf, map_page[0] + offset, cnt); 3992 3993 if (entry->buf[cnt - 1] != '\n') { 3994 entry->buf[cnt] = '\n'; 3995 entry->buf[cnt + 1] = '\0'; 3996 } else 3997 entry->buf[cnt] = '\0'; 3998 3999 __buffer_unlock_commit(buffer, event); 4000 4001 written = cnt; 4002 4003 *fpos += written; 4004 4005 out_unlock: 4006 for (i = 0; i < nr_pages; i++){ 4007 kunmap_atomic(map_page[i]); 4008 put_page(pages[i]); 4009 } 4010 out: 4011 return written; 4012 } 4013 4014 static int tracing_clock_show(struct seq_file *m, void *v) 4015 { 4016 int i; 4017 4018 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) 4019 seq_printf(m, 4020 "%s%s%s%s", i ? " " : "", 4021 i == trace_clock_id ? "[" : "", trace_clocks[i].name, 4022 i == trace_clock_id ? "]" : ""); 4023 seq_putc(m, '\n'); 4024 4025 return 0; 4026 } 4027 4028 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, 4029 size_t cnt, loff_t *fpos) 4030 { 4031 char buf[64]; 4032 const char *clockstr; 4033 int i; 4034 4035 if (cnt >= sizeof(buf)) 4036 return -EINVAL; 4037 4038 if (copy_from_user(&buf, ubuf, cnt)) 4039 return -EFAULT; 4040 4041 buf[cnt] = 0; 4042 4043 clockstr = strstrip(buf); 4044 4045 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { 4046 if (strcmp(trace_clocks[i].name, clockstr) == 0) 4047 break; 4048 } 4049 if (i == ARRAY_SIZE(trace_clocks)) 4050 return -EINVAL; 4051 4052 trace_clock_id = i; 4053 4054 mutex_lock(&trace_types_lock); 4055 4056 ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func); 4057 if (max_tr.buffer) 4058 ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func); 4059 4060 /* 4061 * New clock may not be consistent with the previous clock. 4062 * Reset the buffer so that it doesn't have incomparable timestamps. 
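 * (Both the global buffer and the max_tr snapshot buffer are reset below, so
 * no time stamps taken with the old clock survive the switch.)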
4063 */ 4064 tracing_reset_online_cpus(&global_trace); 4065 tracing_reset_online_cpus(&max_tr); 4066 4067 mutex_unlock(&trace_types_lock); 4068 4069 *fpos += cnt; 4070 4071 return cnt; 4072 } 4073 4074 static int tracing_clock_open(struct inode *inode, struct file *file) 4075 { 4076 if (tracing_disabled) 4077 return -ENODEV; 4078 return single_open(file, tracing_clock_show, NULL); 4079 } 4080 4081 #ifdef CONFIG_TRACER_SNAPSHOT 4082 static int tracing_snapshot_open(struct inode *inode, struct file *file) 4083 { 4084 struct trace_iterator *iter; 4085 int ret = 0; 4086 4087 if (file->f_mode & FMODE_READ) { 4088 iter = __tracing_open(inode, file, true); 4089 if (IS_ERR(iter)) 4090 ret = PTR_ERR(iter); 4091 } 4092 return ret; 4093 } 4094 4095 static ssize_t 4096 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, 4097 loff_t *ppos) 4098 { 4099 unsigned long val; 4100 int ret; 4101 4102 ret = tracing_update_buffers(); 4103 if (ret < 0) 4104 return ret; 4105 4106 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 4107 if (ret) 4108 return ret; 4109 4110 mutex_lock(&trace_types_lock); 4111 4112 if (current_trace->use_max_tr) { 4113 ret = -EBUSY; 4114 goto out; 4115 } 4116 4117 switch (val) { 4118 case 0: 4119 if (current_trace->allocated_snapshot) { 4120 /* free spare buffer */ 4121 ring_buffer_resize(max_tr.buffer, 1, 4122 RING_BUFFER_ALL_CPUS); 4123 set_buffer_entries(&max_tr, 1); 4124 tracing_reset_online_cpus(&max_tr); 4125 current_trace->allocated_snapshot = false; 4126 } 4127 break; 4128 case 1: 4129 if (!current_trace->allocated_snapshot) { 4130 /* allocate spare buffer */ 4131 ret = resize_buffer_duplicate_size(&max_tr, 4132 &global_trace, RING_BUFFER_ALL_CPUS); 4133 if (ret < 0) 4134 break; 4135 current_trace->allocated_snapshot = true; 4136 } 4137 4138 local_irq_disable(); 4139 /* Now, we're going to swap */ 4140 update_max_tr(&global_trace, current, smp_processor_id()); 4141 local_irq_enable(); 4142 break; 4143 default: 4144 if (current_trace->allocated_snapshot) 4145 tracing_reset_online_cpus(&max_tr); 4146 else 4147 ret = -EINVAL; 4148 break; 4149 } 4150 4151 if (ret >= 0) { 4152 *ppos += cnt; 4153 ret = cnt; 4154 } 4155 out: 4156 mutex_unlock(&trace_types_lock); 4157 return ret; 4158 } 4159 #endif /* CONFIG_TRACER_SNAPSHOT */ 4160 4161 4162 static const struct file_operations tracing_max_lat_fops = { 4163 .open = tracing_open_generic, 4164 .read = tracing_max_lat_read, 4165 .write = tracing_max_lat_write, 4166 .llseek = generic_file_llseek, 4167 }; 4168 4169 static const struct file_operations set_tracer_fops = { 4170 .open = tracing_open_generic, 4171 .read = tracing_set_trace_read, 4172 .write = tracing_set_trace_write, 4173 .llseek = generic_file_llseek, 4174 }; 4175 4176 static const struct file_operations tracing_pipe_fops = { 4177 .open = tracing_open_pipe, 4178 .poll = tracing_poll_pipe, 4179 .read = tracing_read_pipe, 4180 .splice_read = tracing_splice_read_pipe, 4181 .release = tracing_release_pipe, 4182 .llseek = no_llseek, 4183 }; 4184 4185 static const struct file_operations tracing_entries_fops = { 4186 .open = tracing_entries_open, 4187 .read = tracing_entries_read, 4188 .write = tracing_entries_write, 4189 .release = tracing_entries_release, 4190 .llseek = generic_file_llseek, 4191 }; 4192 4193 static const struct file_operations tracing_total_entries_fops = { 4194 .open = tracing_open_generic, 4195 .read = tracing_total_entries_read, 4196 .llseek = generic_file_llseek, 4197 }; 4198 4199 static const struct file_operations 
tracing_free_buffer_fops = { 4200 .write = tracing_free_buffer_write, 4201 .release = tracing_free_buffer_release, 4202 }; 4203 4204 static const struct file_operations tracing_mark_fops = { 4205 .open = tracing_open_generic, 4206 .write = tracing_mark_write, 4207 .llseek = generic_file_llseek, 4208 }; 4209 4210 static const struct file_operations trace_clock_fops = { 4211 .open = tracing_clock_open, 4212 .read = seq_read, 4213 .llseek = seq_lseek, 4214 .release = single_release, 4215 .write = tracing_clock_write, 4216 }; 4217 4218 #ifdef CONFIG_TRACER_SNAPSHOT 4219 static const struct file_operations snapshot_fops = { 4220 .open = tracing_snapshot_open, 4221 .read = seq_read, 4222 .write = tracing_snapshot_write, 4223 .llseek = tracing_seek, 4224 .release = tracing_release, 4225 }; 4226 #endif /* CONFIG_TRACER_SNAPSHOT */ 4227 4228 struct ftrace_buffer_info { 4229 struct trace_array *tr; 4230 void *spare; 4231 int cpu; 4232 unsigned int read; 4233 }; 4234 4235 static int tracing_buffers_open(struct inode *inode, struct file *filp) 4236 { 4237 int cpu = (int)(long)inode->i_private; 4238 struct ftrace_buffer_info *info; 4239 4240 if (tracing_disabled) 4241 return -ENODEV; 4242 4243 info = kzalloc(sizeof(*info), GFP_KERNEL); 4244 if (!info) 4245 return -ENOMEM; 4246 4247 info->tr = &global_trace; 4248 info->cpu = cpu; 4249 info->spare = NULL; 4250 /* Force reading ring buffer for first read */ 4251 info->read = (unsigned int)-1; 4252 4253 filp->private_data = info; 4254 4255 return nonseekable_open(inode, filp); 4256 } 4257 4258 static ssize_t 4259 tracing_buffers_read(struct file *filp, char __user *ubuf, 4260 size_t count, loff_t *ppos) 4261 { 4262 struct ftrace_buffer_info *info = filp->private_data; 4263 ssize_t ret; 4264 size_t size; 4265 4266 if (!count) 4267 return 0; 4268 4269 if (!info->spare) 4270 info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu); 4271 if (!info->spare) 4272 return -ENOMEM; 4273 4274 /* Do we have previous read data to read? 
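 * info->read is the offset into the spare page already copied to user space;
 * a value below PAGE_SIZE means leftover data from the last
 * ring_buffer_read_page() call.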
*/ 4275 if (info->read < PAGE_SIZE) 4276 goto read; 4277 4278 trace_access_lock(info->cpu); 4279 ret = ring_buffer_read_page(info->tr->buffer, 4280 &info->spare, 4281 count, 4282 info->cpu, 0); 4283 trace_access_unlock(info->cpu); 4284 if (ret < 0) 4285 return 0; 4286 4287 info->read = 0; 4288 4289 read: 4290 size = PAGE_SIZE - info->read; 4291 if (size > count) 4292 size = count; 4293 4294 ret = copy_to_user(ubuf, info->spare + info->read, size); 4295 if (ret == size) 4296 return -EFAULT; 4297 size -= ret; 4298 4299 *ppos += size; 4300 info->read += size; 4301 4302 return size; 4303 } 4304 4305 static int tracing_buffers_release(struct inode *inode, struct file *file) 4306 { 4307 struct ftrace_buffer_info *info = file->private_data; 4308 4309 if (info->spare) 4310 ring_buffer_free_read_page(info->tr->buffer, info->spare); 4311 kfree(info); 4312 4313 return 0; 4314 } 4315 4316 struct buffer_ref { 4317 struct ring_buffer *buffer; 4318 void *page; 4319 int ref; 4320 }; 4321 4322 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, 4323 struct pipe_buffer *buf) 4324 { 4325 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 4326 4327 if (--ref->ref) 4328 return; 4329 4330 ring_buffer_free_read_page(ref->buffer, ref->page); 4331 kfree(ref); 4332 buf->private = 0; 4333 } 4334 4335 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, 4336 struct pipe_buffer *buf) 4337 { 4338 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 4339 4340 ref->ref++; 4341 } 4342 4343 /* Pipe buffer operations for a buffer. */ 4344 static const struct pipe_buf_operations buffer_pipe_buf_ops = { 4345 .can_merge = 0, 4346 .map = generic_pipe_buf_map, 4347 .unmap = generic_pipe_buf_unmap, 4348 .confirm = generic_pipe_buf_confirm, 4349 .release = buffer_pipe_buf_release, 4350 .steal = generic_pipe_buf_steal, 4351 .get = buffer_pipe_buf_get, 4352 }; 4353 4354 /* 4355 * Callback from splice_to_pipe(), if we need to release some pages 4356 * at the end of the spd in case we error'ed out in filling the pipe. 
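 * Each page in the spd carries a buffer_ref in its ->private field; dropping
 * the last reference returns the page to the ring buffer.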
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;

	if (info->spare)
		ring_buffer_free_read_page(info->tr->buffer, info->spare);
	kfree(info);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = buffer_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(): release any pages left in the spd
 * if we errored out while filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages_def,
		.partial = partial_def,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &buffer_pipe_buf_ops,
		.spd_release = buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	size_t ret;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

	trace_access_lock(info->cpu);
	entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);

	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = info->tr->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, info->cpu, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * Zero out any leftover data; this page is going out
		 * to user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
	}

	trace_access_unlock(info->cpu);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (flags & SPLICE_F_NONBLOCK)
			ret = -EAGAIN;
		else
			ret = 0;
		/* TODO: block */
		goto out;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open = tracing_buffers_open,
	.read = tracing_buffers_read,
	.release = tracing_buffers_release,
	.splice_read = tracing_buffers_splice_read,
	.llseek = no_llseek,
};

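/*
 * Example (illustrative only): because tracing_buffers_splice_read() maps
 * whole ring-buffer pages into a pipe, a consumer can move trace data into a
 * file without copying it through a user buffer.  A minimal sketch, assuming
 * debugfs is mounted at /sys/kernel/debug; error handling is trimmed and the
 * helper name is hypothetical:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	void splice_cpu0_raw(int out_fd)
 *	{
 *		int raw, p[2];
 *		ssize_t n;
 *
 *		raw = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *			   O_RDONLY);
 *		if (raw < 0 || pipe(p) < 0)
 *			return;
 *		for (;;) {
 *			// ring-buffer page -> pipe (one page; PAGE_SIZE on most systems)
 *			n = splice(raw, NULL, p[1], NULL, 4096, SPLICE_F_NONBLOCK);
 *			if (n <= 0)
 *				break;	// EAGAIN once the buffer is empty
 *			// pipe -> destination file
 *			splice(p[0], NULL, out_fd, NULL, n, 0);
 *		}
 *		close(raw);
 *	}
 *
 * The file position must be page aligned and lengths below a page are
 * rejected, matching the checks at the top of tracing_buffers_splice_read().
 */
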
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	unsigned long cpu = (unsigned long)filp->private_data;
	struct trace_array *tr = &global_trace;
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[trace_clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(tr->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(tr->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open = tracing_open_generic,
	.read = tracing_stats_read,
	.llseek = generic_file_llseek,
};

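/*
 * Example (illustrative only, values made up): reading the per-cpu "stats"
 * file produced by tracing_stats_read() yields one key/value pair per line,
 * in the order the fields are emitted above, e.g.:
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 45320
 *	oldest event ts:  5231.029834
 *	now ts:  5251.983419
 *	dropped events: 0
 *	read events: 318
 *
 * The two "ts" lines fall back to raw, unconverted timestamps when the
 * selected trace clock does not count in nanoseconds.
 */
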
#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open = tracing_open_generic,
	.read = tracing_read_dyn_info,
	.llseek = generic_file_llseek,
};
#endif

static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	if (!debugfs_initialized())
		return NULL;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}

static struct dentry *d_percpu;

static struct dentry *tracing_dentry_percpu(void)
{
	static int once;
	struct dentry *d_tracer;

	if (d_percpu)
		return d_percpu;

	d_tracer = tracing_init_dentry();

	if (!d_tracer)
		return NULL;

	d_percpu = debugfs_create_dir("per_cpu", d_tracer);

	if (!d_percpu && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'per_cpu'\n");
		return NULL;
	}

	return d_percpu;
}

static void tracing_init_debugfs_percpu(long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu();
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_file("trace_pipe", 0444, d_cpu,
			  (void *) cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_file("trace", 0644, d_cpu,
			  (void *) cpu, &tracing_fops);

	trace_create_file("trace_pipe_raw", 0444, d_cpu,
			  (void *) cpu, &tracing_buffers_fops);

	trace_create_file("stats", 0444, d_cpu,
			  (void *) cpu, &tracing_stats_fops);

	trace_create_file("buffer_size_kb", 0444, d_cpu,
			  (void *) cpu, &tracing_entries_fops);
}

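/*
 * Example (illustrative only): with debugfs mounted at /sys/kernel/debug,
 * tracing_init_debugfs_percpu() above leaves a directory like the following
 * for each tracing CPU (here CPU 0):
 *
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/
 *		buffer_size_kb
 *		stats
 *		trace
 *		trace_pipe
 *		trace_pipe_raw
 *
 * Each file is bound to its CPU number via the data pointer passed to
 * trace_create_file(), which the fops above read back out of the inode or
 * file private data to pick the right per-cpu buffer.
 */
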
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(current_trace, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek = generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;
	set_tracer_flags(1 << index, val);

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(void)
{
	struct dentry *d_tracer;
	static struct dentry *t_options;

	if (t_options)
		return t_options;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	t_options = debugfs_create_dir("options", d_tracer);
	if (!t_options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return t_options;
}

static void
create_trace_option_file(struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);

}

static struct trace_option_dentry *
create_trace_option_files(struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(&topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

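/*
 * Example (illustrative only): a tracer exposes its private options to the
 * code above by pointing its ->flags at a tracer_flags table.  For each
 * named entry, create_trace_option_files() creates options/<name> backed by
 * trace_options_fops.  A minimal sketch for a hypothetical tracer "foo"
 * with a single option (foo_set_flag is assumed, not defined here):
 *
 *	#define FOO_OPT_VERBOSE	0x1
 *
 *	static struct tracer_opt foo_opts[] = {
 *		{ .name = "foo-verbose", .bit = FOO_OPT_VERBOSE },
 *		{ }	// terminator: the scan above stops at a NULL name
 *	};
 *
 *	static struct tracer_flags foo_flags = {
 *		.val	= 0,		// all options start cleared
 *		.opts	= foo_opts,
 *	};
 *
 *	static struct tracer foo_tracer = {
 *		.name		= "foo",
 *		.flags		= &foo_flags,
 *		.set_flag	= foo_set_flag,	// called via __set_tracer_option()
 *	};
 *
 * Writing "1" or "0" to options/foo-verbose then lands in
 * trace_options_write() above, which flips the bit through
 * __set_tracer_option().
 */
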
static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(void)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(trace_options[i], i);
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->buffer;
	char buf[64];
	int r;

	if (buffer)
		r = ring_buffer_record_is_on(buffer);
	else
		r = 0;

	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			ring_buffer_record_on(buffer);
			if (current_trace->start)
				current_trace->start(tr);
		} else {
			ring_buffer_record_off(buffer);
			if (current_trace->stop)
				current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open = tracing_open_generic,
	.read = rb_simple_read,
	.write = rb_simple_write,
	.llseek = default_llseek,
};

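/*
 * Example (illustrative only): rb_simple_write() backs the top-level
 * "tracing_on" file created in tracer_init_debugfs() below, so recording can
 * be toggled from user space around a region of interest.  A minimal sketch,
 * assuming debugfs is mounted at /sys/kernel/debug; the helper name is
 * hypothetical:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static void tracing_switch(int on)
 *	{
 *		int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);
 *
 *		if (fd < 0)
 *			return;
 *		write(fd, on ? "1" : "0", 1);	// "0" stops recording, "1" resumes
 *		close(fd);
 *	}
 *
 * Writing "0" calls ring_buffer_record_off() and the current tracer's
 * ->stop() callback; writing "1" does the reverse.
 */
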
static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	int cpu;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();

	trace_create_file("trace_options", 0644, d_tracer,
			  NULL, &tracing_iter_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  NULL, &tracing_cpumask_fops);

	trace_create_file("trace", 0644, d_tracer,
			  (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);

	trace_create_file("available_tracers", 0444, d_tracer,
			  &global_trace, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  &global_trace, &set_tracer_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tracing_max_latency, &tracing_max_lat_fops);
#endif

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  &global_trace, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0644, d_tracer,
			  &global_trace, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  NULL, &tracing_mark_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("trace_clock", 0644, d_tracer, NULL,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  &global_trace, &rb_simple_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops);
#endif

	create_trace_options_dir();

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(cpu);

	return 0;
}

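/*
 * Example (illustrative only): with debugfs mounted at /sys/kernel/debug,
 * tracer_init_debugfs() above populates /sys/kernel/debug/tracing with the
 * control and output files, roughly:
 *
 *	available_tracers, current_tracer, trace, trace_pipe,
 *	trace_options, options/, tracing_cpumask, tracing_on,
 *	buffer_size_kb, buffer_total_size_kb, free_buffer,
 *	trace_marker, saved_cmdlines, trace_clock, tracing_thresh,
 *	README, per_cpu/
 *
 * plus the config-dependent entries (tracing_max_latency, snapshot,
 * dyn_ftrace_total_info) when the kernel is built with them.  Selecting a
 * tracer is then a matter of writing its name to current_tracer and reading
 * the result from trace or trace_pipe.
 */
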
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to a max of 1024; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= 1000)
		s->len = 1000;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = current_trace;
	iter->cpu_file = TRACE_PIPE_ALL_CPU;
}

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
{
	static arch_spinlock_t ftrace_dump_lock =
		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	unsigned int old_userobj;
	static int dump_ran;
	unsigned long flags;
	int cnt = 0, cpu;

	/* only one dump */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_dump_lock);
	if (dump_ran)
		goto out;

	dump_ran = 1;

	tracing_off();

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS DISABLED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	if (disable_tracing)
		ftrace_kill();

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&iter.tr->data[cpu]->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = TRACE_PIPE_ALL_CPU;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = TRACE_PIPE_ALL_CPU;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	/* Re-enable tracing if requested */
	if (!disable_tracing) {
		trace_flags |= old_userobj;

		for_each_tracing_cpu(cpu) {
			atomic_dec(&iter.tr->data[cpu]->disabled);
		}
		tracing_on();
	}

 out:
	arch_spin_unlock(&ftrace_dump_lock);
	local_irq_restore(flags);
}

/* By default: disable tracing after the dump */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	__ftrace_dump(true, oops_dump_mode);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

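/*
 * Example (illustrative only): besides the oops/panic notifiers above,
 * ftrace_dump() can be called directly from a fatal error path to push the
 * trace buffer out to the console before the system becomes unusable.  A
 * hypothetical sketch, assuming the usual declarations from
 * <linux/kernel.h>:
 *
 *	#include <linux/kernel.h>
 *
 *	static void foo_fatal_error(void)
 *	{
 *		pr_emerg("foo: controller wedged, dumping trace\n");
 *		// dump only the CPU that hit the error; DUMP_ALL dumps every CPU
 *		ftrace_dump(DUMP_ORIG);
 *	}
 *
 * Tracing stays disabled afterwards (__ftrace_dump() is called with
 * disable_tracing == true), so this is strictly a last-gasp debugging aid.
 */
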
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	enum ring_buffer_flags rb_flags;
	int i;
	int ret = -ENOMEM;


	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(tracing_cpumask, cpu_all_mask);

	/* TODO: make the number of buffers hot pluggable with CPUS */
	global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
	if (!global_trace.buffer) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}
	if (global_trace.buffer_disabled)
		tracing_off();


#ifdef CONFIG_TRACER_MAX_TRACE
	max_tr.buffer = ring_buffer_alloc(1, rb_flags);
	if (!max_tr.buffer) {
		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
		WARN_ON(1);
		ring_buffer_free(global_trace.buffer);
		goto out_free_cpumask;
	}
#endif

	/* Allocate the first page for all buffers */
	for_each_tracing_cpu(i) {
		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_tr_data, i);
	}

	set_buffer_entries(&global_trace,
			   ring_buffer_size(global_trace.buffer, 0));
#ifdef CONFIG_TRACER_MAX_TRACE
	set_buffer_entries(&max_tr, 1);
#endif

	trace_init_cmdlines();
	init_irq_work(&trace_work_wakeup, trace_wake_up);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(option);
	}

	return 0;

out_free_cpumask:
	free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name lives in an init section and will
	 * be freed after boot. This function runs as a late initcall; if the
	 * boot tracer was never registered by then, clear the pointer so that
	 * a later registration cannot access the buffer that is about to be
	 * freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);