// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/utsname.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/cleanup.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include <asm/setup.h> /* COMMAND_LINE_SIZE and kaslr_offset() */

#include "trace.h"
#include "trace_output.h"

#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing including tracers/events via kernel cmdline
 * is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

void __init disable_tracing_selftest(const char *reason)
{
	if (!tracing_selftest_disabled) {
		tracing_selftest_disabled = true;
		pr_info("Ftrace startup test is disabled due to %s\n", reason);
	}
}
#else
#define tracing_selftest_running	0
#define tracing_selftest_disabled	0
#endif

/* Pipe tracepoints to printk */
static struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 *   Set to 1 if you want to dump buffers of all CPUs
 *   Set to 2 if you want to dump the buffer of the CPU that triggered the oops
 *   Set to an instance name if you want to dump a specific trace instance
 *   Multiple instance dump is also supported, and instances are separated
 *   by commas.
 */
/* Set to the string "0" to keep it disabled by default */
char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
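
/*
 * Example (sketch, with hypothetical instance names): the settings
 * documented above can be given on the kernel command line, e.g.:
 *
 *	ftrace_dump_on_oops           - dump buffers of all CPUs on an oops
 *	ftrace_dump_on_oops=2         - dump only the CPU that triggered it
 *	ftrace_dump_on_oops=foo,bar   - dump the "foo" and "bar" instances
 *
 * or switched at run time with:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */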
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned int trace_ctx);

static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static bool snapshot_at_boot;

static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
static int boot_instance_index;

static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
static int boot_snapshot_index;

static int __init set_cmdline_ftrace(char *str)
{
	strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	trace_set_ring_buffer_expanded(NULL);
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

int ftrace_dump_on_oops_enabled(void)
{
	if (!strcmp("0", ftrace_dump_on_oops))
		return 0;
	else
		return 1;
}

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (!*str) {
		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
		return 1;
	}

	if (*str == ',') {
		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
		strscpy(ftrace_dump_on_oops + 1, str, MAX_TRACER_SIZE - 1);
		return 1;
	}

	if (*str++ == '=') {
		strscpy(ftrace_dump_on_oops, str, MAX_TRACER_SIZE);
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	char *slot = boot_snapshot_info + boot_snapshot_index;
	int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
	int ret;

	if (str[0] == '=') {
		str++;
		if (strlen(str) >= left)
			return -1;

		ret = snprintf(slot, left, "%s\t", str);
		boot_snapshot_index += ret;
	} else {
		allocate_snapshot = true;
		/* We also need the main ring buffer expanded */
		trace_set_ring_buffer_expanded(NULL);
	}
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static int __init boot_snapshot(char *str)
{
	snapshot_at_boot = true;
	boot_alloc_snapshot(str);
	return 1;
}
__setup("ftrace_boot_snapshot", boot_snapshot);


static int __init boot_instance(char *str)
{
	char *slot = boot_instance_info + boot_instance_index;
	int left = sizeof(boot_instance_info) - boot_instance_index;
	int ret;

	if (strlen(str) >= left)
		return -1;

	ret = snprintf(slot, left, "%s\t", str);
	boot_instance_index += ret;

	return 1;
}
__setup("trace_instance=", boot_instance);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 1;
}
__setup("trace_options=", set_trace_boot_options);
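
/*
 * Example (sketch): the __setup() handlers above allow tracing to be
 * configured entirely from the kernel command line, e.g.:
 *
 *	ftrace=function_graph trace_options=sym-addr trace_instance=foo
 *
 * The tracer, option and instance names here are only illustrative;
 * any registered tracer or option works. Adding "alloc_snapshot"
 * would also size the snapshot buffer at boot.
 */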
static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 1;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	/* Ignore the "tp_printk_stop_on_boot" param */
	if (*str == '_')
		return 0;

	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

static int __init set_tracepoint_printk_stop(char *str)
{
	tracepoint_printk_stop_on_boot = true;
	return 1;
}
__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event, int flag)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	if (export->flags & flag) {
		entry = ring_buffer_event_data(event);
		size = ring_buffer_event_length(event);
		export->write(export, entry, size);
	}
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_inc(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_inc(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_dec(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_dec(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_dec(&trace_marker_exports_enabled);
}

static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_check(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event, flag);
		export = rcu_dereference_raw_check(export->next);
	}

	preempt_enable_notrace();
}

static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	ftrace_exports_enable(export);

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	ftrace_exports_disable(export);

	return ret;
}

int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
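
/*
 * Example usage (sketch): a module can mirror trace data to its own
 * sink by registering a trace_export. The callback name and the flag
 * choice below are illustrative; ->write() receives the raw binary
 * trace entry and its length:
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		...copy entry/size to an out-of-band channel...
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */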
513 */ 514 return !(tr->flags & TRACE_ARRAY_FL_BOOT); 515 } 516 517 static void update_printk_trace(struct trace_array *tr) 518 { 519 if (printk_trace == tr) 520 return; 521 522 printk_trace->trace_flags &= ~TRACE_ITER_TRACE_PRINTK; 523 printk_trace = tr; 524 tr->trace_flags |= TRACE_ITER_TRACE_PRINTK; 525 } 526 527 void trace_set_ring_buffer_expanded(struct trace_array *tr) 528 { 529 if (!tr) 530 tr = &global_trace; 531 tr->ring_buffer_expanded = true; 532 } 533 534 LIST_HEAD(ftrace_trace_arrays); 535 536 int trace_array_get(struct trace_array *this_tr) 537 { 538 struct trace_array *tr; 539 540 guard(mutex)(&trace_types_lock); 541 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 542 if (tr == this_tr) { 543 tr->ref++; 544 return 0; 545 } 546 } 547 548 return -ENODEV; 549 } 550 551 static void __trace_array_put(struct trace_array *this_tr) 552 { 553 WARN_ON(!this_tr->ref); 554 this_tr->ref--; 555 } 556 557 /** 558 * trace_array_put - Decrement the reference counter for this trace array. 559 * @this_tr : pointer to the trace array 560 * 561 * NOTE: Use this when we no longer need the trace array returned by 562 * trace_array_get_by_name(). This ensures the trace array can be later 563 * destroyed. 564 * 565 */ 566 void trace_array_put(struct trace_array *this_tr) 567 { 568 if (!this_tr) 569 return; 570 571 mutex_lock(&trace_types_lock); 572 __trace_array_put(this_tr); 573 mutex_unlock(&trace_types_lock); 574 } 575 EXPORT_SYMBOL_GPL(trace_array_put); 576 577 int tracing_check_open_get_tr(struct trace_array *tr) 578 { 579 int ret; 580 581 ret = security_locked_down(LOCKDOWN_TRACEFS); 582 if (ret) 583 return ret; 584 585 if (tracing_disabled) 586 return -ENODEV; 587 588 if (tr && trace_array_get(tr) < 0) 589 return -ENODEV; 590 591 return 0; 592 } 593 594 /** 595 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list 596 * @filtered_pids: The list of pids to check 597 * @search_pid: The PID to find in @filtered_pids 598 * 599 * Returns true if @search_pid is found in @filtered_pids, and false otherwise. 600 */ 601 bool 602 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid) 603 { 604 return trace_pid_list_is_set(filtered_pids, search_pid); 605 } 606 607 /** 608 * trace_ignore_this_task - should a task be ignored for tracing 609 * @filtered_pids: The list of pids to check 610 * @filtered_no_pids: The list of pids not to be traced 611 * @task: The task that should be ignored if not filtered 612 * 613 * Checks if @task should be traced or not from @filtered_pids. 614 * Returns true if @task should *NOT* be traced. 615 * Returns false if @task should be traced. 616 */ 617 bool 618 trace_ignore_this_task(struct trace_pid_list *filtered_pids, 619 struct trace_pid_list *filtered_no_pids, 620 struct task_struct *task) 621 { 622 /* 623 * If filtered_no_pids is not empty, and the task's pid is listed 624 * in filtered_no_pids, then return true. 625 * Otherwise, if filtered_pids is empty, that means we can 626 * trace all tasks. If it has content, then only trace pids 627 * within filtered_pids. 
/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	return trace_pid_list_is_set(filtered_pids, search_pid);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */

	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		 trace_find_filtered_pid(filtered_no_pids, task->pid));
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* "self" is set for forks, and NULL for exits */
	if (self)
		trace_pid_list_set(pid_list, task->pid);
	else
		trace_pid_list_clear(pid_list, task->pid);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	long pid = (unsigned long)v;
	unsigned int next;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	if (trace_pid_list_next(pid_list, pid, &next) < 0)
		return NULL;

	pid = next;

	/* Return pid + 1 to allow zero to be represented */
	return (void *)(pid + 1);
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	unsigned int first;
	loff_t l = 0;

	if (trace_pid_list_first(pid_list, &first) < 0)
		return NULL;

	pid = first;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
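
/*
 * Worked example of the "+1" encoding used above: if the list holds
 * pids {0, 25}, trace_pid_start() returns (void *)1 for pid 0 (a bare
 * NULL would end the iteration), trace_pid_next() then returns
 * (void *)26, and trace_pid_show() subtracts one again to print
 * "0" and "25".
 */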
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = trace_pid_list_alloc();
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		ret = trace_pid_list_first(filtered_pids, &pid);
		while (!ret) {
			trace_pid_list_set(pid_list, pid);
			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
			nr_pids++;
		}
	}

	ret = 0;
	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0)
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		if (!trace_parser_loaded(&parser))
			break;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;

		pid = (pid_t)val;

		if (trace_pid_list_set(pid_list, pid) < 0) {
			ret = -1;
			break;
		}
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_pid_list_free(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_pid_list_free(pid_list);
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
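
/*
 * Example (sketch): trace_pid_write() backs tracefs pid filter files
 * such as set_event_pid. A write like:
 *
 *	echo 123 456 > /sys/kernel/tracing/set_event_pid
 *
 * builds a brand new list (copying the old one only when @filtered_pids
 * is passed in, e.g. on an appending write) before publishing it, so a
 * failed write never leaves a half-updated list, and an empty write
 * clears the filter entirely.
 */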
static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a link list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but this is only low-level
 * protection. The validity of the events (which are returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_array *tr,
				 struct trace_buffer *buffer,
				 unsigned int trace_ctx,
				 int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_array *tr,
					struct trace_buffer *buffer,
					unsigned int trace_ctx,
					int skip, struct pt_regs *regs)
{
}
/* Keep the stub's trace_ctx type consistent with the declaration above */
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned int trace_ctx)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, trace_ctx);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned int trace_ctx)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, trace_ctx);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		/* ring_buffer_unlock_commit() enables preemption */
		preempt_enable_notrace();
	} else
		ring_buffer_unlock_commit(buffer);
}

int __trace_array_puts(struct trace_array *tr, unsigned long ip,
		       const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned int trace_ctx;
	int alloc;

	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running && tr == &global_trace))
		return 0;

	if (unlikely(tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	trace_ctx = tracing_gen_ctx();
	buffer = tr->array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    trace_ctx);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_array_puts);
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	return __trace_array_puts(printk_trace, ip, str, size);
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct trace_array *tr = READ_ONCE(printk_trace);
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned int trace_ctx;
	int size = sizeof(struct bputs_entry);
	int ret = 0;

	if (!printk_binsafe(tr))
		return __trace_puts(ip, str, strlen(str));

	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	trace_ctx = tracing_gen_ctx();
	buffer = tr->array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    trace_ctx);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
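
/*
 * Example (sketch): callers normally reach the two functions above
 * through the trace_puts() macro, which records only a pointer when the
 * string is a build-time constant (__trace_bputs) and falls back to
 * copying the full string otherwise (__trace_puts):
 *
 *	trace_puts("reached the slow path\n");
 */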
#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		trace_array_puts(tr, "*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
		trace_array_puts(tr, "*** stopping trace here! ***\n");
		tracer_tracing_off(tr);
		return;
	}

	/* Note, snapshot cannot be used while the tracer uses it */
	if (tracer->use_max_tr) {
		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
		return;
	}

	if (tr->mapped) {
		trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int order;
	int ret;

	if (!tr->allocated_snapshot) {

		/* Make the snapshot buffer have the same order as main buffer */
		order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
		ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
		if (ret < 0)
			return ret;

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) that
	 * we want to preserve.
	 */
	ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

static int tracing_arm_snapshot_locked(struct trace_array *tr)
{
	int ret;

	lockdep_assert_held(&trace_types_lock);

	spin_lock(&tr->snapshot_trigger_lock);
	if (tr->snapshot == UINT_MAX || tr->mapped) {
		spin_unlock(&tr->snapshot_trigger_lock);
		return -EBUSY;
	}

	tr->snapshot++;
	spin_unlock(&tr->snapshot_trigger_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret) {
		spin_lock(&tr->snapshot_trigger_lock);
		tr->snapshot--;
		spin_unlock(&tr->snapshot_trigger_lock);
	}

	return ret;
}

int tracing_arm_snapshot(struct trace_array *tr)
{
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_arm_snapshot_locked(tr);
	mutex_unlock(&trace_types_lock);

	return ret;
}

void tracing_disarm_snapshot(struct trace_array *tr)
{
	spin_lock(&tr->snapshot_trigger_lock);
	if (!WARN_ON(!tr->snapshot))
		tr->snapshot--;
	spin_unlock(&tr->snapshot_trigger_lock);
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
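
/*
 * Example (sketch): a typical pattern is to allocate the snapshot
 * buffer from a context that may sleep, then trigger the swap from a
 * fast or atomic path when the condition of interest fires:
 *
 *	tracing_alloc_snapshot();	// setup path, may sleep
 *	...
 *	if (saw_rare_condition)		// hypothetical condition
 *		tracing_snapshot();	// swaps live and spare buffers
 */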
/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:		The tracing instance
 * @cond_data:	User data to associate with the snapshot
 * @update:	Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot __free(kfree) =
		kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	int ret;

	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	guard(mutex)(&trace_types_lock);

	if (tr->current_trace->use_max_tr)
		return -EBUSY;

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot)
		return -EBUSY;

	ret = tracing_arm_snapshot_locked(tr);
	if (ret)
		return ret;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = no_free_ptr(cond_snapshot);
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);

/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr:		The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	int ret = 0;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);

	if (!tr->cond_snapshot)
		ret = -EINVAL;
	else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}

	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	tracing_disarm_snapshot(tr);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#define free_snapshot(tr)	do { } while (0)
#define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
#endif /* CONFIG_TRACER_SNAPSHOT */
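
/*
 * Example (sketch): a conditional snapshot client supplies an update
 * callback that decides, per call, whether the swap should happen.
 * The callback name and data below are hypothetical:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		return ...;	// true means: take the snapshot
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, my_data, my_update);
 *	...
 *	tracing_snapshot_cond(tr, my_data);	// snapshots only if
 *						// my_update() returns true
 *	tracing_snapshot_cond_disable(tr);
 */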
void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_off(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning) {
		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
			"Disabling tracing due to warning\n");
		tracing_off();
	}
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/*
	 * nr_entries can not be zero and the startup
	 * tests require some buffer space. Therefore
	 * ensure we have at least 4096 bytes of buffer.
	 */
	trace_buf_size = max(4096UL, buf_size);
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}
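
/*
 * Note the difference in rounding: ns2usecs() earlier in this file
 * rounds to the nearest microsecond (ns2usecs(1500) == 2), while
 * nsecs_to_usecs() truncates (nsecs_to_usecs(1999) == 1).
 */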
/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	{ ktime_get_tai_fast_ns,	"tai",		1 },
	ARCH_TRACE_CLOCKS
};

bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}
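
/*
 * Example (sketch): the names in trace_clocks[] are what user space
 * writes to the tracefs "trace_clock" file to switch clocks, e.g.:
 *
 *	echo mono > /sys/kernel/tracing/trace_clock
 *
 * Of the entries above, "counter" and "uptime" are the two clocks that
 * do not count in nanoseconds (in_ns == 0).
 */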
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		parser->idx = 0;

		/* only spaces were written */
		if (isspace(ch) || !ch) {
			*ppos += read;
			ret = read;
			goto out;
		}
	}

	/* read the non-space input */
	while (cnt && !isspace(ch) && ch) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch) || !ch) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
		/* Make sure the parsed string always terminates with '\0'. */
		parser->buffer[parser->idx] = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

 out:
	return ret;
}

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops;

#ifdef LATENCY_FS_NOTIFY

static struct workqueue_struct *fsnotify_wq;

static void latency_fsnotify_workfn(struct work_struct *work)
{
	struct trace_array *tr = container_of(work, struct trace_array,
					      fsnotify_work);
	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
}

static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
{
	struct trace_array *tr = container_of(iwork, struct trace_array,
					      fsnotify_irqwork);
	queue_work(fsnotify_wq, &tr->fsnotify_work);
}

static void trace_create_maxlat_file(struct trace_array *tr,
				     struct dentry *d_tracer)
{
	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
	tr->d_max_latency = trace_create_file("tracing_max_latency",
					      TRACE_MODE_WRITE,
					      d_tracer, tr,
					      &tracing_max_lat_fops);
}

__init static int latency_fsnotify_init(void)
{
	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
				      WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!fsnotify_wq) {
		pr_err("Unable to allocate tr_max_lat_wq\n");
		return -ENOMEM;
	}
	return 0;
}

late_initcall_sync(latency_fsnotify_init);

void latency_fsnotify(struct trace_array *tr)
{
	if (!fsnotify_wq)
		return;
	/*
	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
	 * possible that we are called from __schedule() or do_idle(), which
	 * could cause a deadlock.
	 */
	irq_work_queue(&tr->fsnotify_irqwork);
}

#else /* !LATENCY_FS_NOTIFY */

#define trace_create_maxlat_file(tr, d_tracer)				\
	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
			  d_tracer, tr, &tracing_max_lat_fops)

#endif

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct array_buffer *trace_buf = &tr->array_buffer;
	struct array_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	strscpy(max_data->comm, tsk->comm);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
	latency_fsnotify(tr);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
	      void *cond_data)
{
	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from array_buffer */
	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

#ifdef CONFIG_TRACER_SNAPSHOT
	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
		arch_spin_unlock(&tr->max_lock);
		return;
	}
#endif
	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);

	__update_max_tr(tr, tsk, cpu);

	arch_spin_unlock(&tr->max_lock);

	/* Any waiters on the old snapshot buffer need to wake up */
	ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1989 */ 1990 void 1991 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) 1992 { 1993 int ret; 1994 1995 if (tr->stop_count) 1996 return; 1997 1998 WARN_ON_ONCE(!irqs_disabled()); 1999 if (!tr->allocated_snapshot) { 2000 /* Only the nop tracer should hit this when disabling */ 2001 WARN_ON_ONCE(tr->current_trace != &nop_trace); 2002 return; 2003 } 2004 2005 arch_spin_lock(&tr->max_lock); 2006 2007 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu); 2008 2009 if (ret == -EBUSY) { 2010 /* 2011 * We failed to swap the buffer due to a commit taking 2012 * place on this CPU. We fail to record, but we reset 2013 * the max trace buffer (no one writes directly to it) 2014 * and flag that it failed. 2015 * Another reason is resize is in progress. 2016 */ 2017 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, 2018 "Failed to swap buffers due to commit or resize in progress\n"); 2019 } 2020 2021 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); 2022 2023 __update_max_tr(tr, tsk, cpu); 2024 arch_spin_unlock(&tr->max_lock); 2025 } 2026 2027 #endif /* CONFIG_TRACER_MAX_TRACE */ 2028 2029 struct pipe_wait { 2030 struct trace_iterator *iter; 2031 int wait_index; 2032 }; 2033 2034 static bool wait_pipe_cond(void *data) 2035 { 2036 struct pipe_wait *pwait = data; 2037 struct trace_iterator *iter = pwait->iter; 2038 2039 if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index) 2040 return true; 2041 2042 return iter->closed; 2043 } 2044 2045 static int wait_on_pipe(struct trace_iterator *iter, int full) 2046 { 2047 struct pipe_wait pwait; 2048 int ret; 2049 2050 /* Iterators are static, they should be filled or empty */ 2051 if (trace_buffer_iter(iter, iter->cpu_file)) 2052 return 0; 2053 2054 pwait.wait_index = atomic_read_acquire(&iter->wait_index); 2055 pwait.iter = iter; 2056 2057 ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full, 2058 wait_pipe_cond, &pwait); 2059 2060 #ifdef CONFIG_TRACER_MAX_TRACE 2061 /* 2062 * Make sure this is still the snapshot buffer, as if a snapshot were 2063 * to happen, this would now be the main buffer. 2064 */ 2065 if (iter->snapshot) 2066 iter->array_buffer = &iter->tr->max_buffer; 2067 #endif 2068 return ret; 2069 } 2070 2071 #ifdef CONFIG_FTRACE_STARTUP_TEST 2072 static bool selftests_can_run; 2073 2074 struct trace_selftests { 2075 struct list_head list; 2076 struct tracer *type; 2077 }; 2078 2079 static LIST_HEAD(postponed_selftests); 2080 2081 static int save_selftest(struct tracer *type) 2082 { 2083 struct trace_selftests *selftest; 2084 2085 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL); 2086 if (!selftest) 2087 return -ENOMEM; 2088 2089 selftest->type = type; 2090 list_add(&selftest->list, &postponed_selftests); 2091 return 0; 2092 } 2093 2094 static int run_tracer_selftest(struct tracer *type) 2095 { 2096 struct trace_array *tr = &global_trace; 2097 struct tracer *saved_tracer = tr->current_trace; 2098 int ret; 2099 2100 if (!type->selftest || tracing_selftest_disabled) 2101 return 0; 2102 2103 /* 2104 * If a tracer registers early in boot up (before scheduling is 2105 * initialized and such), then do not run its selftests yet. 2106 * Instead, run it a little later in the boot process. 2107 */ 2108 if (!selftests_can_run) 2109 return save_selftest(type); 2110 2111 if (!tracing_is_on()) { 2112 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n", 2113 type->name); 2114 return 0; 2115 } 2116 2117 /* 2118 * Run a selftest on this tracer. 
2119 * Here we reset the trace buffer, and set the current 2120 * tracer to be this tracer. The tracer can then run some 2121 * internal tracing to verify that everything is in order. 2122 * If we fail, we do not register this tracer. 2123 */ 2124 tracing_reset_online_cpus(&tr->array_buffer); 2125 2126 tr->current_trace = type; 2127 2128 #ifdef CONFIG_TRACER_MAX_TRACE 2129 if (type->use_max_tr) { 2130 /* If we expanded the buffers, make sure the max is expanded too */ 2131 if (tr->ring_buffer_expanded) 2132 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, 2133 RING_BUFFER_ALL_CPUS); 2134 tr->allocated_snapshot = true; 2135 } 2136 #endif 2137 2138 /* the test is responsible for initializing and enabling */ 2139 pr_info("Testing tracer %s: ", type->name); 2140 ret = type->selftest(type, tr); 2141 /* the test is responsible for resetting too */ 2142 tr->current_trace = saved_tracer; 2143 if (ret) { 2144 printk(KERN_CONT "FAILED!\n"); 2145 /* Add the warning after printing 'FAILED' */ 2146 WARN_ON(1); 2147 return -1; 2148 } 2149 /* Only reset on passing, to avoid touching corrupted buffers */ 2150 tracing_reset_online_cpus(&tr->array_buffer); 2151 2152 #ifdef CONFIG_TRACER_MAX_TRACE 2153 if (type->use_max_tr) { 2154 tr->allocated_snapshot = false; 2155 2156 /* Shrink the max buffer again */ 2157 if (tr->ring_buffer_expanded) 2158 ring_buffer_resize(tr->max_buffer.buffer, 1, 2159 RING_BUFFER_ALL_CPUS); 2160 } 2161 #endif 2162 2163 printk(KERN_CONT "PASSED\n"); 2164 return 0; 2165 } 2166 2167 static int do_run_tracer_selftest(struct tracer *type) 2168 { 2169 int ret; 2170 2171 /* 2172 * Tests can take a long time, especially if they are run one after the 2173 * other, as does happen during bootup when all the tracers are 2174 * registered. This could cause the soft lockup watchdog to trigger. 2175 */ 2176 cond_resched(); 2177 2178 tracing_selftest_running = true; 2179 ret = run_tracer_selftest(type); 2180 tracing_selftest_running = false; 2181 2182 return ret; 2183 } 2184 2185 static __init int init_trace_selftests(void) 2186 { 2187 struct trace_selftests *p, *n; 2188 struct tracer *t, **last; 2189 int ret; 2190 2191 selftests_can_run = true; 2192 2193 guard(mutex)(&trace_types_lock); 2194 2195 if (list_empty(&postponed_selftests)) 2196 return 0; 2197 2198 pr_info("Running postponed tracer tests:\n"); 2199 2200 tracing_selftest_running = true; 2201 list_for_each_entry_safe(p, n, &postponed_selftests, list) { 2202 /* This loop can take minutes when sanitizers are enabled, so 2203 * lets make sure we allow RCU processing. 2204 */ 2205 cond_resched(); 2206 ret = run_tracer_selftest(p->type); 2207 /* If the test fails, then warn and remove from available_tracers */ 2208 if (ret < 0) { 2209 WARN(1, "tracer: %s failed selftest, disabling\n", 2210 p->type->name); 2211 last = &trace_types; 2212 for (t = trace_types; t; t = t->next) { 2213 if (t == p->type) { 2214 *last = t->next; 2215 break; 2216 } 2217 last = &t->next; 2218 } 2219 } 2220 list_del(&p->list); 2221 kfree(p); 2222 } 2223 tracing_selftest_running = false; 2224 2225 return 0; 2226 } 2227 core_initcall(init_trace_selftests); 2228 #else 2229 static inline int do_run_tracer_selftest(struct tracer *type) 2230 { 2231 return 0; 2232 } 2233 #endif /* CONFIG_FTRACE_STARTUP_TEST */ 2234 2235 static void add_tracer_options(struct trace_array *tr, struct tracer *t); 2236 2237 static void __init apply_trace_boot_options(void); 2238 2239 /** 2240 * register_tracer - register a tracer with the ftrace system. 
* @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Can not register tracer %s due to lockdown\n",
			type->name);
		return -EPERM;
	}

	mutex_lock(&trace_types_lock);

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else if (!type->flags->opts)
		type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = do_run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable the other selftests, since running this tracer will break them.
*/ 2321 disable_tracing_selftest("running a tracer"); 2322 2323 out_unlock: 2324 return ret; 2325 } 2326 2327 static void tracing_reset_cpu(struct array_buffer *buf, int cpu) 2328 { 2329 struct trace_buffer *buffer = buf->buffer; 2330 2331 if (!buffer) 2332 return; 2333 2334 ring_buffer_record_disable(buffer); 2335 2336 /* Make sure all commits have finished */ 2337 synchronize_rcu(); 2338 ring_buffer_reset_cpu(buffer, cpu); 2339 2340 ring_buffer_record_enable(buffer); 2341 } 2342 2343 void tracing_reset_online_cpus(struct array_buffer *buf) 2344 { 2345 struct trace_buffer *buffer = buf->buffer; 2346 2347 if (!buffer) 2348 return; 2349 2350 ring_buffer_record_disable(buffer); 2351 2352 /* Make sure all commits have finished */ 2353 synchronize_rcu(); 2354 2355 buf->time_start = buffer_ftrace_now(buf, buf->cpu); 2356 2357 ring_buffer_reset_online_cpus(buffer); 2358 2359 ring_buffer_record_enable(buffer); 2360 } 2361 2362 static void tracing_reset_all_cpus(struct array_buffer *buf) 2363 { 2364 struct trace_buffer *buffer = buf->buffer; 2365 2366 if (!buffer) 2367 return; 2368 2369 ring_buffer_record_disable(buffer); 2370 2371 /* Make sure all commits have finished */ 2372 synchronize_rcu(); 2373 2374 buf->time_start = buffer_ftrace_now(buf, buf->cpu); 2375 2376 ring_buffer_reset(buffer); 2377 2378 ring_buffer_record_enable(buffer); 2379 } 2380 2381 /* Must have trace_types_lock held */ 2382 void tracing_reset_all_online_cpus_unlocked(void) 2383 { 2384 struct trace_array *tr; 2385 2386 lockdep_assert_held(&trace_types_lock); 2387 2388 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 2389 if (!tr->clear_trace) 2390 continue; 2391 tr->clear_trace = false; 2392 tracing_reset_online_cpus(&tr->array_buffer); 2393 #ifdef CONFIG_TRACER_MAX_TRACE 2394 tracing_reset_online_cpus(&tr->max_buffer); 2395 #endif 2396 } 2397 } 2398 2399 void tracing_reset_all_online_cpus(void) 2400 { 2401 mutex_lock(&trace_types_lock); 2402 tracing_reset_all_online_cpus_unlocked(); 2403 mutex_unlock(&trace_types_lock); 2404 } 2405 2406 int is_tracing_stopped(void) 2407 { 2408 return global_trace.stop_count; 2409 } 2410 2411 static void tracing_start_tr(struct trace_array *tr) 2412 { 2413 struct trace_buffer *buffer; 2414 unsigned long flags; 2415 2416 if (tracing_disabled) 2417 return; 2418 2419 raw_spin_lock_irqsave(&tr->start_lock, flags); 2420 if (--tr->stop_count) { 2421 if (WARN_ON_ONCE(tr->stop_count < 0)) { 2422 /* Someone screwed up their debugging */ 2423 tr->stop_count = 0; 2424 } 2425 goto out; 2426 } 2427 2428 /* Prevent the buffers from switching */ 2429 arch_spin_lock(&tr->max_lock); 2430 2431 buffer = tr->array_buffer.buffer; 2432 if (buffer) 2433 ring_buffer_record_enable(buffer); 2434 2435 #ifdef CONFIG_TRACER_MAX_TRACE 2436 buffer = tr->max_buffer.buffer; 2437 if (buffer) 2438 ring_buffer_record_enable(buffer); 2439 #endif 2440 2441 arch_spin_unlock(&tr->max_lock); 2442 2443 out: 2444 raw_spin_unlock_irqrestore(&tr->start_lock, flags); 2445 } 2446 2447 /** 2448 * tracing_start - quick start of the tracer 2449 * 2450 * If tracing is enabled but was stopped by tracing_stop, 2451 * this will start the tracer back up. 
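 *
 * Note that tracing_stop()/tracing_start() calls nest via tr->stop_count;
 * recording only resumes once the count drops back to zero:
 *
 *	tracing_stop();
 *	tracing_stop();
 *	tracing_start();	// still stopped, stop_count is 1
 *	tracing_start();	// recording resumes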
2452 */ 2453 void tracing_start(void) 2454 2455 { 2456 return tracing_start_tr(&global_trace); 2457 } 2458 2459 static void tracing_stop_tr(struct trace_array *tr) 2460 { 2461 struct trace_buffer *buffer; 2462 unsigned long flags; 2463 2464 raw_spin_lock_irqsave(&tr->start_lock, flags); 2465 if (tr->stop_count++) 2466 goto out; 2467 2468 /* Prevent the buffers from switching */ 2469 arch_spin_lock(&tr->max_lock); 2470 2471 buffer = tr->array_buffer.buffer; 2472 if (buffer) 2473 ring_buffer_record_disable(buffer); 2474 2475 #ifdef CONFIG_TRACER_MAX_TRACE 2476 buffer = tr->max_buffer.buffer; 2477 if (buffer) 2478 ring_buffer_record_disable(buffer); 2479 #endif 2480 2481 arch_spin_unlock(&tr->max_lock); 2482 2483 out: 2484 raw_spin_unlock_irqrestore(&tr->start_lock, flags); 2485 } 2486 2487 /** 2488 * tracing_stop - quick stop of the tracer 2489 * 2490 * Light weight way to stop tracing. Use in conjunction with 2491 * tracing_start. 2492 */ 2493 void tracing_stop(void) 2494 { 2495 return tracing_stop_tr(&global_trace); 2496 } 2497 2498 /* 2499 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq 2500 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function 2501 * simplifies those functions and keeps them in sync. 2502 */ 2503 enum print_line_t trace_handle_return(struct trace_seq *s) 2504 { 2505 return trace_seq_has_overflowed(s) ? 2506 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; 2507 } 2508 EXPORT_SYMBOL_GPL(trace_handle_return); 2509 2510 static unsigned short migration_disable_value(void) 2511 { 2512 #if defined(CONFIG_SMP) 2513 return current->migration_disabled; 2514 #else 2515 return 0; 2516 #endif 2517 } 2518 2519 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status) 2520 { 2521 unsigned int trace_flags = irqs_status; 2522 unsigned int pc; 2523 2524 pc = preempt_count(); 2525 2526 if (pc & NMI_MASK) 2527 trace_flags |= TRACE_FLAG_NMI; 2528 if (pc & HARDIRQ_MASK) 2529 trace_flags |= TRACE_FLAG_HARDIRQ; 2530 if (in_serving_softirq()) 2531 trace_flags |= TRACE_FLAG_SOFTIRQ; 2532 if (softirq_count() >> (SOFTIRQ_SHIFT + 1)) 2533 trace_flags |= TRACE_FLAG_BH_OFF; 2534 2535 if (tif_need_resched()) 2536 trace_flags |= TRACE_FLAG_NEED_RESCHED; 2537 if (test_preempt_need_resched()) 2538 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED; 2539 if (IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY) && tif_test_bit(TIF_NEED_RESCHED_LAZY)) 2540 trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY; 2541 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) | 2542 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4; 2543 } 2544 2545 struct ring_buffer_event * 2546 trace_buffer_lock_reserve(struct trace_buffer *buffer, 2547 int type, 2548 unsigned long len, 2549 unsigned int trace_ctx) 2550 { 2551 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx); 2552 } 2553 2554 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event); 2555 DEFINE_PER_CPU(int, trace_buffered_event_cnt); 2556 static int trace_buffered_event_ref; 2557 2558 /** 2559 * trace_buffered_event_enable - enable buffering events 2560 * 2561 * When events are being filtered, it is quicker to use a temporary 2562 * buffer to write the event data into if there's a likely chance 2563 * that it will not be committed. The discard of the ring buffer 2564 * is not as fast as committing, and is much slower than copying 2565 * a commit. 
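 * (A discard has to back out an already-reserved slot with several
 * atomic operations, while an unfiltered event only costs one extra
 * memcpy() from the temporary buffer into the reserved slot.)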
2566 * 2567 * When an event is to be filtered, allocate per cpu buffers to 2568 * write the event data into, and if the event is filtered and discarded 2569 * it is simply dropped, otherwise, the entire data is to be committed 2570 * in one shot. 2571 */ 2572 void trace_buffered_event_enable(void) 2573 { 2574 struct ring_buffer_event *event; 2575 struct page *page; 2576 int cpu; 2577 2578 WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); 2579 2580 if (trace_buffered_event_ref++) 2581 return; 2582 2583 for_each_tracing_cpu(cpu) { 2584 page = alloc_pages_node(cpu_to_node(cpu), 2585 GFP_KERNEL | __GFP_NORETRY, 0); 2586 /* This is just an optimization and can handle failures */ 2587 if (!page) { 2588 pr_err("Failed to allocate event buffer\n"); 2589 break; 2590 } 2591 2592 event = page_address(page); 2593 memset(event, 0, sizeof(*event)); 2594 2595 per_cpu(trace_buffered_event, cpu) = event; 2596 2597 preempt_disable(); 2598 if (cpu == smp_processor_id() && 2599 __this_cpu_read(trace_buffered_event) != 2600 per_cpu(trace_buffered_event, cpu)) 2601 WARN_ON_ONCE(1); 2602 preempt_enable(); 2603 } 2604 } 2605 2606 static void enable_trace_buffered_event(void *data) 2607 { 2608 /* Probably not needed, but do it anyway */ 2609 smp_rmb(); 2610 this_cpu_dec(trace_buffered_event_cnt); 2611 } 2612 2613 static void disable_trace_buffered_event(void *data) 2614 { 2615 this_cpu_inc(trace_buffered_event_cnt); 2616 } 2617 2618 /** 2619 * trace_buffered_event_disable - disable buffering events 2620 * 2621 * When a filter is removed, it is faster to not use the buffered 2622 * events, and to commit directly into the ring buffer. Free up 2623 * the temp buffers when there are no more users. This requires 2624 * special synchronization with current events. 2625 */ 2626 void trace_buffered_event_disable(void) 2627 { 2628 int cpu; 2629 2630 WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); 2631 2632 if (WARN_ON_ONCE(!trace_buffered_event_ref)) 2633 return; 2634 2635 if (--trace_buffered_event_ref) 2636 return; 2637 2638 /* For each CPU, set the buffer as used. */ 2639 on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event, 2640 NULL, true); 2641 2642 /* Wait for all current users to finish */ 2643 synchronize_rcu(); 2644 2645 for_each_tracing_cpu(cpu) { 2646 free_page((unsigned long)per_cpu(trace_buffered_event, cpu)); 2647 per_cpu(trace_buffered_event, cpu) = NULL; 2648 } 2649 2650 /* 2651 * Wait for all CPUs that potentially started checking if they can use 2652 * their event buffer only after the previous synchronize_rcu() call and 2653 * they still read a valid pointer from trace_buffered_event. It must be 2654 * ensured they don't see cleared trace_buffered_event_cnt else they 2655 * could wrongly decide to use the pointed-to buffer which is now freed. 
2656 */ 2657 synchronize_rcu(); 2658 2659 /* For each CPU, relinquish the buffer */ 2660 on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL, 2661 true); 2662 } 2663 2664 static struct trace_buffer *temp_buffer; 2665 2666 struct ring_buffer_event * 2667 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb, 2668 struct trace_event_file *trace_file, 2669 int type, unsigned long len, 2670 unsigned int trace_ctx) 2671 { 2672 struct ring_buffer_event *entry; 2673 struct trace_array *tr = trace_file->tr; 2674 int val; 2675 2676 *current_rb = tr->array_buffer.buffer; 2677 2678 if (!tr->no_filter_buffering_ref && 2679 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) { 2680 preempt_disable_notrace(); 2681 /* 2682 * Filtering is on, so try to use the per cpu buffer first. 2683 * This buffer will simulate a ring_buffer_event, 2684 * where the type_len is zero and the array[0] will 2685 * hold the full length. 2686 * (see include/linux/ring-buffer.h for details on 2687 * how the ring_buffer_event is structured). 2688 * 2689 * Using a temp buffer during filtering and copying it 2690 * on a matched filter is quicker than writing directly 2691 * into the ring buffer and then discarding it when 2692 * it doesn't match. That is because the discard 2693 * requires several atomic operations to get right. 2694 * Copying on match and doing nothing on a failed match 2695 * is still quicker than no copy on match, but having 2696 * to discard out of the ring buffer on a failed match. 2697 */ 2698 if ((entry = __this_cpu_read(trace_buffered_event))) { 2699 int max_len = PAGE_SIZE - struct_size(entry, array, 1); 2700 2701 val = this_cpu_inc_return(trace_buffered_event_cnt); 2702 2703 /* 2704 * Preemption is disabled, but interrupts and NMIs 2705 * can still come in now. If that happens after 2706 * the above increment, then it will have to go 2707 * back to the old method of allocating the event 2708 * on the ring buffer, and if the filter fails, it 2709 * will have to call ring_buffer_discard_commit() 2710 * to remove it. 2711 * 2712 * Need to also check the unlikely case that the 2713 * length is bigger than the temp buffer size. 2714 * If that happens, then the reserve is pretty much 2715 * guaranteed to fail, as the ring buffer currently 2716 * only allows events less than a page. But that may 2717 * change in the future, so let the ring buffer reserve 2718 * handle the failure in that case. 2719 */ 2720 if (val == 1 && likely(len <= max_len)) { 2721 trace_event_setup(entry, type, trace_ctx); 2722 entry->array[0] = len; 2723 /* Return with preemption disabled */ 2724 return entry; 2725 } 2726 this_cpu_dec(trace_buffered_event_cnt); 2727 } 2728 /* __trace_buffer_lock_reserve() disables preemption */ 2729 preempt_enable_notrace(); 2730 } 2731 2732 entry = __trace_buffer_lock_reserve(*current_rb, type, len, 2733 trace_ctx); 2734 /* 2735 * If tracing is off, but we have triggers enabled 2736 * we still need to look at the event data. Use the temp_buffer 2737 * to store the trace event for the trigger to use. It's recursive 2738 * safe and will not be recorded anywhere. 
2739 */ 2740 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { 2741 *current_rb = temp_buffer; 2742 entry = __trace_buffer_lock_reserve(*current_rb, type, len, 2743 trace_ctx); 2744 } 2745 return entry; 2746 } 2747 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); 2748 2749 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock); 2750 static DEFINE_MUTEX(tracepoint_printk_mutex); 2751 2752 static void output_printk(struct trace_event_buffer *fbuffer) 2753 { 2754 struct trace_event_call *event_call; 2755 struct trace_event_file *file; 2756 struct trace_event *event; 2757 unsigned long flags; 2758 struct trace_iterator *iter = tracepoint_print_iter; 2759 2760 /* We should never get here if iter is NULL */ 2761 if (WARN_ON_ONCE(!iter)) 2762 return; 2763 2764 event_call = fbuffer->trace_file->event_call; 2765 if (!event_call || !event_call->event.funcs || 2766 !event_call->event.funcs->trace) 2767 return; 2768 2769 file = fbuffer->trace_file; 2770 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) || 2771 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && 2772 !filter_match_preds(file->filter, fbuffer->entry))) 2773 return; 2774 2775 event = &fbuffer->trace_file->event_call->event; 2776 2777 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags); 2778 trace_seq_init(&iter->seq); 2779 iter->ent = fbuffer->entry; 2780 event_call->event.funcs->trace(iter, 0, event); 2781 trace_seq_putc(&iter->seq, 0); 2782 printk("%s", iter->seq.buffer); 2783 2784 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags); 2785 } 2786 2787 int tracepoint_printk_sysctl(const struct ctl_table *table, int write, 2788 void *buffer, size_t *lenp, 2789 loff_t *ppos) 2790 { 2791 int save_tracepoint_printk; 2792 int ret; 2793 2794 guard(mutex)(&tracepoint_printk_mutex); 2795 save_tracepoint_printk = tracepoint_printk; 2796 2797 ret = proc_dointvec(table, write, buffer, lenp, ppos); 2798 2799 /* 2800 * This will force exiting early, as tracepoint_printk 2801 * is always zero when tracepoint_printk_iter is not allocated 2802 */ 2803 if (!tracepoint_print_iter) 2804 tracepoint_printk = 0; 2805 2806 if (save_tracepoint_printk == tracepoint_printk) 2807 return ret; 2808 2809 if (tracepoint_printk) 2810 static_key_enable(&tracepoint_printk_key.key); 2811 else 2812 static_key_disable(&tracepoint_printk_key.key); 2813 2814 return ret; 2815 } 2816 2817 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer) 2818 { 2819 enum event_trigger_type tt = ETT_NONE; 2820 struct trace_event_file *file = fbuffer->trace_file; 2821 2822 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event, 2823 fbuffer->entry, &tt)) 2824 goto discard; 2825 2826 if (static_key_false(&tracepoint_printk_key.key)) 2827 output_printk(fbuffer); 2828 2829 if (static_branch_unlikely(&trace_event_exports_enabled)) 2830 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT); 2831 2832 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer, 2833 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs); 2834 2835 discard: 2836 if (tt) 2837 event_triggers_post_call(file, tt); 2838 2839 } 2840 EXPORT_SYMBOL_GPL(trace_event_buffer_commit); 2841 2842 /* 2843 * Skip 3: 2844 * 2845 * trace_buffer_unlock_commit_regs() 2846 * trace_event_buffer_commit() 2847 * trace_event_raw_event_xxx() 2848 */ 2849 # define STACK_SKIP 3 2850 2851 void trace_buffer_unlock_commit_regs(struct trace_array *tr, 2852 struct trace_buffer *buffer, 2853 struct ring_buffer_event *event, 2854 unsigned int trace_ctx, 2855 struct pt_regs *regs) 2856 { 2857 
__buffer_unlock_commit(buffer, event); 2858 2859 /* 2860 * If regs is not set, then skip the necessary functions. 2861 * Note, we can still get here via blktrace, wakeup tracer 2862 * and mmiotrace, but that's ok if they lose a function or 2863 * two. They are not that meaningful. 2864 */ 2865 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs); 2866 ftrace_trace_userstack(tr, buffer, trace_ctx); 2867 } 2868 2869 /* 2870 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack. 2871 */ 2872 void 2873 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer, 2874 struct ring_buffer_event *event) 2875 { 2876 __buffer_unlock_commit(buffer, event); 2877 } 2878 2879 void 2880 trace_function(struct trace_array *tr, unsigned long ip, unsigned long 2881 parent_ip, unsigned int trace_ctx) 2882 { 2883 struct trace_buffer *buffer = tr->array_buffer.buffer; 2884 struct ring_buffer_event *event; 2885 struct ftrace_entry *entry; 2886 2887 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), 2888 trace_ctx); 2889 if (!event) 2890 return; 2891 entry = ring_buffer_event_data(event); 2892 entry->ip = ip; 2893 entry->parent_ip = parent_ip; 2894 2895 if (static_branch_unlikely(&trace_function_exports_enabled)) 2896 ftrace_exports(event, TRACE_EXPORT_FUNCTION); 2897 __buffer_unlock_commit(buffer, event); 2898 } 2899 2900 #ifdef CONFIG_STACKTRACE 2901 2902 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */ 2903 #define FTRACE_KSTACK_NESTING 4 2904 2905 #define FTRACE_KSTACK_ENTRIES (SZ_4K / FTRACE_KSTACK_NESTING) 2906 2907 struct ftrace_stack { 2908 unsigned long calls[FTRACE_KSTACK_ENTRIES]; 2909 }; 2910 2911 2912 struct ftrace_stacks { 2913 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING]; 2914 }; 2915 2916 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks); 2917 static DEFINE_PER_CPU(int, ftrace_stack_reserve); 2918 2919 static void __ftrace_trace_stack(struct trace_array *tr, 2920 struct trace_buffer *buffer, 2921 unsigned int trace_ctx, 2922 int skip, struct pt_regs *regs) 2923 { 2924 struct ring_buffer_event *event; 2925 unsigned int size, nr_entries; 2926 struct ftrace_stack *fstack; 2927 struct stack_entry *entry; 2928 int stackidx; 2929 2930 /* 2931 * Add one, for this function and the call to save_stack_trace() 2932 * If regs is set, then these functions will not be in the way. 2933 */ 2934 #ifndef CONFIG_UNWINDER_ORC 2935 if (!regs) 2936 skip++; 2937 #endif 2938 2939 preempt_disable_notrace(); 2940 2941 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1; 2942 2943 /* This should never happen. If it does, yell once and skip */ 2944 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING)) 2945 goto out; 2946 2947 /* 2948 * The above __this_cpu_inc_return() is 'atomic' cpu local. An 2949 * interrupt will either see the value pre increment or post 2950 * increment. If the interrupt happens pre increment it will have 2951 * restored the counter when it returns. We just need a barrier to 2952 * keep gcc from moving things around. 
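 * (The counter is what indexes ftrace_stacks.stacks[]: each context
 * that interrupts mid-record -- normal, softirq, irq, NMI -- gets the
 * next slot, which FTRACE_KSTACK_NESTING caps at four.)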
2953 */ 2954 barrier(); 2955 2956 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx; 2957 size = ARRAY_SIZE(fstack->calls); 2958 2959 if (regs) { 2960 nr_entries = stack_trace_save_regs(regs, fstack->calls, 2961 size, skip); 2962 } else { 2963 nr_entries = stack_trace_save(fstack->calls, size, skip); 2964 } 2965 2966 #ifdef CONFIG_DYNAMIC_FTRACE 2967 /* Mark entry of stack trace as trampoline code */ 2968 if (tr->ops && tr->ops->trampoline) { 2969 unsigned long tramp_start = tr->ops->trampoline; 2970 unsigned long tramp_end = tramp_start + tr->ops->trampoline_size; 2971 unsigned long *calls = fstack->calls; 2972 2973 for (int i = 0; i < nr_entries; i++) { 2974 if (calls[i] >= tramp_start && calls[i] < tramp_end) 2975 calls[i] = FTRACE_TRAMPOLINE_MARKER; 2976 } 2977 } 2978 #endif 2979 2980 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, 2981 struct_size(entry, caller, nr_entries), 2982 trace_ctx); 2983 if (!event) 2984 goto out; 2985 entry = ring_buffer_event_data(event); 2986 2987 entry->size = nr_entries; 2988 memcpy(&entry->caller, fstack->calls, 2989 flex_array_size(entry, caller, nr_entries)); 2990 2991 __buffer_unlock_commit(buffer, event); 2992 2993 out: 2994 /* Again, don't let gcc optimize things here */ 2995 barrier(); 2996 __this_cpu_dec(ftrace_stack_reserve); 2997 preempt_enable_notrace(); 2998 2999 } 3000 3001 static inline void ftrace_trace_stack(struct trace_array *tr, 3002 struct trace_buffer *buffer, 3003 unsigned int trace_ctx, 3004 int skip, struct pt_regs *regs) 3005 { 3006 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) 3007 return; 3008 3009 __ftrace_trace_stack(tr, buffer, trace_ctx, skip, regs); 3010 } 3011 3012 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, 3013 int skip) 3014 { 3015 struct trace_buffer *buffer = tr->array_buffer.buffer; 3016 3017 if (rcu_is_watching()) { 3018 __ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL); 3019 return; 3020 } 3021 3022 if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY))) 3023 return; 3024 3025 /* 3026 * When an NMI triggers, RCU is enabled via ct_nmi_enter(), 3027 * but if the above rcu_is_watching() failed, then the NMI 3028 * triggered someplace critical, and ct_irq_enter() should 3029 * not be called from NMI. 3030 */ 3031 if (unlikely(in_nmi())) 3032 return; 3033 3034 ct_irq_enter_irqson(); 3035 __ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL); 3036 ct_irq_exit_irqson(); 3037 } 3038 3039 /** 3040 * trace_dump_stack - record a stack back trace in the trace buffer 3041 * @skip: Number of functions to skip (helper handlers) 3042 */ 3043 void trace_dump_stack(int skip) 3044 { 3045 if (tracing_disabled || tracing_selftest_running) 3046 return; 3047 3048 #ifndef CONFIG_UNWINDER_ORC 3049 /* Skip 1 to skip this function. */ 3050 skip++; 3051 #endif 3052 __ftrace_trace_stack(printk_trace, printk_trace->array_buffer.buffer, 3053 tracing_gen_ctx(), skip, NULL); 3054 } 3055 EXPORT_SYMBOL_GPL(trace_dump_stack); 3056 3057 #ifdef CONFIG_USER_STACKTRACE_SUPPORT 3058 static DEFINE_PER_CPU(int, user_stack_count); 3059 3060 static void 3061 ftrace_trace_userstack(struct trace_array *tr, 3062 struct trace_buffer *buffer, unsigned int trace_ctx) 3063 { 3064 struct ring_buffer_event *event; 3065 struct userstack_entry *entry; 3066 3067 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE)) 3068 return; 3069 3070 /* 3071 * NMIs can not handle page faults, even with fix ups. 3072 * The save user stack can (and often does) fault. 
3073 */ 3074 if (unlikely(in_nmi())) 3075 return; 3076 3077 /* 3078 * prevent recursion, since the user stack tracing may 3079 * trigger other kernel events. 3080 */ 3081 preempt_disable(); 3082 if (__this_cpu_read(user_stack_count)) 3083 goto out; 3084 3085 __this_cpu_inc(user_stack_count); 3086 3087 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, 3088 sizeof(*entry), trace_ctx); 3089 if (!event) 3090 goto out_drop_count; 3091 entry = ring_buffer_event_data(event); 3092 3093 entry->tgid = current->tgid; 3094 memset(&entry->caller, 0, sizeof(entry->caller)); 3095 3096 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES); 3097 __buffer_unlock_commit(buffer, event); 3098 3099 out_drop_count: 3100 __this_cpu_dec(user_stack_count); 3101 out: 3102 preempt_enable(); 3103 } 3104 #else /* CONFIG_USER_STACKTRACE_SUPPORT */ 3105 static void ftrace_trace_userstack(struct trace_array *tr, 3106 struct trace_buffer *buffer, 3107 unsigned int trace_ctx) 3108 { 3109 } 3110 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */ 3111 3112 #endif /* CONFIG_STACKTRACE */ 3113 3114 static inline void 3115 func_repeats_set_delta_ts(struct func_repeats_entry *entry, 3116 unsigned long long delta) 3117 { 3118 entry->bottom_delta_ts = delta & U32_MAX; 3119 entry->top_delta_ts = (delta >> 32); 3120 } 3121 3122 void trace_last_func_repeats(struct trace_array *tr, 3123 struct trace_func_repeats *last_info, 3124 unsigned int trace_ctx) 3125 { 3126 struct trace_buffer *buffer = tr->array_buffer.buffer; 3127 struct func_repeats_entry *entry; 3128 struct ring_buffer_event *event; 3129 u64 delta; 3130 3131 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS, 3132 sizeof(*entry), trace_ctx); 3133 if (!event) 3134 return; 3135 3136 delta = ring_buffer_event_time_stamp(buffer, event) - 3137 last_info->ts_last_call; 3138 3139 entry = ring_buffer_event_data(event); 3140 entry->ip = last_info->ip; 3141 entry->parent_ip = last_info->parent_ip; 3142 entry->count = last_info->count; 3143 func_repeats_set_delta_ts(entry, delta); 3144 3145 __buffer_unlock_commit(buffer, event); 3146 } 3147 3148 /* created for use with alloc_percpu */ 3149 struct trace_buffer_struct { 3150 int nesting; 3151 char buffer[4][TRACE_BUF_SIZE]; 3152 }; 3153 3154 static struct trace_buffer_struct __percpu *trace_percpu_buffer; 3155 3156 /* 3157 * This allows for lockless recording. If we're nested too deeply, then 3158 * this returns NULL. 
3159 */ 3160 static char *get_trace_buf(void) 3161 { 3162 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer); 3163 3164 if (!trace_percpu_buffer || buffer->nesting >= 4) 3165 return NULL; 3166 3167 buffer->nesting++; 3168 3169 /* Interrupts must see nesting incremented before we use the buffer */ 3170 barrier(); 3171 return &buffer->buffer[buffer->nesting - 1][0]; 3172 } 3173 3174 static void put_trace_buf(void) 3175 { 3176 /* Don't let the decrement of nesting leak before this */ 3177 barrier(); 3178 this_cpu_dec(trace_percpu_buffer->nesting); 3179 } 3180 3181 static int alloc_percpu_trace_buffer(void) 3182 { 3183 struct trace_buffer_struct __percpu *buffers; 3184 3185 if (trace_percpu_buffer) 3186 return 0; 3187 3188 buffers = alloc_percpu(struct trace_buffer_struct); 3189 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer")) 3190 return -ENOMEM; 3191 3192 trace_percpu_buffer = buffers; 3193 return 0; 3194 } 3195 3196 static int buffers_allocated; 3197 3198 void trace_printk_init_buffers(void) 3199 { 3200 if (buffers_allocated) 3201 return; 3202 3203 if (alloc_percpu_trace_buffer()) 3204 return; 3205 3206 /* trace_printk() is for debug use only. Don't use it in production. */ 3207 3208 pr_warn("\n"); 3209 pr_warn("**********************************************************\n"); 3210 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3211 pr_warn("** **\n"); 3212 pr_warn("** trace_printk() being used. Allocating extra memory. **\n"); 3213 pr_warn("** **\n"); 3214 pr_warn("** This means that this is a DEBUG kernel and it is **\n"); 3215 pr_warn("** unsafe for production use. **\n"); 3216 pr_warn("** **\n"); 3217 pr_warn("** If you see this message and you are not debugging **\n"); 3218 pr_warn("** the kernel, report this immediately to your vendor! **\n"); 3219 pr_warn("** **\n"); 3220 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3221 pr_warn("**********************************************************\n"); 3222 3223 /* Expand the buffers to set size */ 3224 tracing_update_buffers(&global_trace); 3225 3226 buffers_allocated = 1; 3227 3228 /* 3229 * trace_printk_init_buffers() can be called by modules. 3230 * If that happens, then we need to start cmdline recording 3231 * directly here. If the global_trace.buffer is already 3232 * allocated here, then this was called by module code. 
3233 */ 3234 if (global_trace.array_buffer.buffer) 3235 tracing_start_cmdline_record(); 3236 } 3237 EXPORT_SYMBOL_GPL(trace_printk_init_buffers); 3238 3239 void trace_printk_start_comm(void) 3240 { 3241 /* Start tracing comms if trace printk is set */ 3242 if (!buffers_allocated) 3243 return; 3244 tracing_start_cmdline_record(); 3245 } 3246 3247 static void trace_printk_start_stop_comm(int enabled) 3248 { 3249 if (!buffers_allocated) 3250 return; 3251 3252 if (enabled) 3253 tracing_start_cmdline_record(); 3254 else 3255 tracing_stop_cmdline_record(); 3256 } 3257 3258 /** 3259 * trace_vbprintk - write binary msg to tracing buffer 3260 * @ip: The address of the caller 3261 * @fmt: The string format to write to the buffer 3262 * @args: Arguments for @fmt 3263 */ 3264 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 3265 { 3266 struct ring_buffer_event *event; 3267 struct trace_buffer *buffer; 3268 struct trace_array *tr = READ_ONCE(printk_trace); 3269 struct bprint_entry *entry; 3270 unsigned int trace_ctx; 3271 char *tbuffer; 3272 int len = 0, size; 3273 3274 if (!printk_binsafe(tr)) 3275 return trace_vprintk(ip, fmt, args); 3276 3277 if (unlikely(tracing_selftest_running || tracing_disabled)) 3278 return 0; 3279 3280 /* Don't pollute graph traces with trace_vprintk internals */ 3281 pause_graph_tracing(); 3282 3283 trace_ctx = tracing_gen_ctx(); 3284 preempt_disable_notrace(); 3285 3286 tbuffer = get_trace_buf(); 3287 if (!tbuffer) { 3288 len = 0; 3289 goto out_nobuffer; 3290 } 3291 3292 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); 3293 3294 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) 3295 goto out_put; 3296 3297 size = sizeof(*entry) + sizeof(u32) * len; 3298 buffer = tr->array_buffer.buffer; 3299 ring_buffer_nest_start(buffer); 3300 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, 3301 trace_ctx); 3302 if (!event) 3303 goto out; 3304 entry = ring_buffer_event_data(event); 3305 entry->ip = ip; 3306 entry->fmt = fmt; 3307 3308 memcpy(entry->buf, tbuffer, sizeof(u32) * len); 3309 __buffer_unlock_commit(buffer, event); 3310 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL); 3311 3312 out: 3313 ring_buffer_nest_end(buffer); 3314 out_put: 3315 put_trace_buf(); 3316 3317 out_nobuffer: 3318 preempt_enable_notrace(); 3319 unpause_graph_tracing(); 3320 3321 return len; 3322 } 3323 EXPORT_SYMBOL_GPL(trace_vbprintk); 3324 3325 __printf(3, 0) 3326 static int 3327 __trace_array_vprintk(struct trace_buffer *buffer, 3328 unsigned long ip, const char *fmt, va_list args) 3329 { 3330 struct ring_buffer_event *event; 3331 int len = 0, size; 3332 struct print_entry *entry; 3333 unsigned int trace_ctx; 3334 char *tbuffer; 3335 3336 if (tracing_disabled) 3337 return 0; 3338 3339 /* Don't pollute graph traces with trace_vprintk internals */ 3340 pause_graph_tracing(); 3341 3342 trace_ctx = tracing_gen_ctx(); 3343 preempt_disable_notrace(); 3344 3345 3346 tbuffer = get_trace_buf(); 3347 if (!tbuffer) { 3348 len = 0; 3349 goto out_nobuffer; 3350 } 3351 3352 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); 3353 3354 size = sizeof(*entry) + len + 1; 3355 ring_buffer_nest_start(buffer); 3356 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 3357 trace_ctx); 3358 if (!event) 3359 goto out; 3360 entry = ring_buffer_event_data(event); 3361 entry->ip = ip; 3362 3363 memcpy(&entry->buf, tbuffer, len + 1); 3364 __buffer_unlock_commit(buffer, event); 3365 ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL); 3366 3367 out: 3368 
ring_buffer_nest_end(buffer); 3369 put_trace_buf(); 3370 3371 out_nobuffer: 3372 preempt_enable_notrace(); 3373 unpause_graph_tracing(); 3374 3375 return len; 3376 } 3377 3378 __printf(3, 0) 3379 int trace_array_vprintk(struct trace_array *tr, 3380 unsigned long ip, const char *fmt, va_list args) 3381 { 3382 if (tracing_selftest_running && tr == &global_trace) 3383 return 0; 3384 3385 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args); 3386 } 3387 3388 /** 3389 * trace_array_printk - Print a message to a specific instance 3390 * @tr: The instance trace_array descriptor 3391 * @ip: The instruction pointer that this is called from. 3392 * @fmt: The format to print (printf format) 3393 * 3394 * If a subsystem sets up its own instance, they have the right to 3395 * printk strings into their tracing instance buffer using this 3396 * function. Note, this function will not write into the top level 3397 * buffer (use trace_printk() for that), as writing into the top level 3398 * buffer should only have events that can be individually disabled. 3399 * trace_printk() is only used for debugging a kernel, and should not 3400 * be ever incorporated in normal use. 3401 * 3402 * trace_array_printk() can be used, as it will not add noise to the 3403 * top level tracing buffer. 3404 * 3405 * Note, trace_array_init_printk() must be called on @tr before this 3406 * can be used. 3407 */ 3408 __printf(3, 0) 3409 int trace_array_printk(struct trace_array *tr, 3410 unsigned long ip, const char *fmt, ...) 3411 { 3412 int ret; 3413 va_list ap; 3414 3415 if (!tr) 3416 return -ENOENT; 3417 3418 /* This is only allowed for created instances */ 3419 if (tr == &global_trace) 3420 return 0; 3421 3422 if (!(tr->trace_flags & TRACE_ITER_PRINTK)) 3423 return 0; 3424 3425 va_start(ap, fmt); 3426 ret = trace_array_vprintk(tr, ip, fmt, ap); 3427 va_end(ap); 3428 return ret; 3429 } 3430 EXPORT_SYMBOL_GPL(trace_array_printk); 3431 3432 /** 3433 * trace_array_init_printk - Initialize buffers for trace_array_printk() 3434 * @tr: The trace array to initialize the buffers for 3435 * 3436 * As trace_array_printk() only writes into instances, they are OK to 3437 * have in the kernel (unlike trace_printk()). This needs to be called 3438 * before trace_array_printk() can be used on a trace_array. 3439 */ 3440 int trace_array_init_printk(struct trace_array *tr) 3441 { 3442 if (!tr) 3443 return -ENOENT; 3444 3445 /* This is only allowed for created instances */ 3446 if (tr == &global_trace) 3447 return -EINVAL; 3448 3449 return alloc_percpu_trace_buffer(); 3450 } 3451 EXPORT_SYMBOL_GPL(trace_array_init_printk); 3452 3453 __printf(3, 4) 3454 int trace_array_printk_buf(struct trace_buffer *buffer, 3455 unsigned long ip, const char *fmt, ...) 
3456 { 3457 int ret; 3458 va_list ap; 3459 3460 if (!(printk_trace->trace_flags & TRACE_ITER_PRINTK)) 3461 return 0; 3462 3463 va_start(ap, fmt); 3464 ret = __trace_array_vprintk(buffer, ip, fmt, ap); 3465 va_end(ap); 3466 return ret; 3467 } 3468 3469 __printf(2, 0) 3470 int trace_vprintk(unsigned long ip, const char *fmt, va_list args) 3471 { 3472 return trace_array_vprintk(printk_trace, ip, fmt, args); 3473 } 3474 EXPORT_SYMBOL_GPL(trace_vprintk); 3475 3476 static void trace_iterator_increment(struct trace_iterator *iter) 3477 { 3478 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); 3479 3480 iter->idx++; 3481 if (buf_iter) 3482 ring_buffer_iter_advance(buf_iter); 3483 } 3484 3485 static struct trace_entry * 3486 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, 3487 unsigned long *lost_events) 3488 { 3489 struct ring_buffer_event *event; 3490 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); 3491 3492 if (buf_iter) { 3493 event = ring_buffer_iter_peek(buf_iter, ts); 3494 if (lost_events) 3495 *lost_events = ring_buffer_iter_dropped(buf_iter) ? 3496 (unsigned long)-1 : 0; 3497 } else { 3498 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts, 3499 lost_events); 3500 } 3501 3502 if (event) { 3503 iter->ent_size = ring_buffer_event_length(event); 3504 return ring_buffer_event_data(event); 3505 } 3506 iter->ent_size = 0; 3507 return NULL; 3508 } 3509 3510 static struct trace_entry * 3511 __find_next_entry(struct trace_iterator *iter, int *ent_cpu, 3512 unsigned long *missing_events, u64 *ent_ts) 3513 { 3514 struct trace_buffer *buffer = iter->array_buffer->buffer; 3515 struct trace_entry *ent, *next = NULL; 3516 unsigned long lost_events = 0, next_lost = 0; 3517 int cpu_file = iter->cpu_file; 3518 u64 next_ts = 0, ts; 3519 int next_cpu = -1; 3520 int next_size = 0; 3521 int cpu; 3522 3523 /* 3524 * If we are in a per_cpu trace file, don't bother by iterating over 3525 * all cpu and peek directly. 3526 */ 3527 if (cpu_file > RING_BUFFER_ALL_CPUS) { 3528 if (ring_buffer_empty_cpu(buffer, cpu_file)) 3529 return NULL; 3530 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); 3531 if (ent_cpu) 3532 *ent_cpu = cpu_file; 3533 3534 return ent; 3535 } 3536 3537 for_each_tracing_cpu(cpu) { 3538 3539 if (ring_buffer_empty_cpu(buffer, cpu)) 3540 continue; 3541 3542 ent = peek_next_entry(iter, cpu, &ts, &lost_events); 3543 3544 /* 3545 * Pick the entry with the smallest timestamp: 3546 */ 3547 if (ent && (!next || ts < next_ts)) { 3548 next = ent; 3549 next_cpu = cpu; 3550 next_ts = ts; 3551 next_lost = lost_events; 3552 next_size = iter->ent_size; 3553 } 3554 } 3555 3556 iter->ent_size = next_size; 3557 3558 if (ent_cpu) 3559 *ent_cpu = next_cpu; 3560 3561 if (ent_ts) 3562 *ent_ts = next_ts; 3563 3564 if (missing_events) 3565 *missing_events = next_lost; 3566 3567 return next; 3568 } 3569 3570 #define STATIC_FMT_BUF_SIZE 128 3571 static char static_fmt_buf[STATIC_FMT_BUF_SIZE]; 3572 3573 char *trace_iter_expand_format(struct trace_iterator *iter) 3574 { 3575 char *tmp; 3576 3577 /* 3578 * iter->tr is NULL when used with tp_printk, which makes 3579 * this get called where it is not safe to call krealloc(). 
3580 */ 3581 if (!iter->tr || iter->fmt == static_fmt_buf) 3582 return NULL; 3583 3584 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE, 3585 GFP_KERNEL); 3586 if (tmp) { 3587 iter->fmt_size += STATIC_FMT_BUF_SIZE; 3588 iter->fmt = tmp; 3589 } 3590 3591 return tmp; 3592 } 3593 3594 /* Returns true if the string is safe to dereference from an event */ 3595 static bool trace_safe_str(struct trace_iterator *iter, const char *str) 3596 { 3597 unsigned long addr = (unsigned long)str; 3598 struct trace_event *trace_event; 3599 struct trace_event_call *event; 3600 3601 /* OK if part of the event data */ 3602 if ((addr >= (unsigned long)iter->ent) && 3603 (addr < (unsigned long)iter->ent + iter->ent_size)) 3604 return true; 3605 3606 /* OK if part of the temp seq buffer */ 3607 if ((addr >= (unsigned long)iter->tmp_seq.buffer) && 3608 (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE)) 3609 return true; 3610 3611 /* Core rodata can not be freed */ 3612 if (is_kernel_rodata(addr)) 3613 return true; 3614 3615 if (trace_is_tracepoint_string(str)) 3616 return true; 3617 3618 /* 3619 * Now this could be a module event, referencing core module 3620 * data, which is OK. 3621 */ 3622 if (!iter->ent) 3623 return false; 3624 3625 trace_event = ftrace_find_event(iter->ent->type); 3626 if (!trace_event) 3627 return false; 3628 3629 event = container_of(trace_event, struct trace_event_call, event); 3630 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module) 3631 return false; 3632 3633 /* Would rather have rodata, but this will suffice */ 3634 if (within_module_core(addr, event->module)) 3635 return true; 3636 3637 return false; 3638 } 3639 3640 /** 3641 * ignore_event - Check dereferenced fields while writing to the seq buffer 3642 * @iter: The iterator that holds the seq buffer and the event being printed 3643 * 3644 * At boot up, test_event_printk() will flag any event that dereferences 3645 * a string with "%s" that does exist in the ring buffer. It may still 3646 * be valid, as the string may point to a static string in the kernel 3647 * rodata that never gets freed. But if the string pointer is pointing 3648 * to something that was allocated, there's a chance that it can be freed 3649 * by the time the user reads the trace. This would cause a bad memory 3650 * access by the kernel and possibly crash the system. 3651 * 3652 * This function will check if the event has any fields flagged as needing 3653 * to be checked at runtime and perform those checks. 3654 * 3655 * If it is found that a field is unsafe, it will write into the @iter->seq 3656 * a message stating what was found to be unsafe. 3657 * 3658 * @return: true if the event is unsafe and should be ignored, 3659 * false otherwise. 
3660 */ 3661 bool ignore_event(struct trace_iterator *iter) 3662 { 3663 struct ftrace_event_field *field; 3664 struct trace_event *trace_event; 3665 struct trace_event_call *event; 3666 struct list_head *head; 3667 struct trace_seq *seq; 3668 const void *ptr; 3669 3670 trace_event = ftrace_find_event(iter->ent->type); 3671 3672 seq = &iter->seq; 3673 3674 if (!trace_event) { 3675 trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type); 3676 return true; 3677 } 3678 3679 event = container_of(trace_event, struct trace_event_call, event); 3680 if (!(event->flags & TRACE_EVENT_FL_TEST_STR)) 3681 return false; 3682 3683 head = trace_get_fields(event); 3684 if (!head) { 3685 trace_seq_printf(seq, "FIELDS FOR EVENT '%s' NOT FOUND?\n", 3686 trace_event_name(event)); 3687 return true; 3688 } 3689 3690 /* Offsets are from the iter->ent that points to the raw event */ 3691 ptr = iter->ent; 3692 3693 list_for_each_entry(field, head, link) { 3694 const char *str; 3695 bool good; 3696 3697 if (!field->needs_test) 3698 continue; 3699 3700 str = *(const char **)(ptr + field->offset); 3701 3702 good = trace_safe_str(iter, str); 3703 3704 /* 3705 * If you hit this warning, it is likely that the 3706 * trace event in question used %s on a string that 3707 * was saved at the time of the event, but may not be 3708 * around when the trace is read. Use __string(), 3709 * __assign_str() and __get_str() helpers in the TRACE_EVENT() 3710 * instead. See samples/trace_events/trace-events-sample.h 3711 * for reference. 3712 */ 3713 if (WARN_ONCE(!good, "event '%s' has unsafe pointer field '%s'", 3714 trace_event_name(event), field->name)) { 3715 trace_seq_printf(seq, "EVENT %s: HAS UNSAFE POINTER FIELD '%s'\n", 3716 trace_event_name(event), field->name); 3717 return true; 3718 } 3719 } 3720 return false; 3721 } 3722 3723 const char *trace_event_format(struct trace_iterator *iter, const char *fmt) 3724 { 3725 const char *p, *new_fmt; 3726 char *q; 3727 3728 if (WARN_ON_ONCE(!fmt)) 3729 return fmt; 3730 3731 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR) 3732 return fmt; 3733 3734 p = fmt; 3735 new_fmt = q = iter->fmt; 3736 while (*p) { 3737 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) { 3738 if (!trace_iter_expand_format(iter)) 3739 return fmt; 3740 3741 q += iter->fmt - new_fmt; 3742 new_fmt = iter->fmt; 3743 } 3744 3745 *q++ = *p++; 3746 3747 /* Replace %p with %px */ 3748 if (p[-1] == '%') { 3749 if (p[0] == '%') { 3750 *q++ = *p++; 3751 } else if (p[0] == 'p' && !isalnum(p[1])) { 3752 *q++ = *p++; 3753 *q++ = 'x'; 3754 } 3755 } 3756 } 3757 *q = '\0'; 3758 3759 return new_fmt; 3760 } 3761 3762 #define STATIC_TEMP_BUF_SIZE 128 3763 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4); 3764 3765 /* Find the next real entry, without updating the iterator itself */ 3766 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, 3767 int *ent_cpu, u64 *ent_ts) 3768 { 3769 /* __find_next_entry will reset ent_size */ 3770 int ent_size = iter->ent_size; 3771 struct trace_entry *entry; 3772 3773 /* 3774 * If called from ftrace_dump(), then the iter->temp buffer 3775 * will be the static_temp_buf and not created from kmalloc. 3776 * If the entry size is greater than the buffer, we can 3777 * not save it. Just return NULL in that case. This is only 3778 * used to add markers when two consecutive events' time 3779 * stamps have a large delta. 
See trace_print_lat_context() 3780 */ 3781 if (iter->temp == static_temp_buf && 3782 STATIC_TEMP_BUF_SIZE < ent_size) 3783 return NULL; 3784 3785 /* 3786 * The __find_next_entry() may call peek_next_entry(), which may 3787 * call ring_buffer_peek() that may make the contents of iter->ent 3788 * undefined. Need to copy iter->ent now. 3789 */ 3790 if (iter->ent && iter->ent != iter->temp) { 3791 if ((!iter->temp || iter->temp_size < iter->ent_size) && 3792 !WARN_ON_ONCE(iter->temp == static_temp_buf)) { 3793 void *temp; 3794 temp = kmalloc(iter->ent_size, GFP_KERNEL); 3795 if (!temp) 3796 return NULL; 3797 kfree(iter->temp); 3798 iter->temp = temp; 3799 iter->temp_size = iter->ent_size; 3800 } 3801 memcpy(iter->temp, iter->ent, iter->ent_size); 3802 iter->ent = iter->temp; 3803 } 3804 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts); 3805 /* Put back the original ent_size */ 3806 iter->ent_size = ent_size; 3807 3808 return entry; 3809 } 3810 3811 /* Find the next real entry, and increment the iterator to the next entry */ 3812 void *trace_find_next_entry_inc(struct trace_iterator *iter) 3813 { 3814 iter->ent = __find_next_entry(iter, &iter->cpu, 3815 &iter->lost_events, &iter->ts); 3816 3817 if (iter->ent) 3818 trace_iterator_increment(iter); 3819 3820 return iter->ent ? iter : NULL; 3821 } 3822 3823 static void trace_consume(struct trace_iterator *iter) 3824 { 3825 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts, 3826 &iter->lost_events); 3827 } 3828 3829 static void *s_next(struct seq_file *m, void *v, loff_t *pos) 3830 { 3831 struct trace_iterator *iter = m->private; 3832 int i = (int)*pos; 3833 void *ent; 3834 3835 WARN_ON_ONCE(iter->leftover); 3836 3837 (*pos)++; 3838 3839 /* can't go backwards */ 3840 if (iter->idx > i) 3841 return NULL; 3842 3843 if (iter->idx < 0) 3844 ent = trace_find_next_entry_inc(iter); 3845 else 3846 ent = iter; 3847 3848 while (ent && iter->idx < i) 3849 ent = trace_find_next_entry_inc(iter); 3850 3851 iter->pos = *pos; 3852 3853 return ent; 3854 } 3855 3856 void tracing_iter_reset(struct trace_iterator *iter, int cpu) 3857 { 3858 struct ring_buffer_iter *buf_iter; 3859 unsigned long entries = 0; 3860 u64 ts; 3861 3862 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0; 3863 3864 buf_iter = trace_buffer_iter(iter, cpu); 3865 if (!buf_iter) 3866 return; 3867 3868 ring_buffer_iter_reset(buf_iter); 3869 3870 /* 3871 * We could have the case with the max latency tracers 3872 * that a reset never took place on a cpu. This is evident 3873 * by the timestamp being before the start of the buffer. 3874 */ 3875 while (ring_buffer_iter_peek(buf_iter, &ts)) { 3876 if (ts >= iter->array_buffer->time_start) 3877 break; 3878 entries++; 3879 ring_buffer_iter_advance(buf_iter); 3880 /* This could be a big loop */ 3881 cond_resched(); 3882 } 3883 3884 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; 3885 } 3886 3887 /* 3888 * The current tracer is copied to avoid a global locking 3889 * all around. 
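 * s_start() below only takes trace_types_lock long enough to
 * resynchronize iter->trace with tr->current_trace when they differ.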
3890 */ 3891 static void *s_start(struct seq_file *m, loff_t *pos) 3892 { 3893 struct trace_iterator *iter = m->private; 3894 struct trace_array *tr = iter->tr; 3895 int cpu_file = iter->cpu_file; 3896 void *p = NULL; 3897 loff_t l = 0; 3898 int cpu; 3899 3900 mutex_lock(&trace_types_lock); 3901 if (unlikely(tr->current_trace != iter->trace)) { 3902 /* Close iter->trace before switching to the new current tracer */ 3903 if (iter->trace->close) 3904 iter->trace->close(iter); 3905 iter->trace = tr->current_trace; 3906 /* Reopen the new current tracer */ 3907 if (iter->trace->open) 3908 iter->trace->open(iter); 3909 } 3910 mutex_unlock(&trace_types_lock); 3911 3912 #ifdef CONFIG_TRACER_MAX_TRACE 3913 if (iter->snapshot && iter->trace->use_max_tr) 3914 return ERR_PTR(-EBUSY); 3915 #endif 3916 3917 if (*pos != iter->pos) { 3918 iter->ent = NULL; 3919 iter->cpu = 0; 3920 iter->idx = -1; 3921 3922 if (cpu_file == RING_BUFFER_ALL_CPUS) { 3923 for_each_tracing_cpu(cpu) 3924 tracing_iter_reset(iter, cpu); 3925 } else 3926 tracing_iter_reset(iter, cpu_file); 3927 3928 iter->leftover = 0; 3929 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) 3930 ; 3931 3932 } else { 3933 /* 3934 * If we overflowed the seq_file before, then we want 3935 * to just reuse the trace_seq buffer again. 3936 */ 3937 if (iter->leftover) 3938 p = iter; 3939 else { 3940 l = *pos - 1; 3941 p = s_next(m, p, &l); 3942 } 3943 } 3944 3945 trace_event_read_lock(); 3946 trace_access_lock(cpu_file); 3947 return p; 3948 } 3949 3950 static void s_stop(struct seq_file *m, void *p) 3951 { 3952 struct trace_iterator *iter = m->private; 3953 3954 #ifdef CONFIG_TRACER_MAX_TRACE 3955 if (iter->snapshot && iter->trace->use_max_tr) 3956 return; 3957 #endif 3958 3959 trace_access_unlock(iter->cpu_file); 3960 trace_event_read_unlock(); 3961 } 3962 3963 static void 3964 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total, 3965 unsigned long *entries, int cpu) 3966 { 3967 unsigned long count; 3968 3969 count = ring_buffer_entries_cpu(buf->buffer, cpu); 3970 /* 3971 * If this buffer has skipped entries, then we hold all 3972 * entries for the trace and we need to ignore the 3973 * ones before the time stamp. 
3974 */ 3975 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { 3976 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; 3977 /* total is the same as the entries */ 3978 *total = count; 3979 } else 3980 *total = count + 3981 ring_buffer_overrun_cpu(buf->buffer, cpu); 3982 *entries = count; 3983 } 3984 3985 static void 3986 get_total_entries(struct array_buffer *buf, 3987 unsigned long *total, unsigned long *entries) 3988 { 3989 unsigned long t, e; 3990 int cpu; 3991 3992 *total = 0; 3993 *entries = 0; 3994 3995 for_each_tracing_cpu(cpu) { 3996 get_total_entries_cpu(buf, &t, &e, cpu); 3997 *total += t; 3998 *entries += e; 3999 } 4000 } 4001 4002 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu) 4003 { 4004 unsigned long total, entries; 4005 4006 if (!tr) 4007 tr = &global_trace; 4008 4009 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu); 4010 4011 return entries; 4012 } 4013 4014 unsigned long trace_total_entries(struct trace_array *tr) 4015 { 4016 unsigned long total, entries; 4017 4018 if (!tr) 4019 tr = &global_trace; 4020 4021 get_total_entries(&tr->array_buffer, &total, &entries); 4022 4023 return entries; 4024 } 4025 4026 static void print_lat_help_header(struct seq_file *m) 4027 { 4028 seq_puts(m, "# _------=> CPU# \n" 4029 "# / _-----=> irqs-off/BH-disabled\n" 4030 "# | / _----=> need-resched \n" 4031 "# || / _---=> hardirq/softirq \n" 4032 "# ||| / _--=> preempt-depth \n" 4033 "# |||| / _-=> migrate-disable \n" 4034 "# ||||| / delay \n" 4035 "# cmd pid |||||| time | caller \n" 4036 "# \\ / |||||| \\ | / \n"); 4037 } 4038 4039 static void print_event_info(struct array_buffer *buf, struct seq_file *m) 4040 { 4041 unsigned long total; 4042 unsigned long entries; 4043 4044 get_total_entries(buf, &total, &entries); 4045 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", 4046 entries, total, num_online_cpus()); 4047 seq_puts(m, "#\n"); 4048 } 4049 4050 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m, 4051 unsigned int flags) 4052 { 4053 bool tgid = flags & TRACE_ITER_RECORD_TGID; 4054 4055 print_event_info(buf, m); 4056 4057 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : ""); 4058 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); 4059 } 4060 4061 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m, 4062 unsigned int flags) 4063 { 4064 bool tgid = flags & TRACE_ITER_RECORD_TGID; 4065 static const char space[] = " "; 4066 int prec = tgid ? 
12 : 2; 4067 4068 print_event_info(buf, m); 4069 4070 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space); 4071 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); 4072 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); 4073 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); 4074 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space); 4075 seq_printf(m, "# %.*s|||| / delay\n", prec, space); 4076 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID "); 4077 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | "); 4078 } 4079 4080 void 4081 print_trace_header(struct seq_file *m, struct trace_iterator *iter) 4082 { 4083 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); 4084 struct array_buffer *buf = iter->array_buffer; 4085 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); 4086 struct tracer *type = iter->trace; 4087 unsigned long entries; 4088 unsigned long total; 4089 const char *name = type->name; 4090 4091 get_total_entries(buf, &total, &entries); 4092 4093 seq_printf(m, "# %s latency trace v1.1.5 on %s\n", 4094 name, init_utsname()->release); 4095 seq_puts(m, "# -----------------------------------" 4096 "---------------------------------\n"); 4097 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" 4098 " (M:%s VP:%d, KP:%d, SP:%d HP:%d", 4099 nsecs_to_usecs(data->saved_latency), 4100 entries, 4101 total, 4102 buf->cpu, 4103 preempt_model_none() ? "server" : 4104 preempt_model_voluntary() ? "desktop" : 4105 preempt_model_full() ? "preempt" : 4106 preempt_model_lazy() ? "lazy" : 4107 preempt_model_rt() ? "preempt_rt" : 4108 "unknown", 4109 /* These are reserved for later use */ 4110 0, 0, 0, 0); 4111 #ifdef CONFIG_SMP 4112 seq_printf(m, " #P:%d)\n", num_online_cpus()); 4113 #else 4114 seq_puts(m, ")\n"); 4115 #endif 4116 seq_puts(m, "# -----------------\n"); 4117 seq_printf(m, "# | task: %.16s-%d " 4118 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", 4119 data->comm, data->pid, 4120 from_kuid_munged(seq_user_ns(m), data->uid), data->nice, 4121 data->policy, data->rt_priority); 4122 seq_puts(m, "# -----------------\n"); 4123 4124 if (data->critical_start) { 4125 seq_puts(m, "# => started at: "); 4126 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); 4127 trace_print_seq(m, &iter->seq); 4128 seq_puts(m, "\n# => ended at: "); 4129 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); 4130 trace_print_seq(m, &iter->seq); 4131 seq_puts(m, "\n#\n"); 4132 } 4133 4134 seq_puts(m, "#\n"); 4135 } 4136 4137 static void test_cpu_buff_start(struct trace_iterator *iter) 4138 { 4139 struct trace_seq *s = &iter->seq; 4140 struct trace_array *tr = iter->tr; 4141 4142 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) 4143 return; 4144 4145 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 4146 return; 4147 4148 if (cpumask_available(iter->started) && 4149 cpumask_test_cpu(iter->cpu, iter->started)) 4150 return; 4151 4152 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries) 4153 return; 4154 4155 if (cpumask_available(iter->started)) 4156 cpumask_set_cpu(iter->cpu, iter->started); 4157 4158 /* Don't print started cpu buffer for the first entry of the trace */ 4159 if (iter->idx > 1) 4160 trace_seq_printf(s, "##### CPU %u buffer started ####\n", 4161 iter->cpu); 4162 } 4163 4164 static enum print_line_t print_trace_fmt(struct trace_iterator *iter) 4165 { 4166 struct trace_array *tr = iter->tr; 4167 struct trace_seq *s = &iter->seq; 4168 unsigned 
long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); 4169 struct trace_entry *entry; 4170 struct trace_event *event; 4171 4172 entry = iter->ent; 4173 4174 test_cpu_buff_start(iter); 4175 4176 event = ftrace_find_event(entry->type); 4177 4178 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 4179 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 4180 trace_print_lat_context(iter); 4181 else 4182 trace_print_context(iter); 4183 } 4184 4185 if (trace_seq_has_overflowed(s)) 4186 return TRACE_TYPE_PARTIAL_LINE; 4187 4188 if (event) { 4189 if (tr->trace_flags & TRACE_ITER_FIELDS) 4190 return print_event_fields(iter, event); 4191 /* 4192 * For TRACE_EVENT() events, the print_fmt is not 4193 * safe to use if the array has delta offsets 4194 * Force printing via the fields. 4195 */ 4196 if ((tr->text_delta) && 4197 event->type > __TRACE_LAST_TYPE) 4198 return print_event_fields(iter, event); 4199 4200 return event->funcs->trace(iter, sym_flags, event); 4201 } 4202 4203 trace_seq_printf(s, "Unknown type %d\n", entry->type); 4204 4205 return trace_handle_return(s); 4206 } 4207 4208 static enum print_line_t print_raw_fmt(struct trace_iterator *iter) 4209 { 4210 struct trace_array *tr = iter->tr; 4211 struct trace_seq *s = &iter->seq; 4212 struct trace_entry *entry; 4213 struct trace_event *event; 4214 4215 entry = iter->ent; 4216 4217 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) 4218 trace_seq_printf(s, "%d %d %llu ", 4219 entry->pid, iter->cpu, iter->ts); 4220 4221 if (trace_seq_has_overflowed(s)) 4222 return TRACE_TYPE_PARTIAL_LINE; 4223 4224 event = ftrace_find_event(entry->type); 4225 if (event) 4226 return event->funcs->raw(iter, 0, event); 4227 4228 trace_seq_printf(s, "%d ?\n", entry->type); 4229 4230 return trace_handle_return(s); 4231 } 4232 4233 static enum print_line_t print_hex_fmt(struct trace_iterator *iter) 4234 { 4235 struct trace_array *tr = iter->tr; 4236 struct trace_seq *s = &iter->seq; 4237 unsigned char newline = '\n'; 4238 struct trace_entry *entry; 4239 struct trace_event *event; 4240 4241 entry = iter->ent; 4242 4243 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 4244 SEQ_PUT_HEX_FIELD(s, entry->pid); 4245 SEQ_PUT_HEX_FIELD(s, iter->cpu); 4246 SEQ_PUT_HEX_FIELD(s, iter->ts); 4247 if (trace_seq_has_overflowed(s)) 4248 return TRACE_TYPE_PARTIAL_LINE; 4249 } 4250 4251 event = ftrace_find_event(entry->type); 4252 if (event) { 4253 enum print_line_t ret = event->funcs->hex(iter, 0, event); 4254 if (ret != TRACE_TYPE_HANDLED) 4255 return ret; 4256 } 4257 4258 SEQ_PUT_FIELD(s, newline); 4259 4260 return trace_handle_return(s); 4261 } 4262 4263 static enum print_line_t print_bin_fmt(struct trace_iterator *iter) 4264 { 4265 struct trace_array *tr = iter->tr; 4266 struct trace_seq *s = &iter->seq; 4267 struct trace_entry *entry; 4268 struct trace_event *event; 4269 4270 entry = iter->ent; 4271 4272 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 4273 SEQ_PUT_FIELD(s, entry->pid); 4274 SEQ_PUT_FIELD(s, iter->cpu); 4275 SEQ_PUT_FIELD(s, iter->ts); 4276 if (trace_seq_has_overflowed(s)) 4277 return TRACE_TYPE_PARTIAL_LINE; 4278 } 4279 4280 event = ftrace_find_event(entry->type); 4281 return event ? 
event->funcs->binary(iter, 0, event) : 4282 TRACE_TYPE_HANDLED; 4283 } 4284 4285 int trace_empty(struct trace_iterator *iter) 4286 { 4287 struct ring_buffer_iter *buf_iter; 4288 int cpu; 4289 4290 /* If we are looking at one CPU buffer, only check that one */ 4291 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 4292 cpu = iter->cpu_file; 4293 buf_iter = trace_buffer_iter(iter, cpu); 4294 if (buf_iter) { 4295 if (!ring_buffer_iter_empty(buf_iter)) 4296 return 0; 4297 } else { 4298 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) 4299 return 0; 4300 } 4301 return 1; 4302 } 4303 4304 for_each_tracing_cpu(cpu) { 4305 buf_iter = trace_buffer_iter(iter, cpu); 4306 if (buf_iter) { 4307 if (!ring_buffer_iter_empty(buf_iter)) 4308 return 0; 4309 } else { 4310 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) 4311 return 0; 4312 } 4313 } 4314 4315 return 1; 4316 } 4317 4318 /* Called with trace_event_read_lock() held. */ 4319 enum print_line_t print_trace_line(struct trace_iterator *iter) 4320 { 4321 struct trace_array *tr = iter->tr; 4322 unsigned long trace_flags = tr->trace_flags; 4323 enum print_line_t ret; 4324 4325 if (iter->lost_events) { 4326 if (iter->lost_events == (unsigned long)-1) 4327 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n", 4328 iter->cpu); 4329 else 4330 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", 4331 iter->cpu, iter->lost_events); 4332 if (trace_seq_has_overflowed(&iter->seq)) 4333 return TRACE_TYPE_PARTIAL_LINE; 4334 } 4335 4336 if (iter->trace && iter->trace->print_line) { 4337 ret = iter->trace->print_line(iter); 4338 if (ret != TRACE_TYPE_UNHANDLED) 4339 return ret; 4340 } 4341 4342 if (iter->ent->type == TRACE_BPUTS && 4343 trace_flags & TRACE_ITER_PRINTK && 4344 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 4345 return trace_print_bputs_msg_only(iter); 4346 4347 if (iter->ent->type == TRACE_BPRINT && 4348 trace_flags & TRACE_ITER_PRINTK && 4349 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 4350 return trace_print_bprintk_msg_only(iter); 4351 4352 if (iter->ent->type == TRACE_PRINT && 4353 trace_flags & TRACE_ITER_PRINTK && 4354 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 4355 return trace_print_printk_msg_only(iter); 4356 4357 if (trace_flags & TRACE_ITER_BIN) 4358 return print_bin_fmt(iter); 4359 4360 if (trace_flags & TRACE_ITER_HEX) 4361 return print_hex_fmt(iter); 4362 4363 if (trace_flags & TRACE_ITER_RAW) 4364 return print_raw_fmt(iter); 4365 4366 return print_trace_fmt(iter); 4367 } 4368 4369 void trace_latency_header(struct seq_file *m) 4370 { 4371 struct trace_iterator *iter = m->private; 4372 struct trace_array *tr = iter->tr; 4373 4374 /* print nothing if the buffers are empty */ 4375 if (trace_empty(iter)) 4376 return; 4377 4378 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 4379 print_trace_header(m, iter); 4380 4381 if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) 4382 print_lat_help_header(m); 4383 } 4384 4385 void trace_default_header(struct seq_file *m) 4386 { 4387 struct trace_iterator *iter = m->private; 4388 struct trace_array *tr = iter->tr; 4389 unsigned long trace_flags = tr->trace_flags; 4390 4391 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) 4392 return; 4393 4394 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 4395 /* print nothing if the buffers are empty */ 4396 if (trace_empty(iter)) 4397 return; 4398 print_trace_header(m, iter); 4399 if (!(trace_flags & TRACE_ITER_VERBOSE)) 4400 print_lat_help_header(m); 4401 } else { 4402 if (!(trace_flags & TRACE_ITER_VERBOSE)) { 4403 if (trace_flags & TRACE_ITER_IRQ_INFO) 4404 
print_func_help_header_irq(iter->array_buffer, 4405 m, trace_flags); 4406 else 4407 print_func_help_header(iter->array_buffer, m, 4408 trace_flags); 4409 } 4410 } 4411 } 4412 4413 static void test_ftrace_alive(struct seq_file *m) 4414 { 4415 if (!ftrace_is_dead()) 4416 return; 4417 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n" 4418 "# MAY BE MISSING FUNCTION EVENTS\n"); 4419 } 4420 4421 #ifdef CONFIG_TRACER_MAX_TRACE 4422 static void show_snapshot_main_help(struct seq_file *m) 4423 { 4424 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n" 4425 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" 4426 "# Takes a snapshot of the main buffer.\n" 4427 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n" 4428 "# (Doesn't have to be '2' works with any number that\n" 4429 "# is not a '0' or '1')\n"); 4430 } 4431 4432 static void show_snapshot_percpu_help(struct seq_file *m) 4433 { 4434 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); 4435 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 4436 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" 4437 "# Takes a snapshot of the main buffer for this cpu.\n"); 4438 #else 4439 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n" 4440 "# Must use main snapshot file to allocate.\n"); 4441 #endif 4442 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n" 4443 "# (Doesn't have to be '2' works with any number that\n" 4444 "# is not a '0' or '1')\n"); 4445 } 4446 4447 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) 4448 { 4449 if (iter->tr->allocated_snapshot) 4450 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n"); 4451 else 4452 seq_puts(m, "#\n# * Snapshot is freed *\n#\n"); 4453 4454 seq_puts(m, "# Snapshot commands:\n"); 4455 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 4456 show_snapshot_main_help(m); 4457 else 4458 show_snapshot_percpu_help(m); 4459 } 4460 #else 4461 /* Should never be called */ 4462 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } 4463 #endif 4464 4465 static int s_show(struct seq_file *m, void *v) 4466 { 4467 struct trace_iterator *iter = v; 4468 int ret; 4469 4470 if (iter->ent == NULL) { 4471 if (iter->tr) { 4472 seq_printf(m, "# tracer: %s\n", iter->trace->name); 4473 seq_puts(m, "#\n"); 4474 test_ftrace_alive(m); 4475 } 4476 if (iter->snapshot && trace_empty(iter)) 4477 print_snapshot_help(m, iter); 4478 else if (iter->trace && iter->trace->print_header) 4479 iter->trace->print_header(m); 4480 else 4481 trace_default_header(m); 4482 4483 } else if (iter->leftover) { 4484 /* 4485 * If we filled the seq_file buffer earlier, we 4486 * want to just show it now. 4487 */ 4488 ret = trace_print_seq(m, &iter->seq); 4489 4490 /* ret should this time be zero, but you never know */ 4491 iter->leftover = ret; 4492 4493 } else { 4494 ret = print_trace_line(iter); 4495 if (ret == TRACE_TYPE_PARTIAL_LINE) { 4496 iter->seq.full = 0; 4497 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n"); 4498 } 4499 ret = trace_print_seq(m, &iter->seq); 4500 /* 4501 * If we overflow the seq_file buffer, then it will 4502 * ask us for this data again at start up. 4503 * Use that instead. 4504 * ret is 0 if seq_file write succeeded. 4505 * -1 otherwise. 
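 *
 * On the next s_show() call, the iter->leftover branch above replays
 * the saved trace_seq contents instead of fetching a new entry, so
 * nothing is lost across the seq_file buffer boundary.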
4506 	 */
4507 		iter->leftover = ret;
4508 	}
4509 
4510 	return 0;
4511 }
4512 
4513 /*
4514  * Should be used after trace_array_get(), trace_types_lock
4515  * ensures that i_cdev was already initialized.
4516  */
4517 static inline int tracing_get_cpu(struct inode *inode)
4518 {
4519 	if (inode->i_cdev) /* See trace_create_cpu_file() */
4520 		return (long)inode->i_cdev - 1;
4521 	return RING_BUFFER_ALL_CPUS;
4522 }
4523 
4524 static const struct seq_operations tracer_seq_ops = {
4525 	.start		= s_start,
4526 	.next		= s_next,
4527 	.stop		= s_stop,
4528 	.show		= s_show,
4529 };
4530 
4531 /*
4532  * Note, as iter itself can be allocated and freed in different
4533  * ways, this function is only used to free its content, and not
4534  * the iterator itself. The only requirement for all the allocations
4535  * is that they must zero all fields (kzalloc), as freeing works with
4536  * either allocated content or NULL.
4537  */
4538 static void free_trace_iter_content(struct trace_iterator *iter)
4539 {
4540 	/* The fmt is either NULL, allocated or points to static_fmt_buf */
4541 	if (iter->fmt != static_fmt_buf)
4542 		kfree(iter->fmt);
4543 
4544 	kfree(iter->temp);
4545 	kfree(iter->buffer_iter);
4546 	mutex_destroy(&iter->mutex);
4547 	free_cpumask_var(iter->started);
4548 }
4549 
4550 static struct trace_iterator *
4551 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4552 {
4553 	struct trace_array *tr = inode->i_private;
4554 	struct trace_iterator *iter;
4555 	int cpu;
4556 
4557 	if (tracing_disabled)
4558 		return ERR_PTR(-ENODEV);
4559 
4560 	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4561 	if (!iter)
4562 		return ERR_PTR(-ENOMEM);
4563 
4564 	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4565 				    GFP_KERNEL);
4566 	if (!iter->buffer_iter)
4567 		goto release;
4568 
4569 	/*
4570 	 * trace_find_next_entry() may need to save off iter->ent.
4571 	 * It will place it into the iter->temp buffer. As most
4572 	 * events are less than 128 bytes, allocate a buffer of that size.
4573 	 * If one is greater, then trace_find_next_entry() will
4574 	 * allocate a new buffer to adjust for the bigger iter->ent.
4575 	 * It's not critical if it fails to get allocated here.
4576 	 */
4577 	iter->temp = kmalloc(128, GFP_KERNEL);
4578 	if (iter->temp)
4579 		iter->temp_size = 128;
4580 
4581 	/*
4582 	 * trace_event_printf() may need to modify the given format
4583 	 * string to replace %p with %px so that it shows the real address
4584 	 * instead of a hash value. However, that is only needed for event
4585 	 * tracing; other tracers may not need it. Defer the allocation
4586 	 * until it is needed.
4587 	 */
4588 	iter->fmt = NULL;
4589 	iter->fmt_size = 0;
4590 
4591 	mutex_lock(&trace_types_lock);
4592 	iter->trace = tr->current_trace;
4593 
4594 	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4595 		goto fail;
4596 
4597 	iter->tr = tr;
4598 
4599 #ifdef CONFIG_TRACER_MAX_TRACE
4600 	/* Currently only the top directory has a snapshot */
4601 	if (tr->current_trace->print_max || snapshot)
4602 		iter->array_buffer = &tr->max_buffer;
4603 	else
4604 #endif
4605 		iter->array_buffer = &tr->array_buffer;
4606 	iter->snapshot = snapshot;
4607 	iter->pos = -1;
4608 	iter->cpu_file = tracing_get_cpu(inode);
4609 	mutex_init(&iter->mutex);
4610 
4611 	/* Notify the tracer early, before we stop tracing.
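	 * This gives the tracer's ->open() callback a chance to set up its
	 * iterator state while the buffers are still recording; if
	 * pause-on-trace is set, tracing_stop_tr() is only called further
	 * below.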
*/ 4612 if (iter->trace->open) 4613 iter->trace->open(iter); 4614 4615 /* Annotate start of buffers if we had overruns */ 4616 if (ring_buffer_overruns(iter->array_buffer->buffer)) 4617 iter->iter_flags |= TRACE_FILE_ANNOTATE; 4618 4619 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 4620 if (trace_clocks[tr->clock_id].in_ns) 4621 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 4622 4623 /* 4624 * If pause-on-trace is enabled, then stop the trace while 4625 * dumping, unless this is the "snapshot" file 4626 */ 4627 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE)) 4628 tracing_stop_tr(tr); 4629 4630 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { 4631 for_each_tracing_cpu(cpu) { 4632 iter->buffer_iter[cpu] = 4633 ring_buffer_read_prepare(iter->array_buffer->buffer, 4634 cpu, GFP_KERNEL); 4635 } 4636 ring_buffer_read_prepare_sync(); 4637 for_each_tracing_cpu(cpu) { 4638 ring_buffer_read_start(iter->buffer_iter[cpu]); 4639 tracing_iter_reset(iter, cpu); 4640 } 4641 } else { 4642 cpu = iter->cpu_file; 4643 iter->buffer_iter[cpu] = 4644 ring_buffer_read_prepare(iter->array_buffer->buffer, 4645 cpu, GFP_KERNEL); 4646 ring_buffer_read_prepare_sync(); 4647 ring_buffer_read_start(iter->buffer_iter[cpu]); 4648 tracing_iter_reset(iter, cpu); 4649 } 4650 4651 mutex_unlock(&trace_types_lock); 4652 4653 return iter; 4654 4655 fail: 4656 mutex_unlock(&trace_types_lock); 4657 free_trace_iter_content(iter); 4658 release: 4659 seq_release_private(inode, file); 4660 return ERR_PTR(-ENOMEM); 4661 } 4662 4663 int tracing_open_generic(struct inode *inode, struct file *filp) 4664 { 4665 int ret; 4666 4667 ret = tracing_check_open_get_tr(NULL); 4668 if (ret) 4669 return ret; 4670 4671 filp->private_data = inode->i_private; 4672 return 0; 4673 } 4674 4675 bool tracing_is_disabled(void) 4676 { 4677 return (tracing_disabled) ? true: false; 4678 } 4679 4680 /* 4681 * Open and update trace_array ref count. 4682 * Must have the current trace_array passed to it. 4683 */ 4684 int tracing_open_generic_tr(struct inode *inode, struct file *filp) 4685 { 4686 struct trace_array *tr = inode->i_private; 4687 int ret; 4688 4689 ret = tracing_check_open_get_tr(tr); 4690 if (ret) 4691 return ret; 4692 4693 filp->private_data = inode->i_private; 4694 4695 return 0; 4696 } 4697 4698 /* 4699 * The private pointer of the inode is the trace_event_file. 4700 * Update the tr ref count associated to it. 
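 *
 * The event file is reference counted as well: event_file_get() on a
 * successful open, event_file_put() on release. The open fails with
 * -ENODEV if the file was already marked EVENT_FILE_FL_FREED.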
4701 */ 4702 int tracing_open_file_tr(struct inode *inode, struct file *filp) 4703 { 4704 struct trace_event_file *file = inode->i_private; 4705 int ret; 4706 4707 ret = tracing_check_open_get_tr(file->tr); 4708 if (ret) 4709 return ret; 4710 4711 mutex_lock(&event_mutex); 4712 4713 /* Fail if the file is marked for removal */ 4714 if (file->flags & EVENT_FILE_FL_FREED) { 4715 trace_array_put(file->tr); 4716 ret = -ENODEV; 4717 } else { 4718 event_file_get(file); 4719 } 4720 4721 mutex_unlock(&event_mutex); 4722 if (ret) 4723 return ret; 4724 4725 filp->private_data = inode->i_private; 4726 4727 return 0; 4728 } 4729 4730 int tracing_release_file_tr(struct inode *inode, struct file *filp) 4731 { 4732 struct trace_event_file *file = inode->i_private; 4733 4734 trace_array_put(file->tr); 4735 event_file_put(file); 4736 4737 return 0; 4738 } 4739 4740 int tracing_single_release_file_tr(struct inode *inode, struct file *filp) 4741 { 4742 tracing_release_file_tr(inode, filp); 4743 return single_release(inode, filp); 4744 } 4745 4746 static int tracing_mark_open(struct inode *inode, struct file *filp) 4747 { 4748 stream_open(inode, filp); 4749 return tracing_open_generic_tr(inode, filp); 4750 } 4751 4752 static int tracing_release(struct inode *inode, struct file *file) 4753 { 4754 struct trace_array *tr = inode->i_private; 4755 struct seq_file *m = file->private_data; 4756 struct trace_iterator *iter; 4757 int cpu; 4758 4759 if (!(file->f_mode & FMODE_READ)) { 4760 trace_array_put(tr); 4761 return 0; 4762 } 4763 4764 /* Writes do not use seq_file */ 4765 iter = m->private; 4766 mutex_lock(&trace_types_lock); 4767 4768 for_each_tracing_cpu(cpu) { 4769 if (iter->buffer_iter[cpu]) 4770 ring_buffer_read_finish(iter->buffer_iter[cpu]); 4771 } 4772 4773 if (iter->trace && iter->trace->close) 4774 iter->trace->close(iter); 4775 4776 if (!iter->snapshot && tr->stop_count) 4777 /* reenable tracing if it was previously enabled */ 4778 tracing_start_tr(tr); 4779 4780 __trace_array_put(tr); 4781 4782 mutex_unlock(&trace_types_lock); 4783 4784 free_trace_iter_content(iter); 4785 seq_release_private(inode, file); 4786 4787 return 0; 4788 } 4789 4790 int tracing_release_generic_tr(struct inode *inode, struct file *file) 4791 { 4792 struct trace_array *tr = inode->i_private; 4793 4794 trace_array_put(tr); 4795 return 0; 4796 } 4797 4798 static int tracing_single_release_tr(struct inode *inode, struct file *file) 4799 { 4800 struct trace_array *tr = inode->i_private; 4801 4802 trace_array_put(tr); 4803 4804 return single_release(inode, file); 4805 } 4806 4807 static int tracing_open(struct inode *inode, struct file *file) 4808 { 4809 struct trace_array *tr = inode->i_private; 4810 struct trace_iterator *iter; 4811 int ret; 4812 4813 ret = tracing_check_open_get_tr(tr); 4814 if (ret) 4815 return ret; 4816 4817 /* If this file was open for write, then erase contents */ 4818 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 4819 int cpu = tracing_get_cpu(inode); 4820 struct array_buffer *trace_buf = &tr->array_buffer; 4821 4822 #ifdef CONFIG_TRACER_MAX_TRACE 4823 if (tr->current_trace->print_max) 4824 trace_buf = &tr->max_buffer; 4825 #endif 4826 4827 if (cpu == RING_BUFFER_ALL_CPUS) 4828 tracing_reset_online_cpus(trace_buf); 4829 else 4830 tracing_reset_cpu(trace_buf, cpu); 4831 } 4832 4833 if (file->f_mode & FMODE_READ) { 4834 iter = __tracing_open(inode, file, false); 4835 if (IS_ERR(iter)) 4836 ret = PTR_ERR(iter); 4837 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 4838 iter->iter_flags |= 
TRACE_FILE_LAT_FMT; 4839 } 4840 4841 if (ret < 0) 4842 trace_array_put(tr); 4843 4844 return ret; 4845 } 4846 4847 /* 4848 * Some tracers are not suitable for instance buffers. 4849 * A tracer is always available for the global array (toplevel) 4850 * or if it explicitly states that it is. 4851 */ 4852 static bool 4853 trace_ok_for_array(struct tracer *t, struct trace_array *tr) 4854 { 4855 #ifdef CONFIG_TRACER_SNAPSHOT 4856 /* arrays with mapped buffer range do not have snapshots */ 4857 if (tr->range_addr_start && t->use_max_tr) 4858 return false; 4859 #endif 4860 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; 4861 } 4862 4863 /* Find the next tracer that this trace array may use */ 4864 static struct tracer * 4865 get_tracer_for_array(struct trace_array *tr, struct tracer *t) 4866 { 4867 while (t && !trace_ok_for_array(t, tr)) 4868 t = t->next; 4869 4870 return t; 4871 } 4872 4873 static void * 4874 t_next(struct seq_file *m, void *v, loff_t *pos) 4875 { 4876 struct trace_array *tr = m->private; 4877 struct tracer *t = v; 4878 4879 (*pos)++; 4880 4881 if (t) 4882 t = get_tracer_for_array(tr, t->next); 4883 4884 return t; 4885 } 4886 4887 static void *t_start(struct seq_file *m, loff_t *pos) 4888 { 4889 struct trace_array *tr = m->private; 4890 struct tracer *t; 4891 loff_t l = 0; 4892 4893 mutex_lock(&trace_types_lock); 4894 4895 t = get_tracer_for_array(tr, trace_types); 4896 for (; t && l < *pos; t = t_next(m, t, &l)) 4897 ; 4898 4899 return t; 4900 } 4901 4902 static void t_stop(struct seq_file *m, void *p) 4903 { 4904 mutex_unlock(&trace_types_lock); 4905 } 4906 4907 static int t_show(struct seq_file *m, void *v) 4908 { 4909 struct tracer *t = v; 4910 4911 if (!t) 4912 return 0; 4913 4914 seq_puts(m, t->name); 4915 if (t->next) 4916 seq_putc(m, ' '); 4917 else 4918 seq_putc(m, '\n'); 4919 4920 return 0; 4921 } 4922 4923 static const struct seq_operations show_traces_seq_ops = { 4924 .start = t_start, 4925 .next = t_next, 4926 .stop = t_stop, 4927 .show = t_show, 4928 }; 4929 4930 static int show_traces_open(struct inode *inode, struct file *file) 4931 { 4932 struct trace_array *tr = inode->i_private; 4933 struct seq_file *m; 4934 int ret; 4935 4936 ret = tracing_check_open_get_tr(tr); 4937 if (ret) 4938 return ret; 4939 4940 ret = seq_open(file, &show_traces_seq_ops); 4941 if (ret) { 4942 trace_array_put(tr); 4943 return ret; 4944 } 4945 4946 m = file->private_data; 4947 m->private = tr; 4948 4949 return 0; 4950 } 4951 4952 static int tracing_seq_release(struct inode *inode, struct file *file) 4953 { 4954 struct trace_array *tr = inode->i_private; 4955 4956 trace_array_put(tr); 4957 return seq_release(inode, file); 4958 } 4959 4960 static ssize_t 4961 tracing_write_stub(struct file *filp, const char __user *ubuf, 4962 size_t count, loff_t *ppos) 4963 { 4964 return count; 4965 } 4966 4967 loff_t tracing_lseek(struct file *file, loff_t offset, int whence) 4968 { 4969 int ret; 4970 4971 if (file->f_mode & FMODE_READ) 4972 ret = seq_lseek(file, offset, whence); 4973 else 4974 file->f_pos = ret = 0; 4975 4976 return ret; 4977 } 4978 4979 static const struct file_operations tracing_fops = { 4980 .open = tracing_open, 4981 .read = seq_read, 4982 .read_iter = seq_read_iter, 4983 .splice_read = copy_splice_read, 4984 .write = tracing_write_stub, 4985 .llseek = tracing_lseek, 4986 .release = tracing_release, 4987 }; 4988 4989 static const struct file_operations show_traces_fops = { 4990 .open = show_traces_open, 4991 .read = seq_read, 4992 .llseek = seq_lseek, 4993 .release 
= tracing_seq_release, 4994 }; 4995 4996 static ssize_t 4997 tracing_cpumask_read(struct file *filp, char __user *ubuf, 4998 size_t count, loff_t *ppos) 4999 { 5000 struct trace_array *tr = file_inode(filp)->i_private; 5001 char *mask_str; 5002 int len; 5003 5004 len = snprintf(NULL, 0, "%*pb\n", 5005 cpumask_pr_args(tr->tracing_cpumask)) + 1; 5006 mask_str = kmalloc(len, GFP_KERNEL); 5007 if (!mask_str) 5008 return -ENOMEM; 5009 5010 len = snprintf(mask_str, len, "%*pb\n", 5011 cpumask_pr_args(tr->tracing_cpumask)); 5012 if (len >= count) { 5013 count = -EINVAL; 5014 goto out_err; 5015 } 5016 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len); 5017 5018 out_err: 5019 kfree(mask_str); 5020 5021 return count; 5022 } 5023 5024 int tracing_set_cpumask(struct trace_array *tr, 5025 cpumask_var_t tracing_cpumask_new) 5026 { 5027 int cpu; 5028 5029 if (!tr) 5030 return -EINVAL; 5031 5032 local_irq_disable(); 5033 arch_spin_lock(&tr->max_lock); 5034 for_each_tracing_cpu(cpu) { 5035 /* 5036 * Increase/decrease the disabled counter if we are 5037 * about to flip a bit in the cpumask: 5038 */ 5039 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && 5040 !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 5041 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); 5042 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); 5043 #ifdef CONFIG_TRACER_MAX_TRACE 5044 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu); 5045 #endif 5046 } 5047 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && 5048 cpumask_test_cpu(cpu, tracing_cpumask_new)) { 5049 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); 5050 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); 5051 #ifdef CONFIG_TRACER_MAX_TRACE 5052 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu); 5053 #endif 5054 } 5055 } 5056 arch_spin_unlock(&tr->max_lock); 5057 local_irq_enable(); 5058 5059 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); 5060 5061 return 0; 5062 } 5063 5064 static ssize_t 5065 tracing_cpumask_write(struct file *filp, const char __user *ubuf, 5066 size_t count, loff_t *ppos) 5067 { 5068 struct trace_array *tr = file_inode(filp)->i_private; 5069 cpumask_var_t tracing_cpumask_new; 5070 int err; 5071 5072 if (count == 0 || count > KMALLOC_MAX_SIZE) 5073 return -EINVAL; 5074 5075 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) 5076 return -ENOMEM; 5077 5078 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 5079 if (err) 5080 goto err_free; 5081 5082 err = tracing_set_cpumask(tr, tracing_cpumask_new); 5083 if (err) 5084 goto err_free; 5085 5086 free_cpumask_var(tracing_cpumask_new); 5087 5088 return count; 5089 5090 err_free: 5091 free_cpumask_var(tracing_cpumask_new); 5092 5093 return err; 5094 } 5095 5096 static const struct file_operations tracing_cpumask_fops = { 5097 .open = tracing_open_generic_tr, 5098 .read = tracing_cpumask_read, 5099 .write = tracing_cpumask_write, 5100 .release = tracing_release_generic_tr, 5101 .llseek = generic_file_llseek, 5102 }; 5103 5104 static int tracing_trace_options_show(struct seq_file *m, void *v) 5105 { 5106 struct tracer_opt *trace_opts; 5107 struct trace_array *tr = m->private; 5108 u32 tracer_flags; 5109 int i; 5110 5111 guard(mutex)(&trace_types_lock); 5112 5113 tracer_flags = tr->current_trace->flags->val; 5114 trace_opts = tr->current_trace->flags->opts; 5115 5116 for (i = 0; trace_options[i]; i++) { 5117 if (tr->trace_flags & (1 << i)) 5118 seq_printf(m, "%s\n", trace_options[i]); 5119 else 5120 seq_printf(m, 
"no%s\n", trace_options[i]); 5121 } 5122 5123 for (i = 0; trace_opts[i].name; i++) { 5124 if (tracer_flags & trace_opts[i].bit) 5125 seq_printf(m, "%s\n", trace_opts[i].name); 5126 else 5127 seq_printf(m, "no%s\n", trace_opts[i].name); 5128 } 5129 5130 return 0; 5131 } 5132 5133 static int __set_tracer_option(struct trace_array *tr, 5134 struct tracer_flags *tracer_flags, 5135 struct tracer_opt *opts, int neg) 5136 { 5137 struct tracer *trace = tracer_flags->trace; 5138 int ret; 5139 5140 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); 5141 if (ret) 5142 return ret; 5143 5144 if (neg) 5145 tracer_flags->val &= ~opts->bit; 5146 else 5147 tracer_flags->val |= opts->bit; 5148 return 0; 5149 } 5150 5151 /* Try to assign a tracer specific option */ 5152 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) 5153 { 5154 struct tracer *trace = tr->current_trace; 5155 struct tracer_flags *tracer_flags = trace->flags; 5156 struct tracer_opt *opts = NULL; 5157 int i; 5158 5159 for (i = 0; tracer_flags->opts[i].name; i++) { 5160 opts = &tracer_flags->opts[i]; 5161 5162 if (strcmp(cmp, opts->name) == 0) 5163 return __set_tracer_option(tr, trace->flags, opts, neg); 5164 } 5165 5166 return -EINVAL; 5167 } 5168 5169 /* Some tracers require overwrite to stay enabled */ 5170 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) 5171 { 5172 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) 5173 return -1; 5174 5175 return 0; 5176 } 5177 5178 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) 5179 { 5180 if ((mask == TRACE_ITER_RECORD_TGID) || 5181 (mask == TRACE_ITER_RECORD_CMD) || 5182 (mask == TRACE_ITER_TRACE_PRINTK)) 5183 lockdep_assert_held(&event_mutex); 5184 5185 /* do nothing if flag is already set */ 5186 if (!!(tr->trace_flags & mask) == !!enabled) 5187 return 0; 5188 5189 /* Give the tracer a chance to approve the change */ 5190 if (tr->current_trace->flag_changed) 5191 if (tr->current_trace->flag_changed(tr, mask, !!enabled)) 5192 return -EINVAL; 5193 5194 if (mask == TRACE_ITER_TRACE_PRINTK) { 5195 if (enabled) { 5196 update_printk_trace(tr); 5197 } else { 5198 /* 5199 * The global_trace cannot clear this. 5200 * It's flag only gets cleared if another instance sets it. 5201 */ 5202 if (printk_trace == &global_trace) 5203 return -EINVAL; 5204 /* 5205 * An instance must always have it set. 5206 * by default, that's the global_trace instane. 
5207 */ 5208 if (printk_trace == tr) 5209 update_printk_trace(&global_trace); 5210 } 5211 } 5212 5213 if (enabled) 5214 tr->trace_flags |= mask; 5215 else 5216 tr->trace_flags &= ~mask; 5217 5218 if (mask == TRACE_ITER_RECORD_CMD) 5219 trace_event_enable_cmd_record(enabled); 5220 5221 if (mask == TRACE_ITER_RECORD_TGID) { 5222 5223 if (trace_alloc_tgid_map() < 0) { 5224 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; 5225 return -ENOMEM; 5226 } 5227 5228 trace_event_enable_tgid_record(enabled); 5229 } 5230 5231 if (mask == TRACE_ITER_EVENT_FORK) 5232 trace_event_follow_fork(tr, enabled); 5233 5234 if (mask == TRACE_ITER_FUNC_FORK) 5235 ftrace_pid_follow_fork(tr, enabled); 5236 5237 if (mask == TRACE_ITER_OVERWRITE) { 5238 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled); 5239 #ifdef CONFIG_TRACER_MAX_TRACE 5240 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); 5241 #endif 5242 } 5243 5244 if (mask == TRACE_ITER_PRINTK) { 5245 trace_printk_start_stop_comm(enabled); 5246 trace_printk_control(enabled); 5247 } 5248 5249 return 0; 5250 } 5251 5252 int trace_set_options(struct trace_array *tr, char *option) 5253 { 5254 char *cmp; 5255 int neg = 0; 5256 int ret; 5257 size_t orig_len = strlen(option); 5258 int len; 5259 5260 cmp = strstrip(option); 5261 5262 len = str_has_prefix(cmp, "no"); 5263 if (len) 5264 neg = 1; 5265 5266 cmp += len; 5267 5268 mutex_lock(&event_mutex); 5269 mutex_lock(&trace_types_lock); 5270 5271 ret = match_string(trace_options, -1, cmp); 5272 /* If no option could be set, test the specific tracer options */ 5273 if (ret < 0) 5274 ret = set_tracer_option(tr, cmp, neg); 5275 else 5276 ret = set_tracer_flag(tr, 1 << ret, !neg); 5277 5278 mutex_unlock(&trace_types_lock); 5279 mutex_unlock(&event_mutex); 5280 5281 /* 5282 * If the first trailing whitespace is replaced with '\0' by strstrip, 5283 * turn it back into a space. 
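 *
 * This keeps the buffer intact for repeated parsing;
 * apply_trace_boot_options() below relies on the same idea when it
 * restores each ',' that strsep() replaced with '\0'.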
5284 */ 5285 if (orig_len > strlen(option)) 5286 option[strlen(option)] = ' '; 5287 5288 return ret; 5289 } 5290 5291 static void __init apply_trace_boot_options(void) 5292 { 5293 char *buf = trace_boot_options_buf; 5294 char *option; 5295 5296 while (true) { 5297 option = strsep(&buf, ","); 5298 5299 if (!option) 5300 break; 5301 5302 if (*option) 5303 trace_set_options(&global_trace, option); 5304 5305 /* Put back the comma to allow this to be called again */ 5306 if (buf) 5307 *(buf - 1) = ','; 5308 } 5309 } 5310 5311 static ssize_t 5312 tracing_trace_options_write(struct file *filp, const char __user *ubuf, 5313 size_t cnt, loff_t *ppos) 5314 { 5315 struct seq_file *m = filp->private_data; 5316 struct trace_array *tr = m->private; 5317 char buf[64]; 5318 int ret; 5319 5320 if (cnt >= sizeof(buf)) 5321 return -EINVAL; 5322 5323 if (copy_from_user(buf, ubuf, cnt)) 5324 return -EFAULT; 5325 5326 buf[cnt] = 0; 5327 5328 ret = trace_set_options(tr, buf); 5329 if (ret < 0) 5330 return ret; 5331 5332 *ppos += cnt; 5333 5334 return cnt; 5335 } 5336 5337 static int tracing_trace_options_open(struct inode *inode, struct file *file) 5338 { 5339 struct trace_array *tr = inode->i_private; 5340 int ret; 5341 5342 ret = tracing_check_open_get_tr(tr); 5343 if (ret) 5344 return ret; 5345 5346 ret = single_open(file, tracing_trace_options_show, inode->i_private); 5347 if (ret < 0) 5348 trace_array_put(tr); 5349 5350 return ret; 5351 } 5352 5353 static const struct file_operations tracing_iter_fops = { 5354 .open = tracing_trace_options_open, 5355 .read = seq_read, 5356 .llseek = seq_lseek, 5357 .release = tracing_single_release_tr, 5358 .write = tracing_trace_options_write, 5359 }; 5360 5361 static const char readme_msg[] = 5362 "tracing mini-HOWTO:\n\n" 5363 "By default tracefs removes all OTH file permission bits.\n" 5364 "When mounting tracefs an optional group id can be specified\n" 5365 "which adds the group to every directory and file in tracefs:\n\n" 5366 "\t e.g. 
mount -t tracefs [-o [gid=<gid>]] nodev /sys/kernel/tracing\n\n" 5367 "# echo 0 > tracing_on : quick way to disable tracing\n" 5368 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n" 5369 " Important files:\n" 5370 " trace\t\t\t- The static contents of the buffer\n" 5371 "\t\t\t To clear the buffer write into this file: echo > trace\n" 5372 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n" 5373 " current_tracer\t- function and latency tracers\n" 5374 " available_tracers\t- list of configured tracers for current_tracer\n" 5375 " error_log\t- error log for failed commands (that support it)\n" 5376 " buffer_size_kb\t- view and modify size of per cpu buffer\n" 5377 " buffer_total_size_kb - view total size of all cpu buffers\n\n" 5378 " trace_clock\t\t- change the clock used to order events\n" 5379 " local: Per cpu clock but may not be synced across CPUs\n" 5380 " global: Synced across CPUs but slows tracing down.\n" 5381 " counter: Not a clock, but just an increment\n" 5382 " uptime: Jiffy counter from time of boot\n" 5383 " perf: Same clock that perf events use\n" 5384 #ifdef CONFIG_X86_64 5385 " x86-tsc: TSC cycle counter\n" 5386 #endif 5387 "\n timestamp_mode\t- view the mode used to timestamp events\n" 5388 " delta: Delta difference against a buffer-wide timestamp\n" 5389 " absolute: Absolute (standalone) timestamp\n" 5390 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n" 5391 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n" 5392 " tracing_cpumask\t- Limit which CPUs to trace\n" 5393 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" 5394 "\t\t\t Remove sub-buffer with rmdir\n" 5395 " trace_options\t\t- Set format or modify how tracing happens\n" 5396 "\t\t\t Disable an option by prefixing 'no' to the\n" 5397 "\t\t\t option name\n" 5398 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" 5399 #ifdef CONFIG_DYNAMIC_FTRACE 5400 "\n available_filter_functions - list of functions that can be filtered on\n" 5401 " set_ftrace_filter\t- echo function name in here to only trace these\n" 5402 "\t\t\t functions\n" 5403 "\t accepts: func_full_name or glob-matching-pattern\n" 5404 "\t modules: Can select a group via module\n" 5405 "\t Format: :mod:<module-name>\n" 5406 "\t example: echo :mod:ext3 > set_ftrace_filter\n" 5407 "\t triggers: a command to perform when function is hit\n" 5408 "\t Format: <function>:<trigger>[:count]\n" 5409 "\t trigger: traceon, traceoff\n" 5410 "\t\t enable_event:<system>:<event>\n" 5411 "\t\t disable_event:<system>:<event>\n" 5412 #ifdef CONFIG_STACKTRACE 5413 "\t\t stacktrace\n" 5414 #endif 5415 #ifdef CONFIG_TRACER_SNAPSHOT 5416 "\t\t snapshot\n" 5417 #endif 5418 "\t\t dump\n" 5419 "\t\t cpudump\n" 5420 "\t example: echo do_fault:traceoff > set_ftrace_filter\n" 5421 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" 5422 "\t The first one will disable tracing every time do_fault is hit\n" 5423 "\t The second will disable tracing at most 3 times when do_trap is hit\n" 5424 "\t The first time do trap is hit and it disables tracing, the\n" 5425 "\t counter will decrement to 2. If tracing is already disabled,\n" 5426 "\t the counter will not decrement. 
It only decrements when the\n" 5427 "\t trigger did work\n" 5428 "\t To remove trigger without count:\n" 5429 "\t echo '!<function>:<trigger> > set_ftrace_filter\n" 5430 "\t To remove trigger with a count:\n" 5431 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n" 5432 " set_ftrace_notrace\t- echo function name in here to never trace.\n" 5433 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" 5434 "\t modules: Can select a group via module command :mod:\n" 5435 "\t Does not accept triggers\n" 5436 #endif /* CONFIG_DYNAMIC_FTRACE */ 5437 #ifdef CONFIG_FUNCTION_TRACER 5438 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" 5439 "\t\t (function)\n" 5440 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n" 5441 "\t\t (function)\n" 5442 #endif 5443 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 5444 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" 5445 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n" 5446 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" 5447 #endif 5448 #ifdef CONFIG_TRACER_SNAPSHOT 5449 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n" 5450 "\t\t\t snapshot buffer. Read the contents for more\n" 5451 "\t\t\t information\n" 5452 #endif 5453 #ifdef CONFIG_STACK_TRACER 5454 " stack_trace\t\t- Shows the max stack trace when active\n" 5455 " stack_max_size\t- Shows current max stack size that was traced\n" 5456 "\t\t\t Write into this file to reset the max size (trigger a\n" 5457 "\t\t\t new trace)\n" 5458 #ifdef CONFIG_DYNAMIC_FTRACE 5459 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n" 5460 "\t\t\t traces\n" 5461 #endif 5462 #endif /* CONFIG_STACK_TRACER */ 5463 #ifdef CONFIG_DYNAMIC_EVENTS 5464 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n" 5465 "\t\t\t Write into this file to define/undefine new trace events.\n" 5466 #endif 5467 #ifdef CONFIG_KPROBE_EVENTS 5468 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n" 5469 "\t\t\t Write into this file to define/undefine new trace events.\n" 5470 #endif 5471 #ifdef CONFIG_UPROBE_EVENTS 5472 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n" 5473 "\t\t\t Write into this file to define/undefine new trace events.\n" 5474 #endif 5475 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \ 5476 defined(CONFIG_FPROBE_EVENTS) 5477 "\t accepts: event-definitions (one definition per line)\n" 5478 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) 5479 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n" 5480 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n" 5481 #endif 5482 #ifdef CONFIG_FPROBE_EVENTS 5483 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n" 5484 "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n" 5485 #endif 5486 #ifdef CONFIG_HIST_TRIGGERS 5487 "\t s:[synthetic/]<event> <field> [<field>]\n" 5488 #endif 5489 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n" 5490 "\t -:[<group>/][<event>]\n" 5491 #ifdef CONFIG_KPROBE_EVENTS 5492 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" 5493 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n" 5494 #endif 5495 #ifdef CONFIG_UPROBE_EVENTS 5496 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n" 5497 #endif 5498 "\t args: <name>=fetcharg[:type]\n" 5499 "\t fetcharg: 
(%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n" 5500 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API 5501 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n" 5502 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS 5503 "\t <argname>[->field[->field|.field...]],\n" 5504 #endif 5505 #else 5506 "\t $stack<index>, $stack, $retval, $comm,\n" 5507 #endif 5508 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n" 5509 "\t kernel return probes support: $retval, $arg<N>, $comm\n" 5510 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n" 5511 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n" 5512 "\t symstr, %pd/%pD, <type>\\[<array-size>\\]\n" 5513 #ifdef CONFIG_HIST_TRIGGERS 5514 "\t field: <stype> <name>;\n" 5515 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n" 5516 "\t [unsigned] char/int/long\n" 5517 #endif 5518 "\t efield: For event probes ('e' types), the field is on of the fields\n" 5519 "\t of the <attached-group>/<attached-event>.\n" 5520 #endif 5521 " set_event\t\t- Enables events by name written into it\n" 5522 "\t\t\t Can enable module events via: :mod:<module>\n" 5523 " events/\t\t- Directory containing all trace event subsystems:\n" 5524 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" 5525 " events/<system>/\t- Directory containing all trace events for <system>:\n" 5526 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n" 5527 "\t\t\t events\n" 5528 " filter\t\t- If set, only events passing filter are traced\n" 5529 " events/<system>/<event>/\t- Directory containing control files for\n" 5530 "\t\t\t <event>:\n" 5531 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n" 5532 " filter\t\t- If set, only events passing filter are traced\n" 5533 " trigger\t\t- If set, a command to perform when event is hit\n" 5534 "\t Format: <trigger>[:count][if <filter>]\n" 5535 "\t trigger: traceon, traceoff\n" 5536 "\t enable_event:<system>:<event>\n" 5537 "\t disable_event:<system>:<event>\n" 5538 #ifdef CONFIG_HIST_TRIGGERS 5539 "\t enable_hist:<system>:<event>\n" 5540 "\t disable_hist:<system>:<event>\n" 5541 #endif 5542 #ifdef CONFIG_STACKTRACE 5543 "\t\t stacktrace\n" 5544 #endif 5545 #ifdef CONFIG_TRACER_SNAPSHOT 5546 "\t\t snapshot\n" 5547 #endif 5548 #ifdef CONFIG_HIST_TRIGGERS 5549 "\t\t hist (see below)\n" 5550 #endif 5551 "\t example: echo traceoff > events/block/block_unplug/trigger\n" 5552 "\t echo traceoff:3 > events/block/block_unplug/trigger\n" 5553 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n" 5554 "\t events/block/block_unplug/trigger\n" 5555 "\t The first disables tracing every time block_unplug is hit.\n" 5556 "\t The second disables tracing the first 3 times block_unplug is hit.\n" 5557 "\t The third enables the kmalloc event the first 3 times block_unplug\n" 5558 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n" 5559 "\t Like function triggers, the counter is only decremented if it\n" 5560 "\t enabled or disabled tracing.\n" 5561 "\t To remove a trigger without a count:\n" 5562 "\t echo '!<trigger> > <system>/<event>/trigger\n" 5563 "\t To remove a trigger with a count:\n" 5564 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n" 5565 "\t Filters can be ignored when removing a trigger.\n" 5566 #ifdef CONFIG_HIST_TRIGGERS 5567 " hist trigger\t- If set, event hits are aggregated into a hash table\n" 5568 "\t Format: hist:keys=<field1[,field2,...]>\n" 5569 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n" 5570 "\t 
[:values=<field1[,field2,...]>]\n" 5571 "\t [:sort=<field1[,field2,...]>]\n" 5572 "\t [:size=#entries]\n" 5573 "\t [:pause][:continue][:clear]\n" 5574 "\t [:name=histname1]\n" 5575 "\t [:nohitcount]\n" 5576 "\t [:<handler>.<action>]\n" 5577 "\t [if <filter>]\n\n" 5578 "\t Note, special fields can be used as well:\n" 5579 "\t common_timestamp - to record current timestamp\n" 5580 "\t common_cpu - to record the CPU the event happened on\n" 5581 "\n" 5582 "\t A hist trigger variable can be:\n" 5583 "\t - a reference to a field e.g. x=current_timestamp,\n" 5584 "\t - a reference to another variable e.g. y=$x,\n" 5585 "\t - a numeric literal: e.g. ms_per_sec=1000,\n" 5586 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n" 5587 "\n" 5588 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n" 5589 "\t multiplication(*) and division(/) operators. An operand can be either a\n" 5590 "\t variable reference, field or numeric literal.\n" 5591 "\n" 5592 "\t When a matching event is hit, an entry is added to a hash\n" 5593 "\t table using the key(s) and value(s) named, and the value of a\n" 5594 "\t sum called 'hitcount' is incremented. Keys and values\n" 5595 "\t correspond to fields in the event's format description. Keys\n" 5596 "\t can be any field, or the special string 'common_stacktrace'.\n" 5597 "\t Compound keys consisting of up to two fields can be specified\n" 5598 "\t by the 'keys' keyword. Values must correspond to numeric\n" 5599 "\t fields. Sort keys consisting of up to two fields can be\n" 5600 "\t specified using the 'sort' keyword. The sort direction can\n" 5601 "\t be modified by appending '.descending' or '.ascending' to a\n" 5602 "\t sort field. The 'size' parameter can be used to specify more\n" 5603 "\t or fewer than the default 2048 entries for the hashtable size.\n" 5604 "\t If a hist trigger is given a name using the 'name' parameter,\n" 5605 "\t its histogram data will be shared with other triggers of the\n" 5606 "\t same name, and trigger hits will update this common data.\n\n" 5607 "\t Reading the 'hist' file for the event will dump the hash\n" 5608 "\t table in its entirety to stdout. If there are multiple hist\n" 5609 "\t triggers attached to an event, there will be a table for each\n" 5610 "\t trigger in the output. The table displayed for a named\n" 5611 "\t trigger will be the same as any other instance having the\n" 5612 "\t same name. The default format used to display a given field\n" 5613 "\t can be modified by appending any of the following modifiers\n" 5614 "\t to the field name, as applicable:\n\n" 5615 "\t .hex display a number as a hex value\n" 5616 "\t .sym display an address as a symbol\n" 5617 "\t .sym-offset display an address as a symbol and offset\n" 5618 "\t .execname display a common_pid as a program name\n" 5619 "\t .syscall display a syscall id as a syscall name\n" 5620 "\t .log2 display log2 value rather than raw number\n" 5621 "\t .buckets=size display values in groups of size rather than raw number\n" 5622 "\t .usecs display a common_timestamp in microseconds\n" 5623 "\t .percent display a number of percentage value\n" 5624 "\t .graph display a bar-graph of a value\n\n" 5625 "\t The 'pause' parameter can be used to pause an existing hist\n" 5626 "\t trigger or to start a hist trigger but not log any events\n" 5627 "\t until told to do so. 
5628 "\t restart a paused hist trigger.\n\n"
5629 "\t The 'clear' parameter will clear the contents of a running\n"
5630 "\t hist trigger and leave its current paused/active state\n"
5631 "\t unchanged.\n\n"
5632 "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5633 "\t raw hitcount in the histogram.\n\n"
5634 "\t The enable_hist and disable_hist triggers can be used to\n"
5635 "\t have one event conditionally start and stop another event's\n"
5636 "\t already-attached hist trigger. The syntax is analogous to\n"
5637 "\t the enable_event and disable_event triggers.\n\n"
5638 "\t Hist trigger handlers and actions are executed whenever\n"
5639 "\t a histogram entry is added or updated. They take the form:\n\n"
5640 "\t <handler>.<action>\n\n"
5641 "\t The available handlers are:\n\n"
5642 "\t onmatch(matching.event) - invoke on addition or update\n"
5643 "\t onmax(var) - invoke if var exceeds current max\n"
5644 "\t onchange(var) - invoke action if var changes\n\n"
5645 "\t The available actions are:\n\n"
5646 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5647 "\t save(field,...) - save current event fields\n"
5648 #ifdef CONFIG_TRACER_SNAPSHOT
5649 "\t snapshot() - snapshot the trace buffer\n\n"
5650 #endif
5651 #ifdef CONFIG_SYNTH_EVENTS
5652 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5653 "\t Write into this file to define/undefine new synthetic events.\n"
5654 "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5655 #endif
5656 #endif
5657 ;
5658 
5659 static ssize_t
5660 tracing_readme_read(struct file *filp, char __user *ubuf,
5661 		    size_t cnt, loff_t *ppos)
5662 {
5663 	return simple_read_from_buffer(ubuf, cnt, ppos,
5664 				       readme_msg, strlen(readme_msg));
5665 }
5666 
5667 static const struct file_operations tracing_readme_fops = {
5668 	.open		= tracing_open_generic,
5669 	.read		= tracing_readme_read,
5670 	.llseek		= generic_file_llseek,
5671 };
5672 
5673 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5674 static union trace_eval_map_item *
5675 update_eval_map(union trace_eval_map_item *ptr)
5676 {
5677 	if (!ptr->map.eval_string) {
5678 		if (ptr->tail.next) {
5679 			ptr = ptr->tail.next;
5680 			/* Set ptr to the next real item (skip head) */
5681 			ptr++;
5682 		} else
5683 			return NULL;
5684 	}
5685 	return ptr;
5686 }
5687 
5688 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5689 {
5690 	union trace_eval_map_item *ptr = v;
5691 
5692 	/*
5693 	 * Paranoid! If ptr points to end, we don't want to increment past it.
5694 	 * This really should never happen.
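	 *
	 * update_eval_map() only returns NULL when a tail item has no
	 * 'next' pointer, i.e. the walk already reached the end of the
	 * last saved array, so the WARN_ON_ONCE() below flags that
	 * supposedly impossible state.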
5695 */ 5696 (*pos)++; 5697 ptr = update_eval_map(ptr); 5698 if (WARN_ON_ONCE(!ptr)) 5699 return NULL; 5700 5701 ptr++; 5702 ptr = update_eval_map(ptr); 5703 5704 return ptr; 5705 } 5706 5707 static void *eval_map_start(struct seq_file *m, loff_t *pos) 5708 { 5709 union trace_eval_map_item *v; 5710 loff_t l = 0; 5711 5712 mutex_lock(&trace_eval_mutex); 5713 5714 v = trace_eval_maps; 5715 if (v) 5716 v++; 5717 5718 while (v && l < *pos) { 5719 v = eval_map_next(m, v, &l); 5720 } 5721 5722 return v; 5723 } 5724 5725 static void eval_map_stop(struct seq_file *m, void *v) 5726 { 5727 mutex_unlock(&trace_eval_mutex); 5728 } 5729 5730 static int eval_map_show(struct seq_file *m, void *v) 5731 { 5732 union trace_eval_map_item *ptr = v; 5733 5734 seq_printf(m, "%s %ld (%s)\n", 5735 ptr->map.eval_string, ptr->map.eval_value, 5736 ptr->map.system); 5737 5738 return 0; 5739 } 5740 5741 static const struct seq_operations tracing_eval_map_seq_ops = { 5742 .start = eval_map_start, 5743 .next = eval_map_next, 5744 .stop = eval_map_stop, 5745 .show = eval_map_show, 5746 }; 5747 5748 static int tracing_eval_map_open(struct inode *inode, struct file *filp) 5749 { 5750 int ret; 5751 5752 ret = tracing_check_open_get_tr(NULL); 5753 if (ret) 5754 return ret; 5755 5756 return seq_open(filp, &tracing_eval_map_seq_ops); 5757 } 5758 5759 static const struct file_operations tracing_eval_map_fops = { 5760 .open = tracing_eval_map_open, 5761 .read = seq_read, 5762 .llseek = seq_lseek, 5763 .release = seq_release, 5764 }; 5765 5766 static inline union trace_eval_map_item * 5767 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr) 5768 { 5769 /* Return tail of array given the head */ 5770 return ptr + ptr->head.length + 1; 5771 } 5772 5773 static void 5774 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, 5775 int len) 5776 { 5777 struct trace_eval_map **stop; 5778 struct trace_eval_map **map; 5779 union trace_eval_map_item *map_array; 5780 union trace_eval_map_item *ptr; 5781 5782 stop = start + len; 5783 5784 /* 5785 * The trace_eval_maps contains the map plus a head and tail item, 5786 * where the head holds the module and length of array, and the 5787 * tail holds a pointer to the next list. 
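 *
 * Layout of one saved block (len + 2 items):
 *
 *   map_array[0]          head (mod, length = len)
 *   map_array[1..len]     copies of the trace_eval_map entries
 *   map_array[len + 1]    tail (zeroed; tail.next chains to the next
 *                         saved block, if any)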
5788 */ 5789 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL); 5790 if (!map_array) { 5791 pr_warn("Unable to allocate trace eval mapping\n"); 5792 return; 5793 } 5794 5795 guard(mutex)(&trace_eval_mutex); 5796 5797 if (!trace_eval_maps) 5798 trace_eval_maps = map_array; 5799 else { 5800 ptr = trace_eval_maps; 5801 for (;;) { 5802 ptr = trace_eval_jmp_to_tail(ptr); 5803 if (!ptr->tail.next) 5804 break; 5805 ptr = ptr->tail.next; 5806 5807 } 5808 ptr->tail.next = map_array; 5809 } 5810 map_array->head.mod = mod; 5811 map_array->head.length = len; 5812 map_array++; 5813 5814 for (map = start; (unsigned long)map < (unsigned long)stop; map++) { 5815 map_array->map = **map; 5816 map_array++; 5817 } 5818 memset(map_array, 0, sizeof(*map_array)); 5819 } 5820 5821 static void trace_create_eval_file(struct dentry *d_tracer) 5822 { 5823 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer, 5824 NULL, &tracing_eval_map_fops); 5825 } 5826 5827 #else /* CONFIG_TRACE_EVAL_MAP_FILE */ 5828 static inline void trace_create_eval_file(struct dentry *d_tracer) { } 5829 static inline void trace_insert_eval_map_file(struct module *mod, 5830 struct trace_eval_map **start, int len) { } 5831 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */ 5832 5833 static void trace_insert_eval_map(struct module *mod, 5834 struct trace_eval_map **start, int len) 5835 { 5836 struct trace_eval_map **map; 5837 5838 if (len <= 0) 5839 return; 5840 5841 map = start; 5842 5843 trace_event_eval_update(map, len); 5844 5845 trace_insert_eval_map_file(mod, start, len); 5846 } 5847 5848 static ssize_t 5849 tracing_set_trace_read(struct file *filp, char __user *ubuf, 5850 size_t cnt, loff_t *ppos) 5851 { 5852 struct trace_array *tr = filp->private_data; 5853 char buf[MAX_TRACER_SIZE+2]; 5854 int r; 5855 5856 mutex_lock(&trace_types_lock); 5857 r = sprintf(buf, "%s\n", tr->current_trace->name); 5858 mutex_unlock(&trace_types_lock); 5859 5860 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 5861 } 5862 5863 int tracer_init(struct tracer *t, struct trace_array *tr) 5864 { 5865 tracing_reset_online_cpus(&tr->array_buffer); 5866 return t->init(tr); 5867 } 5868 5869 static void set_buffer_entries(struct array_buffer *buf, unsigned long val) 5870 { 5871 int cpu; 5872 5873 for_each_tracing_cpu(cpu) 5874 per_cpu_ptr(buf->data, cpu)->entries = val; 5875 } 5876 5877 static void update_buffer_entries(struct array_buffer *buf, int cpu) 5878 { 5879 if (cpu == RING_BUFFER_ALL_CPUS) { 5880 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0)); 5881 } else { 5882 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu); 5883 } 5884 } 5885 5886 #ifdef CONFIG_TRACER_MAX_TRACE 5887 /* resize @tr's buffer to the size of @size_tr's entries */ 5888 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, 5889 struct array_buffer *size_buf, int cpu_id) 5890 { 5891 int cpu, ret = 0; 5892 5893 if (cpu_id == RING_BUFFER_ALL_CPUS) { 5894 for_each_tracing_cpu(cpu) { 5895 ret = ring_buffer_resize(trace_buf->buffer, 5896 per_cpu_ptr(size_buf->data, cpu)->entries, cpu); 5897 if (ret < 0) 5898 break; 5899 per_cpu_ptr(trace_buf->data, cpu)->entries = 5900 per_cpu_ptr(size_buf->data, cpu)->entries; 5901 } 5902 } else { 5903 ret = ring_buffer_resize(trace_buf->buffer, 5904 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); 5905 if (ret == 0) 5906 per_cpu_ptr(trace_buf->data, cpu_id)->entries = 5907 per_cpu_ptr(size_buf->data, cpu_id)->entries; 5908 } 5909 5910 return ret; 5911 } 5912 #endif /* CONFIG_TRACER_MAX_TRACE 
*/ 5913 5914 static int __tracing_resize_ring_buffer(struct trace_array *tr, 5915 unsigned long size, int cpu) 5916 { 5917 int ret; 5918 5919 /* 5920 * If kernel or user changes the size of the ring buffer 5921 * we use the size that was given, and we can forget about 5922 * expanding it later. 5923 */ 5924 trace_set_ring_buffer_expanded(tr); 5925 5926 /* May be called before buffers are initialized */ 5927 if (!tr->array_buffer.buffer) 5928 return 0; 5929 5930 /* Do not allow tracing while resizing the ring buffer */ 5931 tracing_stop_tr(tr); 5932 5933 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu); 5934 if (ret < 0) 5935 goto out_start; 5936 5937 #ifdef CONFIG_TRACER_MAX_TRACE 5938 if (!tr->allocated_snapshot) 5939 goto out; 5940 5941 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); 5942 if (ret < 0) { 5943 int r = resize_buffer_duplicate_size(&tr->array_buffer, 5944 &tr->array_buffer, cpu); 5945 if (r < 0) { 5946 /* 5947 * AARGH! We are left with a different 5948 * size max buffer!!!! 5949 * The max buffer is our "snapshot" buffer. 5950 * When a tracer needs a snapshot (one of the 5951 * latency tracers), it swaps the max buffer 5952 * with the saved snapshot. We succeeded in 5953 * updating the size of the main buffer, but failed to 5954 * update the size of the max buffer. And when we tried 5955 * to reset the main buffer to the original size, we 5956 * failed there too. This is very unlikely to 5957 * happen, but if it does, warn and kill all 5958 * tracing. 5959 */ 5960 WARN_ON(1); 5961 tracing_disabled = 1; 5962 } 5963 goto out_start; 5964 } 5965 5966 update_buffer_entries(&tr->max_buffer, cpu); 5967 5968 out: 5969 #endif /* CONFIG_TRACER_MAX_TRACE */ 5970 5971 update_buffer_entries(&tr->array_buffer, cpu); 5972 out_start: 5973 tracing_start_tr(tr); 5974 return ret; 5975 } 5976 5977 ssize_t tracing_resize_ring_buffer(struct trace_array *tr, 5978 unsigned long size, int cpu_id) 5979 { 5980 guard(mutex)(&trace_types_lock); 5981 5982 if (cpu_id != RING_BUFFER_ALL_CPUS) { 5983 /* make sure this CPU is enabled in the mask */ 5984 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) 5985 return -EINVAL; 5986 } 5987 5988 return __tracing_resize_ring_buffer(tr, size, cpu_id); 5989 } 5990 5991 struct trace_mod_entry { 5992 unsigned long mod_addr; 5993 char mod_name[MODULE_NAME_LEN]; 5994 }; 5995 5996 struct trace_scratch { 5997 unsigned long kaslr_addr; 5998 unsigned long nr_entries; 5999 struct trace_mod_entry entries[]; 6000 }; 6001 6002 static DEFINE_MUTEX(scratch_mutex); 6003 6004 #ifdef CONFIG_MODULES 6005 static int save_mod(struct module *mod, void *data) 6006 { 6007 struct trace_array *tr = data; 6008 struct trace_scratch *tscratch; 6009 struct trace_mod_entry *entry; 6010 unsigned int size; 6011 6012 tscratch = tr->scratch; 6013 if (!tscratch) 6014 return -1; 6015 size = tr->scratch_size; 6016 6017 if (struct_size(tscratch, entries, tscratch->nr_entries + 1) > size) 6018 return -1; 6019 6020 entry = &tscratch->entries[tscratch->nr_entries]; 6021 6022 tscratch->nr_entries++; 6023 6024 entry->mod_addr = (unsigned long)mod->mem[MOD_TEXT].base; 6025 strscpy(entry->mod_name, mod->name); 6026 6027 return 0; 6028 } 6029 #else 6030 static int save_mod(struct module *mod, void *data) 6031 { 6032 return 0; 6033 } 6034 #endif 6035 6036 static void update_last_data(struct trace_array *tr) 6037 { 6038 struct trace_scratch *tscratch; 6039 6040 if (!(tr->flags & TRACE_ARRAY_FL_BOOT)) 6041 return; 6042 6043 /* Reset the module list and re-save the loaded modules */ 6044 if (tr->scratch) { 6045
struct trace_scratch *tscratch = tr->scratch; 6046 6047 memset(tscratch->entries, 0, 6048 flex_array_size(tscratch, entries, tscratch->nr_entries)); 6049 tscratch->nr_entries = 0; 6050 6051 guard(mutex)(&scratch_mutex); 6052 module_for_each_mod(save_mod, tr); 6053 } 6054 6055 if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) 6056 return; 6057 6058 /* 6059 * Need to clear all CPU buffers as there cannot be events 6060 * from the previous boot mixed with events from this boot, 6061 * as that would cause a confusing trace. This includes the 6062 * buffers of CPUs that may currently be offline. 6063 */ 6064 tracing_reset_all_cpus(&tr->array_buffer); 6065 6066 /* Using current data now */ 6067 tr->text_delta = 0; 6068 6069 if (!tr->scratch) 6070 return; 6071 6072 tscratch = tr->scratch; 6073 6074 /* Save the current KASLR offset in the persistent ring buffer meta data */ 6075 #ifdef CONFIG_RANDOMIZE_BASE 6076 tscratch->kaslr_addr = kaslr_offset(); 6077 #else 6078 tscratch->kaslr_addr = 0; 6079 #endif 6080 tr->flags &= ~TRACE_ARRAY_FL_LAST_BOOT; 6081 } 6082 6083 /** 6084 * tracing_update_buffers - used by tracing facility to expand ring buffers 6085 * @tr: The tracing instance 6086 * 6087 * To save memory when tracing is never used on a system with it 6088 * configured in, the ring buffers are set to a minimum size. Once 6089 * a user starts to use the tracing facility, they need to grow 6090 * to their default size. 6091 * 6092 * This function is to be called when a tracer is about to be used. 6093 */ 6094 int tracing_update_buffers(struct trace_array *tr) 6095 { 6096 int ret = 0; 6097 6098 mutex_lock(&trace_types_lock); 6099 6100 update_last_data(tr); 6101 6102 if (!tr->ring_buffer_expanded) 6103 ret = __tracing_resize_ring_buffer(tr, trace_buf_size, 6104 RING_BUFFER_ALL_CPUS); 6105 mutex_unlock(&trace_types_lock); 6106 6107 return ret; 6108 } 6109 6110 struct trace_option_dentry; 6111 6112 static void 6113 create_trace_option_files(struct trace_array *tr, struct tracer *tracer); 6114 6115 /* 6116 * Used to clear out the tracer before deletion of an instance. 6117 * Must have trace_types_lock held. 6118 */ 6119 static void tracing_set_nop(struct trace_array *tr) 6120 { 6121 if (tr->current_trace == &nop_trace) 6122 return; 6123 6124 tr->current_trace->enabled--; 6125 6126 if (tr->current_trace->reset) 6127 tr->current_trace->reset(tr); 6128 6129 tr->current_trace = &nop_trace; 6130 } 6131 6132 static bool tracer_options_updated; 6133 6134 static void add_tracer_options(struct trace_array *tr, struct tracer *t) 6135 { 6136 /* Only enable if the directory has been created already.
*/ 6137 if (!tr->dir) 6138 return; 6139 6140 /* Only create trace option files after update_tracer_options finish */ 6141 if (!tracer_options_updated) 6142 return; 6143 6144 create_trace_option_files(tr, t); 6145 } 6146 6147 int tracing_set_tracer(struct trace_array *tr, const char *buf) 6148 { 6149 struct tracer *t; 6150 #ifdef CONFIG_TRACER_MAX_TRACE 6151 bool had_max_tr; 6152 #endif 6153 int ret; 6154 6155 guard(mutex)(&trace_types_lock); 6156 6157 update_last_data(tr); 6158 6159 if (!tr->ring_buffer_expanded) { 6160 ret = __tracing_resize_ring_buffer(tr, trace_buf_size, 6161 RING_BUFFER_ALL_CPUS); 6162 if (ret < 0) 6163 return ret; 6164 ret = 0; 6165 } 6166 6167 for (t = trace_types; t; t = t->next) { 6168 if (strcmp(t->name, buf) == 0) 6169 break; 6170 } 6171 if (!t) 6172 return -EINVAL; 6173 6174 if (t == tr->current_trace) 6175 return 0; 6176 6177 #ifdef CONFIG_TRACER_SNAPSHOT 6178 if (t->use_max_tr) { 6179 local_irq_disable(); 6180 arch_spin_lock(&tr->max_lock); 6181 ret = tr->cond_snapshot ? -EBUSY : 0; 6182 arch_spin_unlock(&tr->max_lock); 6183 local_irq_enable(); 6184 if (ret) 6185 return ret; 6186 } 6187 #endif 6188 /* Some tracers won't work when enabled from the kernel command line */ 6189 if (system_state < SYSTEM_RUNNING && t->noboot) { 6190 pr_warn("Tracer '%s' is not allowed on command line, ignored\n", 6191 t->name); 6192 return -EINVAL; 6193 } 6194 6195 /* Some tracers are only allowed for the top level buffer */ 6196 if (!trace_ok_for_array(t, tr)) 6197 return -EINVAL; 6198 6199 /* If trace pipe files are being read, we can't change the tracer */ 6200 if (tr->trace_ref) 6201 return -EBUSY; 6202 6203 trace_branch_disable(); 6204 6205 tr->current_trace->enabled--; 6206 6207 if (tr->current_trace->reset) 6208 tr->current_trace->reset(tr); 6209 6210 #ifdef CONFIG_TRACER_MAX_TRACE 6211 had_max_tr = tr->current_trace->use_max_tr; 6212 6213 /* Current trace needs to be nop_trace before synchronize_rcu */ 6214 tr->current_trace = &nop_trace; 6215 6216 if (had_max_tr && !t->use_max_tr) { 6217 /* 6218 * We need to make sure that the update_max_tr sees that 6219 * current_trace changed to nop_trace to keep it from 6220 * swapping the buffers after we resize it. 6221 * update_max_tr() is called with interrupts disabled, 6222 * so a synchronize_rcu() is sufficient.
6223 */ 6224 synchronize_rcu(); 6225 free_snapshot(tr); 6226 tracing_disarm_snapshot(tr); 6227 } 6228 6229 if (!had_max_tr && t->use_max_tr) { 6230 ret = tracing_arm_snapshot_locked(tr); 6231 if (ret) 6232 return ret; 6233 } 6234 #else 6235 tr->current_trace = &nop_trace; 6236 #endif 6237 6238 if (t->init) { 6239 ret = tracer_init(t, tr); 6240 if (ret) { 6241 #ifdef CONFIG_TRACER_MAX_TRACE 6242 if (t->use_max_tr) 6243 tracing_disarm_snapshot(tr); 6244 #endif 6245 return ret; 6246 } 6247 } 6248 6249 tr->current_trace = t; 6250 tr->current_trace->enabled++; 6251 trace_branch_enable(tr); 6252 6253 return 0; 6254 } 6255 6256 static ssize_t 6257 tracing_set_trace_write(struct file *filp, const char __user *ubuf, 6258 size_t cnt, loff_t *ppos) 6259 { 6260 struct trace_array *tr = filp->private_data; 6261 char buf[MAX_TRACER_SIZE+1]; 6262 char *name; 6263 size_t ret; 6264 int err; 6265 6266 ret = cnt; 6267 6268 if (cnt > MAX_TRACER_SIZE) 6269 cnt = MAX_TRACER_SIZE; 6270 6271 if (copy_from_user(buf, ubuf, cnt)) 6272 return -EFAULT; 6273 6274 buf[cnt] = 0; 6275 6276 name = strim(buf); 6277 6278 err = tracing_set_tracer(tr, name); 6279 if (err) 6280 return err; 6281 6282 *ppos += ret; 6283 6284 return ret; 6285 } 6286 6287 static ssize_t 6288 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf, 6289 size_t cnt, loff_t *ppos) 6290 { 6291 char buf[64]; 6292 int r; 6293 6294 r = snprintf(buf, sizeof(buf), "%ld\n", 6295 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); 6296 if (r > sizeof(buf)) 6297 r = sizeof(buf); 6298 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6299 } 6300 6301 static ssize_t 6302 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, 6303 size_t cnt, loff_t *ppos) 6304 { 6305 unsigned long val; 6306 int ret; 6307 6308 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 6309 if (ret) 6310 return ret; 6311 6312 *ptr = val * 1000; 6313 6314 return cnt; 6315 } 6316 6317 static ssize_t 6318 tracing_thresh_read(struct file *filp, char __user *ubuf, 6319 size_t cnt, loff_t *ppos) 6320 { 6321 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos); 6322 } 6323 6324 static ssize_t 6325 tracing_thresh_write(struct file *filp, const char __user *ubuf, 6326 size_t cnt, loff_t *ppos) 6327 { 6328 struct trace_array *tr = filp->private_data; 6329 int ret; 6330 6331 guard(mutex)(&trace_types_lock); 6332 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos); 6333 if (ret < 0) 6334 return ret; 6335 6336 if (tr->current_trace->update_thresh) { 6337 ret = tr->current_trace->update_thresh(tr); 6338 if (ret < 0) 6339 return ret; 6340 } 6341 6342 return cnt; 6343 } 6344 6345 #ifdef CONFIG_TRACER_MAX_TRACE 6346 6347 static ssize_t 6348 tracing_max_lat_read(struct file *filp, char __user *ubuf, 6349 size_t cnt, loff_t *ppos) 6350 { 6351 struct trace_array *tr = filp->private_data; 6352 6353 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos); 6354 } 6355 6356 static ssize_t 6357 tracing_max_lat_write(struct file *filp, const char __user *ubuf, 6358 size_t cnt, loff_t *ppos) 6359 { 6360 struct trace_array *tr = filp->private_data; 6361 6362 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos); 6363 } 6364 6365 #endif 6366 6367 static int open_pipe_on_cpu(struct trace_array *tr, int cpu) 6368 { 6369 if (cpu == RING_BUFFER_ALL_CPUS) { 6370 if (cpumask_empty(tr->pipe_cpumask)) { 6371 cpumask_setall(tr->pipe_cpumask); 6372 return 0; 6373 } 6374 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) { 6375 cpumask_set_cpu(cpu, tr->pipe_cpumask); 6376 return 
0; 6377 } 6378 return -EBUSY; 6379 } 6380 6381 static void close_pipe_on_cpu(struct trace_array *tr, int cpu) 6382 { 6383 if (cpu == RING_BUFFER_ALL_CPUS) { 6384 WARN_ON(!cpumask_full(tr->pipe_cpumask)); 6385 cpumask_clear(tr->pipe_cpumask); 6386 } else { 6387 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask)); 6388 cpumask_clear_cpu(cpu, tr->pipe_cpumask); 6389 } 6390 } 6391 6392 static int tracing_open_pipe(struct inode *inode, struct file *filp) 6393 { 6394 struct trace_array *tr = inode->i_private; 6395 struct trace_iterator *iter; 6396 int cpu; 6397 int ret; 6398 6399 ret = tracing_check_open_get_tr(tr); 6400 if (ret) 6401 return ret; 6402 6403 mutex_lock(&trace_types_lock); 6404 cpu = tracing_get_cpu(inode); 6405 ret = open_pipe_on_cpu(tr, cpu); 6406 if (ret) 6407 goto fail_pipe_on_cpu; 6408 6409 /* create a buffer to store the information to pass to userspace */ 6410 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 6411 if (!iter) { 6412 ret = -ENOMEM; 6413 goto fail_alloc_iter; 6414 } 6415 6416 trace_seq_init(&iter->seq); 6417 iter->trace = tr->current_trace; 6418 6419 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 6420 ret = -ENOMEM; 6421 goto fail; 6422 } 6423 6424 /* trace pipe does not show start of buffer */ 6425 cpumask_setall(iter->started); 6426 6427 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 6428 iter->iter_flags |= TRACE_FILE_LAT_FMT; 6429 6430 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 6431 if (trace_clocks[tr->clock_id].in_ns) 6432 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 6433 6434 iter->tr = tr; 6435 iter->array_buffer = &tr->array_buffer; 6436 iter->cpu_file = cpu; 6437 mutex_init(&iter->mutex); 6438 filp->private_data = iter; 6439 6440 if (iter->trace->pipe_open) 6441 iter->trace->pipe_open(iter); 6442 6443 nonseekable_open(inode, filp); 6444 6445 tr->trace_ref++; 6446 6447 mutex_unlock(&trace_types_lock); 6448 return ret; 6449 6450 fail: 6451 kfree(iter); 6452 fail_alloc_iter: 6453 close_pipe_on_cpu(tr, cpu); 6454 fail_pipe_on_cpu: 6455 __trace_array_put(tr); 6456 mutex_unlock(&trace_types_lock); 6457 return ret; 6458 } 6459 6460 static int tracing_release_pipe(struct inode *inode, struct file *file) 6461 { 6462 struct trace_iterator *iter = file->private_data; 6463 struct trace_array *tr = inode->i_private; 6464 6465 mutex_lock(&trace_types_lock); 6466 6467 tr->trace_ref--; 6468 6469 if (iter->trace->pipe_close) 6470 iter->trace->pipe_close(iter); 6471 close_pipe_on_cpu(tr, iter->cpu_file); 6472 mutex_unlock(&trace_types_lock); 6473 6474 free_trace_iter_content(iter); 6475 kfree(iter); 6476 6477 trace_array_put(tr); 6478 6479 return 0; 6480 } 6481 6482 static __poll_t 6483 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) 6484 { 6485 struct trace_array *tr = iter->tr; 6486 6487 /* Iterators are static, they should be filled or empty */ 6488 if (trace_buffer_iter(iter, iter->cpu_file)) 6489 return EPOLLIN | EPOLLRDNORM; 6490 6491 if (tr->trace_flags & TRACE_ITER_BLOCK) 6492 /* 6493 * Always select as readable when in blocking mode 6494 */ 6495 return EPOLLIN | EPOLLRDNORM; 6496 else 6497 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file, 6498 filp, poll_table, iter->tr->buffer_percent); 6499 } 6500 6501 static __poll_t 6502 tracing_poll_pipe(struct file *filp, poll_table *poll_table) 6503 { 6504 struct trace_iterator *iter = filp->private_data; 6505 6506 return trace_poll(iter, filp, poll_table); 6507 } 6508 6509 /* Must be called with iter->mutex held. 
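* Returns -EAGAIN if O_NONBLOCK is set and there is nothing to read, propagates a failure from wait_on_pipe(), and otherwise returns 1 once data should be available. iter->mutex is dropped and re-acquired around the actual wait.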
*/ 6510 static int tracing_wait_pipe(struct file *filp) 6511 { 6512 struct trace_iterator *iter = filp->private_data; 6513 int ret; 6514 6515 while (trace_empty(iter)) { 6516 6517 if ((filp->f_flags & O_NONBLOCK)) { 6518 return -EAGAIN; 6519 } 6520 6521 /* 6522 * We block until we read something and tracing is disabled. 6523 * We still block if tracing is disabled, but we have never 6524 * read anything. This allows a user to cat this file, and 6525 * then enable tracing. But after we have read something, 6526 * we give an EOF when tracing is again disabled. 6527 * 6528 * iter->pos will be 0 if we haven't read anything. 6529 */ 6530 if (!tracer_tracing_is_on(iter->tr) && iter->pos) 6531 break; 6532 6533 mutex_unlock(&iter->mutex); 6534 6535 ret = wait_on_pipe(iter, 0); 6536 6537 mutex_lock(&iter->mutex); 6538 6539 if (ret) 6540 return ret; 6541 } 6542 6543 return 1; 6544 } 6545 6546 /* 6547 * Consumer reader. 6548 */ 6549 static ssize_t 6550 tracing_read_pipe(struct file *filp, char __user *ubuf, 6551 size_t cnt, loff_t *ppos) 6552 { 6553 struct trace_iterator *iter = filp->private_data; 6554 ssize_t sret; 6555 6556 /* 6557 * Avoid more than one consumer on a single file descriptor. 6558 * This is just a matter of trace coherency: the ring buffer itself 6559 * is protected. 6560 */ 6561 guard(mutex)(&iter->mutex); 6562 6563 /* return any leftover data */ 6564 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 6565 if (sret != -EBUSY) 6566 return sret; 6567 6568 trace_seq_init(&iter->seq); 6569 6570 if (iter->trace->read) { 6571 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); 6572 if (sret) 6573 return sret; 6574 } 6575 6576 waitagain: 6577 sret = tracing_wait_pipe(filp); 6578 if (sret <= 0) 6579 return sret; 6580 6581 /* stop when tracing is finished */ 6582 if (trace_empty(iter)) 6583 return 0; 6584 6585 if (cnt >= TRACE_SEQ_BUFFER_SIZE) 6586 cnt = TRACE_SEQ_BUFFER_SIZE - 1; 6587 6588 /* reset all but tr, trace, and overruns */ 6589 trace_iterator_reset(iter); 6590 cpumask_clear(iter->started); 6591 trace_seq_init(&iter->seq); 6592 6593 trace_event_read_lock(); 6594 trace_access_lock(iter->cpu_file); 6595 while (trace_find_next_entry_inc(iter) != NULL) { 6596 enum print_line_t ret; 6597 int save_len = iter->seq.seq.len; 6598 6599 ret = print_trace_line(iter); 6600 if (ret == TRACE_TYPE_PARTIAL_LINE) { 6601 /* 6602 * If one print_trace_line() fills the entire trace_seq in one shot, 6603 * trace_seq_to_user() will return -EBUSY because save_len == 0. 6604 * In this case, we need to consume it, otherwise the loop will peek 6605 * at this event again next time, resulting in an infinite loop. 6606 */ 6607 if (save_len == 0) { 6608 iter->seq.full = 0; 6609 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n"); 6610 trace_consume(iter); 6611 break; 6612 } 6613 6614 /* In other cases, don't print partial lines */ 6615 iter->seq.seq.len = save_len; 6616 break; 6617 } 6618 if (ret != TRACE_TYPE_NO_CONSUME) 6619 trace_consume(iter); 6620 6621 if (trace_seq_used(&iter->seq) >= cnt) 6622 break; 6623 6624 /* 6625 * Setting the full flag means we reached the trace_seq buffer 6626 * size and we should leave by partial output condition above. 6627 * One of the trace_seq_* functions is not used properly.
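* (If iter->seq.full is set at this point, an overflow slipped past the TRACE_TYPE_PARTIAL_LINE handling above.)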
6628 */ 6629 WARN_ONCE(iter->seq.full, "full flag set for trace type %d", 6630 iter->ent->type); 6631 } 6632 trace_access_unlock(iter->cpu_file); 6633 trace_event_read_unlock(); 6634 6635 /* Now copy what we have to the user */ 6636 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 6637 if (iter->seq.readpos >= trace_seq_used(&iter->seq)) 6638 trace_seq_init(&iter->seq); 6639 6640 /* 6641 * If there was nothing to send to user, in spite of consuming trace 6642 * entries, go back to wait for more entries. 6643 */ 6644 if (sret == -EBUSY) 6645 goto waitagain; 6646 6647 return sret; 6648 } 6649 6650 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, 6651 unsigned int idx) 6652 { 6653 __free_page(spd->pages[idx]); 6654 } 6655 6656 static size_t 6657 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) 6658 { 6659 size_t count; 6660 int save_len; 6661 int ret; 6662 6663 /* Seq buffer is page-sized, exactly what we need. */ 6664 for (;;) { 6665 save_len = iter->seq.seq.len; 6666 ret = print_trace_line(iter); 6667 6668 if (trace_seq_has_overflowed(&iter->seq)) { 6669 iter->seq.seq.len = save_len; 6670 break; 6671 } 6672 6673 /* 6674 * This should not be hit, because it should only 6675 * be set if the iter->seq overflowed. But check it 6676 * anyway to be safe. 6677 */ 6678 if (ret == TRACE_TYPE_PARTIAL_LINE) { 6679 iter->seq.seq.len = save_len; 6680 break; 6681 } 6682 6683 count = trace_seq_used(&iter->seq) - save_len; 6684 if (rem < count) { 6685 rem = 0; 6686 iter->seq.seq.len = save_len; 6687 break; 6688 } 6689 6690 if (ret != TRACE_TYPE_NO_CONSUME) 6691 trace_consume(iter); 6692 rem -= count; 6693 if (!trace_find_next_entry_inc(iter)) { 6694 rem = 0; 6695 iter->ent = NULL; 6696 break; 6697 } 6698 } 6699 6700 return rem; 6701 } 6702 6703 static ssize_t tracing_splice_read_pipe(struct file *filp, 6704 loff_t *ppos, 6705 struct pipe_inode_info *pipe, 6706 size_t len, 6707 unsigned int flags) 6708 { 6709 struct page *pages_def[PIPE_DEF_BUFFERS]; 6710 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 6711 struct trace_iterator *iter = filp->private_data; 6712 struct splice_pipe_desc spd = { 6713 .pages = pages_def, 6714 .partial = partial_def, 6715 .nr_pages = 0, /* This gets updated below. */ 6716 .nr_pages_max = PIPE_DEF_BUFFERS, 6717 .ops = &default_pipe_buf_ops, 6718 .spd_release = tracing_spd_release_pipe, 6719 }; 6720 ssize_t ret; 6721 size_t rem; 6722 unsigned int i; 6723 6724 if (splice_grow_spd(pipe, &spd)) 6725 return -ENOMEM; 6726 6727 mutex_lock(&iter->mutex); 6728 6729 if (iter->trace->splice_read) { 6730 ret = iter->trace->splice_read(iter, filp, 6731 ppos, pipe, len, flags); 6732 if (ret) 6733 goto out_err; 6734 } 6735 6736 ret = tracing_wait_pipe(filp); 6737 if (ret <= 0) 6738 goto out_err; 6739 6740 if (!iter->ent && !trace_find_next_entry_inc(iter)) { 6741 ret = -EFAULT; 6742 goto out_err; 6743 } 6744 6745 trace_event_read_lock(); 6746 trace_access_lock(iter->cpu_file); 6747 6748 /* Fill as many pages as possible. */ 6749 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) { 6750 spd.pages[i] = alloc_page(GFP_KERNEL); 6751 if (!spd.pages[i]) 6752 break; 6753 6754 rem = tracing_fill_pipe_page(rem, iter); 6755 6756 /* Copy the data into the page, so we can start over. 
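* (trace_seq_init() below resets the seq, so the next page is filled starting from an empty buffer.)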
*/ 6757 ret = trace_seq_to_buffer(&iter->seq, 6758 page_address(spd.pages[i]), 6759 trace_seq_used(&iter->seq)); 6760 if (ret < 0) { 6761 __free_page(spd.pages[i]); 6762 break; 6763 } 6764 spd.partial[i].offset = 0; 6765 spd.partial[i].len = trace_seq_used(&iter->seq); 6766 6767 trace_seq_init(&iter->seq); 6768 } 6769 6770 trace_access_unlock(iter->cpu_file); 6771 trace_event_read_unlock(); 6772 mutex_unlock(&iter->mutex); 6773 6774 spd.nr_pages = i; 6775 6776 if (i) 6777 ret = splice_to_pipe(pipe, &spd); 6778 else 6779 ret = 0; 6780 out: 6781 splice_shrink_spd(&spd); 6782 return ret; 6783 6784 out_err: 6785 mutex_unlock(&iter->mutex); 6786 goto out; 6787 } 6788 6789 static ssize_t 6790 tracing_entries_read(struct file *filp, char __user *ubuf, 6791 size_t cnt, loff_t *ppos) 6792 { 6793 struct inode *inode = file_inode(filp); 6794 struct trace_array *tr = inode->i_private; 6795 int cpu = tracing_get_cpu(inode); 6796 char buf[64]; 6797 int r = 0; 6798 ssize_t ret; 6799 6800 mutex_lock(&trace_types_lock); 6801 6802 if (cpu == RING_BUFFER_ALL_CPUS) { 6803 int cpu, buf_size_same; 6804 unsigned long size; 6805 6806 size = 0; 6807 buf_size_same = 1; 6808 /* check if all cpu sizes are same */ 6809 for_each_tracing_cpu(cpu) { 6810 /* fill in the size from first enabled cpu */ 6811 if (size == 0) 6812 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries; 6813 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) { 6814 buf_size_same = 0; 6815 break; 6816 } 6817 } 6818 6819 if (buf_size_same) { 6820 if (!tr->ring_buffer_expanded) 6821 r = sprintf(buf, "%lu (expanded: %lu)\n", 6822 size >> 10, 6823 trace_buf_size >> 10); 6824 else 6825 r = sprintf(buf, "%lu\n", size >> 10); 6826 } else 6827 r = sprintf(buf, "X\n"); 6828 } else 6829 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10); 6830 6831 mutex_unlock(&trace_types_lock); 6832 6833 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6834 return ret; 6835 } 6836 6837 static ssize_t 6838 tracing_entries_write(struct file *filp, const char __user *ubuf, 6839 size_t cnt, loff_t *ppos) 6840 { 6841 struct inode *inode = file_inode(filp); 6842 struct trace_array *tr = inode->i_private; 6843 unsigned long val; 6844 int ret; 6845 6846 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 6847 if (ret) 6848 return ret; 6849 6850 /* must have at least 1 entry */ 6851 if (!val) 6852 return -EINVAL; 6853 6854 /* value is in KB */ 6855 val <<= 10; 6856 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); 6857 if (ret < 0) 6858 return ret; 6859 6860 *ppos += cnt; 6861 6862 return cnt; 6863 } 6864 6865 static ssize_t 6866 tracing_total_entries_read(struct file *filp, char __user *ubuf, 6867 size_t cnt, loff_t *ppos) 6868 { 6869 struct trace_array *tr = filp->private_data; 6870 char buf[64]; 6871 int r, cpu; 6872 unsigned long size = 0, expanded_size = 0; 6873 6874 mutex_lock(&trace_types_lock); 6875 for_each_tracing_cpu(cpu) { 6876 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10; 6877 if (!tr->ring_buffer_expanded) 6878 expanded_size += trace_buf_size >> 10; 6879 } 6880 if (tr->ring_buffer_expanded) 6881 r = sprintf(buf, "%lu\n", size); 6882 else 6883 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); 6884 mutex_unlock(&trace_types_lock); 6885 6886 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6887 } 6888 6889 #define LAST_BOOT_HEADER ((void *)1) 6890 6891 static void *l_next(struct seq_file *m, void *v, loff_t *pos) 6892 { 6893 struct trace_array *tr = m->private; 6894 
struct trace_scratch *tscratch = tr->scratch; 6895 unsigned int index = *pos; 6896 6897 (*pos)++; 6898 6899 if (*pos == 1) 6900 return LAST_BOOT_HEADER; 6901 6902 /* Only show offsets of the last boot data */ 6903 if (!tscratch || !(tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) 6904 return NULL; 6905 6906 /* *pos 0 is for the header, 1 is for the first module */ 6907 index--; 6908 6909 if (index >= tscratch->nr_entries) 6910 return NULL; 6911 6912 return &tscratch->entries[index]; 6913 } 6914 6915 static void *l_start(struct seq_file *m, loff_t *pos) 6916 { 6917 mutex_lock(&scratch_mutex); 6918 6919 return l_next(m, NULL, pos); 6920 } 6921 6922 static void l_stop(struct seq_file *m, void *p) 6923 { 6924 mutex_unlock(&scratch_mutex); 6925 } 6926 6927 static void show_last_boot_header(struct seq_file *m, struct trace_array *tr) 6928 { 6929 struct trace_scratch *tscratch = tr->scratch; 6930 6931 /* 6932 * Do not leak KASLR address. This only shows the KASLR address of 6933 * the last boot. When the ring buffer is started, the LAST_BOOT 6934 * flag gets cleared, and this should only report "current". 6935 * Otherwise it shows the KASLR address from the previous boot which 6936 * should not be the same as the current boot. 6937 */ 6938 if (tscratch && (tr->flags & TRACE_ARRAY_FL_LAST_BOOT)) 6939 seq_printf(m, "%lx\t[kernel]\n", tscratch->kaslr_addr); 6940 else 6941 seq_puts(m, "# Current\n"); 6942 } 6943 6944 static int l_show(struct seq_file *m, void *v) 6945 { 6946 struct trace_array *tr = m->private; 6947 struct trace_mod_entry *entry = v; 6948 6949 if (v == LAST_BOOT_HEADER) { 6950 show_last_boot_header(m, tr); 6951 return 0; 6952 } 6953 6954 seq_printf(m, "%lx\t%s\n", entry->mod_addr, entry->mod_name); 6955 return 0; 6956 } 6957 6958 static const struct seq_operations last_boot_seq_ops = { 6959 .start = l_start, 6960 .next = l_next, 6961 .stop = l_stop, 6962 .show = l_show, 6963 }; 6964 6965 static int tracing_last_boot_open(struct inode *inode, struct file *file) 6966 { 6967 struct trace_array *tr = inode->i_private; 6968 struct seq_file *m; 6969 int ret; 6970 6971 ret = tracing_check_open_get_tr(tr); 6972 if (ret) 6973 return ret; 6974 6975 ret = seq_open(file, &last_boot_seq_ops); 6976 if (ret) { 6977 trace_array_put(tr); 6978 return ret; 6979 } 6980 6981 m = file->private_data; 6982 m->private = tr; 6983 6984 return 0; 6985 } 6986 6987 static int tracing_buffer_meta_open(struct inode *inode, struct file *filp) 6988 { 6989 struct trace_array *tr = inode->i_private; 6990 int cpu = tracing_get_cpu(inode); 6991 int ret; 6992 6993 ret = tracing_check_open_get_tr(tr); 6994 if (ret) 6995 return ret; 6996 6997 ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu); 6998 if (ret < 0) 6999 __trace_array_put(tr); 7000 return ret; 7001 } 7002 7003 static ssize_t 7004 tracing_free_buffer_write(struct file *filp, const char __user *ubuf, 7005 size_t cnt, loff_t *ppos) 7006 { 7007 /* 7008 * There is no need to read what the user has written, this function 7009 * is just to make sure that there is no error when "echo" is used 7010 */ 7011 7012 *ppos += cnt; 7013 7014 return cnt; 7015 } 7016 7017 static int 7018 tracing_free_buffer_release(struct inode *inode, struct file *filp) 7019 { 7020 struct trace_array *tr = inode->i_private; 7021 7022 /* disable tracing ? 
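Tracing is turned off here only when the instance has the "disable_on_free" (TRACE_ITER_STOP_ON_FREE) trace option set.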
*/ 7023 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) 7024 tracer_tracing_off(tr); 7025 /* resize the ring buffer to 0 */ 7026 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); 7027 7028 trace_array_put(tr); 7029 7030 return 0; 7031 } 7032 7033 #define TRACE_MARKER_MAX_SIZE 4096 7034 7035 static ssize_t 7036 tracing_mark_write(struct file *filp, const char __user *ubuf, 7037 size_t cnt, loff_t *fpos) 7038 { 7039 struct trace_array *tr = filp->private_data; 7040 struct ring_buffer_event *event; 7041 enum event_trigger_type tt = ETT_NONE; 7042 struct trace_buffer *buffer; 7043 struct print_entry *entry; 7044 int meta_size; 7045 ssize_t written; 7046 size_t size; 7047 int len; 7048 7049 /* Used in tracing_mark_raw_write() as well */ 7050 #define FAULTED_STR "<faulted>" 7051 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */ 7052 7053 if (tracing_disabled) 7054 return -EINVAL; 7055 7056 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) 7057 return -EINVAL; 7058 7059 if ((ssize_t)cnt < 0) 7060 return -EINVAL; 7061 7062 if (cnt > TRACE_MARKER_MAX_SIZE) 7063 cnt = TRACE_MARKER_MAX_SIZE; 7064 7065 meta_size = sizeof(*entry) + 2; /* add '\0' and possible '\n' */ 7066 again: 7067 size = cnt + meta_size; 7068 7069 /* If less than "<faulted>", then make sure we can still add that */ 7070 if (cnt < FAULTED_SIZE) 7071 size += FAULTED_SIZE - cnt; 7072 7073 buffer = tr->array_buffer.buffer; 7074 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 7075 tracing_gen_ctx()); 7076 if (unlikely(!event)) { 7077 /* 7078 * If the size was greater than what was allowed, then 7079 * make it smaller and try again. 7080 */ 7081 if (size > ring_buffer_max_event_size(buffer)) { 7082 /* A cnt smaller than FAULTED_SIZE should never make size bigger than the max */ 7083 if (WARN_ON_ONCE(cnt < FAULTED_SIZE)) 7084 return -EBADF; 7085 cnt = ring_buffer_max_event_size(buffer) - meta_size; 7086 /* The above should only happen once */ 7087 if (WARN_ON_ONCE(cnt + meta_size == size)) 7088 return -EBADF; 7089 goto again; 7090 } 7091 7092 /* Ring buffer disabled, return as if not open for write */ 7093 return -EBADF; 7094 } 7095 7096 entry = ring_buffer_event_data(event); 7097 entry->ip = _THIS_IP_; 7098 7099 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt); 7100 if (len) { 7101 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); 7102 cnt = FAULTED_SIZE; 7103 written = -EFAULT; 7104 } else 7105 written = cnt; 7106 7107 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { 7108 /* do not add \n before testing triggers, but add \0 */ 7109 entry->buf[cnt] = '\0'; 7110 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event); 7111 } 7112 7113 if (entry->buf[cnt - 1] != '\n') { 7114 entry->buf[cnt] = '\n'; 7115 entry->buf[cnt + 1] = '\0'; 7116 } else 7117 entry->buf[cnt] = '\0'; 7118 7119 if (static_branch_unlikely(&trace_marker_exports_enabled)) 7120 ftrace_exports(event, TRACE_EXPORT_MARKER); 7121 __buffer_unlock_commit(buffer, event); 7122 7123 if (tt) 7124 event_triggers_post_call(tr->trace_marker_file, tt); 7125 7126 return written; 7127 } 7128 7129 static ssize_t 7130 tracing_mark_raw_write(struct file *filp, const char __user *ubuf, 7131 size_t cnt, loff_t *fpos) 7132 { 7133 struct trace_array *tr = filp->private_data; 7134 struct ring_buffer_event *event; 7135 struct trace_buffer *buffer; 7136 struct raw_data_entry *entry; 7137 ssize_t written; 7138 int size; 7139 int len; 7140 7141 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int)) 7142 7143 if (tracing_disabled) 7144
return -EINVAL; 7145 7146 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) 7147 return -EINVAL; 7148 7149 /* The marker must at least have a tag id */ 7150 if (cnt < sizeof(unsigned int)) 7151 return -EINVAL; 7152 7153 size = sizeof(*entry) + cnt; 7154 if (cnt < FAULT_SIZE_ID) 7155 size += FAULT_SIZE_ID - cnt; 7156 7157 buffer = tr->array_buffer.buffer; 7158 7159 if (size > ring_buffer_max_event_size(buffer)) 7160 return -EINVAL; 7161 7162 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, 7163 tracing_gen_ctx()); 7164 if (!event) 7165 /* Ring buffer disabled, return as if not open for write */ 7166 return -EBADF; 7167 7168 entry = ring_buffer_event_data(event); 7169 7170 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); 7171 if (len) { 7172 entry->id = -1; 7173 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); 7174 written = -EFAULT; 7175 } else 7176 written = cnt; 7177 7178 __buffer_unlock_commit(buffer, event); 7179 7180 return written; 7181 } 7182 7183 static int tracing_clock_show(struct seq_file *m, void *v) 7184 { 7185 struct trace_array *tr = m->private; 7186 int i; 7187 7188 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) 7189 seq_printf(m, 7190 "%s%s%s%s", i ? " " : "", 7191 i == tr->clock_id ? "[" : "", trace_clocks[i].name, 7192 i == tr->clock_id ? "]" : ""); 7193 seq_putc(m, '\n'); 7194 7195 return 0; 7196 } 7197 7198 int tracing_set_clock(struct trace_array *tr, const char *clockstr) 7199 { 7200 int i; 7201 7202 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { 7203 if (strcmp(trace_clocks[i].name, clockstr) == 0) 7204 break; 7205 } 7206 if (i == ARRAY_SIZE(trace_clocks)) 7207 return -EINVAL; 7208 7209 mutex_lock(&trace_types_lock); 7210 7211 tr->clock_id = i; 7212 7213 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func); 7214 7215 /* 7216 * New clock may not be consistent with the previous clock. 7217 * Reset the buffer so that it doesn't have incomparable timestamps. 
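*
* e.g., from user space (assuming tracefs is mounted at
* /sys/kernel/tracing; any clock name shown by reading this file works):
*
*	# echo global > /sys/kernel/tracing/trace_clock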
7218 */ 7219 tracing_reset_online_cpus(&tr->array_buffer); 7220 7221 #ifdef CONFIG_TRACER_MAX_TRACE 7222 if (tr->max_buffer.buffer) 7223 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); 7224 tracing_reset_online_cpus(&tr->max_buffer); 7225 #endif 7226 7227 mutex_unlock(&trace_types_lock); 7228 7229 return 0; 7230 } 7231 7232 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, 7233 size_t cnt, loff_t *fpos) 7234 { 7235 struct seq_file *m = filp->private_data; 7236 struct trace_array *tr = m->private; 7237 char buf[64]; 7238 const char *clockstr; 7239 int ret; 7240 7241 if (cnt >= sizeof(buf)) 7242 return -EINVAL; 7243 7244 if (copy_from_user(buf, ubuf, cnt)) 7245 return -EFAULT; 7246 7247 buf[cnt] = 0; 7248 7249 clockstr = strstrip(buf); 7250 7251 ret = tracing_set_clock(tr, clockstr); 7252 if (ret) 7253 return ret; 7254 7255 *fpos += cnt; 7256 7257 return cnt; 7258 } 7259 7260 static int tracing_clock_open(struct inode *inode, struct file *file) 7261 { 7262 struct trace_array *tr = inode->i_private; 7263 int ret; 7264 7265 ret = tracing_check_open_get_tr(tr); 7266 if (ret) 7267 return ret; 7268 7269 ret = single_open(file, tracing_clock_show, inode->i_private); 7270 if (ret < 0) 7271 trace_array_put(tr); 7272 7273 return ret; 7274 } 7275 7276 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v) 7277 { 7278 struct trace_array *tr = m->private; 7279 7280 mutex_lock(&trace_types_lock); 7281 7282 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer)) 7283 seq_puts(m, "delta [absolute]\n"); 7284 else 7285 seq_puts(m, "[delta] absolute\n"); 7286 7287 mutex_unlock(&trace_types_lock); 7288 7289 return 0; 7290 } 7291 7292 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file) 7293 { 7294 struct trace_array *tr = inode->i_private; 7295 int ret; 7296 7297 ret = tracing_check_open_get_tr(tr); 7298 if (ret) 7299 return ret; 7300 7301 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); 7302 if (ret < 0) 7303 trace_array_put(tr); 7304 7305 return ret; 7306 } 7307 7308 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe) 7309 { 7310 if (rbe == this_cpu_read(trace_buffered_event)) 7311 return ring_buffer_time_stamp(buffer); 7312 7313 return ring_buffer_event_time_stamp(buffer, rbe); 7314 } 7315 7316 /* 7317 * Set or disable using the per CPU trace_buffered_event when possible.
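* The no_filter_buffering_ref count below lets set/clear calls nest: only the first set and the last clear actually change the state.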
7318 */ 7319 int tracing_set_filter_buffering(struct trace_array *tr, bool set) 7320 { 7321 guard(mutex)(&trace_types_lock); 7322 7323 if (set && tr->no_filter_buffering_ref++) 7324 return 0; 7325 7326 if (!set) { 7327 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) 7328 return -EINVAL; 7329 7330 --tr->no_filter_buffering_ref; 7331 } 7332 7333 return 0; 7334 } 7335 7336 struct ftrace_buffer_info { 7337 struct trace_iterator iter; 7338 void *spare; 7339 unsigned int spare_cpu; 7340 unsigned int spare_size; 7341 unsigned int read; 7342 }; 7343 7344 #ifdef CONFIG_TRACER_SNAPSHOT 7345 static int tracing_snapshot_open(struct inode *inode, struct file *file) 7346 { 7347 struct trace_array *tr = inode->i_private; 7348 struct trace_iterator *iter; 7349 struct seq_file *m; 7350 int ret; 7351 7352 ret = tracing_check_open_get_tr(tr); 7353 if (ret) 7354 return ret; 7355 7356 if (file->f_mode & FMODE_READ) { 7357 iter = __tracing_open(inode, file, true); 7358 if (IS_ERR(iter)) 7359 ret = PTR_ERR(iter); 7360 } else { 7361 /* Writes still need the seq_file to hold the private data */ 7362 ret = -ENOMEM; 7363 m = kzalloc(sizeof(*m), GFP_KERNEL); 7364 if (!m) 7365 goto out; 7366 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 7367 if (!iter) { 7368 kfree(m); 7369 goto out; 7370 } 7371 ret = 0; 7372 7373 iter->tr = tr; 7374 iter->array_buffer = &tr->max_buffer; 7375 iter->cpu_file = tracing_get_cpu(inode); 7376 m->private = iter; 7377 file->private_data = m; 7378 } 7379 out: 7380 if (ret < 0) 7381 trace_array_put(tr); 7382 7383 return ret; 7384 } 7385 7386 static void tracing_swap_cpu_buffer(void *tr) 7387 { 7388 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id()); 7389 } 7390 7391 static ssize_t 7392 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, 7393 loff_t *ppos) 7394 { 7395 struct seq_file *m = filp->private_data; 7396 struct trace_iterator *iter = m->private; 7397 struct trace_array *tr = iter->tr; 7398 unsigned long val; 7399 int ret; 7400 7401 ret = tracing_update_buffers(tr); 7402 if (ret < 0) 7403 return ret; 7404 7405 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 7406 if (ret) 7407 return ret; 7408 7409 guard(mutex)(&trace_types_lock); 7410 7411 if (tr->current_trace->use_max_tr) 7412 return -EBUSY; 7413 7414 local_irq_disable(); 7415 arch_spin_lock(&tr->max_lock); 7416 if (tr->cond_snapshot) 7417 ret = -EBUSY; 7418 arch_spin_unlock(&tr->max_lock); 7419 local_irq_enable(); 7420 if (ret) 7421 return ret; 7422 7423 switch (val) { 7424 case 0: 7425 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) 7426 return -EINVAL; 7427 if (tr->allocated_snapshot) 7428 free_snapshot(tr); 7429 break; 7430 case 1: 7431 /* Only allow per-cpu swap if the ring buffer supports it */ 7432 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP 7433 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) 7434 return -EINVAL; 7435 #endif 7436 if (tr->allocated_snapshot) 7437 ret = resize_buffer_duplicate_size(&tr->max_buffer, 7438 &tr->array_buffer, iter->cpu_file); 7439 7440 ret = tracing_arm_snapshot_locked(tr); 7441 if (ret) 7442 return ret; 7443 7444 /* Now, we're going to swap */ 7445 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { 7446 local_irq_disable(); 7447 update_max_tr(tr, current, smp_processor_id(), NULL); 7448 local_irq_enable(); 7449 } else { 7450 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer, 7451 (void *)tr, 1); 7452 } 7453 tracing_disarm_snapshot(tr); 7454 break; 7455 default: 7456 if (tr->allocated_snapshot) { 7457 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 7458 
tracing_reset_online_cpus(&tr->max_buffer); 7459 else 7460 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file); 7461 } 7462 break; 7463 } 7464 7465 if (ret >= 0) { 7466 *ppos += cnt; 7467 ret = cnt; 7468 } 7469 7470 return ret; 7471 } 7472 7473 static int tracing_snapshot_release(struct inode *inode, struct file *file) 7474 { 7475 struct seq_file *m = file->private_data; 7476 int ret; 7477 7478 ret = tracing_release(inode, file); 7479 7480 if (file->f_mode & FMODE_READ) 7481 return ret; 7482 7483 /* If write only, the seq_file is just a stub */ 7484 if (m) 7485 kfree(m->private); 7486 kfree(m); 7487 7488 return 0; 7489 } 7490 7491 static int tracing_buffers_open(struct inode *inode, struct file *filp); 7492 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, 7493 size_t count, loff_t *ppos); 7494 static int tracing_buffers_release(struct inode *inode, struct file *file); 7495 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, 7496 struct pipe_inode_info *pipe, size_t len, unsigned int flags); 7497 7498 static int snapshot_raw_open(struct inode *inode, struct file *filp) 7499 { 7500 struct ftrace_buffer_info *info; 7501 int ret; 7502 7503 /* The following checks for tracefs lockdown */ 7504 ret = tracing_buffers_open(inode, filp); 7505 if (ret < 0) 7506 return ret; 7507 7508 info = filp->private_data; 7509 7510 if (info->iter.trace->use_max_tr) { 7511 tracing_buffers_release(inode, filp); 7512 return -EBUSY; 7513 } 7514 7515 info->iter.snapshot = true; 7516 info->iter.array_buffer = &info->iter.tr->max_buffer; 7517 7518 return ret; 7519 } 7520 7521 #endif /* CONFIG_TRACER_SNAPSHOT */ 7522 7523 7524 static const struct file_operations tracing_thresh_fops = { 7525 .open = tracing_open_generic, 7526 .read = tracing_thresh_read, 7527 .write = tracing_thresh_write, 7528 .llseek = generic_file_llseek, 7529 }; 7530 7531 #ifdef CONFIG_TRACER_MAX_TRACE 7532 static const struct file_operations tracing_max_lat_fops = { 7533 .open = tracing_open_generic_tr, 7534 .read = tracing_max_lat_read, 7535 .write = tracing_max_lat_write, 7536 .llseek = generic_file_llseek, 7537 .release = tracing_release_generic_tr, 7538 }; 7539 #endif 7540 7541 static const struct file_operations set_tracer_fops = { 7542 .open = tracing_open_generic_tr, 7543 .read = tracing_set_trace_read, 7544 .write = tracing_set_trace_write, 7545 .llseek = generic_file_llseek, 7546 .release = tracing_release_generic_tr, 7547 }; 7548 7549 static const struct file_operations tracing_pipe_fops = { 7550 .open = tracing_open_pipe, 7551 .poll = tracing_poll_pipe, 7552 .read = tracing_read_pipe, 7553 .splice_read = tracing_splice_read_pipe, 7554 .release = tracing_release_pipe, 7555 }; 7556 7557 static const struct file_operations tracing_entries_fops = { 7558 .open = tracing_open_generic_tr, 7559 .read = tracing_entries_read, 7560 .write = tracing_entries_write, 7561 .llseek = generic_file_llseek, 7562 .release = tracing_release_generic_tr, 7563 }; 7564 7565 static const struct file_operations tracing_buffer_meta_fops = { 7566 .open = tracing_buffer_meta_open, 7567 .read = seq_read, 7568 .llseek = seq_lseek, 7569 .release = tracing_seq_release, 7570 }; 7571 7572 static const struct file_operations tracing_total_entries_fops = { 7573 .open = tracing_open_generic_tr, 7574 .read = tracing_total_entries_read, 7575 .llseek = generic_file_llseek, 7576 .release = tracing_release_generic_tr, 7577 }; 7578 7579 static const struct file_operations tracing_free_buffer_fops = { 7580 .open = tracing_open_generic_tr, 
7581 .write = tracing_free_buffer_write, 7582 .release = tracing_free_buffer_release, 7583 }; 7584 7585 static const struct file_operations tracing_mark_fops = { 7586 .open = tracing_mark_open, 7587 .write = tracing_mark_write, 7588 .release = tracing_release_generic_tr, 7589 }; 7590 7591 static const struct file_operations tracing_mark_raw_fops = { 7592 .open = tracing_mark_open, 7593 .write = tracing_mark_raw_write, 7594 .release = tracing_release_generic_tr, 7595 }; 7596 7597 static const struct file_operations trace_clock_fops = { 7598 .open = tracing_clock_open, 7599 .read = seq_read, 7600 .llseek = seq_lseek, 7601 .release = tracing_single_release_tr, 7602 .write = tracing_clock_write, 7603 }; 7604 7605 static const struct file_operations trace_time_stamp_mode_fops = { 7606 .open = tracing_time_stamp_mode_open, 7607 .read = seq_read, 7608 .llseek = seq_lseek, 7609 .release = tracing_single_release_tr, 7610 }; 7611 7612 static const struct file_operations last_boot_fops = { 7613 .open = tracing_last_boot_open, 7614 .read = seq_read, 7615 .llseek = seq_lseek, 7616 .release = tracing_seq_release, 7617 }; 7618 7619 #ifdef CONFIG_TRACER_SNAPSHOT 7620 static const struct file_operations snapshot_fops = { 7621 .open = tracing_snapshot_open, 7622 .read = seq_read, 7623 .write = tracing_snapshot_write, 7624 .llseek = tracing_lseek, 7625 .release = tracing_snapshot_release, 7626 }; 7627 7628 static const struct file_operations snapshot_raw_fops = { 7629 .open = snapshot_raw_open, 7630 .read = tracing_buffers_read, 7631 .release = tracing_buffers_release, 7632 .splice_read = tracing_buffers_splice_read, 7633 }; 7634 7635 #endif /* CONFIG_TRACER_SNAPSHOT */ 7636 7637 /* 7638 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct 7639 * @filp: The active open file structure 7640 * @ubuf: The userspace provided buffer to read the value from 7641 * @cnt: The maximum number of bytes to read 7642 * @ppos: The current "file" position 7643 * 7644 * This function implements the write interface for a struct trace_min_max_param. 7645 * The filp->private_data must point to a trace_min_max_param structure that 7646 * defines where to write the value, the min and the max acceptable values, 7647 * and a lock to protect the write. 7648 */ 7649 static ssize_t 7650 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) 7651 { 7652 struct trace_min_max_param *param = filp->private_data; 7653 u64 val; 7654 int err; 7655 7656 if (!param) 7657 return -EFAULT; 7658 7659 err = kstrtoull_from_user(ubuf, cnt, 10, &val); 7660 if (err) 7661 return err; 7662 7663 if (param->lock) 7664 mutex_lock(param->lock); 7665 7666 if (param->min && val < *param->min) 7667 err = -EINVAL; 7668 7669 if (param->max && val > *param->max) 7670 err = -EINVAL; 7671 7672 if (!err) 7673 *param->val = val; 7674 7675 if (param->lock) 7676 mutex_unlock(param->lock); 7677 7678 if (err) 7679 return err; 7680 7681 return cnt; 7682 } 7683 7684 /* 7685 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct 7686 * @filp: The active open file structure 7687 * @ubuf: The userspace provided buffer to read value into 7688 * @cnt: The maximum number of bytes to read 7689 * @ppos: The current "file" position 7690 * 7691 * This function implements the read interface for a struct trace_min_max_param. 7692 * The filp->private_data must point to a trace_min_max_param struct with valid 7693 * data.
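* The value is formatted as a decimal string with a trailing newline, e.g. a stored value of 500 reads back as "500\n".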
7694 */ 7695 static ssize_t 7696 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 7697 { 7698 struct trace_min_max_param *param = filp->private_data; 7699 char buf[U64_STR_SIZE]; 7700 int len; 7701 u64 val; 7702 7703 if (!param) 7704 return -EFAULT; 7705 7706 val = *param->val; 7707 7708 if (cnt > sizeof(buf)) 7709 cnt = sizeof(buf); 7710 7711 len = snprintf(buf, sizeof(buf), "%llu\n", val); 7712 7713 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); 7714 } 7715 7716 const struct file_operations trace_min_max_fops = { 7717 .open = tracing_open_generic, 7718 .read = trace_min_max_read, 7719 .write = trace_min_max_write, 7720 }; 7721 7722 #define TRACING_LOG_ERRS_MAX 8 7723 #define TRACING_LOG_LOC_MAX 128 7724 7725 #define CMD_PREFIX " Command: " 7726 7727 struct err_info { 7728 const char **errs; /* ptr to loc-specific array of err strings */ 7729 u8 type; /* index into errs -> specific err string */ 7730 u16 pos; /* caret position */ 7731 u64 ts; 7732 }; 7733 7734 struct tracing_log_err { 7735 struct list_head list; 7736 struct err_info info; 7737 char loc[TRACING_LOG_LOC_MAX]; /* err location */ 7738 char *cmd; /* what caused err */ 7739 }; 7740 7741 static DEFINE_MUTEX(tracing_err_log_lock); 7742 7743 static struct tracing_log_err *alloc_tracing_log_err(int len) 7744 { 7745 struct tracing_log_err *err; 7746 7747 err = kzalloc(sizeof(*err), GFP_KERNEL); 7748 if (!err) 7749 return ERR_PTR(-ENOMEM); 7750 7751 err->cmd = kzalloc(len, GFP_KERNEL); 7752 if (!err->cmd) { 7753 kfree(err); 7754 return ERR_PTR(-ENOMEM); 7755 } 7756 7757 return err; 7758 } 7759 7760 static void free_tracing_log_err(struct tracing_log_err *err) 7761 { 7762 kfree(err->cmd); 7763 kfree(err); 7764 } 7765 7766 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr, 7767 int len) 7768 { 7769 struct tracing_log_err *err; 7770 char *cmd; 7771 7772 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) { 7773 err = alloc_tracing_log_err(len); 7774 if (PTR_ERR(err) != -ENOMEM) 7775 tr->n_err_log_entries++; 7776 7777 return err; 7778 } 7779 cmd = kzalloc(len, GFP_KERNEL); 7780 if (!cmd) 7781 return ERR_PTR(-ENOMEM); 7782 err = list_first_entry(&tr->err_log, struct tracing_log_err, list); 7783 kfree(err->cmd); 7784 err->cmd = cmd; 7785 list_del(&err->list); 7786 7787 return err; 7788 } 7789 7790 /** 7791 * err_pos - find the position of a string within a command for error careting 7792 * @cmd: The tracing command that caused the error 7793 * @str: The string to position the caret at within @cmd 7794 * 7795 * Finds the position of the first occurrence of @str within @cmd. The 7796 * return value can be passed to tracing_log_err() for caret placement 7797 * within @cmd. 7798 * 7799 * Returns the index within @cmd of the first occurrence of @str or 0 7800 * if @str was not found. 
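* For example, err_pos("hist:keys=foo", "foo") returns 10, the offset of "foo" within the command.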
7801 */ 7802 unsigned int err_pos(char *cmd, const char *str) 7803 { 7804 char *found; 7805 7806 if (WARN_ON(!strlen(cmd))) 7807 return 0; 7808 7809 found = strstr(cmd, str); 7810 if (found) 7811 return found - cmd; 7812 7813 return 0; 7814 } 7815 7816 /** 7817 * tracing_log_err - write an error to the tracing error log 7818 * @tr: The associated trace array for the error (NULL for top level array) 7819 * @loc: A string describing where the error occurred 7820 * @cmd: The tracing command that caused the error 7821 * @errs: The array of loc-specific static error strings 7822 * @type: The index into errs[], which produces the specific static err string 7823 * @pos: The position the caret should be placed in the cmd 7824 * 7825 * Writes an error into tracing/error_log of the form: 7826 * 7827 * <loc>: error: <text> 7828 * Command: <cmd> 7829 * ^ 7830 * 7831 * tracing/error_log is a small log file containing the last 7832 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated 7833 * unless there has been a tracing error, and the error log can be 7834 * cleared and have its memory freed by writing the empty string in 7835 * truncation mode to it i.e. echo > tracing/error_log. 7836 * 7837 * NOTE: the @errs array along with the @type param are used to 7838 * produce a static error string - this string is not copied and saved 7839 * when the error is logged - only a pointer to it is saved. See 7840 * existing callers for examples of how static strings are typically 7841 * defined for use with tracing_log_err(). 7842 */ 7843 void tracing_log_err(struct trace_array *tr, 7844 const char *loc, const char *cmd, 7845 const char **errs, u8 type, u16 pos) 7846 { 7847 struct tracing_log_err *err; 7848 int len = 0; 7849 7850 if (!tr) 7851 tr = &global_trace; 7852 7853 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1; 7854 7855 guard(mutex)(&tracing_err_log_lock); 7856 7857 err = get_tracing_log_err(tr, len); 7858 if (PTR_ERR(err) == -ENOMEM) 7859 return; 7860 7861 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc); 7862 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd); 7863 7864 err->info.errs = errs; 7865 err->info.type = type; 7866 err->info.pos = pos; 7867 err->info.ts = local_clock(); 7868 7869 list_add_tail(&err->list, &tr->err_log); 7870 } 7871 7872 static void clear_tracing_err_log(struct trace_array *tr) 7873 { 7874 struct tracing_log_err *err, *next; 7875 7876 mutex_lock(&tracing_err_log_lock); 7877 list_for_each_entry_safe(err, next, &tr->err_log, list) { 7878 list_del(&err->list); 7879 free_tracing_log_err(err); 7880 } 7881 7882 tr->n_err_log_entries = 0; 7883 mutex_unlock(&tracing_err_log_lock); 7884 } 7885 7886 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos) 7887 { 7888 struct trace_array *tr = m->private; 7889 7890 mutex_lock(&tracing_err_log_lock); 7891 7892 return seq_list_start(&tr->err_log, *pos); 7893 } 7894 7895 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos) 7896 { 7897 struct trace_array *tr = m->private; 7898 7899 return seq_list_next(v, &tr->err_log, pos); 7900 } 7901 7902 static void tracing_err_log_seq_stop(struct seq_file *m, void *v) 7903 { 7904 mutex_unlock(&tracing_err_log_lock); 7905 } 7906 7907 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos) 7908 { 7909 u16 i; 7910 7911 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++) 7912 seq_putc(m, ' '); 7913 for (i = 0; i < pos; i++) 7914 seq_putc(m, ' '); 7915 seq_puts(m, "^\n"); 7916 } 7917 7918 static int 
tracing_err_log_seq_show(struct seq_file *m, void *v) 7919 { 7920 struct tracing_log_err *err = v; 7921 7922 if (err) { 7923 const char *err_text = err->info.errs[err->info.type]; 7924 u64 sec = err->info.ts; 7925 u32 nsec; 7926 7927 nsec = do_div(sec, NSEC_PER_SEC); 7928 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000, 7929 err->loc, err_text); 7930 seq_printf(m, "%s", err->cmd); 7931 tracing_err_log_show_pos(m, err->info.pos); 7932 } 7933 7934 return 0; 7935 } 7936 7937 static const struct seq_operations tracing_err_log_seq_ops = { 7938 .start = tracing_err_log_seq_start, 7939 .next = tracing_err_log_seq_next, 7940 .stop = tracing_err_log_seq_stop, 7941 .show = tracing_err_log_seq_show 7942 }; 7943 7944 static int tracing_err_log_open(struct inode *inode, struct file *file) 7945 { 7946 struct trace_array *tr = inode->i_private; 7947 int ret = 0; 7948 7949 ret = tracing_check_open_get_tr(tr); 7950 if (ret) 7951 return ret; 7952 7953 /* If this file was opened for write, then erase contents */ 7954 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) 7955 clear_tracing_err_log(tr); 7956 7957 if (file->f_mode & FMODE_READ) { 7958 ret = seq_open(file, &tracing_err_log_seq_ops); 7959 if (!ret) { 7960 struct seq_file *m = file->private_data; 7961 m->private = tr; 7962 } else { 7963 trace_array_put(tr); 7964 } 7965 } 7966 return ret; 7967 } 7968 7969 static ssize_t tracing_err_log_write(struct file *file, 7970 const char __user *buffer, 7971 size_t count, loff_t *ppos) 7972 { 7973 return count; 7974 } 7975 7976 static int tracing_err_log_release(struct inode *inode, struct file *file) 7977 { 7978 struct trace_array *tr = inode->i_private; 7979 7980 trace_array_put(tr); 7981 7982 if (file->f_mode & FMODE_READ) 7983 seq_release(inode, file); 7984 7985 return 0; 7986 } 7987 7988 static const struct file_operations tracing_err_log_fops = { 7989 .open = tracing_err_log_open, 7990 .write = tracing_err_log_write, 7991 .read = seq_read, 7992 .llseek = tracing_lseek, 7993 .release = tracing_err_log_release, 7994 }; 7995 7996 static int tracing_buffers_open(struct inode *inode, struct file *filp) 7997 { 7998 struct trace_array *tr = inode->i_private; 7999 struct ftrace_buffer_info *info; 8000 int ret; 8001 8002 ret = tracing_check_open_get_tr(tr); 8003 if (ret) 8004 return ret; 8005 8006 info = kvzalloc(sizeof(*info), GFP_KERNEL); 8007 if (!info) { 8008 trace_array_put(tr); 8009 return -ENOMEM; 8010 } 8011 8012 mutex_lock(&trace_types_lock); 8013 8014 info->iter.tr = tr; 8015 info->iter.cpu_file = tracing_get_cpu(inode); 8016 info->iter.trace = tr->current_trace; 8017 info->iter.array_buffer = &tr->array_buffer; 8018 info->spare = NULL; 8019 /* Force reading ring buffer for first read */ 8020 info->read = (unsigned int)-1; 8021 8022 filp->private_data = info; 8023 8024 tr->trace_ref++; 8025 8026 mutex_unlock(&trace_types_lock); 8027 8028 ret = nonseekable_open(inode, filp); 8029 if (ret < 0) 8030 trace_array_put(tr); 8031 8032 return ret; 8033 } 8034 8035 static __poll_t 8036 tracing_buffers_poll(struct file *filp, poll_table *poll_table) 8037 { 8038 struct ftrace_buffer_info *info = filp->private_data; 8039 struct trace_iterator *iter = &info->iter; 8040 8041 return trace_poll(iter, filp, poll_table); 8042 } 8043 8044 static ssize_t 8045 tracing_buffers_read(struct file *filp, char __user *ubuf, 8046 size_t count, loff_t *ppos) 8047 { 8048 struct ftrace_buffer_info *info = filp->private_data; 8049 struct trace_iterator *iter = &info->iter; 8050 void *trace_data; 8051 int page_size; 8052 
ssize_t ret = 0; 8053 ssize_t size; 8054 8055 if (!count) 8056 return 0; 8057 8058 #ifdef CONFIG_TRACER_MAX_TRACE 8059 if (iter->snapshot && iter->tr->current_trace->use_max_tr) 8060 return -EBUSY; 8061 #endif 8062 8063 page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer); 8064 8065 /* Make sure the spare matches the current sub buffer size */ 8066 if (info->spare) { 8067 if (page_size != info->spare_size) { 8068 ring_buffer_free_read_page(iter->array_buffer->buffer, 8069 info->spare_cpu, info->spare); 8070 info->spare = NULL; 8071 } 8072 } 8073 8074 if (!info->spare) { 8075 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer, 8076 iter->cpu_file); 8077 if (IS_ERR(info->spare)) { 8078 ret = PTR_ERR(info->spare); 8079 info->spare = NULL; 8080 } else { 8081 info->spare_cpu = iter->cpu_file; 8082 info->spare_size = page_size; 8083 } 8084 } 8085 if (!info->spare) 8086 return ret; 8087 8088 /* Do we have previous read data to read? */ 8089 if (info->read < page_size) 8090 goto read; 8091 8092 again: 8093 trace_access_lock(iter->cpu_file); 8094 ret = ring_buffer_read_page(iter->array_buffer->buffer, 8095 info->spare, 8096 count, 8097 iter->cpu_file, 0); 8098 trace_access_unlock(iter->cpu_file); 8099 8100 if (ret < 0) { 8101 if (trace_empty(iter) && !iter->closed) { 8102 if ((filp->f_flags & O_NONBLOCK)) 8103 return -EAGAIN; 8104 8105 ret = wait_on_pipe(iter, 0); 8106 if (ret) 8107 return ret; 8108 8109 goto again; 8110 } 8111 return 0; 8112 } 8113 8114 info->read = 0; 8115 read: 8116 size = page_size - info->read; 8117 if (size > count) 8118 size = count; 8119 trace_data = ring_buffer_read_page_data(info->spare); 8120 ret = copy_to_user(ubuf, trace_data + info->read, size); 8121 if (ret == size) 8122 return -EFAULT; 8123 8124 size -= ret; 8125 8126 *ppos += size; 8127 info->read += size; 8128 8129 return size; 8130 } 8131 8132 static int tracing_buffers_flush(struct file *file, fl_owner_t id) 8133 { 8134 struct ftrace_buffer_info *info = file->private_data; 8135 struct trace_iterator *iter = &info->iter; 8136 8137 iter->closed = true; 8138 /* Make sure the waiters see the new wait_index */ 8139 (void)atomic_fetch_inc_release(&iter->wait_index); 8140 8141 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); 8142 8143 return 0; 8144 } 8145 8146 static int tracing_buffers_release(struct inode *inode, struct file *file) 8147 { 8148 struct ftrace_buffer_info *info = file->private_data; 8149 struct trace_iterator *iter = &info->iter; 8150 8151 mutex_lock(&trace_types_lock); 8152 8153 iter->tr->trace_ref--; 8154 8155 __trace_array_put(iter->tr); 8156 8157 if (info->spare) 8158 ring_buffer_free_read_page(iter->array_buffer->buffer, 8159 info->spare_cpu, info->spare); 8160 kvfree(info); 8161 8162 mutex_unlock(&trace_types_lock); 8163 8164 return 0; 8165 } 8166 8167 struct buffer_ref { 8168 struct trace_buffer *buffer; 8169 void *page; 8170 int cpu; 8171 refcount_t refcount; 8172 }; 8173 8174 static void buffer_ref_release(struct buffer_ref *ref) 8175 { 8176 if (!refcount_dec_and_test(&ref->refcount)) 8177 return; 8178 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); 8179 kfree(ref); 8180 } 8181 8182 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, 8183 struct pipe_buffer *buf) 8184 { 8185 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 8186 8187 buffer_ref_release(ref); 8188 buf->private = 0; 8189 } 8190 8191 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe, 8192 struct pipe_buffer *buf) 8193 { 8194 
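/* Called when the pipe code duplicates a reference to this read page (e.g. via tee(2)). Refuse if the refcount is close to overflowing; otherwise take another reference. */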
struct buffer_ref *ref = (struct buffer_ref *)buf->private; 8195 8196 if (refcount_read(&ref->refcount) > INT_MAX/2) 8197 return false; 8198 8199 refcount_inc(&ref->refcount); 8200 return true; 8201 } 8202 8203 /* Pipe buffer operations for a buffer. */ 8204 static const struct pipe_buf_operations buffer_pipe_buf_ops = { 8205 .release = buffer_pipe_buf_release, 8206 .get = buffer_pipe_buf_get, 8207 }; 8208 8209 /* 8210 * Callback from splice_to_pipe(), if we need to release some pages 8211 * at the end of the spd in case we errored out while filling the pipe. 8212 */ 8213 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) 8214 { 8215 struct buffer_ref *ref = 8216 (struct buffer_ref *)spd->partial[i].private; 8217 8218 buffer_ref_release(ref); 8219 spd->partial[i].private = 0; 8220 } 8221 8222 static ssize_t 8223 tracing_buffers_splice_read(struct file *file, loff_t *ppos, 8224 struct pipe_inode_info *pipe, size_t len, 8225 unsigned int flags) 8226 { 8227 struct ftrace_buffer_info *info = file->private_data; 8228 struct trace_iterator *iter = &info->iter; 8229 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 8230 struct page *pages_def[PIPE_DEF_BUFFERS]; 8231 struct splice_pipe_desc spd = { 8232 .pages = pages_def, 8233 .partial = partial_def, 8234 .nr_pages_max = PIPE_DEF_BUFFERS, 8235 .ops = &buffer_pipe_buf_ops, 8236 .spd_release = buffer_spd_release, 8237 }; 8238 struct buffer_ref *ref; 8239 bool woken = false; 8240 int page_size; 8241 int entries, i; 8242 ssize_t ret = 0; 8243 8244 #ifdef CONFIG_TRACER_MAX_TRACE 8245 if (iter->snapshot && iter->tr->current_trace->use_max_tr) 8246 return -EBUSY; 8247 #endif 8248 8249 page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer); 8250 if (*ppos & (page_size - 1)) 8251 return -EINVAL; 8252 8253 if (len & (page_size - 1)) { 8254 if (len < page_size) 8255 return -EINVAL; 8256 len &= (~(page_size - 1)); 8257 } 8258 8259 if (splice_grow_spd(pipe, &spd)) 8260 return -ENOMEM; 8261 8262 again: 8263 trace_access_lock(iter->cpu_file); 8264 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); 8265 8266 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) { 8267 struct page *page; 8268 int r; 8269 8270 ref = kzalloc(sizeof(*ref), GFP_KERNEL); 8271 if (!ref) { 8272 ret = -ENOMEM; 8273 break; 8274 } 8275 8276 refcount_set(&ref->refcount, 1); 8277 ref->buffer = iter->array_buffer->buffer; 8278 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); 8279 if (IS_ERR(ref->page)) { 8280 ret = PTR_ERR(ref->page); 8281 ref->page = NULL; 8282 kfree(ref); 8283 break; 8284 } 8285 ref->cpu = iter->cpu_file; 8286 8287 r = ring_buffer_read_page(ref->buffer, ref->page, 8288 len, iter->cpu_file, 1); 8289 if (r < 0) { 8290 ring_buffer_free_read_page(ref->buffer, ref->cpu, 8291 ref->page); 8292 kfree(ref); 8293 break; 8294 } 8295 8296 page = virt_to_page(ring_buffer_read_page_data(ref->page)); 8297 8298 spd.pages[i] = page; 8299 spd.partial[i].len = page_size; 8300 spd.partial[i].offset = 0; 8301 spd.partial[i].private = (unsigned long)ref; 8302 spd.nr_pages++; 8303 *ppos += page_size; 8304 8305 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); 8306 } 8307 8308 trace_access_unlock(iter->cpu_file); 8309 spd.nr_pages = i; 8310 8311 /* did we read anything?
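If not: bail out if we hit an error, already waited once, or were asked not to block; otherwise wait for new data and try again.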
*/ 8312 if (!spd.nr_pages) { 8313 8314 if (ret) 8315 goto out; 8316 8317 if (woken) 8318 goto out; 8319 8320 ret = -EAGAIN; 8321 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) 8322 goto out; 8323 8324 ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent); 8325 if (ret) 8326 goto out; 8327 8328 /* No need to wait after waking up when tracing is off */ 8329 if (!tracer_tracing_is_on(iter->tr)) 8330 goto out; 8331 8332 /* Iterate one more time to collect any new data then exit */ 8333 woken = true; 8334 8335 goto again; 8336 } 8337 8338 ret = splice_to_pipe(pipe, &spd); 8339 out: 8340 splice_shrink_spd(&spd); 8341 8342 return ret; 8343 } 8344 8345 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 8346 { 8347 struct ftrace_buffer_info *info = file->private_data; 8348 struct trace_iterator *iter = &info->iter; 8349 int err; 8350 8351 if (cmd == TRACE_MMAP_IOCTL_GET_READER) { 8352 if (!(file->f_flags & O_NONBLOCK)) { 8353 err = ring_buffer_wait(iter->array_buffer->buffer, 8354 iter->cpu_file, 8355 iter->tr->buffer_percent, 8356 NULL, NULL); 8357 if (err) 8358 return err; 8359 } 8360 8361 return ring_buffer_map_get_reader(iter->array_buffer->buffer, 8362 iter->cpu_file); 8363 } else if (cmd) { 8364 return -ENOTTY; 8365 } 8366 8367 /* 8368 * An ioctl call with cmd 0 to the ring buffer file will wake up all 8369 * waiters 8370 */ 8371 mutex_lock(&trace_types_lock); 8372 8373 /* Make sure the waiters see the new wait_index */ 8374 (void)atomic_fetch_inc_release(&iter->wait_index); 8375 8376 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); 8377 8378 mutex_unlock(&trace_types_lock); 8379 return 0; 8380 } 8381 8382 #ifdef CONFIG_TRACER_MAX_TRACE 8383 static int get_snapshot_map(struct trace_array *tr) 8384 { 8385 int err = 0; 8386 8387 /* 8388 * Called with mmap_lock held. lockdep would be unhappy if we were to 8389 * take trace_types_lock now. Instead, use the dedicated 8390 * snapshot_trigger_lock.
8391 */ 8392 spin_lock(&tr->snapshot_trigger_lock); 8393 8394 if (tr->snapshot || tr->mapped == UINT_MAX) 8395 err = -EBUSY; 8396 else 8397 tr->mapped++; 8398 8399 spin_unlock(&tr->snapshot_trigger_lock); 8400 8401 /* Wait for update_max_tr() to observe iter->tr->mapped */ 8402 if (tr->mapped == 1) 8403 synchronize_rcu(); 8404 8405 return err; 8406 8407 } 8408 static void put_snapshot_map(struct trace_array *tr) 8409 { 8410 spin_lock(&tr->snapshot_trigger_lock); 8411 if (!WARN_ON(!tr->mapped)) 8412 tr->mapped--; 8413 spin_unlock(&tr->snapshot_trigger_lock); 8414 } 8415 #else 8416 static inline int get_snapshot_map(struct trace_array *tr) { return 0; } 8417 static inline void put_snapshot_map(struct trace_array *tr) { } 8418 #endif 8419 8420 static void tracing_buffers_mmap_close(struct vm_area_struct *vma) 8421 { 8422 struct ftrace_buffer_info *info = vma->vm_file->private_data; 8423 struct trace_iterator *iter = &info->iter; 8424 8425 WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file)); 8426 put_snapshot_map(iter->tr); 8427 } 8428 8429 static const struct vm_operations_struct tracing_buffers_vmops = { 8430 .close = tracing_buffers_mmap_close, 8431 }; 8432 8433 static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma) 8434 { 8435 struct ftrace_buffer_info *info = filp->private_data; 8436 struct trace_iterator *iter = &info->iter; 8437 int ret = 0; 8438 8439 /* Currently the boot mapped buffer is not supported for mmap */ 8440 if (iter->tr->flags & TRACE_ARRAY_FL_BOOT) 8441 return -ENODEV; 8442 8443 ret = get_snapshot_map(iter->tr); 8444 if (ret) 8445 return ret; 8446 8447 ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma); 8448 if (ret) 8449 put_snapshot_map(iter->tr); 8450 8451 vma->vm_ops = &tracing_buffers_vmops; 8452 8453 return ret; 8454 } 8455 8456 static const struct file_operations tracing_buffers_fops = { 8457 .open = tracing_buffers_open, 8458 .read = tracing_buffers_read, 8459 .poll = tracing_buffers_poll, 8460 .release = tracing_buffers_release, 8461 .flush = tracing_buffers_flush, 8462 .splice_read = tracing_buffers_splice_read, 8463 .unlocked_ioctl = tracing_buffers_ioctl, 8464 .mmap = tracing_buffers_mmap, 8465 }; 8466 8467 static ssize_t 8468 tracing_stats_read(struct file *filp, char __user *ubuf, 8469 size_t count, loff_t *ppos) 8470 { 8471 struct inode *inode = file_inode(filp); 8472 struct trace_array *tr = inode->i_private; 8473 struct array_buffer *trace_buf = &tr->array_buffer; 8474 int cpu = tracing_get_cpu(inode); 8475 struct trace_seq *s; 8476 unsigned long cnt; 8477 unsigned long long t; 8478 unsigned long usec_rem; 8479 8480 s = kmalloc(sizeof(*s), GFP_KERNEL); 8481 if (!s) 8482 return -ENOMEM; 8483 8484 trace_seq_init(s); 8485 8486 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu); 8487 trace_seq_printf(s, "entries: %ld\n", cnt); 8488 8489 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu); 8490 trace_seq_printf(s, "overrun: %ld\n", cnt); 8491 8492 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); 8493 trace_seq_printf(s, "commit overrun: %ld\n", cnt); 8494 8495 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); 8496 trace_seq_printf(s, "bytes: %ld\n", cnt); 8497 8498 if (trace_clocks[tr->clock_id].in_ns) { 8499 /* local or global for trace_clock */ 8500 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 8501 usec_rem = do_div(t, USEC_PER_SEC); 8502 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", 8503 t, usec_rem); 8504 8505 t = 
ns2usecs(ring_buffer_time_stamp(trace_buf->buffer)); 8506 usec_rem = do_div(t, USEC_PER_SEC); 8507 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); 8508 } else { 8509 /* counter or tsc mode for trace_clock */ 8510 trace_seq_printf(s, "oldest event ts: %llu\n", 8511 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 8512 8513 trace_seq_printf(s, "now ts: %llu\n", 8514 ring_buffer_time_stamp(trace_buf->buffer)); 8515 } 8516 8517 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); 8518 trace_seq_printf(s, "dropped events: %ld\n", cnt); 8519 8520 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); 8521 trace_seq_printf(s, "read events: %ld\n", cnt); 8522 8523 count = simple_read_from_buffer(ubuf, count, ppos, 8524 s->buffer, trace_seq_used(s)); 8525 8526 kfree(s); 8527 8528 return count; 8529 } 8530 8531 static const struct file_operations tracing_stats_fops = { 8532 .open = tracing_open_generic_tr, 8533 .read = tracing_stats_read, 8534 .llseek = generic_file_llseek, 8535 .release = tracing_release_generic_tr, 8536 }; 8537 8538 #ifdef CONFIG_DYNAMIC_FTRACE 8539 8540 static ssize_t 8541 tracing_read_dyn_info(struct file *filp, char __user *ubuf, 8542 size_t cnt, loff_t *ppos) 8543 { 8544 ssize_t ret; 8545 char *buf; 8546 int r; 8547 8548 /* 512 should be plenty to hold the amount needed */ 8549 #define DYN_INFO_BUF_SIZE 512 8550 8551 buf = kmalloc(DYN_INFO_BUF_SIZE, GFP_KERNEL); 8552 if (!buf) 8553 return -ENOMEM; 8554 8555 r = scnprintf(buf, DYN_INFO_BUF_SIZE, 8556 "%ld pages:%ld groups: %ld\n" 8557 "ftrace boot update time = %llu (ns)\n" 8558 "ftrace module total update time = %llu (ns)\n", 8559 ftrace_update_tot_cnt, 8560 ftrace_number_of_pages, 8561 ftrace_number_of_groups, 8562 ftrace_update_time, 8563 ftrace_total_mod_time); 8564 8565 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 8566 kfree(buf); 8567 return ret; 8568 } 8569 8570 static const struct file_operations tracing_dyn_info_fops = { 8571 .open = tracing_open_generic, 8572 .read = tracing_read_dyn_info, 8573 .llseek = generic_file_llseek, 8574 }; 8575 #endif /* CONFIG_DYNAMIC_FTRACE */ 8576 8577 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) 8578 static void 8579 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, 8580 struct trace_array *tr, struct ftrace_probe_ops *ops, 8581 void *data) 8582 { 8583 tracing_snapshot_instance(tr); 8584 } 8585 8586 static void 8587 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, 8588 struct trace_array *tr, struct ftrace_probe_ops *ops, 8589 void *data) 8590 { 8591 struct ftrace_func_mapper *mapper = data; 8592 long *count = NULL; 8593 8594 if (mapper) 8595 count = (long *)ftrace_func_mapper_find_ip(mapper, ip); 8596 8597 if (count) { 8598 8599 if (*count <= 0) 8600 return; 8601 8602 (*count)--; 8603 } 8604 8605 tracing_snapshot_instance(tr); 8606 } 8607 8608 static int 8609 ftrace_snapshot_print(struct seq_file *m, unsigned long ip, 8610 struct ftrace_probe_ops *ops, void *data) 8611 { 8612 struct ftrace_func_mapper *mapper = data; 8613 long *count = NULL; 8614 8615 seq_printf(m, "%ps:", (void *)ip); 8616 8617 seq_puts(m, "snapshot"); 8618 8619 if (mapper) 8620 count = (long *)ftrace_func_mapper_find_ip(mapper, ip); 8621 8622 if (count) 8623 seq_printf(m, ":count=%ld\n", *count); 8624 else 8625 seq_puts(m, ":unlimited\n"); 8626 8627 return 0; 8628 } 8629 8630 static int 8631 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, 8632 unsigned long ip, void *init_data, void **data) 8633 { 8634 
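/* On the first call *data is NULL: allocate the ip->count mapper used when a count is given (e.g. func:snapshot:N) and stash it for subsequent calls. */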
struct ftrace_func_mapper *mapper = *data; 8635 8636 if (!mapper) { 8637 mapper = allocate_ftrace_func_mapper(); 8638 if (!mapper) 8639 return -ENOMEM; 8640 *data = mapper; 8641 } 8642 8643 return ftrace_func_mapper_add_ip(mapper, ip, init_data); 8644 } 8645 8646 static void 8647 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, 8648 unsigned long ip, void *data) 8649 { 8650 struct ftrace_func_mapper *mapper = data; 8651 8652 if (!ip) { 8653 if (!mapper) 8654 return; 8655 free_ftrace_func_mapper(mapper, NULL); 8656 return; 8657 } 8658 8659 ftrace_func_mapper_remove_ip(mapper, ip); 8660 } 8661 8662 static struct ftrace_probe_ops snapshot_probe_ops = { 8663 .func = ftrace_snapshot, 8664 .print = ftrace_snapshot_print, 8665 }; 8666 8667 static struct ftrace_probe_ops snapshot_count_probe_ops = { 8668 .func = ftrace_count_snapshot, 8669 .print = ftrace_snapshot_print, 8670 .init = ftrace_snapshot_init, 8671 .free = ftrace_snapshot_free, 8672 }; 8673 8674 static int 8675 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, 8676 char *glob, char *cmd, char *param, int enable) 8677 { 8678 struct ftrace_probe_ops *ops; 8679 void *count = (void *)-1; 8680 char *number; 8681 int ret; 8682 8683 if (!tr) 8684 return -ENODEV; 8685 8686 /* hash funcs only work with set_ftrace_filter */ 8687 if (!enable) 8688 return -EINVAL; 8689 8690 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; 8691 8692 if (glob[0] == '!') { 8693 ret = unregister_ftrace_function_probe_func(glob+1, tr, ops); 8694 if (!ret) 8695 tracing_disarm_snapshot(tr); 8696 8697 return ret; 8698 } 8699 8700 if (!param) 8701 goto out_reg; 8702 8703 number = strsep(&param, ":"); 8704 8705 if (!strlen(number)) 8706 goto out_reg; 8707 8708 /* 8709 * We use the callback data field (which is a pointer) 8710 * as our counter. 8711 */ 8712 ret = kstrtoul(number, 0, (unsigned long *)&count); 8713 if (ret) 8714 return ret; 8715 8716 out_reg: 8717 ret = tracing_arm_snapshot(tr); 8718 if (ret < 0) 8719 goto out; 8720 8721 ret = register_ftrace_function_probe(glob, tr, ops, count); 8722 if (ret < 0) 8723 tracing_disarm_snapshot(tr); 8724 out: 8725 return ret < 0 ?
ret : 0; 8726 } 8727 8728 static struct ftrace_func_command ftrace_snapshot_cmd = { 8729 .name = "snapshot", 8730 .func = ftrace_trace_snapshot_callback, 8731 }; 8732 8733 static __init int register_snapshot_cmd(void) 8734 { 8735 return register_ftrace_command(&ftrace_snapshot_cmd); 8736 } 8737 #else 8738 static inline __init int register_snapshot_cmd(void) { return 0; } 8739 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ 8740 8741 static struct dentry *tracing_get_dentry(struct trace_array *tr) 8742 { 8743 if (WARN_ON(!tr->dir)) 8744 return ERR_PTR(-ENODEV); 8745 8746 /* Top directory uses NULL as the parent */ 8747 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) 8748 return NULL; 8749 8750 /* All sub buffers have a descriptor */ 8751 return tr->dir; 8752 } 8753 8754 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) 8755 { 8756 struct dentry *d_tracer; 8757 8758 if (tr->percpu_dir) 8759 return tr->percpu_dir; 8760 8761 d_tracer = tracing_get_dentry(tr); 8762 if (IS_ERR(d_tracer)) 8763 return NULL; 8764 8765 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); 8766 8767 MEM_FAIL(!tr->percpu_dir, 8768 "Could not create tracefs directory 'per_cpu/%d'\n", cpu); 8769 8770 return tr->percpu_dir; 8771 } 8772 8773 static struct dentry * 8774 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, 8775 void *data, long cpu, const struct file_operations *fops) 8776 { 8777 struct dentry *ret = trace_create_file(name, mode, parent, data, fops); 8778 8779 if (ret) /* See tracing_get_cpu() */ 8780 d_inode(ret)->i_cdev = (void *)(cpu + 1); 8781 return ret; 8782 } 8783 8784 static void 8785 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) 8786 { 8787 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); 8788 struct dentry *d_cpu; 8789 char cpu_dir[30]; /* 30 characters should be more than enough */ 8790 8791 if (!d_percpu) 8792 return; 8793 8794 snprintf(cpu_dir, 30, "cpu%ld", cpu); 8795 d_cpu = tracefs_create_dir(cpu_dir, d_percpu); 8796 if (!d_cpu) { 8797 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir); 8798 return; 8799 } 8800 8801 /* per cpu trace_pipe */ 8802 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu, 8803 tr, cpu, &tracing_pipe_fops); 8804 8805 /* per cpu trace */ 8806 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu, 8807 tr, cpu, &tracing_fops); 8808 8809 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu, 8810 tr, cpu, &tracing_buffers_fops); 8811 8812 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu, 8813 tr, cpu, &tracing_stats_fops); 8814 8815 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu, 8816 tr, cpu, &tracing_entries_fops); 8817 8818 if (tr->range_addr_start) 8819 trace_create_cpu_file("buffer_meta", TRACE_MODE_READ, d_cpu, 8820 tr, cpu, &tracing_buffer_meta_fops); 8821 #ifdef CONFIG_TRACER_SNAPSHOT 8822 if (!tr->range_addr_start) { 8823 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu, 8824 tr, cpu, &snapshot_fops); 8825 8826 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu, 8827 tr, cpu, &snapshot_raw_fops); 8828 } 8829 #endif 8830 } 8831 8832 #ifdef CONFIG_FTRACE_SELFTEST 8833 /* Let selftest have access to static functions in this file */ 8834 #include "trace_selftest.c" 8835 #endif 8836 8837 static ssize_t 8838 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, 8839 loff_t *ppos) 8840 { 8841 struct trace_option_dentry *topt = filp->private_data; 8842 char *buf; 8843 8844 if 
(topt->flags->val & topt->opt->bit) 8845 buf = "1\n"; 8846 else 8847 buf = "0\n"; 8848 8849 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 8850 } 8851 8852 static ssize_t 8853 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, 8854 loff_t *ppos) 8855 { 8856 struct trace_option_dentry *topt = filp->private_data; 8857 unsigned long val; 8858 int ret; 8859 8860 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 8861 if (ret) 8862 return ret; 8863 8864 if (val != 0 && val != 1) 8865 return -EINVAL; 8866 8867 if (!!(topt->flags->val & topt->opt->bit) != val) { 8868 mutex_lock(&trace_types_lock); 8869 ret = __set_tracer_option(topt->tr, topt->flags, 8870 topt->opt, !val); 8871 mutex_unlock(&trace_types_lock); 8872 if (ret) 8873 return ret; 8874 } 8875 8876 *ppos += cnt; 8877 8878 return cnt; 8879 } 8880 8881 static int tracing_open_options(struct inode *inode, struct file *filp) 8882 { 8883 struct trace_option_dentry *topt = inode->i_private; 8884 int ret; 8885 8886 ret = tracing_check_open_get_tr(topt->tr); 8887 if (ret) 8888 return ret; 8889 8890 filp->private_data = inode->i_private; 8891 return 0; 8892 } 8893 8894 static int tracing_release_options(struct inode *inode, struct file *file) 8895 { 8896 struct trace_option_dentry *topt = file->private_data; 8897 8898 trace_array_put(topt->tr); 8899 return 0; 8900 } 8901 8902 static const struct file_operations trace_options_fops = { 8903 .open = tracing_open_options, 8904 .read = trace_options_read, 8905 .write = trace_options_write, 8906 .llseek = generic_file_llseek, 8907 .release = tracing_release_options, 8908 }; 8909 8910 /* 8911 * In order to pass in both the trace_array descriptor as well as the index 8912 * to the flag that the trace option file represents, the trace_array 8913 * has a character array of trace_flags_index[], which holds the index 8914 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc. 8915 * The address of this character array is passed to the flag option file 8916 * read/write callbacks. 8917 * 8918 * In order to extract both the index and the trace_array descriptor, 8919 * get_tr_index() uses the following algorithm. 8920 * 8921 * idx = *ptr; 8922 * 8923 * This works because the pointer passed in holds the address of the index, 8924 * and the value stored there is the index itself (remember, index[1] == 1). 8925 * 8926 * Then, by subtracting that index from the pointer, we get the address of 8927 * the start of the index array: 8928 * 8929 * ptr - idx == &index[0] 8930 * 8931 * Then a simple container_of() from that pointer gets us to the 8932 * trace_array descriptor.
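* * For example (illustrative): if data points at tr->trace_flags_index[3], then idx == 3 and data - idx == &tr->trace_flags_index[0], and container_of() of that address yields tr.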
8933 */ 8934 static void get_tr_index(void *data, struct trace_array **ptr, 8935 unsigned int *pindex) 8936 { 8937 *pindex = *(unsigned char *)data; 8938 8939 *ptr = container_of(data - *pindex, struct trace_array, 8940 trace_flags_index); 8941 } 8942 8943 static ssize_t 8944 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, 8945 loff_t *ppos) 8946 { 8947 void *tr_index = filp->private_data; 8948 struct trace_array *tr; 8949 unsigned int index; 8950 char *buf; 8951 8952 get_tr_index(tr_index, &tr, &index); 8953 8954 if (tr->trace_flags & (1 << index)) 8955 buf = "1\n"; 8956 else 8957 buf = "0\n"; 8958 8959 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 8960 } 8961 8962 static ssize_t 8963 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, 8964 loff_t *ppos) 8965 { 8966 void *tr_index = filp->private_data; 8967 struct trace_array *tr; 8968 unsigned int index; 8969 unsigned long val; 8970 int ret; 8971 8972 get_tr_index(tr_index, &tr, &index); 8973 8974 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 8975 if (ret) 8976 return ret; 8977 8978 if (val != 0 && val != 1) 8979 return -EINVAL; 8980 8981 mutex_lock(&event_mutex); 8982 mutex_lock(&trace_types_lock); 8983 ret = set_tracer_flag(tr, 1 << index, val); 8984 mutex_unlock(&trace_types_lock); 8985 mutex_unlock(&event_mutex); 8986 8987 if (ret < 0) 8988 return ret; 8989 8990 *ppos += cnt; 8991 8992 return cnt; 8993 } 8994 8995 static const struct file_operations trace_options_core_fops = { 8996 .open = tracing_open_generic, 8997 .read = trace_options_core_read, 8998 .write = trace_options_core_write, 8999 .llseek = generic_file_llseek, 9000 }; 9001 9002 struct dentry *trace_create_file(const char *name, 9003 umode_t mode, 9004 struct dentry *parent, 9005 void *data, 9006 const struct file_operations *fops) 9007 { 9008 struct dentry *ret; 9009 9010 ret = tracefs_create_file(name, mode, parent, data, fops); 9011 if (!ret) 9012 pr_warn("Could not create tracefs '%s' entry\n", name); 9013 9014 return ret; 9015 } 9016 9017 9018 static struct dentry *trace_options_init_dentry(struct trace_array *tr) 9019 { 9020 struct dentry *d_tracer; 9021 9022 if (tr->options) 9023 return tr->options; 9024 9025 d_tracer = tracing_get_dentry(tr); 9026 if (IS_ERR(d_tracer)) 9027 return NULL; 9028 9029 tr->options = tracefs_create_dir("options", d_tracer); 9030 if (!tr->options) { 9031 pr_warn("Could not create tracefs directory 'options'\n"); 9032 return NULL; 9033 } 9034 9035 return tr->options; 9036 } 9037 9038 static void 9039 create_trace_option_file(struct trace_array *tr, 9040 struct trace_option_dentry *topt, 9041 struct tracer_flags *flags, 9042 struct tracer_opt *opt) 9043 { 9044 struct dentry *t_options; 9045 9046 t_options = trace_options_init_dentry(tr); 9047 if (!t_options) 9048 return; 9049 9050 topt->flags = flags; 9051 topt->opt = opt; 9052 topt->tr = tr; 9053 9054 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE, 9055 t_options, topt, &trace_options_fops); 9056 9057 } 9058 9059 static void 9060 create_trace_option_files(struct trace_array *tr, struct tracer *tracer) 9061 { 9062 struct trace_option_dentry *topts; 9063 struct trace_options *tr_topts; 9064 struct tracer_flags *flags; 9065 struct tracer_opt *opts; 9066 int cnt; 9067 int i; 9068 9069 if (!tracer) 9070 return; 9071 9072 flags = tracer->flags; 9073 9074 if (!flags || !flags->opts) 9075 return; 9076 9077 /* 9078 * If this is an instance, only create flags for tracers 9079 * the instance may have. 
9080 */ 9081 if (!trace_ok_for_array(tracer, tr)) 9082 return; 9083 9084 for (i = 0; i < tr->nr_topts; i++) { 9085 /* Make sure there are no duplicate flags. */ 9086 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) 9087 return; 9088 } 9089 9090 opts = flags->opts; 9091 9092 for (cnt = 0; opts[cnt].name; cnt++) 9093 ; 9094 9095 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); 9096 if (!topts) 9097 return; 9098 9099 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), 9100 GFP_KERNEL); 9101 if (!tr_topts) { 9102 kfree(topts); 9103 return; 9104 } 9105 9106 tr->topts = tr_topts; 9107 tr->topts[tr->nr_topts].tracer = tracer; 9108 tr->topts[tr->nr_topts].topts = topts; 9109 tr->nr_topts++; 9110 9111 for (cnt = 0; opts[cnt].name; cnt++) { 9112 create_trace_option_file(tr, &topts[cnt], flags, 9113 &opts[cnt]); 9114 MEM_FAIL(topts[cnt].entry == NULL, 9115 "Failed to create trace option: %s", 9116 opts[cnt].name); 9117 } 9118 } 9119 9120 static struct dentry * 9121 create_trace_option_core_file(struct trace_array *tr, 9122 const char *option, long index) 9123 { 9124 struct dentry *t_options; 9125 9126 t_options = trace_options_init_dentry(tr); 9127 if (!t_options) 9128 return NULL; 9129 9130 return trace_create_file(option, TRACE_MODE_WRITE, t_options, 9131 (void *)&tr->trace_flags_index[index], 9132 &trace_options_core_fops); 9133 } 9134 9135 static void create_trace_options_dir(struct trace_array *tr) 9136 { 9137 struct dentry *t_options; 9138 bool top_level = tr == &global_trace; 9139 int i; 9140 9141 t_options = trace_options_init_dentry(tr); 9142 if (!t_options) 9143 return; 9144 9145 for (i = 0; trace_options[i]; i++) { 9146 if (top_level || 9147 !((1 << i) & TOP_LEVEL_TRACE_FLAGS)) 9148 create_trace_option_core_file(tr, trace_options[i], i); 9149 } 9150 } 9151 9152 static ssize_t 9153 rb_simple_read(struct file *filp, char __user *ubuf, 9154 size_t cnt, loff_t *ppos) 9155 { 9156 struct trace_array *tr = filp->private_data; 9157 char buf[64]; 9158 int r; 9159 9160 r = tracer_tracing_is_on(tr); 9161 r = sprintf(buf, "%d\n", r); 9162 9163 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 9164 } 9165 9166 static ssize_t 9167 rb_simple_write(struct file *filp, const char __user *ubuf, 9168 size_t cnt, loff_t *ppos) 9169 { 9170 struct trace_array *tr = filp->private_data; 9171 struct trace_buffer *buffer = tr->array_buffer.buffer; 9172 unsigned long val; 9173 int ret; 9174 9175 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 9176 if (ret) 9177 return ret; 9178 9179 if (buffer) { 9180 mutex_lock(&trace_types_lock); 9181 if (!!val == tracer_tracing_is_on(tr)) { 9182 val = 0; /* do nothing */ 9183 } else if (val) { 9184 tracer_tracing_on(tr); 9185 if (tr->current_trace->start) 9186 tr->current_trace->start(tr); 9187 } else { 9188 tracer_tracing_off(tr); 9189 if (tr->current_trace->stop) 9190 tr->current_trace->stop(tr); 9191 /* Wake up any waiters */ 9192 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS); 9193 } 9194 mutex_unlock(&trace_types_lock); 9195 } 9196 9197 (*ppos)++; 9198 9199 return cnt; 9200 } 9201 9202 static const struct file_operations rb_simple_fops = { 9203 .open = tracing_open_generic_tr, 9204 .read = rb_simple_read, 9205 .write = rb_simple_write, 9206 .release = tracing_release_generic_tr, 9207 .llseek = default_llseek, 9208 }; 9209 9210 static ssize_t 9211 buffer_percent_read(struct file *filp, char __user *ubuf, 9212 size_t cnt, loff_t *ppos) 9213 { 9214 struct trace_array *tr = filp->private_data; 9215 char buf[64]; 9216 int r; 9217
9218 r = tr->buffer_percent; 9219 r = sprintf(buf, "%d\n", r); 9220 9221 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 9222 } 9223 9224 static ssize_t 9225 buffer_percent_write(struct file *filp, const char __user *ubuf, 9226 size_t cnt, loff_t *ppos) 9227 { 9228 struct trace_array *tr = filp->private_data; 9229 unsigned long val; 9230 int ret; 9231 9232 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 9233 if (ret) 9234 return ret; 9235 9236 if (val > 100) 9237 return -EINVAL; 9238 9239 tr->buffer_percent = val; 9240 9241 (*ppos)++; 9242 9243 return cnt; 9244 } 9245 9246 static const struct file_operations buffer_percent_fops = { 9247 .open = tracing_open_generic_tr, 9248 .read = buffer_percent_read, 9249 .write = buffer_percent_write, 9250 .release = tracing_release_generic_tr, 9251 .llseek = default_llseek, 9252 }; 9253 9254 static ssize_t 9255 buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 9256 { 9257 struct trace_array *tr = filp->private_data; 9258 size_t size; 9259 char buf[64]; 9260 int order; 9261 int r; 9262 9263 order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer); 9264 size = (PAGE_SIZE << order) / 1024; 9265 9266 r = sprintf(buf, "%zd\n", size); 9267 9268 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 9269 } 9270 9271 static ssize_t 9272 buffer_subbuf_size_write(struct file *filp, const char __user *ubuf, 9273 size_t cnt, loff_t *ppos) 9274 { 9275 struct trace_array *tr = filp->private_data; 9276 unsigned long val; 9277 int old_order; 9278 int order; 9279 int pages; 9280 int ret; 9281 9282 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 9283 if (ret) 9284 return ret; 9285 9286 val *= 1024; /* value passed in is in KB */ 9287 9288 pages = DIV_ROUND_UP(val, PAGE_SIZE); 9289 order = fls(pages - 1); 9290 9291 /* limit between 1 and 128 system pages */ 9292 if (order < 0 || order > 7) 9293 return -EINVAL; 9294 9295 /* Do not allow tracing while changing the order of the ring buffer */ 9296 tracing_stop_tr(tr); 9297 9298 old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer); 9299 if (old_order == order) 9300 goto out; 9301 9302 ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order); 9303 if (ret) 9304 goto out; 9305 9306 #ifdef CONFIG_TRACER_MAX_TRACE 9307 9308 if (!tr->allocated_snapshot) 9309 goto out_max; 9310 9311 ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order); 9312 if (ret) { 9313 /* Put back the old order */ 9314 cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order); 9315 if (WARN_ON_ONCE(cnt)) { 9316 /* 9317 * AARGH! We are left with different orders! 9318 * The max buffer is our "snapshot" buffer. 9319 * When a tracer needs a snapshot (one of the 9320 * latency tracers), it swaps the max buffer 9321 * with the saved snapshot. We succeeded in 9322 * updating the order of the main buffer, but failed to 9323 * update the order of the max buffer. But when we tried 9324 * to reset the main buffer to the original size, we 9325 * failed there too. This is very unlikely to 9326 * happen, but if it does, warn and kill all 9327 * tracing.
9328 */ 9329 tracing_disabled = 1; 9330 } 9331 goto out; 9332 } 9333 out_max: 9334 #endif 9335 (*ppos)++; 9336 out: 9337 if (ret) 9338 cnt = ret; 9339 tracing_start_tr(tr); 9340 return cnt; 9341 } 9342 9343 static const struct file_operations buffer_subbuf_size_fops = { 9344 .open = tracing_open_generic_tr, 9345 .read = buffer_subbuf_size_read, 9346 .write = buffer_subbuf_size_write, 9347 .release = tracing_release_generic_tr, 9348 .llseek = default_llseek, 9349 }; 9350 9351 static struct dentry *trace_instance_dir; 9352 9353 static void 9354 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); 9355 9356 static void setup_trace_scratch(struct trace_array *tr, 9357 struct trace_scratch *tscratch, unsigned int size) 9358 { 9359 struct trace_mod_entry *entry; 9360 9361 if (!tscratch) 9362 return; 9363 9364 tr->scratch = tscratch; 9365 tr->scratch_size = size; 9366 9367 #ifdef CONFIG_RANDOMIZE_BASE 9368 if (tscratch->kaslr_addr) 9369 tr->text_delta = kaslr_offset() - tscratch->kaslr_addr; 9370 #endif 9371 9372 if (struct_size(tscratch, entries, tscratch->nr_entries) > size) 9373 goto reset; 9374 9375 /* Check if each module name is a valid string */ 9376 for (int i = 0; i < tscratch->nr_entries; i++) { 9377 int n; 9378 9379 entry = &tscratch->entries[i]; 9380 9381 for (n = 0; n < MODULE_NAME_LEN; n++) { 9382 if (entry->mod_name[n] == '\0') 9383 break; 9384 if (!isprint(entry->mod_name[n])) 9385 goto reset; 9386 } 9387 if (n == MODULE_NAME_LEN) 9388 goto reset; 9389 } 9390 return; 9391 reset: 9392 /* Invalid trace modules */ 9393 memset(tscratch, 0, size); 9394 } 9395 9396 static int 9397 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size) 9398 { 9399 enum ring_buffer_flags rb_flags; 9400 struct trace_scratch *tscratch; 9401 unsigned int scratch_size = 0; 9402 9403 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; 9404 9405 buf->tr = tr; 9406 9407 if (tr->range_addr_start && tr->range_addr_size) { 9408 /* Add scratch buffer to handle 128 modules */ 9409 buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0, 9410 tr->range_addr_start, 9411 tr->range_addr_size, 9412 struct_size(tscratch, entries, 128)); 9413 9414 tscratch = ring_buffer_meta_scratch(buf->buffer, &scratch_size); 9415 setup_trace_scratch(tr, tscratch, scratch_size); 9416 9417 /* 9418 * This is basically the same as a mapped buffer, 9419 * with the same restrictions. 
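* In particular, while the "mapped" count is non-zero, snapshot swapping stays refused for this trace array (see get_snapshot_map()).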
*/ 9421 tr->mapped++; 9422 } else { 9423 buf->buffer = ring_buffer_alloc(size, rb_flags); 9424 } 9425 if (!buf->buffer) 9426 return -ENOMEM; 9427 9428 buf->data = alloc_percpu(struct trace_array_cpu); 9429 if (!buf->data) { 9430 ring_buffer_free(buf->buffer); 9431 buf->buffer = NULL; 9432 return -ENOMEM; 9433 } 9434 9435 /* Allocate the first page for all buffers */ 9436 set_buffer_entries(&tr->array_buffer, 9437 ring_buffer_size(tr->array_buffer.buffer, 0)); 9438 9439 return 0; 9440 } 9441 9442 static void free_trace_buffer(struct array_buffer *buf) 9443 { 9444 if (buf->buffer) { 9445 ring_buffer_free(buf->buffer); 9446 buf->buffer = NULL; 9447 free_percpu(buf->data); 9448 buf->data = NULL; 9449 } 9450 } 9451 9452 static int allocate_trace_buffers(struct trace_array *tr, int size) 9453 { 9454 int ret; 9455 9456 ret = allocate_trace_buffer(tr, &tr->array_buffer, size); 9457 if (ret) 9458 return ret; 9459 9460 #ifdef CONFIG_TRACER_MAX_TRACE 9461 /* Fixed-address (boot mapped) buffer trace arrays do not have snapshot buffers */ 9462 if (tr->range_addr_start) 9463 return 0; 9464 9465 ret = allocate_trace_buffer(tr, &tr->max_buffer, 9466 allocate_snapshot ? size : 1); 9467 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) { 9468 free_trace_buffer(&tr->array_buffer); 9469 return -ENOMEM; 9470 } 9471 tr->allocated_snapshot = allocate_snapshot; 9472 9473 allocate_snapshot = false; 9474 #endif 9475 9476 return 0; 9477 } 9478 9479 static void free_trace_buffers(struct trace_array *tr) 9480 { 9481 if (!tr) 9482 return; 9483 9484 free_trace_buffer(&tr->array_buffer); 9485 9486 #ifdef CONFIG_TRACER_MAX_TRACE 9487 free_trace_buffer(&tr->max_buffer); 9488 #endif 9489 9490 if (tr->range_addr_start) 9491 vunmap((void *)tr->range_addr_start); 9492 } 9493 9494 static void init_trace_flags_index(struct trace_array *tr) 9495 { 9496 int i; 9497 9498 /* Used by the trace options files */ 9499 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) 9500 tr->trace_flags_index[i] = i; 9501 } 9502 9503 static void __update_tracer_options(struct trace_array *tr) 9504 { 9505 struct tracer *t; 9506 9507 for (t = trace_types; t; t = t->next) 9508 add_tracer_options(tr, t); 9509 } 9510 9511 static void update_tracer_options(struct trace_array *tr) 9512 { 9513 mutex_lock(&trace_types_lock); 9514 tracer_options_updated = true; 9515 __update_tracer_options(tr); 9516 mutex_unlock(&trace_types_lock); 9517 } 9518 9519 /* Must have trace_types_lock held */ 9520 struct trace_array *trace_array_find(const char *instance) 9521 { 9522 struct trace_array *tr, *found = NULL; 9523 9524 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9525 if (tr->name && strcmp(tr->name, instance) == 0) { 9526 found = tr; 9527 break; 9528 } 9529 } 9530 9531 return found; 9532 } 9533 9534 struct trace_array *trace_array_find_get(const char *instance) 9535 { 9536 struct trace_array *tr; 9537 9538 mutex_lock(&trace_types_lock); 9539 tr = trace_array_find(instance); 9540 if (tr) 9541 tr->ref++; 9542 mutex_unlock(&trace_types_lock); 9543 9544 return tr; 9545 } 9546 9547 static int trace_array_create_dir(struct trace_array *tr) 9548 { 9549 int ret; 9550 9551 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir); 9552 if (!tr->dir) 9553 return -EINVAL; 9554 9555 ret = event_trace_add_tracer(tr->dir, tr); 9556 if (ret) { 9557 tracefs_remove(tr->dir); 9558 return ret; 9559 } 9560 9561 init_tracer_tracefs(tr, tr->dir); 9562 __update_tracer_options(tr); 9563 9564 return ret; 9565 } 9566 9567 static struct trace_array * 9568 trace_array_create_systems(const char *name,
const char *systems, 9569 unsigned long range_addr_start, 9570 unsigned long range_addr_size) 9571 { 9572 struct trace_array *tr; 9573 int ret; 9574 9575 ret = -ENOMEM; 9576 tr = kzalloc(sizeof(*tr), GFP_KERNEL); 9577 if (!tr) 9578 return ERR_PTR(ret); 9579 9580 tr->name = kstrdup(name, GFP_KERNEL); 9581 if (!tr->name) 9582 goto out_free_tr; 9583 9584 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) 9585 goto out_free_tr; 9586 9587 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL)) 9588 goto out_free_tr; 9589 9590 if (systems) { 9591 tr->system_names = kstrdup_const(systems, GFP_KERNEL); 9592 if (!tr->system_names) 9593 goto out_free_tr; 9594 } 9595 9596 /* Only for boot up memory mapped ring buffers */ 9597 tr->range_addr_start = range_addr_start; 9598 tr->range_addr_size = range_addr_size; 9599 9600 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; 9601 9602 cpumask_copy(tr->tracing_cpumask, cpu_all_mask); 9603 9604 raw_spin_lock_init(&tr->start_lock); 9605 9606 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 9607 #ifdef CONFIG_TRACER_MAX_TRACE 9608 spin_lock_init(&tr->snapshot_trigger_lock); 9609 #endif 9610 tr->current_trace = &nop_trace; 9611 9612 INIT_LIST_HEAD(&tr->systems); 9613 INIT_LIST_HEAD(&tr->events); 9614 INIT_LIST_HEAD(&tr->hist_vars); 9615 INIT_LIST_HEAD(&tr->err_log); 9616 9617 #ifdef CONFIG_MODULES 9618 INIT_LIST_HEAD(&tr->mod_events); 9619 #endif 9620 9621 if (allocate_trace_buffers(tr, trace_buf_size) < 0) 9622 goto out_free_tr; 9623 9624 /* The ring buffer is expanded by default */ 9625 trace_set_ring_buffer_expanded(tr); 9626 9627 if (ftrace_allocate_ftrace_ops(tr) < 0) 9628 goto out_free_tr; 9629 9630 ftrace_init_trace_array(tr); 9631 9632 init_trace_flags_index(tr); 9633 9634 if (trace_instance_dir) { 9635 ret = trace_array_create_dir(tr); 9636 if (ret) 9637 goto out_free_tr; 9638 } else 9639 __trace_early_add_events(tr); 9640 9641 list_add(&tr->list, &ftrace_trace_arrays); 9642 9643 tr->ref++; 9644 9645 return tr; 9646 9647 out_free_tr: 9648 ftrace_free_ftrace_ops(tr); 9649 free_trace_buffers(tr); 9650 free_cpumask_var(tr->pipe_cpumask); 9651 free_cpumask_var(tr->tracing_cpumask); 9652 kfree_const(tr->system_names); 9653 kfree(tr->range_name); 9654 kfree(tr->name); 9655 kfree(tr); 9656 9657 return ERR_PTR(ret); 9658 } 9659 9660 static struct trace_array *trace_array_create(const char *name) 9661 { 9662 return trace_array_create_systems(name, NULL, 0, 0); 9663 } 9664 9665 static int instance_mkdir(const char *name) 9666 { 9667 struct trace_array *tr; 9668 int ret; 9669 9670 guard(mutex)(&event_mutex); 9671 guard(mutex)(&trace_types_lock); 9672 9673 ret = -EEXIST; 9674 if (trace_array_find(name)) 9675 return -EEXIST; 9676 9677 tr = trace_array_create(name); 9678 9679 ret = PTR_ERR_OR_ZERO(tr); 9680 9681 return ret; 9682 } 9683 9684 static u64 map_pages(u64 start, u64 size) 9685 { 9686 struct page **pages; 9687 phys_addr_t page_start; 9688 unsigned int page_count; 9689 unsigned int i; 9690 void *vaddr; 9691 9692 page_count = DIV_ROUND_UP(size, PAGE_SIZE); 9693 9694 page_start = start; 9695 pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL); 9696 if (!pages) 9697 return 0; 9698 9699 for (i = 0; i < page_count; i++) { 9700 phys_addr_t addr = page_start + i * PAGE_SIZE; 9701 pages[i] = pfn_to_page(addr >> PAGE_SHIFT); 9702 } 9703 vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL); 9704 kfree(pages); 9705 9706 return (u64)(unsigned long)vaddr; 9707 } 9708 9709 /** 9710 * trace_array_get_by_name - Create/Lookup a
trace array, given its name. 9711 * @name: The name of the trace array to be looked up/created. 9712 * @systems: A list of systems to create event directories for (NULL for all) 9713 * 9714 * Returns a pointer to the trace array with the given name, or 9715 * NULL if it cannot be created. 9716 * 9717 * NOTE: This function increments the reference counter associated with the 9718 * trace array returned. This makes sure it cannot be freed while in use. 9719 * Use trace_array_put() once the trace array is no longer needed. 9720 * If the trace_array is to be freed, trace_array_destroy() needs to 9721 * be called after the trace_array_put(), or simply let user space delete 9722 * it from the tracefs instances directory. But until the 9723 * trace_array_put() is called, user space cannot delete it. 9724 * 9725 */ 9726 struct trace_array *trace_array_get_by_name(const char *name, const char *systems) 9727 { 9728 struct trace_array *tr; 9729 9730 guard(mutex)(&event_mutex); 9731 guard(mutex)(&trace_types_lock); 9732 9733 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9734 if (tr->name && strcmp(tr->name, name) == 0) { 9735 tr->ref++; 9736 return tr; 9737 } 9738 } 9739 9740 tr = trace_array_create_systems(name, systems, 0, 0); 9741 9742 if (IS_ERR(tr)) 9743 tr = NULL; 9744 else 9745 tr->ref++; 9746 9747 return tr; 9748 } 9749 EXPORT_SYMBOL_GPL(trace_array_get_by_name); 9750 9751 static int __remove_instance(struct trace_array *tr) 9752 { 9753 int i; 9754 9755 /* Reference counter for a newly created trace array = 1. */ 9756 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref)) 9757 return -EBUSY; 9758 9759 list_del(&tr->list); 9760 9761 /* Disable all the flags that were enabled coming in */ 9762 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) { 9763 if ((1 << i) & ZEROED_TRACE_FLAGS) 9764 set_tracer_flag(tr, 1 << i, 0); 9765 } 9766 9767 if (printk_trace == tr) 9768 update_printk_trace(&global_trace); 9769 9770 tracing_set_nop(tr); 9771 clear_ftrace_function_probes(tr); 9772 event_trace_del_tracer(tr); 9773 ftrace_clear_pids(tr); 9774 ftrace_destroy_function_files(tr); 9775 tracefs_remove(tr->dir); 9776 free_percpu(tr->last_func_repeats); 9777 free_trace_buffers(tr); 9778 clear_tracing_err_log(tr); 9779 9780 if (tr->range_name) { 9781 reserve_mem_release_by_name(tr->range_name); 9782 kfree(tr->range_name); 9783 } 9784 9785 for (i = 0; i < tr->nr_topts; i++) { 9786 kfree(tr->topts[i].topts); 9787 } 9788 kfree(tr->topts); 9789 9790 free_cpumask_var(tr->pipe_cpumask); 9791 free_cpumask_var(tr->tracing_cpumask); 9792 kfree_const(tr->system_names); 9793 kfree(tr->name); 9794 kfree(tr); 9795 9796 return 0; 9797 } 9798 9799 int trace_array_destroy(struct trace_array *this_tr) 9800 { 9801 struct trace_array *tr; 9802 9803 if (!this_tr) 9804 return -EINVAL; 9805 9806 guard(mutex)(&event_mutex); 9807 guard(mutex)(&trace_types_lock); 9808 9809 9810 /* Make sure the trace array exists before destroying it.
*/ 9811 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9812 if (tr == this_tr) 9813 return __remove_instance(tr); 9814 } 9815 9816 return -ENODEV; 9817 } 9818 EXPORT_SYMBOL_GPL(trace_array_destroy); 9819 9820 static int instance_rmdir(const char *name) 9821 { 9822 struct trace_array *tr; 9823 9824 guard(mutex)(&event_mutex); 9825 guard(mutex)(&trace_types_lock); 9826 9827 tr = trace_array_find(name); 9828 if (!tr) 9829 return -ENODEV; 9830 9831 return __remove_instance(tr); 9832 } 9833 9834 static __init void create_trace_instances(struct dentry *d_tracer) 9835 { 9836 struct trace_array *tr; 9837 9838 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer, 9839 instance_mkdir, 9840 instance_rmdir); 9841 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n")) 9842 return; 9843 9844 guard(mutex)(&event_mutex); 9845 guard(mutex)(&trace_types_lock); 9846 9847 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9848 if (!tr->name) 9849 continue; 9850 if (MEM_FAIL(trace_array_create_dir(tr) < 0, 9851 "Failed to create instance directory\n")) 9852 return; 9853 } 9854 } 9855 9856 static void 9857 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) 9858 { 9859 int cpu; 9860 9861 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer, 9862 tr, &show_traces_fops); 9863 9864 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer, 9865 tr, &set_tracer_fops); 9866 9867 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer, 9868 tr, &tracing_cpumask_fops); 9869 9870 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer, 9871 tr, &tracing_iter_fops); 9872 9873 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer, 9874 tr, &tracing_fops); 9875 9876 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer, 9877 tr, &tracing_pipe_fops); 9878 9879 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer, 9880 tr, &tracing_entries_fops); 9881 9882 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer, 9883 tr, &tracing_total_entries_fops); 9884 9885 trace_create_file("free_buffer", 0200, d_tracer, 9886 tr, &tracing_free_buffer_fops); 9887 9888 trace_create_file("trace_marker", 0220, d_tracer, 9889 tr, &tracing_mark_fops); 9890 9891 tr->trace_marker_file = __find_event_file(tr, "ftrace", "print"); 9892 9893 trace_create_file("trace_marker_raw", 0220, d_tracer, 9894 tr, &tracing_mark_raw_fops); 9895 9896 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr, 9897 &trace_clock_fops); 9898 9899 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer, 9900 tr, &rb_simple_fops); 9901 9902 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr, 9903 &trace_time_stamp_mode_fops); 9904 9905 tr->buffer_percent = 50; 9906 9907 trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer, 9908 tr, &buffer_percent_fops); 9909 9910 trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer, 9911 tr, &buffer_subbuf_size_fops); 9912 9913 create_trace_options_dir(tr); 9914 9915 #ifdef CONFIG_TRACER_MAX_TRACE 9916 trace_create_maxlat_file(tr, d_tracer); 9917 #endif 9918 9919 if (ftrace_create_function_files(tr, d_tracer)) 9920 MEM_FAIL(1, "Could not allocate function filter files"); 9921 9922 if (tr->range_addr_start) { 9923 trace_create_file("last_boot_info", TRACE_MODE_READ, d_tracer, 9924 tr, &last_boot_fops); 9925 #ifdef CONFIG_TRACER_SNAPSHOT 9926 } else { 9927 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer, 9928 tr, &snapshot_fops); 9929 #endif 9930 } 9931 
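/* See tracing_log_err(): "error_log" holds the last TRACING_LOG_ERRS_MAX tracing command errors */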
9932 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer, 9933 tr, &tracing_err_log_fops); 9934 9935 for_each_tracing_cpu(cpu) 9936 tracing_init_tracefs_percpu(tr, cpu); 9937 9938 ftrace_init_tracefs(tr, d_tracer); 9939 } 9940 9941 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore) 9942 { 9943 struct vfsmount *mnt; 9944 struct file_system_type *type; 9945 9946 /* 9947 * To maintain backward compatibility for tools that mount 9948 * debugfs to get to the tracing facility, tracefs is automatically 9949 * mounted to the debugfs/tracing directory. 9950 */ 9951 type = get_fs_type("tracefs"); 9952 if (!type) 9953 return NULL; 9954 mnt = vfs_submount(mntpt, type, "tracefs", NULL); 9955 put_filesystem(type); 9956 if (IS_ERR(mnt)) 9957 return NULL; 9958 mntget(mnt); 9959 9960 return mnt; 9961 } 9962 9963 /** 9964 * tracing_init_dentry - initialize top level trace array 9965 * 9966 * This is called when creating files or directories in the tracing 9967 * directory. It is called via fs_initcall() by any of the boot up code 9968 * and ensures that the top level tracing directory has been set up. 9969 */ 9970 int tracing_init_dentry(void) 9971 { 9972 struct trace_array *tr = &global_trace; 9973 9974 if (security_locked_down(LOCKDOWN_TRACEFS)) { 9975 pr_warn("Tracing disabled due to lockdown\n"); 9976 return -EPERM; 9977 } 9978 9979 /* The top level trace array uses NULL as parent */ 9980 if (tr->dir) 9981 return 0; 9982 9983 if (WARN_ON(!tracefs_initialized())) 9984 return -ENODEV; 9985 9986 /* 9987 * As there may still be users that expect the tracing 9988 * files to exist in debugfs/tracing, we must automount 9989 * the tracefs file system there, so older tools still 9990 * work with the newer kernel. 9991 */ 9992 tr->dir = debugfs_create_automount("tracing", NULL, 9993 trace_automount, NULL); 9994 9995 return 0; 9996 } 9997 9998 extern struct trace_eval_map *__start_ftrace_eval_maps[]; 9999 extern struct trace_eval_map *__stop_ftrace_eval_maps[]; 10000 10001 static struct workqueue_struct *eval_map_wq __initdata; 10002 static struct work_struct eval_map_work __initdata; 10003 static struct work_struct tracerfs_init_work __initdata; 10004 10005 static void __init eval_map_work_func(struct work_struct *work) 10006 { 10007 int len; 10008 10009 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps; 10010 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len); 10011 } 10012 10013 static int __init trace_eval_init(void) 10014 { 10015 INIT_WORK(&eval_map_work, eval_map_work_func); 10016 10017 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0); 10018 if (!eval_map_wq) { 10019 pr_err("Unable to allocate eval_map_wq\n"); 10020 /* Do work here */ 10021 eval_map_work_func(&eval_map_work); 10022 return -ENOMEM; 10023 } 10024 10025 queue_work(eval_map_wq, &eval_map_work); 10026 return 0; 10027 } 10028 10029 subsys_initcall(trace_eval_init); 10030 10031 static int __init trace_eval_sync(void) 10032 { 10033 /* Make sure the eval map updates are finished */ 10034 if (eval_map_wq) 10035 destroy_workqueue(eval_map_wq); 10036 return 0; 10037 } 10038 10039 late_initcall_sync(trace_eval_sync); 10040 10041 10042 #ifdef CONFIG_MODULES 10043 10044 bool module_exists(const char *module) 10045 { 10046 /* All modules have the symbol __this_module */ 10047 static const char this_mod[] = "__this_module"; 10048 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2]; 10049 unsigned long val; 10050 int n; 10051 10052 n = snprintf(modname, sizeof(modname), "%s:%s", module,

static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	guard(mutex)(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		return;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static bool trace_array_active(struct trace_array *tr)
{
	if (tr->current_trace != &nop_trace)
		return true;

	/* 0 is no events, 1 is all disabled */
	return trace_events_enabled(tr, NULL) > 1;
}

static void trace_module_record(struct module *mod)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		/* Update any persistent trace array that has already been started */
		if ((tr->flags & (TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT)) ==
		    TRACE_ARRAY_FL_BOOT) {
			/* Only update if the trace array is active */
			if (trace_array_active(tr)) {
				guard(mutex)(&scratch_mutex);
				save_mod(mod, tr);
			}
		}
	}
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		trace_module_record(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */
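
/*
 * Illustrative flow (when CONFIG_MODULES is enabled): on module load,
 * MODULE_STATE_COMING reaches trace_module_notify(), which inserts the
 * module's eval maps and records the module in any active persistent
 * boot instance; MODULE_STATE_GOING removes the eval maps again so
 * nothing is left pointing into soon-to-be-unloaded memory.
 */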

static __init void tracer_init_tracefs_work_func(struct work_struct *work)
{
	event_trace_init();

	init_tracer_tracefs(&global_trace, NULL);
	ftrace_init_tracefs_toplevel(&global_trace, NULL);

	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", TRACE_MODE_READ, NULL,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
			  NULL, &tracing_saved_tgids_fops);

	trace_create_eval_file(NULL);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
			  NULL, &tracing_dyn_info_fops);
#endif

	create_trace_instances(NULL);

	update_tracer_options(&global_trace);
}

static __init int tracer_init_tracefs(void)
{
	int ret;

	trace_access_lock_init();

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	if (eval_map_wq) {
		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
		queue_work(eval_map_wq, &tracerfs_init_work);
	} else {
		tracer_init_tracefs_work_func(NULL);
	}

	rv_init_interface();

	return 0;
}

fs_initcall(tracer_init_tracefs);

static int trace_die_panic_handler(struct notifier_block *self,
				   unsigned long ev, void *unused);

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

/*
 * The idea is to execute the following die/panic callback early, in order
 * to avoid showing irrelevant information in the trace (like other panic
 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
 * warnings get disabled (to prevent potential log flooding).
 */
static int trace_die_panic_handler(struct notifier_block *self,
				   unsigned long ev, void *unused)
{
	if (!ftrace_dump_on_oops_enabled())
		return NOTIFY_DONE;

	/* The die notifier requires DIE_OOPS to trigger */
	if (self == &trace_die_notifier && ev != DIE_OOPS)
		return NOTIFY_DONE;

	ftrace_dump(DUMP_PARAM);

	return NOTIFY_DONE;
}

/*
 * The printk buffer allows up to 1024 characters, but we really don't
 * need that much here: nothing should be printing 1000 characters
 * per line anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define KERN_TRACE here so that we have one place to modify
 * if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG
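
/*
 * Net effect of the two defines above: each trace_printk_seq() call
 * emits at most TRACE_MAX_PRINT characters at KERN_EMERG loglevel,
 * so a crash dump makes it to the console even under restrictive
 * console loglevel settings.
 */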

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* Should be NUL-terminated already, but be paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

static void trace_init_iter(struct trace_iterator *iter, struct trace_array *tr)
{
	iter->tr = tr;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &tr->array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Cannot use kmalloc for iter.temp and iter.fmt */
	iter->temp = static_temp_buf;
	iter->temp_size = STATIC_TEMP_BUF_SIZE;
	iter->fmt = static_fmt_buf;
	iter->fmt_size = STATIC_FMT_BUF_SIZE;
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	trace_init_iter(iter, &global_trace);
}
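
/*
 * Note: trace_init_global_iter() is not static because code outside
 * this file also needs a ready-made iterator over the global buffer;
 * the kdb "ftdump" command is believed to be one such user.
 */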

static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracer_tracing_off(tr);

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_iter(&iter, tr);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	if (dump_mode == DUMP_ORIG)
		iter.cpu_file = raw_smp_processor_id();
	else
		iter.cpu_file = RING_BUFFER_ALL_CPUS;

	if (tr == &global_trace)
		printk(KERN_TRACE "Dumping ftrace buffer:\n");
	else
		printk(KERN_TRACE "Dumping ftrace instance %s buffer:\n", tr->name);

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read everything we can, and then
	 * release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	local_irq_restore(flags);
}

static void ftrace_dump_by_param(void)
{
	bool first_param = true;
	char dump_param[MAX_TRACER_SIZE];
	char *buf, *token, *inst_name;
	struct trace_array *tr;

	strscpy(dump_param, ftrace_dump_on_oops, MAX_TRACER_SIZE);
	buf = dump_param;

	while ((token = strsep(&buf, ",")) != NULL) {
		if (first_param) {
			first_param = false;
			if (!strcmp("0", token))
				continue;
			else if (!strcmp("1", token)) {
				ftrace_dump_one(&global_trace, DUMP_ALL);
				continue;
			} else if (!strcmp("2", token) ||
				   !strcmp("orig_cpu", token)) {
				ftrace_dump_one(&global_trace, DUMP_ORIG);
				continue;
			}
		}

		inst_name = strsep(&token, "=");
		tr = trace_array_find(inst_name);
		if (!tr) {
			printk(KERN_TRACE "Instance %s not found\n", inst_name);
			continue;
		}

		if (token && (!strcmp("2", token) ||
			      !strcmp("orig_cpu", token)))
			ftrace_dump_one(tr, DUMP_ORIG);
		else
			ftrace_dump_one(tr, DUMP_ALL);
	}
}
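
/*
 * Illustrative ftrace_dump_on_oops values accepted by the parser above
 * (instance names are examples): "1" dumps all CPUs of the global
 * buffer, "2" or "orig_cpu" dumps only the CPU that triggered the
 * oops, and "foo,bar=orig_cpu" dumps instance "foo" on all CPUs plus
 * instance "bar" on the oopsing CPU.
 */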

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	static atomic_t dump_running;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	switch (oops_dump_mode) {
	case DUMP_ALL:
		ftrace_dump_one(&global_trace, DUMP_ALL);
		break;
	case DUMP_ORIG:
		ftrace_dump_one(&global_trace, DUMP_ORIG);
		break;
	case DUMP_PARAM:
		ftrace_dump_by_param();
		break;
	case DUMP_NONE:
		break;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		ftrace_dump_one(&global_trace, DUMP_ALL);
	}

	atomic_dec(&dump_running);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
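
/*
 * A minimal sketch of a createfn callback for the helper above
 * (hypothetical, for illustration only). trace_parse_run_command()
 * hands it one line at a time, '\n'-stripped and with any '#' comment
 * removed, so a comment-only line arrives as an empty string.
 */
#if 0
static int example_createfn(const char *raw_command)
{
	/* Nothing left after comment stripping: silently accept */
	if (!raw_command[0])
		return 0;

	pr_info("would run command: %s\n", raw_command);

	/* A non-zero return aborts parsing of the remaining input */
	return 0;
}
#endif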

#ifdef CONFIG_TRACER_MAX_TRACE
__init static bool tr_needs_alloc_snapshot(const char *name)
{
	char *test;
	int len = strlen(name);
	bool ret;

	if (!boot_snapshot_index)
		return false;

	if (strncmp(name, boot_snapshot_info, len) == 0 &&
	    boot_snapshot_info[len] == '\t')
		return true;

	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
	if (!test)
		return false;

	sprintf(test, "\t%s\t", name);
	ret = strstr(boot_snapshot_info, test) != NULL;
	kfree(test);
	return ret;
}

__init static void do_allocate_snapshot(const char *name)
{
	if (!tr_needs_alloc_snapshot(name))
		return;

	/*
	 * When allocate_snapshot is set, the next call to
	 * allocate_trace_buffers() (called by trace_array_get_by_name())
	 * will allocate the snapshot buffer. That will also clear
	 * this flag.
	 */
	allocate_snapshot = true;
}
#else
static inline void do_allocate_snapshot(const char *name) { }
#endif
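
/*
 * Illustrative trace_instance= boot parameter forms handled below
 * (instance and reserve_mem names are examples):
 *
 *   trace_instance=foo                        plain instance
 *   trace_instance=foo,sched:sched_switch     instance plus an event
 *   trace_instance=boot^traceoff^traceprintk  flags follow '^'
 *   trace_instance=boot@0x1000000:8M          map at a physical address
 *   trace_instance=boot@myresv                map a reserve_mem region
 */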

__init static void enable_instances(void)
{
	struct trace_array *tr;
	char *curr_str;
	char *name;
	char *str;
	char *tok;

	/* A tab is always appended */
	boot_instance_info[boot_instance_index - 1] = '\0';
	str = boot_instance_info;

	while ((curr_str = strsep(&str, "\t"))) {
		phys_addr_t start = 0;
		phys_addr_t size = 0;
		unsigned long addr = 0;
		bool traceprintk = false;
		bool traceoff = false;
		char *flag_delim;
		char *addr_delim;
		char *rname __free(kfree) = NULL;

		tok = strsep(&curr_str, ",");

		flag_delim = strchr(tok, '^');
		addr_delim = strchr(tok, '@');

		if (addr_delim)
			*addr_delim++ = '\0';

		if (flag_delim)
			*flag_delim++ = '\0';

		name = tok;

		if (flag_delim) {
			char *flag;

			while ((flag = strsep(&flag_delim, "^"))) {
				if (strcmp(flag, "traceoff") == 0) {
					traceoff = true;
				} else if ((strcmp(flag, "printk") == 0) ||
					   (strcmp(flag, "traceprintk") == 0) ||
					   (strcmp(flag, "trace_printk") == 0)) {
					traceprintk = true;
				} else {
					pr_info("Tracing: Invalid instance flag '%s' for %s\n",
						flag, name);
				}
			}
		}

		tok = addr_delim;
		if (tok && isdigit(*tok)) {
			start = memparse(tok, &tok);
			if (!start) {
				pr_warn("Tracing: Invalid boot instance address for %s\n",
					name);
				continue;
			}
			if (*tok != ':') {
				pr_warn("Tracing: No size specified for instance %s\n", name);
				continue;
			}
			tok++;
			size = memparse(tok, &tok);
			if (!size) {
				pr_warn("Tracing: Invalid boot instance size for %s\n",
					name);
				continue;
			}
		} else if (tok) {
			if (!reserve_mem_find_by_name(tok, &start, &size)) {
				start = 0;
				pr_warn("Failed to map boot instance %s to %s\n", name, tok);
				continue;
			}
			rname = kstrdup(tok, GFP_KERNEL);
		}

		if (start) {
			addr = map_pages(start, size);
			if (addr) {
				pr_info("Tracing: mapped boot instance %s at physical memory %pa of size 0x%lx\n",
					name, &start, (unsigned long)size);
			} else {
				pr_warn("Tracing: Failed to map boot instance %s\n", name);
				continue;
			}
		} else {
			/* Only non-mapped buffers have snapshot buffers */
			if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
				do_allocate_snapshot(name);
		}

		tr = trace_array_create_systems(name, NULL, addr, size);
		if (IS_ERR(tr)) {
			pr_warn("Tracing: Failed to create instance buffer %s\n", name);
			continue;
		}

		if (traceoff)
			tracer_tracing_off(tr);

		if (traceprintk)
			update_printk_trace(tr);

		/*
		 * If start is set, then this is a mapped buffer, and
		 * cannot be deleted by user space, so keep the reference
		 * to it.
		 */
		if (start) {
			tr->flags |= TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT;
			tr->range_name = no_free_ptr(rname);
		}

		while ((tok = strsep(&curr_str, ",")))
			early_enable_events(tr, tok, true);
	}
}
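
/*
 * Note on the cleanup idiom above: rname is declared with __free(kfree)
 * (linux/cleanup.h), so it is freed automatically whenever an iteration
 * exits early; no_free_ptr() transfers ownership to tr->range_name and
 * suppresses that automatic kfree for buffers that keep the name.
 */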

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size at its minimum */
	if (global_trace.ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocate some memory for the ring buffer.
	 * We don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
		goto out_free_savedcmd;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_pipe_cpumask;
	}
	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_TRACER_MAX_TRACE
	spin_lock_init(&global_trace.snapshot_trigger_lock);
#endif
	ftrace_init_global_array_ops(&global_trace);

#ifdef CONFIG_MODULES
	INIT_LIST_HEAD(&global_trace.mod_events);
#endif

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_pipe_cpumask:
	free_cpumask_var(global_trace.pipe_cpumask);
out_free_savedcmd:
	trace_free_saved_cmdlines_buffer();
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

#ifdef CONFIG_FUNCTION_TRACER
/* Used to set module cached ftrace filtering at boot up */
__init struct trace_array *trace_get_global_array(void)
{
	return &global_trace;
}
#endif

void __init ftrace_boot_snapshot(void)
{
#ifdef CONFIG_TRACER_MAX_TRACE
	struct trace_array *tr;

	if (!snapshot_at_boot)
		return;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->allocated_snapshot)
			continue;

		tracing_snapshot_instance(tr);
		trace_array_puts(tr, "** Boot snapshot taken **\n");
	}
#endif
}

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();

	init_events();
}

void __init trace_init(void)
{
	trace_event_init();

	if (boot_instance_index)
		enable_instances();
}
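
/*
 * Both early_trace_init() and trace_init() are called from
 * start_kernel() (init/main.c): the former early enough that
 * trace_printk() and boot-time tracers can work, the latter once
 * enough of the kernel is up to initialize trace events and any
 * trace_instance= instances.
 */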

__init static void clear_boot_tracer(void)
{
	/*
	 * The default boot-up tracer name lives in an init section.
	 * This function is called at late_initcall time; if the boot
	 * tracer was never registered, clear the pointer out, to prevent
	 * a later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Cannot set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif

__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);
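
/*
 * Rough initialization order of the hooks in this file, for reference:
 * early_trace_init()/trace_init() run from start_kernel(), before any
 * initcalls; trace_eval_init() runs at subsys_initcall time;
 * tracer_init_tracefs() at fs_initcall time; and trace_eval_sync()
 * plus late_trace_init() at late_initcall_sync time.
 */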