// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <[email protected]>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <[email protected]>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t last_pid;
	int depth;
	int depth_irq;
	int ignore;
	unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	union {
		struct ftrace_graph_ent_entry ent;
		struct fgraph_retaddr_ent_entry rent;
	} ent;
	struct ftrace_graph_ret_entry ret;
	int failed;
	int cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	/* Display function return value ? */
	{ TRACER_OPT(funcgraph-retval, TRACE_GRAPH_PRINT_RETVAL) },
	/* Display function return value in hexadecimal format ? */
	{ TRACER_OPT(funcgraph-retval-hex, TRACE_GRAPH_PRINT_RETVAL_HEX) },
#endif
#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
	/* Display function return address ? */
	{ TRACER_OPT(funcgraph-retaddr, TRACE_GRAPH_PRINT_RETADDR) },
#endif
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },

#ifdef CONFIG_FUNCTION_PROFILER
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
#endif

	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};
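
/* Return true if all bits in @flags are set in the current tracer options. */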
static bool tracer_flags_is_set(u32 flags)
{
	return (tracer_flags.val & flags) == flags;
}

/*
 * The DURATION column is also used to display IRQ signs,
 * following values are used by print_graph_irq and others
 * to fill in space into DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

int __trace_graph_entry(struct trace_array *tr,
			struct ftrace_graph_ent *trace,
			unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
int __trace_graph_retaddr_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned int trace_ctx,
				unsigned long retaddr)
{
	struct trace_event_call *call = &event_fgraph_retaddr_entry;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct fgraph_retaddr_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RETADDR_ENT,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent.func = trace->func;
	entry->graph_ent.depth = trace->depth;
	entry->graph_ent.retaddr = retaddr;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}
#else
int __trace_graph_retaddr_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned int trace_ctx,
				unsigned long retaddr)
{
	return 1;
}
#endif

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_hardirq();
}

struct fgraph_times {
	unsigned long long calltime;
	unsigned long long sleeptime;	/* may be optional! */
};
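
/*
 * Entry callback of the function graph tracer: record a function entry
 * event in the ring buffer, honoring set_graph_notrace, irq filtering
 * and the per-task tracing checks.
 */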
int trace_graph_entry(struct ftrace_graph_ent *trace,
		      struct fgraph_ops *gops)
{
	unsigned long *task_var = fgraph_get_task_var(gops);
	struct trace_array *tr = gops->private;
	struct trace_array_cpu *data;
	struct fgraph_times *ftimes;
	unsigned long flags;
	unsigned int trace_ctx;
	long disabled;
	int ret;
	int cpu;

	if (*task_var & TRACE_GRAPH_NOTRACE)
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func)) {
		*task_var |= TRACE_GRAPH_NOTRACE_BIT;
		/*
		 * Need to return 1 to have the return handler called,
		 * which will clear the NOTRACE bit.
		 */
		return 1;
	}

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(gops, trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	if (fgraph_sleep_time) {
		/* Only need to record the calltime */
		ftimes = fgraph_reserve_data(gops->idx, sizeof(ftimes->calltime));
	} else {
		ftimes = fgraph_reserve_data(gops->idx, sizeof(*ftimes));
		if (ftimes)
			ftimes->sleeptime = current->ftrace_sleeptime;
	}
	if (!ftimes)
		return 0;

	ftimes->calltime = trace_clock_local();

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		if (unlikely(IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
			     tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR))) {
			unsigned long retaddr = ftrace_graph_top_ret_addr(current);

			ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
		} else
			ret = __trace_graph_entry(tr, trace, trace_ctx);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static void
__trace_graph_function(struct trace_array *tr,
		       unsigned long ip, unsigned int trace_ctx)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func = ip,
		.depth = 0,
		.calltime = time,
		.rettime = time,
	};

	__trace_graph_entry(tr, &ent, trace_ctx);
	__trace_graph_return(tr, &ret, trace_ctx);
}

void
trace_graph_function(struct trace_array *tr,
		     unsigned long ip, unsigned long parent_ip,
		     unsigned int trace_ctx)
{
	__trace_graph_function(tr, ip, trace_ctx);
}

void __trace_graph_return(struct trace_array *tr,
			  struct ftrace_graph_ret *trace,
			  unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

static void handle_nosleeptime(struct ftrace_graph_ret *trace,
			       struct fgraph_times *ftimes,
			       int size)
{
	if (fgraph_sleep_time || size < sizeof(*ftimes))
		return;

	ftimes->calltime += current->ftrace_sleeptime - ftimes->sleeptime;
}
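
/*
 * Return callback of the function graph tracer: adjust the saved calltime
 * when sleep time is not being counted, then record the return event in
 * the ring buffer.
 */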
void trace_graph_return(struct ftrace_graph_ret *trace,
			struct fgraph_ops *gops)
{
	unsigned long *task_var = fgraph_get_task_var(gops);
	struct trace_array *tr = gops->private;
	struct trace_array_cpu *data;
	struct fgraph_times *ftimes;
	unsigned long flags;
	unsigned int trace_ctx;
	long disabled;
	int size;
	int cpu;

	ftrace_graph_addr_finish(gops, trace);

	if (*task_var & TRACE_GRAPH_NOTRACE) {
		*task_var &= ~TRACE_GRAPH_NOTRACE;
		return;
	}

	ftimes = fgraph_retrieve_data(gops->idx, &size);
	if (!ftimes)
		return;

	handle_nosleeptime(trace, ftimes, size);

	trace->calltime = ftimes->calltime;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		__trace_graph_return(tr, trace, trace_ctx);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
				      struct fgraph_ops *gops)
{
	struct fgraph_times *ftimes;
	int size;

	ftrace_graph_addr_finish(gops, trace);

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
		return;
	}

	ftimes = fgraph_retrieve_data(gops->idx, &size);
	if (!ftimes)
		return;

	handle_nosleeptime(trace, ftimes, size);

	trace->calltime = ftimes->calltime;

	if (tracing_thresh &&
	    (trace->rettime - ftimes->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace, gops);
}

static struct fgraph_ops funcgraph_ops = {
	.entryfunc = &trace_graph_entry,
	.retfunc = &trace_graph_return,
};

int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
{
	struct fgraph_ops *gops;

	gops = kzalloc(sizeof(*gops), GFP_KERNEL);
	if (!gops)
		return -ENOMEM;

	gops->entryfunc = &trace_graph_entry;
	gops->retfunc = &trace_graph_return;

	tr->gops = gops;
	gops->private = tr;

	fgraph_init_ops(&gops->ops, ops);

	return 0;
}

void free_fgraph_ops(struct trace_array *tr)
{
	kfree(tr->gops);
}

__init void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
{
	tr->gops = &funcgraph_ops;
	funcgraph_ops.private = tr;
	fgraph_init_ops(&tr->gops->ops, ops);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	tr->gops->entryfunc = trace_graph_entry;

	if (tracing_thresh)
		tr->gops->retfunc = trace_graph_thresh_return;
	else
		tr->gops->retfunc = trace_graph_return;

	/* Make sure gops functions are visible before we start tracing */
	smp_mb();

	ret = register_ftrace_graph(tr->gops);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(tr->gops);
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14
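
/*
 * Print the "comm-pid" column, centered within
 * TRACE_GRAPH_PROCINFO_LENGTH characters.
 */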
static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
	trace_seq_puts(s, " | ");
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
	/*
	 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

	 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}
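
/*
 * Check whether the current entry is a "leaf" call: peek at the next
 * event and, if it is the matching return for the same function and pid,
 * return it so the call can be printed on a single line. Returns NULL
 * when the next event is not the matching return.
 */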
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		    struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent.ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			if (unlikely(curr->ent.type == TRACE_GRAPH_RETADDR_ENT))
				data->ent.rent = *(struct fgraph_retaddr_ent_entry *)curr;
			else
				data->ent.ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_iter_advance(ring_iter);

	return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu | ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
	unsigned long long usecs;

	usecs = iter->ts - iter->array_buffer->time_start;
	do_div(usecs, NSEC_PER_USEC);

	trace_seq_printf(s, "%9llu us | ", usecs);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	addr += iter->tr->text_delta;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Relative time */
		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
			print_graph_rel_time(iter, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}
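
/*
 * Print the duration column, or fill it with the spacing requested by the
 * FLAGS_FILL_* value when there is no duration to show (e.g. on entry
 * lines and irq markers).
 */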
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, " | ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, " ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of the execution time to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, " ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "| ");
}

#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
#define __TRACE_GRAPH_PRINT_RETVAL TRACE_GRAPH_PRINT_RETVAL
#else
#define __TRACE_GRAPH_PRINT_RETVAL 0
#endif

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
#define __TRACE_GRAPH_PRINT_RETADDR TRACE_GRAPH_PRINT_RETADDR
static void print_graph_retaddr(struct trace_seq *s, struct fgraph_retaddr_ent_entry *entry,
				u32 trace_flags, bool comment)
{
	if (comment)
		trace_seq_puts(s, " /*");

	trace_seq_puts(s, " <-");
	seq_print_ip_sym(s, entry->graph_ent.retaddr, trace_flags | TRACE_ITER_SYM_OFFSET);

	if (comment)
		trace_seq_puts(s, " */");
}
#else
#define __TRACE_GRAPH_PRINT_RETADDR 0
#define print_graph_retaddr(_seq, _entry, _tflags, _comment)	do { } while (0)
#endif

#if defined(CONFIG_FUNCTION_GRAPH_RETVAL) || defined(CONFIG_FUNCTION_GRAPH_RETADDR)

static void print_graph_retval(struct trace_seq *s, struct ftrace_graph_ent_entry *entry,
			       struct ftrace_graph_ret *graph_ret, void *func,
			       u32 opt_flags, u32 trace_flags)
{
	unsigned long err_code = 0;
	unsigned long retval = 0;
	bool print_retaddr = false;
	bool print_retval = false;
	bool hex_format = !!(opt_flags & TRACE_GRAPH_PRINT_RETVAL_HEX);

#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	retval = graph_ret->retval;
	print_retval = !!(opt_flags & TRACE_GRAPH_PRINT_RETVAL);
#endif

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
	print_retaddr = !!(opt_flags & TRACE_GRAPH_PRINT_RETADDR);
#endif

	if (print_retval && retval && !hex_format) {
		/* Check if the return value matches the negative format */
		if (IS_ENABLED(CONFIG_64BIT) && (retval & BIT(31)) &&
		    (((u64)retval) >> 32) == 0) {
			err_code = sign_extend64(retval, 31);
		} else {
			err_code = retval;
		}

		if (!IS_ERR_VALUE(err_code))
			err_code = 0;
	}

	if (entry) {
		if (entry->ent.type != TRACE_GRAPH_RETADDR_ENT)
			print_retaddr = false;

		trace_seq_printf(s, "%ps();", func);
		if (print_retval || print_retaddr)
			trace_seq_puts(s, " /*");
		else
			trace_seq_putc(s, '\n');
	} else {
		print_retaddr = false;
		trace_seq_printf(s, "} /* %ps", func);
	}

	if (print_retaddr)
		print_graph_retaddr(s, (struct fgraph_retaddr_ent_entry *)entry,
				    trace_flags, false);

	if (print_retval) {
		if (hex_format || (err_code == 0))
			trace_seq_printf(s, " ret=0x%lx", retval);
		else
			trace_seq_printf(s, " ret=%ld", err_code);
	}

	if (!entry || print_retval || print_retaddr)
		trace_seq_puts(s, " */\n");
}

#else

#define print_graph_retval(_seq, _ent, _ret, _func, _opt_flags, _trace_flags)	do {} while (0)

#endif
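
/*
 * A leaf call (an entry immediately followed by its matching return) is
 * printed on a single line with its duration, while a nested call opens a
 * brace and the duration is shown on the closing line. Roughly:
 *
 *   1)   0.123 us    |    leaf_func();
 *   1)               |    nested_func() {
 *   1)   1.456 us    |    }
 */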
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		       struct ftrace_graph_ent_entry *entry,
		       struct ftrace_graph_ret_entry *ret_entry,
		       struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	unsigned long func;
	int cpu = iter->cpu;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	func = call->func + iter->tr->text_delta;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * Write out the function return value or return address
	 */
	if (flags & (__TRACE_GRAPH_PRINT_RETVAL | __TRACE_GRAPH_PRINT_RETADDR)) {
		print_graph_retval(s, entry, graph_ret,
				   (void *)graph_ret->func + iter->tr->text_delta,
				   flags, tr->trace_flags);
	} else {
		trace_seq_printf(s, "%ps();\n", (void *)func);
	}

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	unsigned long func;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	func = call->func + iter->tr->text_delta;

	trace_seq_printf(s, "%ps() {", (void *)func);
	if (flags & __TRACE_GRAPH_PRINT_RETADDR &&
	    entry->ent.type == TRACE_GRAPH_RETADDR_ENT)
		print_graph_retaddr(s, (struct fgraph_retaddr_ent_entry *)entry,
				    tr->trace_flags, true);
	trace_seq_putc(s, '\n');

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
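
/*
 * Print the per-line prefix shared by all graph lines: the pid-switch
 * banner, irq entry/exit markers and, depending on the flags, the
 * absolute/relative time, CPU, proc and latency columns.
 */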
static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Relative time */
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		print_graph_rel_time(iter, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	addr += iter->tr->text_delta;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the return of the entry
	 * we recorded. Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
		  struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}
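
/*
 * Print the closing "}" line for a function return: duration, optional
 * return value, the tail function name when requested or when the entry
 * was lost, the overrun count and any irq exit marker.
 */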
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	unsigned long func;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	func = trace->func + iter->tr->text_delta;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * Always write out the function name and its return value if the
	 * funcgraph-retval option is enabled.
	 */
	if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
		print_graph_retval(s, NULL, trace, (void *)func, flags, tr->trace_flags);
	} else {
		/*
		 * If the return function does not have a matching entry,
		 * then the entry was lost. Instead of just printing
		 * the '}' and letting the user guess what function this
		 * belongs to, write out the function name. Always do
		 * that if the funcgraph-tail option is enabled.
		 */
		if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
			trace_seq_puts(s, "}\n");
		else
			trace_seq_printf(s, "} /* %ps */\n", (void *)func);
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %u)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}
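
/*
 * Main print_line callback: dispatch the current trace entry to the
 * matching printer (entry, retaddr entry, return) and render everything
 * else as an embedded comment.
 */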
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent.ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
	case TRACE_GRAPH_RETADDR_ENT: {
		struct fgraph_retaddr_ent_entry saved;
		struct fgraph_retaddr_ent_entry *rfield;

		trace_assign_type(rfield, entry);
		saved = *rfield;
		return print_graph_entry((struct ftrace_graph_ent_entry *)&saved, s, iter, flags);
	}
#endif
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                " /* 16 spaces */
		"    " /* 4 spaces */
		"                 "; /* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces);
	seq_printf(s, "#%.*s||| / \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, " TIME ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, " REL TIME ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, " TASK/PID ");
	if (lat)
		seq_puts(s, "|||| ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, " DURATION ");
	seq_puts(s, " FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, " | ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, " | ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " | ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, " | | ");
	if (lat)
		seq_puts(s, "|||| ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, " | | ");
	seq_puts(s, " | | | |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}
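
/*
 * Allocate the per-iterator fgraph_data used while printing. May be
 * called in atomic context (e.g. via ftrace_dump()), hence the GFP flag
 * selection below.
 */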
void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type = TRACE_GRAPH_ENT,
	.funcs = &graph_functions,
};

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
static struct trace_event graph_trace_retaddr_entry_event = {
	.type = TRACE_GRAPH_RETADDR_ENT,
	.funcs = &graph_functions,
};
#endif

static struct trace_event graph_trace_ret_event = {
	.type = TRACE_GRAPH_RET,
	.funcs = &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name = "function_graph",
	.update_thresh = graph_trace_update_thresh,
	.open = graph_trace_open,
	.pipe_open = graph_trace_open,
	.close = graph_trace_close,
	.pipe_close = graph_trace_close,
	.init = graph_trace_init,
	.reset = graph_trace_reset,
	.print_line = print_graph_function,
	.print_header = print_graph_headers,
	.flags = &tracer_flags,
	.set_flag = func_graph_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function_graph,
#endif
};
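
/*
 * Read/write handlers for the "max_graph_depth" tracefs file, which
 * controls fgraph_max_depth (0 means no depth limit).
 */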
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open = tracing_open_generic,
	.write = graph_depth_write,
	.read = graph_depth_read,
	.llseek = generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
	if (!register_trace_event(&graph_trace_retaddr_entry_event)) {
		pr_warn("Warning: could not register graph trace retaddr events\n");
		return 1;
	}
#endif

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);