1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * 4 * Function graph tracer. 5 * Copyright (c) 2008-2009 Frederic Weisbecker <[email protected]> 6 * Mostly borrowed from function tracer which 7 * is Copyright (c) Steven Rostedt <[email protected]> 8 * 9 */ 10 #include <linux/uaccess.h> 11 #include <linux/ftrace.h> 12 #include <linux/interrupt.h> 13 #include <linux/slab.h> 14 #include <linux/fs.h> 15 16 #include "trace.h" 17 #include "trace_output.h" 18 19 /* When set, irq functions will be ignored */ 20 static int ftrace_graph_skip_irqs; 21 22 struct fgraph_cpu_data { 23 pid_t last_pid; 24 int depth; 25 int depth_irq; 26 int ignore; 27 unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH]; 28 }; 29 30 struct fgraph_data { 31 struct fgraph_cpu_data __percpu *cpu_data; 32 33 /* Place to preserve last processed entry. */ 34 struct ftrace_graph_ent_entry ent; 35 struct ftrace_graph_ret_entry ret; 36 int failed; 37 int cpu; 38 }; 39 40 #define TRACE_GRAPH_INDENT 2 41 42 unsigned int fgraph_max_depth; 43 44 static struct tracer_opt trace_opts[] = { 45 /* Display overruns? (for self-debug purpose) */ 46 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, 47 /* Display CPU ? */ 48 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, 49 /* Display Overhead ? */ 50 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, 51 /* Display proc name/pid */ 52 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, 53 /* Display duration of execution */ 54 { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, 55 /* Display absolute time of an entry */ 56 { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, 57 /* Display interrupts */ 58 { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) }, 59 /* Display function name after trailing } */ 60 { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) }, 61 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL 62 /* Display function return value ? */ 63 { TRACER_OPT(funcgraph-retval, TRACE_GRAPH_PRINT_RETVAL) }, 64 /* Display function return value in hexadecimal format ? */ 65 { TRACER_OPT(funcgraph-retval-hex, TRACE_GRAPH_PRINT_RETVAL_HEX) }, 66 #endif 67 /* Include sleep time (scheduled out) between entry and return */ 68 { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) }, 69 70 #ifdef CONFIG_FUNCTION_PROFILER 71 /* Include time within nested functions */ 72 { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) }, 73 #endif 74 75 { } /* Empty entry */ 76 }; 77 78 static struct tracer_flags tracer_flags = { 79 /* Don't display overruns, proc, or tail by default */ 80 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | 81 TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS | 82 TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME, 83 .opts = trace_opts 84 }; 85 86 /* 87 * DURATION column is being also used to display IRQ signs, 88 * following values are used by print_graph_irq and others 89 * to fill in space into DURATION column. 90 */ 91 enum { 92 FLAGS_FILL_FULL = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT, 93 FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT, 94 FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT, 95 }; 96 97 static void 98 print_graph_duration(struct trace_array *tr, unsigned long long duration, 99 struct trace_seq *s, u32 flags); 100 101 int __trace_graph_entry(struct trace_array *tr, 102 struct ftrace_graph_ent *trace, 103 unsigned int trace_ctx) 104 { 105 struct trace_event_call *call = &event_funcgraph_entry; 106 struct ring_buffer_event *event; 107 struct trace_buffer *buffer = tr->array_buffer.buffer; 108 struct ftrace_graph_ent_entry *entry; 109 110 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, 111 sizeof(*entry), trace_ctx); 112 if (!event) 113 return 0; 114 entry = ring_buffer_event_data(event); 115 entry->graph_ent = *trace; 116 if (!call_filter_check_discard(call, entry, buffer, event)) 117 trace_buffer_unlock_commit_nostack(buffer, event); 118 119 return 1; 120 } 121 122 static inline int ftrace_graph_ignore_irqs(void) 123 { 124 if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT)) 125 return 0; 126 127 return in_hardirq(); 128 } 129 130 int trace_graph_entry(struct ftrace_graph_ent *trace, 131 struct fgraph_ops *gops) 132 { 133 struct trace_array *tr = gops->private; 134 struct trace_array_cpu *data; 135 unsigned long flags; 136 unsigned int trace_ctx; 137 long disabled; 138 int ret; 139 int cpu; 140 141 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) 142 return 0; 143 144 /* 145 * Do not trace a function if it's filtered by set_graph_notrace. 146 * Make the index of ret stack negative to indicate that it should 147 * ignore further functions. But it needs its own ret stack entry 148 * to recover the original index in order to continue tracing after 149 * returning from the function. 150 */ 151 if (ftrace_graph_notrace_addr(trace->func)) { 152 trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT); 153 /* 154 * Need to return 1 to have the return called 155 * that will clear the NOTRACE bit. 156 */ 157 return 1; 158 } 159 160 if (!ftrace_trace_task(tr)) 161 return 0; 162 163 if (ftrace_graph_ignore_func(trace)) 164 return 0; 165 166 if (ftrace_graph_ignore_irqs()) 167 return 0; 168 169 /* 170 * Stop here if tracing_threshold is set. We only write function return 171 * events to the ring buffer. 172 */ 173 if (tracing_thresh) 174 return 1; 175 176 local_irq_save(flags); 177 cpu = raw_smp_processor_id(); 178 data = per_cpu_ptr(tr->array_buffer.data, cpu); 179 disabled = atomic_inc_return(&data->disabled); 180 if (likely(disabled == 1)) { 181 trace_ctx = tracing_gen_ctx_flags(flags); 182 ret = __trace_graph_entry(tr, trace, trace_ctx); 183 } else { 184 ret = 0; 185 } 186 187 atomic_dec(&data->disabled); 188 local_irq_restore(flags); 189 190 return ret; 191 } 192 193 static void 194 __trace_graph_function(struct trace_array *tr, 195 unsigned long ip, unsigned int trace_ctx) 196 { 197 u64 time = trace_clock_local(); 198 struct ftrace_graph_ent ent = { 199 .func = ip, 200 .depth = 0, 201 }; 202 struct ftrace_graph_ret ret = { 203 .func = ip, 204 .depth = 0, 205 .calltime = time, 206 .rettime = time, 207 }; 208 209 __trace_graph_entry(tr, &ent, trace_ctx); 210 __trace_graph_return(tr, &ret, trace_ctx); 211 } 212 213 void 214 trace_graph_function(struct trace_array *tr, 215 unsigned long ip, unsigned long parent_ip, 216 unsigned int trace_ctx) 217 { 218 __trace_graph_function(tr, ip, trace_ctx); 219 } 220 221 void __trace_graph_return(struct trace_array *tr, 222 struct ftrace_graph_ret *trace, 223 unsigned int trace_ctx) 224 { 225 struct trace_event_call *call = &event_funcgraph_exit; 226 struct ring_buffer_event *event; 227 struct trace_buffer *buffer = tr->array_buffer.buffer; 228 struct ftrace_graph_ret_entry *entry; 229 230 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, 231 sizeof(*entry), trace_ctx); 232 if (!event) 233 return; 234 entry = ring_buffer_event_data(event); 235 entry->ret = *trace; 236 if (!call_filter_check_discard(call, entry, buffer, event)) 237 trace_buffer_unlock_commit_nostack(buffer, event); 238 } 239 240 void trace_graph_return(struct ftrace_graph_ret *trace, 241 struct fgraph_ops *gops) 242 { 243 struct trace_array *tr = gops->private; 244 struct trace_array_cpu *data; 245 unsigned long flags; 246 unsigned int trace_ctx; 247 long disabled; 248 int cpu; 249 250 ftrace_graph_addr_finish(trace); 251 252 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) { 253 trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT); 254 return; 255 } 256 257 local_irq_save(flags); 258 cpu = raw_smp_processor_id(); 259 data = per_cpu_ptr(tr->array_buffer.data, cpu); 260 disabled = atomic_inc_return(&data->disabled); 261 if (likely(disabled == 1)) { 262 trace_ctx = tracing_gen_ctx_flags(flags); 263 __trace_graph_return(tr, trace, trace_ctx); 264 } 265 atomic_dec(&data->disabled); 266 local_irq_restore(flags); 267 } 268 269 static void trace_graph_thresh_return(struct ftrace_graph_ret *trace, 270 struct fgraph_ops *gops) 271 { 272 ftrace_graph_addr_finish(trace); 273 274 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) { 275 trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT); 276 return; 277 } 278 279 if (tracing_thresh && 280 (trace->rettime - trace->calltime < tracing_thresh)) 281 return; 282 else 283 trace_graph_return(trace, gops); 284 } 285 286 static struct fgraph_ops funcgraph_ops = { 287 .entryfunc = &trace_graph_entry, 288 .retfunc = &trace_graph_return, 289 }; 290 291 int allocate_fgraph_ops(struct trace_array *tr) 292 { 293 struct fgraph_ops *gops; 294 295 gops = kzalloc(sizeof(*gops), GFP_KERNEL); 296 if (!gops) 297 return -ENOMEM; 298 299 gops->entryfunc = &trace_graph_entry; 300 gops->retfunc = &trace_graph_return; 301 302 tr->gops = gops; 303 gops->private = tr; 304 return 0; 305 } 306 307 void free_fgraph_ops(struct trace_array *tr) 308 { 309 kfree(tr->gops); 310 } 311 312 __init void init_array_fgraph_ops(struct trace_array *tr) 313 { 314 tr->gops = &funcgraph_ops; 315 funcgraph_ops.private = tr; 316 } 317 318 static int graph_trace_init(struct trace_array *tr) 319 { 320 int ret; 321 322 tr->gops->entryfunc = trace_graph_entry; 323 324 if (tracing_thresh) 325 tr->gops->retfunc = trace_graph_thresh_return; 326 else 327 tr->gops->retfunc = trace_graph_return; 328 329 /* Make gops functions are visible before we start tracing */ 330 smp_mb(); 331 332 ret = register_ftrace_graph(tr->gops); 333 if (ret) 334 return ret; 335 tracing_start_cmdline_record(); 336 337 return 0; 338 } 339 340 static void graph_trace_reset(struct trace_array *tr) 341 { 342 tracing_stop_cmdline_record(); 343 unregister_ftrace_graph(tr->gops); 344 } 345 346 static int graph_trace_update_thresh(struct trace_array *tr) 347 { 348 graph_trace_reset(tr); 349 return graph_trace_init(tr); 350 } 351 352 static int max_bytes_for_cpu; 353 354 static void print_graph_cpu(struct trace_seq *s, int cpu) 355 { 356 /* 357 * Start with a space character - to make it stand out 358 * to the right a bit when trace output is pasted into 359 * email: 360 */ 361 trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); 362 } 363 364 #define TRACE_GRAPH_PROCINFO_LENGTH 14 365 366 static void print_graph_proc(struct trace_seq *s, pid_t pid) 367 { 368 char comm[TASK_COMM_LEN]; 369 /* sign + log10(MAX_INT) + '\0' */ 370 char pid_str[11]; 371 int spaces = 0; 372 int len; 373 int i; 374 375 trace_find_cmdline(pid, comm); 376 comm[7] = '\0'; 377 sprintf(pid_str, "%d", pid); 378 379 /* 1 stands for the "-" character */ 380 len = strlen(comm) + strlen(pid_str) + 1; 381 382 if (len < TRACE_GRAPH_PROCINFO_LENGTH) 383 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; 384 385 /* First spaces to align center */ 386 for (i = 0; i < spaces / 2; i++) 387 trace_seq_putc(s, ' '); 388 389 trace_seq_printf(s, "%s-%s", comm, pid_str); 390 391 /* Last spaces to align center */ 392 for (i = 0; i < spaces - (spaces / 2); i++) 393 trace_seq_putc(s, ' '); 394 } 395 396 397 static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry) 398 { 399 trace_seq_putc(s, ' '); 400 trace_print_lat_fmt(s, entry); 401 trace_seq_puts(s, " | "); 402 } 403 404 /* If the pid changed since the last trace, output this event */ 405 static void 406 verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) 407 { 408 pid_t prev_pid; 409 pid_t *last_pid; 410 411 if (!data) 412 return; 413 414 last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); 415 416 if (*last_pid == pid) 417 return; 418 419 prev_pid = *last_pid; 420 *last_pid = pid; 421 422 if (prev_pid == -1) 423 return; 424 /* 425 * Context-switch trace line: 426 427 ------------------------------------------ 428 | 1) migration/0--1 => sshd-1755 429 ------------------------------------------ 430 431 */ 432 trace_seq_puts(s, " ------------------------------------------\n"); 433 print_graph_cpu(s, cpu); 434 print_graph_proc(s, prev_pid); 435 trace_seq_puts(s, " => "); 436 print_graph_proc(s, pid); 437 trace_seq_puts(s, "\n ------------------------------------------\n\n"); 438 } 439 440 static struct ftrace_graph_ret_entry * 441 get_return_for_leaf(struct trace_iterator *iter, 442 struct ftrace_graph_ent_entry *curr) 443 { 444 struct fgraph_data *data = iter->private; 445 struct ring_buffer_iter *ring_iter = NULL; 446 struct ring_buffer_event *event; 447 struct ftrace_graph_ret_entry *next; 448 449 /* 450 * If the previous output failed to write to the seq buffer, 451 * then we just reuse the data from before. 452 */ 453 if (data && data->failed) { 454 curr = &data->ent; 455 next = &data->ret; 456 } else { 457 458 ring_iter = trace_buffer_iter(iter, iter->cpu); 459 460 /* First peek to compare current entry and the next one */ 461 if (ring_iter) 462 event = ring_buffer_iter_peek(ring_iter, NULL); 463 else { 464 /* 465 * We need to consume the current entry to see 466 * the next one. 467 */ 468 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, 469 NULL, NULL); 470 event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu, 471 NULL, NULL); 472 } 473 474 if (!event) 475 return NULL; 476 477 next = ring_buffer_event_data(event); 478 479 if (data) { 480 /* 481 * Save current and next entries for later reference 482 * if the output fails. 483 */ 484 data->ent = *curr; 485 /* 486 * If the next event is not a return type, then 487 * we only care about what type it is. Otherwise we can 488 * safely copy the entire event. 489 */ 490 if (next->ent.type == TRACE_GRAPH_RET) 491 data->ret = *next; 492 else 493 data->ret.ent.type = next->ent.type; 494 } 495 } 496 497 if (next->ent.type != TRACE_GRAPH_RET) 498 return NULL; 499 500 if (curr->ent.pid != next->ent.pid || 501 curr->graph_ent.func != next->ret.func) 502 return NULL; 503 504 /* this is a leaf, now advance the iterator */ 505 if (ring_iter) 506 ring_buffer_iter_advance(ring_iter); 507 508 return next; 509 } 510 511 static void print_graph_abs_time(u64 t, struct trace_seq *s) 512 { 513 unsigned long usecs_rem; 514 515 usecs_rem = do_div(t, NSEC_PER_SEC); 516 usecs_rem /= 1000; 517 518 trace_seq_printf(s, "%5lu.%06lu | ", 519 (unsigned long)t, usecs_rem); 520 } 521 522 static void 523 print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s) 524 { 525 unsigned long long usecs; 526 527 usecs = iter->ts - iter->array_buffer->time_start; 528 do_div(usecs, NSEC_PER_USEC); 529 530 trace_seq_printf(s, "%9llu us | ", usecs); 531 } 532 533 static void 534 print_graph_irq(struct trace_iterator *iter, unsigned long addr, 535 enum trace_type type, int cpu, pid_t pid, u32 flags) 536 { 537 struct trace_array *tr = iter->tr; 538 struct trace_seq *s = &iter->seq; 539 struct trace_entry *ent = iter->ent; 540 541 if (addr < (unsigned long)__irqentry_text_start || 542 addr >= (unsigned long)__irqentry_text_end) 543 return; 544 545 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 546 /* Absolute time */ 547 if (flags & TRACE_GRAPH_PRINT_ABS_TIME) 548 print_graph_abs_time(iter->ts, s); 549 550 /* Relative time */ 551 if (flags & TRACE_GRAPH_PRINT_REL_TIME) 552 print_graph_rel_time(iter, s); 553 554 /* Cpu */ 555 if (flags & TRACE_GRAPH_PRINT_CPU) 556 print_graph_cpu(s, cpu); 557 558 /* Proc */ 559 if (flags & TRACE_GRAPH_PRINT_PROC) { 560 print_graph_proc(s, pid); 561 trace_seq_puts(s, " | "); 562 } 563 564 /* Latency format */ 565 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 566 print_graph_lat_fmt(s, ent); 567 } 568 569 /* No overhead */ 570 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START); 571 572 if (type == TRACE_GRAPH_ENT) 573 trace_seq_puts(s, "==========>"); 574 else 575 trace_seq_puts(s, "<=========="); 576 577 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END); 578 trace_seq_putc(s, '\n'); 579 } 580 581 void 582 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) 583 { 584 unsigned long nsecs_rem = do_div(duration, 1000); 585 /* log10(ULONG_MAX) + '\0' */ 586 char usecs_str[21]; 587 char nsecs_str[5]; 588 int len; 589 int i; 590 591 sprintf(usecs_str, "%lu", (unsigned long) duration); 592 593 /* Print msecs */ 594 trace_seq_printf(s, "%s", usecs_str); 595 596 len = strlen(usecs_str); 597 598 /* Print nsecs (we don't want to exceed 7 numbers) */ 599 if (len < 7) { 600 size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len); 601 602 snprintf(nsecs_str, slen, "%03lu", nsecs_rem); 603 trace_seq_printf(s, ".%s", nsecs_str); 604 len += strlen(nsecs_str) + 1; 605 } 606 607 trace_seq_puts(s, " us "); 608 609 /* Print remaining spaces to fit the row's width */ 610 for (i = len; i < 8; i++) 611 trace_seq_putc(s, ' '); 612 } 613 614 static void 615 print_graph_duration(struct trace_array *tr, unsigned long long duration, 616 struct trace_seq *s, u32 flags) 617 { 618 if (!(flags & TRACE_GRAPH_PRINT_DURATION) || 619 !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO)) 620 return; 621 622 /* No real adata, just filling the column with spaces */ 623 switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) { 624 case FLAGS_FILL_FULL: 625 trace_seq_puts(s, " | "); 626 return; 627 case FLAGS_FILL_START: 628 trace_seq_puts(s, " "); 629 return; 630 case FLAGS_FILL_END: 631 trace_seq_puts(s, " |"); 632 return; 633 } 634 635 /* Signal a overhead of time execution to the output */ 636 if (flags & TRACE_GRAPH_PRINT_OVERHEAD) 637 trace_seq_printf(s, "%c ", trace_find_mark(duration)); 638 else 639 trace_seq_puts(s, " "); 640 641 trace_print_graph_duration(duration, s); 642 trace_seq_puts(s, "| "); 643 } 644 645 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL 646 647 #define __TRACE_GRAPH_PRINT_RETVAL TRACE_GRAPH_PRINT_RETVAL 648 649 static void print_graph_retval(struct trace_seq *s, unsigned long retval, 650 bool leaf, void *func, bool hex_format) 651 { 652 unsigned long err_code = 0; 653 654 if (retval == 0 || hex_format) 655 goto done; 656 657 /* Check if the return value matches the negative format */ 658 if (IS_ENABLED(CONFIG_64BIT) && (retval & BIT(31)) && 659 (((u64)retval) >> 32) == 0) { 660 /* sign extension */ 661 err_code = (unsigned long)(s32)retval; 662 } else { 663 err_code = retval; 664 } 665 666 if (!IS_ERR_VALUE(err_code)) 667 err_code = 0; 668 669 done: 670 if (leaf) { 671 if (hex_format || (err_code == 0)) 672 trace_seq_printf(s, "%ps(); /* = 0x%lx */\n", 673 func, retval); 674 else 675 trace_seq_printf(s, "%ps(); /* = %ld */\n", 676 func, err_code); 677 } else { 678 if (hex_format || (err_code == 0)) 679 trace_seq_printf(s, "} /* %ps = 0x%lx */\n", 680 func, retval); 681 else 682 trace_seq_printf(s, "} /* %ps = %ld */\n", 683 func, err_code); 684 } 685 } 686 687 #else 688 689 #define __TRACE_GRAPH_PRINT_RETVAL 0 690 691 #define print_graph_retval(_seq, _retval, _leaf, _func, _format) do {} while (0) 692 693 #endif 694 695 /* Case of a leaf function on its call entry */ 696 static enum print_line_t 697 print_graph_entry_leaf(struct trace_iterator *iter, 698 struct ftrace_graph_ent_entry *entry, 699 struct ftrace_graph_ret_entry *ret_entry, 700 struct trace_seq *s, u32 flags) 701 { 702 struct fgraph_data *data = iter->private; 703 struct trace_array *tr = iter->tr; 704 struct ftrace_graph_ret *graph_ret; 705 struct ftrace_graph_ent *call; 706 unsigned long long duration; 707 int cpu = iter->cpu; 708 int i; 709 710 graph_ret = &ret_entry->ret; 711 call = &entry->graph_ent; 712 duration = graph_ret->rettime - graph_ret->calltime; 713 714 if (data) { 715 struct fgraph_cpu_data *cpu_data; 716 717 cpu_data = per_cpu_ptr(data->cpu_data, cpu); 718 719 /* 720 * Comments display at + 1 to depth. Since 721 * this is a leaf function, keep the comments 722 * equal to this depth. 723 */ 724 cpu_data->depth = call->depth - 1; 725 726 /* No need to keep this function around for this depth */ 727 if (call->depth < FTRACE_RETFUNC_DEPTH && 728 !WARN_ON_ONCE(call->depth < 0)) 729 cpu_data->enter_funcs[call->depth] = 0; 730 } 731 732 /* Overhead and duration */ 733 print_graph_duration(tr, duration, s, flags); 734 735 /* Function */ 736 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) 737 trace_seq_putc(s, ' '); 738 739 /* 740 * Write out the function return value if the option function-retval is 741 * enabled. 742 */ 743 if (flags & __TRACE_GRAPH_PRINT_RETVAL) 744 print_graph_retval(s, graph_ret->retval, true, (void *)call->func, 745 !!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX)); 746 else 747 trace_seq_printf(s, "%ps();\n", (void *)call->func); 748 749 print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET, 750 cpu, iter->ent->pid, flags); 751 752 return trace_handle_return(s); 753 } 754 755 static enum print_line_t 756 print_graph_entry_nested(struct trace_iterator *iter, 757 struct ftrace_graph_ent_entry *entry, 758 struct trace_seq *s, int cpu, u32 flags) 759 { 760 struct ftrace_graph_ent *call = &entry->graph_ent; 761 struct fgraph_data *data = iter->private; 762 struct trace_array *tr = iter->tr; 763 int i; 764 765 if (data) { 766 struct fgraph_cpu_data *cpu_data; 767 int cpu = iter->cpu; 768 769 cpu_data = per_cpu_ptr(data->cpu_data, cpu); 770 cpu_data->depth = call->depth; 771 772 /* Save this function pointer to see if the exit matches */ 773 if (call->depth < FTRACE_RETFUNC_DEPTH && 774 !WARN_ON_ONCE(call->depth < 0)) 775 cpu_data->enter_funcs[call->depth] = call->func; 776 } 777 778 /* No time */ 779 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL); 780 781 /* Function */ 782 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) 783 trace_seq_putc(s, ' '); 784 785 trace_seq_printf(s, "%ps() {\n", (void *)call->func); 786 787 if (trace_seq_has_overflowed(s)) 788 return TRACE_TYPE_PARTIAL_LINE; 789 790 /* 791 * we already consumed the current entry to check the next one 792 * and see if this is a leaf. 793 */ 794 return TRACE_TYPE_NO_CONSUME; 795 } 796 797 static void 798 print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, 799 int type, unsigned long addr, u32 flags) 800 { 801 struct fgraph_data *data = iter->private; 802 struct trace_entry *ent = iter->ent; 803 struct trace_array *tr = iter->tr; 804 int cpu = iter->cpu; 805 806 /* Pid */ 807 verif_pid(s, ent->pid, cpu, data); 808 809 if (type) 810 /* Interrupt */ 811 print_graph_irq(iter, addr, type, cpu, ent->pid, flags); 812 813 if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO)) 814 return; 815 816 /* Absolute time */ 817 if (flags & TRACE_GRAPH_PRINT_ABS_TIME) 818 print_graph_abs_time(iter->ts, s); 819 820 /* Relative time */ 821 if (flags & TRACE_GRAPH_PRINT_REL_TIME) 822 print_graph_rel_time(iter, s); 823 824 /* Cpu */ 825 if (flags & TRACE_GRAPH_PRINT_CPU) 826 print_graph_cpu(s, cpu); 827 828 /* Proc */ 829 if (flags & TRACE_GRAPH_PRINT_PROC) { 830 print_graph_proc(s, ent->pid); 831 trace_seq_puts(s, " | "); 832 } 833 834 /* Latency format */ 835 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 836 print_graph_lat_fmt(s, ent); 837 838 return; 839 } 840 841 /* 842 * Entry check for irq code 843 * 844 * returns 1 if 845 * - we are inside irq code 846 * - we just entered irq code 847 * 848 * returns 0 if 849 * - funcgraph-interrupts option is set 850 * - we are not inside irq code 851 */ 852 static int 853 check_irq_entry(struct trace_iterator *iter, u32 flags, 854 unsigned long addr, int depth) 855 { 856 int cpu = iter->cpu; 857 int *depth_irq; 858 struct fgraph_data *data = iter->private; 859 860 /* 861 * If we are either displaying irqs, or we got called as 862 * a graph event and private data does not exist, 863 * then we bypass the irq check. 864 */ 865 if ((flags & TRACE_GRAPH_PRINT_IRQS) || 866 (!data)) 867 return 0; 868 869 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); 870 871 /* 872 * We are inside the irq code 873 */ 874 if (*depth_irq >= 0) 875 return 1; 876 877 if ((addr < (unsigned long)__irqentry_text_start) || 878 (addr >= (unsigned long)__irqentry_text_end)) 879 return 0; 880 881 /* 882 * We are entering irq code. 883 */ 884 *depth_irq = depth; 885 return 1; 886 } 887 888 /* 889 * Return check for irq code 890 * 891 * returns 1 if 892 * - we are inside irq code 893 * - we just left irq code 894 * 895 * returns 0 if 896 * - funcgraph-interrupts option is set 897 * - we are not inside irq code 898 */ 899 static int 900 check_irq_return(struct trace_iterator *iter, u32 flags, int depth) 901 { 902 int cpu = iter->cpu; 903 int *depth_irq; 904 struct fgraph_data *data = iter->private; 905 906 /* 907 * If we are either displaying irqs, or we got called as 908 * a graph event and private data does not exist, 909 * then we bypass the irq check. 910 */ 911 if ((flags & TRACE_GRAPH_PRINT_IRQS) || 912 (!data)) 913 return 0; 914 915 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); 916 917 /* 918 * We are not inside the irq code. 919 */ 920 if (*depth_irq == -1) 921 return 0; 922 923 /* 924 * We are inside the irq code, and this is returning entry. 925 * Let's not trace it and clear the entry depth, since 926 * we are out of irq code. 927 * 928 * This condition ensures that we 'leave the irq code' once 929 * we are out of the entry depth. Thus protecting us from 930 * the RETURN entry loss. 931 */ 932 if (*depth_irq >= depth) { 933 *depth_irq = -1; 934 return 1; 935 } 936 937 /* 938 * We are inside the irq code, and this is not the entry. 939 */ 940 return 1; 941 } 942 943 static enum print_line_t 944 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, 945 struct trace_iterator *iter, u32 flags) 946 { 947 struct fgraph_data *data = iter->private; 948 struct ftrace_graph_ent *call = &field->graph_ent; 949 struct ftrace_graph_ret_entry *leaf_ret; 950 static enum print_line_t ret; 951 int cpu = iter->cpu; 952 953 if (check_irq_entry(iter, flags, call->func, call->depth)) 954 return TRACE_TYPE_HANDLED; 955 956 print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags); 957 958 leaf_ret = get_return_for_leaf(iter, field); 959 if (leaf_ret) 960 ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags); 961 else 962 ret = print_graph_entry_nested(iter, field, s, cpu, flags); 963 964 if (data) { 965 /* 966 * If we failed to write our output, then we need to make 967 * note of it. Because we already consumed our entry. 968 */ 969 if (s->full) { 970 data->failed = 1; 971 data->cpu = cpu; 972 } else 973 data->failed = 0; 974 } 975 976 return ret; 977 } 978 979 static enum print_line_t 980 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, 981 struct trace_entry *ent, struct trace_iterator *iter, 982 u32 flags) 983 { 984 unsigned long long duration = trace->rettime - trace->calltime; 985 struct fgraph_data *data = iter->private; 986 struct trace_array *tr = iter->tr; 987 pid_t pid = ent->pid; 988 int cpu = iter->cpu; 989 int func_match = 1; 990 int i; 991 992 if (check_irq_return(iter, flags, trace->depth)) 993 return TRACE_TYPE_HANDLED; 994 995 if (data) { 996 struct fgraph_cpu_data *cpu_data; 997 int cpu = iter->cpu; 998 999 cpu_data = per_cpu_ptr(data->cpu_data, cpu); 1000 1001 /* 1002 * Comments display at + 1 to depth. This is the 1003 * return from a function, we now want the comments 1004 * to display at the same level of the bracket. 1005 */ 1006 cpu_data->depth = trace->depth - 1; 1007 1008 if (trace->depth < FTRACE_RETFUNC_DEPTH && 1009 !WARN_ON_ONCE(trace->depth < 0)) { 1010 if (cpu_data->enter_funcs[trace->depth] != trace->func) 1011 func_match = 0; 1012 cpu_data->enter_funcs[trace->depth] = 0; 1013 } 1014 } 1015 1016 print_graph_prologue(iter, s, 0, 0, flags); 1017 1018 /* Overhead and duration */ 1019 print_graph_duration(tr, duration, s, flags); 1020 1021 /* Closing brace */ 1022 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) 1023 trace_seq_putc(s, ' '); 1024 1025 /* 1026 * Always write out the function name and its return value if the 1027 * function-retval option is enabled. 1028 */ 1029 if (flags & __TRACE_GRAPH_PRINT_RETVAL) { 1030 print_graph_retval(s, trace->retval, false, (void *)trace->func, 1031 !!(flags & TRACE_GRAPH_PRINT_RETVAL_HEX)); 1032 } else { 1033 /* 1034 * If the return function does not have a matching entry, 1035 * then the entry was lost. Instead of just printing 1036 * the '}' and letting the user guess what function this 1037 * belongs to, write out the function name. Always do 1038 * that if the funcgraph-tail option is enabled. 1039 */ 1040 if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) 1041 trace_seq_puts(s, "}\n"); 1042 else 1043 trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func); 1044 } 1045 1046 /* Overrun */ 1047 if (flags & TRACE_GRAPH_PRINT_OVERRUN) 1048 trace_seq_printf(s, " (Overruns: %u)\n", 1049 trace->overrun); 1050 1051 print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, 1052 cpu, pid, flags); 1053 1054 return trace_handle_return(s); 1055 } 1056 1057 static enum print_line_t 1058 print_graph_comment(struct trace_seq *s, struct trace_entry *ent, 1059 struct trace_iterator *iter, u32 flags) 1060 { 1061 struct trace_array *tr = iter->tr; 1062 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); 1063 struct fgraph_data *data = iter->private; 1064 struct trace_event *event; 1065 int depth = 0; 1066 int ret; 1067 int i; 1068 1069 if (data) 1070 depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; 1071 1072 print_graph_prologue(iter, s, 0, 0, flags); 1073 1074 /* No time */ 1075 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL); 1076 1077 /* Indentation */ 1078 if (depth > 0) 1079 for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) 1080 trace_seq_putc(s, ' '); 1081 1082 /* The comment */ 1083 trace_seq_puts(s, "/* "); 1084 1085 switch (iter->ent->type) { 1086 case TRACE_BPUTS: 1087 ret = trace_print_bputs_msg_only(iter); 1088 if (ret != TRACE_TYPE_HANDLED) 1089 return ret; 1090 break; 1091 case TRACE_BPRINT: 1092 ret = trace_print_bprintk_msg_only(iter); 1093 if (ret != TRACE_TYPE_HANDLED) 1094 return ret; 1095 break; 1096 case TRACE_PRINT: 1097 ret = trace_print_printk_msg_only(iter); 1098 if (ret != TRACE_TYPE_HANDLED) 1099 return ret; 1100 break; 1101 default: 1102 event = ftrace_find_event(ent->type); 1103 if (!event) 1104 return TRACE_TYPE_UNHANDLED; 1105 1106 ret = event->funcs->trace(iter, sym_flags, event); 1107 if (ret != TRACE_TYPE_HANDLED) 1108 return ret; 1109 } 1110 1111 if (trace_seq_has_overflowed(s)) 1112 goto out; 1113 1114 /* Strip ending newline */ 1115 if (s->buffer[s->seq.len - 1] == '\n') { 1116 s->buffer[s->seq.len - 1] = '\0'; 1117 s->seq.len--; 1118 } 1119 1120 trace_seq_puts(s, " */\n"); 1121 out: 1122 return trace_handle_return(s); 1123 } 1124 1125 1126 enum print_line_t 1127 print_graph_function_flags(struct trace_iterator *iter, u32 flags) 1128 { 1129 struct ftrace_graph_ent_entry *field; 1130 struct fgraph_data *data = iter->private; 1131 struct trace_entry *entry = iter->ent; 1132 struct trace_seq *s = &iter->seq; 1133 int cpu = iter->cpu; 1134 int ret; 1135 1136 if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) { 1137 per_cpu_ptr(data->cpu_data, cpu)->ignore = 0; 1138 return TRACE_TYPE_HANDLED; 1139 } 1140 1141 /* 1142 * If the last output failed, there's a possibility we need 1143 * to print out the missing entry which would never go out. 1144 */ 1145 if (data && data->failed) { 1146 field = &data->ent; 1147 iter->cpu = data->cpu; 1148 ret = print_graph_entry(field, s, iter, flags); 1149 if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) { 1150 per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1; 1151 ret = TRACE_TYPE_NO_CONSUME; 1152 } 1153 iter->cpu = cpu; 1154 return ret; 1155 } 1156 1157 switch (entry->type) { 1158 case TRACE_GRAPH_ENT: { 1159 /* 1160 * print_graph_entry() may consume the current event, 1161 * thus @field may become invalid, so we need to save it. 1162 * sizeof(struct ftrace_graph_ent_entry) is very small, 1163 * it can be safely saved at the stack. 1164 */ 1165 struct ftrace_graph_ent_entry saved; 1166 trace_assign_type(field, entry); 1167 saved = *field; 1168 return print_graph_entry(&saved, s, iter, flags); 1169 } 1170 case TRACE_GRAPH_RET: { 1171 struct ftrace_graph_ret_entry *field; 1172 trace_assign_type(field, entry); 1173 return print_graph_return(&field->ret, s, entry, iter, flags); 1174 } 1175 case TRACE_STACK: 1176 case TRACE_FN: 1177 /* dont trace stack and functions as comments */ 1178 return TRACE_TYPE_UNHANDLED; 1179 1180 default: 1181 return print_graph_comment(s, entry, iter, flags); 1182 } 1183 1184 return TRACE_TYPE_HANDLED; 1185 } 1186 1187 static enum print_line_t 1188 print_graph_function(struct trace_iterator *iter) 1189 { 1190 return print_graph_function_flags(iter, tracer_flags.val); 1191 } 1192 1193 static enum print_line_t 1194 print_graph_function_event(struct trace_iterator *iter, int flags, 1195 struct trace_event *event) 1196 { 1197 return print_graph_function(iter); 1198 } 1199 1200 static void print_lat_header(struct seq_file *s, u32 flags) 1201 { 1202 static const char spaces[] = " " /* 16 spaces */ 1203 " " /* 4 spaces */ 1204 " "; /* 17 spaces */ 1205 int size = 0; 1206 1207 if (flags & TRACE_GRAPH_PRINT_ABS_TIME) 1208 size += 16; 1209 if (flags & TRACE_GRAPH_PRINT_REL_TIME) 1210 size += 16; 1211 if (flags & TRACE_GRAPH_PRINT_CPU) 1212 size += 4; 1213 if (flags & TRACE_GRAPH_PRINT_PROC) 1214 size += 17; 1215 1216 seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces); 1217 seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces); 1218 seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces); 1219 seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces); 1220 seq_printf(s, "#%.*s||| / \n", size, spaces); 1221 } 1222 1223 static void __print_graph_headers_flags(struct trace_array *tr, 1224 struct seq_file *s, u32 flags) 1225 { 1226 int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT; 1227 1228 if (lat) 1229 print_lat_header(s, flags); 1230 1231 /* 1st line */ 1232 seq_putc(s, '#'); 1233 if (flags & TRACE_GRAPH_PRINT_ABS_TIME) 1234 seq_puts(s, " TIME "); 1235 if (flags & TRACE_GRAPH_PRINT_REL_TIME) 1236 seq_puts(s, " REL TIME "); 1237 if (flags & TRACE_GRAPH_PRINT_CPU) 1238 seq_puts(s, " CPU"); 1239 if (flags & TRACE_GRAPH_PRINT_PROC) 1240 seq_puts(s, " TASK/PID "); 1241 if (lat) 1242 seq_puts(s, "|||| "); 1243 if (flags & TRACE_GRAPH_PRINT_DURATION) 1244 seq_puts(s, " DURATION "); 1245 seq_puts(s, " FUNCTION CALLS\n"); 1246 1247 /* 2nd line */ 1248 seq_putc(s, '#'); 1249 if (flags & TRACE_GRAPH_PRINT_ABS_TIME) 1250 seq_puts(s, " | "); 1251 if (flags & TRACE_GRAPH_PRINT_REL_TIME) 1252 seq_puts(s, " | "); 1253 if (flags & TRACE_GRAPH_PRINT_CPU) 1254 seq_puts(s, " | "); 1255 if (flags & TRACE_GRAPH_PRINT_PROC) 1256 seq_puts(s, " | | "); 1257 if (lat) 1258 seq_puts(s, "|||| "); 1259 if (flags & TRACE_GRAPH_PRINT_DURATION) 1260 seq_puts(s, " | | "); 1261 seq_puts(s, " | | | |\n"); 1262 } 1263 1264 static void print_graph_headers(struct seq_file *s) 1265 { 1266 print_graph_headers_flags(s, tracer_flags.val); 1267 } 1268 1269 void print_graph_headers_flags(struct seq_file *s, u32 flags) 1270 { 1271 struct trace_iterator *iter = s->private; 1272 struct trace_array *tr = iter->tr; 1273 1274 if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO)) 1275 return; 1276 1277 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) { 1278 /* print nothing if the buffers are empty */ 1279 if (trace_empty(iter)) 1280 return; 1281 1282 print_trace_header(s, iter); 1283 } 1284 1285 __print_graph_headers_flags(tr, s, flags); 1286 } 1287 1288 void graph_trace_open(struct trace_iterator *iter) 1289 { 1290 /* pid and depth on the last trace processed */ 1291 struct fgraph_data *data; 1292 gfp_t gfpflags; 1293 int cpu; 1294 1295 iter->private = NULL; 1296 1297 /* We can be called in atomic context via ftrace_dump() */ 1298 gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL; 1299 1300 data = kzalloc(sizeof(*data), gfpflags); 1301 if (!data) 1302 goto out_err; 1303 1304 data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags); 1305 if (!data->cpu_data) 1306 goto out_err_free; 1307 1308 for_each_possible_cpu(cpu) { 1309 pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); 1310 int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); 1311 int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore); 1312 int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); 1313 1314 *pid = -1; 1315 *depth = 0; 1316 *ignore = 0; 1317 *depth_irq = -1; 1318 } 1319 1320 iter->private = data; 1321 1322 return; 1323 1324 out_err_free: 1325 kfree(data); 1326 out_err: 1327 pr_warn("function graph tracer: not enough memory\n"); 1328 } 1329 1330 void graph_trace_close(struct trace_iterator *iter) 1331 { 1332 struct fgraph_data *data = iter->private; 1333 1334 if (data) { 1335 free_percpu(data->cpu_data); 1336 kfree(data); 1337 } 1338 } 1339 1340 static int 1341 func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) 1342 { 1343 if (bit == TRACE_GRAPH_PRINT_IRQS) 1344 ftrace_graph_skip_irqs = !set; 1345 1346 if (bit == TRACE_GRAPH_SLEEP_TIME) 1347 ftrace_graph_sleep_time_control(set); 1348 1349 if (bit == TRACE_GRAPH_GRAPH_TIME) 1350 ftrace_graph_graph_time_control(set); 1351 1352 return 0; 1353 } 1354 1355 static struct trace_event_functions graph_functions = { 1356 .trace = print_graph_function_event, 1357 }; 1358 1359 static struct trace_event graph_trace_entry_event = { 1360 .type = TRACE_GRAPH_ENT, 1361 .funcs = &graph_functions, 1362 }; 1363 1364 static struct trace_event graph_trace_ret_event = { 1365 .type = TRACE_GRAPH_RET, 1366 .funcs = &graph_functions 1367 }; 1368 1369 static struct tracer graph_trace __tracer_data = { 1370 .name = "function_graph", 1371 .update_thresh = graph_trace_update_thresh, 1372 .open = graph_trace_open, 1373 .pipe_open = graph_trace_open, 1374 .close = graph_trace_close, 1375 .pipe_close = graph_trace_close, 1376 .init = graph_trace_init, 1377 .reset = graph_trace_reset, 1378 .print_line = print_graph_function, 1379 .print_header = print_graph_headers, 1380 .flags = &tracer_flags, 1381 .set_flag = func_graph_set_flag, 1382 .allow_instances = true, 1383 #ifdef CONFIG_FTRACE_SELFTEST 1384 .selftest = trace_selftest_startup_function_graph, 1385 #endif 1386 }; 1387 1388 1389 static ssize_t 1390 graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt, 1391 loff_t *ppos) 1392 { 1393 unsigned long val; 1394 int ret; 1395 1396 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 1397 if (ret) 1398 return ret; 1399 1400 fgraph_max_depth = val; 1401 1402 *ppos += cnt; 1403 1404 return cnt; 1405 } 1406 1407 static ssize_t 1408 graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt, 1409 loff_t *ppos) 1410 { 1411 char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/ 1412 int n; 1413 1414 n = sprintf(buf, "%d\n", fgraph_max_depth); 1415 1416 return simple_read_from_buffer(ubuf, cnt, ppos, buf, n); 1417 } 1418 1419 static const struct file_operations graph_depth_fops = { 1420 .open = tracing_open_generic, 1421 .write = graph_depth_write, 1422 .read = graph_depth_read, 1423 .llseek = generic_file_llseek, 1424 }; 1425 1426 static __init int init_graph_tracefs(void) 1427 { 1428 int ret; 1429 1430 ret = tracing_init_dentry(); 1431 if (ret) 1432 return 0; 1433 1434 trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL, 1435 NULL, &graph_depth_fops); 1436 1437 return 0; 1438 } 1439 fs_initcall(init_graph_tracefs); 1440 1441 static __init int init_graph_trace(void) 1442 { 1443 max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1); 1444 1445 if (!register_trace_event(&graph_trace_entry_event)) { 1446 pr_warn("Warning: could not register graph trace events\n"); 1447 return 1; 1448 } 1449 1450 if (!register_trace_event(&graph_trace_ret_event)) { 1451 pr_warn("Warning: could not register graph trace events\n"); 1452 return 1; 1453 } 1454 1455 return register_tracer(&graph_trace); 1456 } 1457 1458 core_initcall(init_graph_trace); 1459