/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

DECLARE_RWSEM(trace_event_sem);

static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bputs_entry *field;

	trace_assign_type(field, entry);

	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;

	trace_assign_type(field, entry);

	trace_seq_puts(s, field->buf);

	return trace_handle_return(s);
}

const char *
trace_print_flags_seq(struct trace_seq *p, const char *delim,
		      unsigned long flags,
		      const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = trace_seq_buffer_ptr(p);
	int i, first = 1;

	for (i = 0; flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (!first && delim)
			trace_seq_puts(p, delim);
		else
			first = 0;
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (!first && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_flags_seq);
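
/*
 * Illustrative sketch only (not part of the original file): callers,
 * typically via the __print_flags() macro in an event's TP_printk(),
 * pass a NULL-terminated table of { mask, name } pairs. Assuming a
 * hypothetical table:
 *
 *	static const struct trace_print_flags my_flags[] = {
 *		{ 0x01, "READ" },
 *		{ 0x02, "WRITE" },
 *		{ }
 *	};
 *
 * trace_print_flags_seq(p, "|", 0x7, my_flags) would leave
 * "READ|WRITE|0x4" in the seq buffer: named bits first, then any
 * leftover bits printed as hex.
 */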

const char *
trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
			const struct trace_print_flags *symbol_array)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
		trace_seq_printf(p, "0x%lx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_symbols_seq);

#if BITS_PER_LONG == 32
const char *
trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			    const struct trace_print_flags_u64 *symbol_array)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
		trace_seq_printf(p, "0x%llx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_symbols_seq_u64);
#endif

const char *
trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
			unsigned int bitmask_size)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_print_bitmask_seq);

const char *
trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len,
		    bool spacing)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; i < buf_len; i++)
		trace_seq_printf(p, "%s%2.2x", !spacing || i == 0 ? "" : " ",
				 buf[i]);
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_hex_seq);

const char *
trace_print_array_seq(struct trace_seq *p, const void *buf, int count,
		      size_t el_size)
{
	const char *ret = trace_seq_buffer_ptr(p);
	const char *prefix = "";
	void *ptr = (void *)buf;
	size_t buf_len = count * el_size;

	trace_seq_putc(p, '{');

	while (ptr < buf + buf_len) {
		switch (el_size) {
		case 1:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u8 *)ptr);
			break;
		case 2:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u16 *)ptr);
			break;
		case 4:
			trace_seq_printf(p, "%s0x%x", prefix,
					 *(u32 *)ptr);
			break;
		case 8:
			trace_seq_printf(p, "%s0x%llx", prefix,
					 *(u64 *)ptr);
			break;
		default:
			trace_seq_printf(p, "BAD SIZE:%zu 0x%x", el_size,
					 *(u8 *)ptr);
			el_size = 1;
		}
		prefix = ",";
		ptr += el_size;
	}

	trace_seq_putc(p, '}');
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(trace_print_array_seq);
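
/*
 * Illustrative sketch only: trace_print_array_seq() backs the
 * __print_array() helper. For a hypothetical three-element u16 array
 * arr[] = {1, 2, 3}:
 *
 *	trace_print_array_seq(p, arr, 3, sizeof(u16));
 *
 * the buffer would read "{0x1,0x2,0x3}". An unsupported el_size falls
 * back to byte-sized output after printing a "BAD SIZE:" marker.
 */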

int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *trace_event)
{
	struct trace_event_call *event;
	struct trace_seq *s = &iter->seq;
	struct trace_seq *p = &iter->tmp_seq;
	struct trace_entry *entry;

	event = container_of(trace_event, struct trace_event_call, event);
	entry = iter->ent;

	if (entry->type != event->event.type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_init(p);
	trace_seq_printf(s, "%s: ", trace_event_name(event));

	return trace_handle_return(s);
}
EXPORT_SYMBOL(trace_raw_output_prep);

static int trace_output_raw(struct trace_iterator *iter, char *name,
			    char *fmt, va_list ap)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "%s: ", name);
	trace_seq_vprintf(s, fmt, ap);

	return trace_handle_return(s);
}

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_output_raw(iter, name, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_output_call);

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

static void
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	trace_seq_printf(s, fmt, name);
#endif
}

static void
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	trace_seq_printf(s, fmt, name);
#endif
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
			     unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (s->full)
		return 0;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				trace_seq_printf(s, "[+0x%lx]",
						 ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		trace_seq_printf(s, " <" IP_FMT ">", ip);
	return !trace_seq_has_overflowed(s);
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	if (!ip) {
		trace_seq_putc(s, '0');
		goto out;
	}

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		seq_print_sym_offset(s, "%s", ip);
	else
		seq_print_sym_short(s, "%s", ip);

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		trace_seq_printf(s, " <" IP_FMT ">", ip);

 out:
	return !trace_seq_has_overflowed(s);
}

/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields of irqs off, in hard or softirq, preempt
 * count.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	char hardsoft_irq;
	char need_resched;
	char irqs_off;
	int hardirq;
	int softirq;
	int nmi;

	nmi = entry->flags & TRACE_FLAG_NMI;
	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	irqs_off =
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
		'.';

	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
				TRACE_FLAG_PREEMPT_RESCHED)) {
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'N';
		break;
	case TRACE_FLAG_NEED_RESCHED:
		need_resched = 'n';
		break;
	case TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'p';
		break;
	default:
		need_resched = '.';
		break;
	}

	hardsoft_irq =
		(nmi && hardirq)     ? 'Z' :
		nmi                  ? 'z' :
		(hardirq && softirq) ? 'H' :
		hardirq              ? 'h' :
		softirq              ? 's' :
				       '.' ;

	trace_seq_printf(s, "%c%c%c",
			 irqs_off, need_resched, hardsoft_irq);

	if (entry->preempt_count)
		trace_seq_printf(s, "%x", entry->preempt_count);
	else
		trace_seq_putc(s, '.');

	return !trace_seq_has_overflowed(s);
}
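
/*
 * Illustrative note: the four characters produced above match the
 * irqs-off / need-resched / hardirq-softirq / preempt-depth legend
 * printed in the trace header. For example, an entry logged from a
 * hard interrupt with interrupts disabled, NEED_RESCHED set, and a
 * preempt count of 1 would render as "dnh1".
 */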

static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	trace_seq_printf(s, "%8.8s-%-5d %3d",
			 comm, entry->pid, cpu);

	return trace_print_lat_fmt(s, entry);
}

#undef MARK
#define MARK(v, s) {.val = v, .sym = s}
/* trace overhead mark */
static const struct trace_mark {
	unsigned long long	val; /* unit: nsec */
	char			sym;
} mark[] = {
	MARK(1000000000ULL	, '$'), /* 1 sec */
	MARK(100000000ULL	, '@'), /* 100 msec */
	MARK(10000000ULL	, '*'), /* 10 msec */
	MARK(1000000ULL		, '#'), /* 1000 usecs */
	MARK(100000ULL		, '!'), /* 100 usecs */
	MARK(10000ULL		, '+'), /* 10 usecs */
};
#undef MARK

char trace_find_mark(unsigned long long d)
{
	int i;
	int size = ARRAY_SIZE(mark);

	for (i = 0; i < size; i++) {
		if (d > mark[i].val)
			break;
	}

	return (i == size) ? ' ' : mark[i].sym;
}
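
/*
 * Illustrative note: the mark[] table is ordered from largest to
 * smallest threshold, so trace_find_mark() returns the sym of the
 * first entry the delta exceeds. A 1.5 ms delta (1500000 ns) is
 * greater than the 1000000 ns entry, yielding '#'; anything at or
 * below 10 usecs falls off the end and gets a plain space.
 */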

static int
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
	struct trace_array *tr = iter->tr;
	unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE;
	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
	unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
	unsigned long long rel_ts = next_ts - iter->ts;
	struct trace_seq *s = &iter->seq;

	if (in_ns) {
		abs_ts = ns2usecs(abs_ts);
		rel_ts = ns2usecs(rel_ts);
	}

	if (verbose && in_ns) {
		unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
		unsigned long abs_msec = (unsigned long)abs_ts;
		unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
		unsigned long rel_msec = (unsigned long)rel_ts;

		trace_seq_printf(
			s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
			ns2usecs(iter->ts),
			abs_msec, abs_usec,
			rel_msec, rel_usec);

	} else if (verbose && !in_ns) {
		trace_seq_printf(
			s, "[%016llx] %lld (+%lld): ",
			iter->ts, abs_ts, rel_ts);

	} else if (!verbose && in_ns) {
		trace_seq_printf(
			s, " %4lldus%c: ",
			abs_ts,
			trace_find_mark(rel_ts * NSEC_PER_USEC));

	} else { /* !verbose && !in_ns */
		trace_seq_printf(s, " %4lld: ", abs_ts);
	}

	return !trace_seq_has_overflowed(s);
}

int trace_print_context(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t;
	unsigned long secs, usec_rem;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	trace_seq_printf(s, "%16s-%-5d [%03d] ",
			 comm, entry->pid, iter->cpu);

	if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
		trace_print_lat_fmt(s, entry);

	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
		t = ns2usecs(iter->ts);
		usec_rem = do_div(t, USEC_PER_SEC);
		secs = (unsigned long)t;
		trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
	} else
		trace_seq_printf(s, " %12llu: ", iter->ts);

	return !trace_seq_has_overflowed(s);
}

int trace_print_lat_context(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	/* trace_find_next_entry will reset ent_size */
	int ent_size = iter->ent_size;
	struct trace_seq *s = &iter->seq;
	u64 next_ts;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (tr->trace_flags & TRACE_ITER_VERBOSE);

	/* Restore the original ent_size */
	iter->ent_size = ent_size;

	if (!next_entry)
		next_ts = iter->ts;

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		trace_seq_printf(
			s, "%16s %5d %3d %d %08x %08lx ",
			comm, entry->pid, iter->cpu, entry->flags,
			entry->preempt_count, iter->idx);
	} else {
		lat_print_generic(s, entry, iter->cpu);
	}

	lat_print_timestamp(iter, next_ts);

	return !trace_seq_has_overflowed(s);
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
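
/*
 * Illustrative note: trace_print_context() produces the familiar
 * per-event prefix seen in the "trace" file, e.g. (with IRQ_INFO
 * enabled and nanosecond timestamps) something like:
 *
 *	bash-1234  [002] d..1  1234.567890: ...
 *
 * i.e. comm-pid, the CPU, the latency flags from
 * trace_print_lat_fmt(), and the seconds.usecs timestamp, followed
 * by the event's own output.
 */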

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type, otherwise NULL.
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}

static LIST_HEAD(ftrace_event_list);

static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * let's see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we use up all 65 thousand events??? */
	if ((last + 1) > TRACE_EVENT_TYPE_MAX)
		return 0;

	*list = &e->list;
	return last + 1;
}

void trace_event_read_lock(void)
{
	down_read(&trace_event_sem);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_sem);
}

/**
 * register_trace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_trace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_sem);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > TRACE_EVENT_TYPE_MAX) {

			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used */
		if (ftrace_find_event(event->type))
			goto out;
	}

	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(register_trace_event);
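
/*
 * Illustrative sketch only (hypothetical caller, not from this file):
 * a tracer or module that emits its own ring-buffer entries registers
 * print callbacks like so, letting the core assign a dynamic type:
 *
 *	static struct trace_event_functions my_funcs = {
 *		.trace	= my_trace_output,
 *	};
 *	static struct trace_event my_event = {
 *		.funcs	= &my_funcs,
 *	};
 *
 *	int type = register_trace_event(&my_event);
 *
 * A zero return means registration failed. Callbacks left unset
 * (.raw, .hex, .binary above) fall back to trace_nop_print(), and
 * unregister_trace_event() undoes the registration.
 */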

/*
 * Used by module code with the trace_event_sem held for write.
 */
int __unregister_trace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}

/**
 * unregister_trace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_trace_event(struct trace_event *event)
{
	down_write(&trace_event_sem);
	__unregister_trace_event(event);
	up_write(&trace_event_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_trace_event);

/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
				  struct trace_event *event)
{
	trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type);

	return trace_handle_return(&iter->seq);
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	seq_print_ip_sym(s, field->ip, flags);

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		trace_seq_puts(s, " <-");
		seq_print_ip_sym(s, field->parent_ip, flags);
	}

	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "%lx %lx\n",
			 field->ip,
			 field->parent_ip);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD(s, field->ip);
	SEQ_PUT_HEX_FIELD(s, field->parent_ip);

	return trace_handle_return(s);
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD(s, field->ip);
	SEQ_PUT_FIELD(s, field->parent_ip);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_fn_funcs = {
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.funcs		= &trace_fn_funcs,
};

/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;

	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	trace_seq_printf(&iter->seq,
			 " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			 field->prev_pid,
			 field->prev_prio,
			 S, delim,
			 field->next_cpu,
			 field->next_pid,
			 field->next_prio,
			 T, comm);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, "  +");
}

static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			 field->prev_pid,
			 field->prev_prio,
			 S,
			 field->next_cpu,
			 field->next_pid,
			 field->next_prio,
			 T);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}

static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);

	SEQ_PUT_HEX_FIELD(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD(s, S);
	SEQ_PUT_HEX_FIELD(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD(s, field->next_pid);
	SEQ_PUT_HEX_FIELD(s, field->next_prio);
	SEQ_PUT_HEX_FIELD(s, T);

	return trace_handle_return(s);
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}

static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD(s, field->prev_pid);
	SEQ_PUT_FIELD(s, field->prev_prio);
	SEQ_PUT_FIELD(s, field->prev_state);
	SEQ_PUT_FIELD(s, field->next_cpu);
	SEQ_PUT_FIELD(s, field->next_pid);
	SEQ_PUT_FIELD(s, field->next_prio);
	SEQ_PUT_FIELD(s, field->next_state);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_ctx_funcs = {
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.funcs		= &trace_ctx_funcs,
};

static struct trace_event_functions trace_wake_funcs = {
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.funcs		= &trace_wake_funcs,
};

/* TRACE_STACK */

static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	unsigned long *p;
	unsigned long *end;

	trace_assign_type(field, iter->ent);
	end = (unsigned long *)((long)iter->ent + iter->ent_size);

	trace_seq_puts(s, "<stack trace>\n");

	for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {

		if (trace_seq_has_overflowed(s))
			break;

		trace_seq_puts(s, " => ");
		seq_print_ip_sym(s, *p, flags);
		trace_seq_putc(s, '\n');
	}

	return trace_handle_return(s);
}

static struct trace_event_functions trace_stack_funcs = {
	.trace		= trace_stack_print,
};

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.funcs		= &trace_stack_funcs,
};

/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags, struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;
	struct mm_struct *mm = NULL;
	unsigned int i;

	trace_assign_type(field, iter->ent);

	trace_seq_puts(s, "<user stack trace>\n");

	if (tr->trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(field->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = field->caller[i];

		if (ip == ULONG_MAX || trace_seq_has_overflowed(s))
			break;

		trace_seq_puts(s, " => ");

		if (!ip) {
			trace_seq_puts(s, "??");
			trace_seq_putc(s, '\n');
			continue;
		}

		seq_print_user_ip(s, mm, ip, flags);
		trace_seq_putc(s, '\n');
	}

	if (mm)
		mmput(mm);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_user_stack_funcs = {
	.trace		= trace_user_stack_print,
};

static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.funcs		= &trace_user_stack_funcs,
};

/* TRACE_HWLAT */
static enum print_line_t
trace_hwlat_print(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct hwlat_entry *field;

	trace_assign_type(field, entry);

	trace_seq_printf(s, "#%-5u inner/outer(us): %4llu/%-5llu ts:%ld.%09ld",
			 field->seqnum,
			 field->duration,
			 field->outer_duration,
			 field->timestamp.tv_sec,
			 field->timestamp.tv_nsec);

	if (field->nmi_count) {
		/*
		 * The generic sched_clock() is not NMI safe, thus
		 * we only record the count and not the time.
		 */
		if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK))
			trace_seq_printf(s, " nmi-total:%llu",
					 field->nmi_total_ts);
		trace_seq_printf(s, " nmi-count:%u",
				 field->nmi_count);
	}

	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}

static enum print_line_t
trace_hwlat_raw(struct trace_iterator *iter, int flags,
		struct trace_event *event)
{
	struct hwlat_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, "%llu %lld %ld %09ld %u\n",
			 field->duration,
			 field->outer_duration,
			 field->timestamp.tv_sec,
			 field->timestamp.tv_nsec,
			 field->seqnum);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_hwlat_funcs = {
	.trace		= trace_hwlat_print,
	.raw		= trace_hwlat_raw,
};

static struct trace_event trace_hwlat_event = {
	.type		= TRACE_HWLAT,
	.funcs		= &trace_hwlat_funcs,
};

/* TRACE_BPUTS */
static enum print_line_t
trace_bputs_print(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bputs_entry *field;

	trace_assign_type(field, entry);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_puts(s, ": ");
	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}

static enum print_line_t
trace_bputs_raw(struct trace_iterator *iter, int flags,
		struct trace_event *event)
{
	struct bputs_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, ": %lx : ", field->ip);
	trace_seq_puts(s, field->str);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_bputs_funcs = {
	.trace		= trace_bputs_print,
	.raw		= trace_bputs_raw,
};

static struct trace_event trace_bputs_event = {
	.type		= TRACE_BPUTS,
	.funcs		= &trace_bputs_funcs,
};

/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_puts(s, ": ");
	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}

static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags,
		 struct trace_event *event)
{
	struct bprint_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(s, ": %lx : ", field->ip);
	trace_seq_bprintf(s, field->fmt, field->buf);

	return trace_handle_return(s);
}

static struct trace_event_functions trace_bprint_funcs = {
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};

static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.funcs		= &trace_bprint_funcs,
};

/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct print_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	seq_print_ip_sym(s, field->ip, flags);
	trace_seq_printf(s, ": %s", field->buf);

	return trace_handle_return(s);
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	struct print_entry *field;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);

	return trace_handle_return(&iter->seq);
}

static struct trace_event_functions trace_print_funcs = {
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};

static struct trace_event trace_print_event = {
	.type		= TRACE_PRINT,
	.funcs		= &trace_print_funcs,
};

static enum print_line_t trace_raw_data(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct raw_data_entry *field;
	int i;

	trace_assign_type(field, iter->ent);

	trace_seq_printf(&iter->seq, "# %x buf:", field->id);

	for (i = 0; i < iter->ent_size - offsetof(struct raw_data_entry, buf); i++)
		trace_seq_printf(&iter->seq, " %02x",
				 (unsigned char)field->buf[i]);

	trace_seq_putc(&iter->seq, '\n');

	return trace_handle_return(&iter->seq);
}

static struct trace_event_functions trace_raw_data_funcs = {
	.trace		= trace_raw_data,
	.raw		= trace_raw_data,
};

static struct trace_event trace_raw_data_event = {
	.type		= TRACE_RAW_DATA,
	.funcs		= &trace_raw_data_funcs,
};


static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bputs_event,
	&trace_bprint_event,
	&trace_print_event,
	&trace_hwlat_event,
	&trace_raw_data_event,
	NULL
};

__init static int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];

		ret = register_trace_event(event);
		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
			WARN_ON_ONCE(1);
		}
	}

	return 0;
}
early_initcall(init_events);