// SPDX-License-Identifier: GPL-2.0
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
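
/*
 * Editorial note: ring_buffer_consume() is a destructive read; every
 * event it returns is removed from the buffer. A successful pass
 * through trace_test_buffer_cpu() therefore drains that CPU's buffer,
 * which is why each selftest below regenerates events before checking
 * the buffer again.
 */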

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct ftrace_regs *fregs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func = trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func = trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func = trace_selftest_test_probe3_func,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}
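
/*
 * Illustrative sketch (not exercised by the selftests): the bare
 * filter/register/unregister lifecycle that trace_selftest_ops()
 * below drives in several combinations. The callback signature and
 * the ftrace_set_filter()/register_ftrace_function() calls mirror
 * the ones used in this file; the "example" names are hypothetical.
 */
static void __maybe_unused
trace_selftest_example_probe(unsigned long ip, unsigned long pip,
			     struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* Called on entry to every function matched by the ops' filter */
}

static int __maybe_unused trace_selftest_example_lifecycle(void)
{
	static struct ftrace_ops example_ops = {
		.func = trace_selftest_example_probe,
	};
	/* Leading '*' glob copes with PPC64's dot-prefixed symbols */
	char *name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	int ret;

	/* reset == 1: replace any previously set filter with this one */
	ret = ftrace_set_filter(&example_ops, name, strlen(name), 1);
	if (ret)
		return ret;

	ret = register_ftrace_function(&example_ops);
	if (ret)
		return ret;

	DYN_FTRACE_TEST_NAME();	/* the probe fires here */

	return unregister_ftrace_function(&example_ops);
}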

static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* On the first run the main function tracer is already active;
	 * register the main ops ourselves only on later runs. */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;
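
	/*
	 * Filter syntax used next: a leading '!' removes matching
	 * functions from the ops' existing filter (hence reset == 0),
	 * the counterpart of the leading '*' glob used above to match
	 * PPC64's dot-prefixed symbol names.
	 */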
	/* Remove trace function from probe 3 */
	func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
	len1 = strlen(func1_name);

	ftrace_set_filter(&test_probe3, func1_name, len1, 0);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 5)
		goto out_free;

	ret = 0;
out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* called via the passed-in pointer to keep gcc from optimizing the call away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct ftrace_regs *fregs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct ftrace_regs *fregs)
{
	/*
	 * We said we would provide our own recursion protection. By
	 * calling this function again, we should recurse back into this
	 * function and count again. But this only happens if the arch
	 * supports all of the ftrace features and nothing else is using
	 * the function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
	.flags = FTRACE_OPS_FL_RECURSION,
};

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
};

static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	/*
	 * Recursion allows for transitions between contexts,
	 * and may call the callback twice.
	 */
	if (trace_selftest_recursion_cnt != 1 &&
	    trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called once (or twice) (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
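
/*
 * Why the recursion tests above matter: each probe itself calls
 * DYN_FTRACE_TEST_NAME(), which is a traced function, so without
 * protection the callback would re-enter itself indefinitely:
 *
 *   DYN_FTRACE_TEST_NAME() -> probe -> DYN_FTRACE_TEST_NAME()
 *                                   -> probe -> ... (stack overflow)
 *
 * With FTRACE_OPS_FL_RECURSION, the ftrace core breaks that loop;
 * a context transition (e.g. an interrupt arriving mid-call) may
 * still allow one extra invocation, which is why the first test
 * accepts a count of either 1 or 2.
 */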

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func = trace_selftest_test_regs_func,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};

static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
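
/*
 * Illustrative sketch (hypothetical ops, never registered): a callback
 * that wants pt_regs when the architecture can supply them, but still
 * loads where it cannot, would set the "if supported" flag up front
 * instead of retrying on failure as the test above does.
 */
static struct ftrace_ops test_regs_fallback_probe __maybe_unused = {
	.func = trace_selftest_test_regs_func,
	.flags = FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
};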

/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 of a second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

static struct fgraph_ops fgraph_ops __initdata = {
	.entryfunc = &trace_graph_entry_watchdog,
	.retfunc = &trace_graph_return,
};

noinline __noclone static void trace_direct_tramp(void) { }

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;
	char *func_name __maybe_unused;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->array_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		disable_tracing_selftest("recovering from a hang");
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	/* Need to also simulate the tr->reset to remove this fgraph_ops */
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(&fgraph_ops);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
	tracing_reset_online_cpus(&tr->array_buffer);
	set_graph_array(tr);

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/*
	 * Register the direct function together with the graph tracer
	 * and make sure we still get the graph trace.
	 */
	ret = register_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
				     (unsigned long) trace_direct_tramp);
	if (ret)
		goto out;

	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	count = 0;

	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	unregister_ftrace_graph(&fgraph_ops);

	ret = unregister_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
				       (unsigned long) trace_direct_tramp);
	if (ret)
		goto out;

	tracing_start();

	if (!ret && !count) {
		ret = -1;
		goto out;
	}
#endif

	/* Don't test dynamic tracing, the function tracer already did */
out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */
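
/*
 * Note on the latency selftests (irqsoff above, preempt(irqs)off
 * below): the tracer snapshots the longest critical section seen so
 * far into tr->max_buffer by swapping buffers. The tests therefore
 * zero tr->max_latency, open a ~100us critical section with udelay(),
 * and then expect trace_test_buffer(&tr->max_buffer, &count) to find
 * entries in the snapshot rather than in the live buffer.
 */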

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
	struct completion	is_ready;
	int			go;
};

static int trace_wakeup_test_thread(void *data)
{
	/*
	 * Make this a -deadline thread: 100us of runtime every 10ms
	 * period, with the deadline equal to the period.
	 */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Let the test know we are running with our new policy */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;

	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&data.is_ready);

	data.go = 1;
	/* memory barrier is in the wake_up_process() */

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */