1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
260a11774SSteven Rostedt /* Include in trace.c */
360a11774SSteven Rostedt
4ae7e81c0SIngo Molnar #include <uapi/linux/sched/types.h>
59cc26a26SSteven Rostedt #include <linux/stringify.h>
660a11774SSteven Rostedt #include <linux/kthread.h>
7c7aafc54SIngo Molnar #include <linux/delay.h>
85a0e3ad6STejun Heo #include <linux/slab.h>
960a11774SSteven Rostedt
trace_valid_entry(struct trace_entry * entry)10e309b41dSIngo Molnar static inline int trace_valid_entry(struct trace_entry *entry)
1160a11774SSteven Rostedt {
1260a11774SSteven Rostedt switch (entry->type) {
1360a11774SSteven Rostedt case TRACE_FN:
1460a11774SSteven Rostedt case TRACE_CTX:
1557422797SIngo Molnar case TRACE_WAKE:
1606fa75abSSteven Rostedt case TRACE_STACK:
17dd0e545fSSteven Rostedt case TRACE_PRINT:
1880e5ea45SSteven Rostedt case TRACE_BRANCH:
197447dce9SFrederic Weisbecker case TRACE_GRAPH_ENT:
2021e92806SDonglin Peng case TRACE_GRAPH_RETADDR_ENT:
217447dce9SFrederic Weisbecker case TRACE_GRAPH_RET:
2260a11774SSteven Rostedt return 1;
2360a11774SSteven Rostedt }
2460a11774SSteven Rostedt return 0;
2560a11774SSteven Rostedt }
2660a11774SSteven Rostedt
/*
 * Consume every event queued for @cpu in @buf and check that each one
 * has a type the selftests know how to produce (see trace_valid_entry()).
 *
 * Returns 0 when the per-CPU buffer looks sane.  On any sign of
 * corruption, globally disables tracing (tracing_disabled = 1) and
 * returns -1, since the buffer contents can no longer be trusted.
 */
static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is a size of trace_buf_size, if
		 * we loop more than the size, there's something wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
5960a11774SSteven Rostedt
6060a11774SSteven Rostedt /*
6160a11774SSteven Rostedt * Test the trace buffer to see if all the elements
6260a11774SSteven Rostedt * are still sane.
6360a11774SSteven Rostedt */
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 *
 * Counts the entries currently in @buf into *@count (if non-NULL) and
 * validates every entry on every possible CPU via trace_test_buffer_cpu().
 * Returns 0 on success, or the first non-zero per-CPU result.
 *
 * Runs with IRQs off and tr->max_lock held so the max-latency buffer
 * cannot be swapped underneath us while we walk it.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
9760a11774SSteven Rostedt
/* Log a warning when a tracer's init() callback fails during selftest. */
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
103606576ceSSteven Rostedt #ifdef CONFIG_FUNCTION_TRACER
10477a2b37dSSteven Rostedt
10577a2b37dSSteven Rostedt #ifdef CONFIG_DYNAMIC_FTRACE
10677a2b37dSSteven Rostedt
/*
 * Hit counters and ftrace callbacks used to verify that filters route
 * calls to exactly the ops they were set on.  Each callback simply
 * counts how many times it was invoked.
 */

/* Counts hits on the ops filtered to DYN_FTRACE_TEST_NAME only. */
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe1_cnt++;
}

/* Counts hits on the ops filtered to DYN_FTRACE_TEST_NAME2 only. */
static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe2_cnt++;
}

/* Counts hits on the ops filtered to both test functions. */
static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe3_cnt++;
}

/* Counts hits on the trace array's own (unfiltered) ops. */
static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_global_cnt++;
}

/* Counts hits on the dynamically kzalloc'd ops (no filter set). */
static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct ftrace_regs *fregs)
{
	trace_selftest_test_dyn_cnt++;
}
15195950c2eSSteven Rostedt
/* Static ftrace_ops instances used by trace_selftest_ops() below. */
static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
};
16395950c2eSSteven Rostedt
/*
 * Dump all five hit counters (probe1/2/3, global, dyn) as a
 * continuation of the in-progress selftest status line.
 */
static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}
17395950c2eSSteven Rostedt
reset_counts(void)17495950c2eSSteven Rostedt static void reset_counts(void)
17595950c2eSSteven Rostedt {
17695950c2eSSteven Rostedt trace_selftest_test_probe1_cnt = 0;
17795950c2eSSteven Rostedt trace_selftest_test_probe2_cnt = 0;
17895950c2eSSteven Rostedt trace_selftest_test_probe3_cnt = 0;
17995950c2eSSteven Rostedt trace_selftest_test_global_cnt = 0;
18095950c2eSSteven Rostedt trace_selftest_test_dyn_cnt = 0;
18195950c2eSSteven Rostedt }
18295950c2eSSteven Rostedt
/*
 * Exercise multiple ftrace_ops registered at once and verify that
 * filtered callbacks fire only for the functions they were set on.
 *
 * @tr:  trace array whose ops are optionally used as a "global" tracer
 * @cnt: pass number; when > 1, tr->ops is also registered with
 *       trace_selftest_test_global_func to test coexistence with a
 *       main tracing function.
 *
 * The test calls DYN_FTRACE_TEST_NAME()/NAME2() in stages, checking
 * the hit counters after each stage, and also covers a dynamically
 * allocated ops and filter removal ('!' notation).  Returns 0 on
 * success, -1 on any counter mismatch or allocation failure.
 */
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	/* reset=0: append func2 to probe3's existing filter */
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* First time we are running with main function */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	/* Only probe1 and probe3 filter on function 1 */
	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	/* Function 2 should hit probe2 and probe3 */
	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	/* The unfiltered dyn_ops should now fire too */
	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	/* Remove trace function from probe 3 */
	func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
	len1 = strlen(func1_name);

	ftrace_set_filter(&test_probe3, func1_name, len1, 0);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	/* probe3 must no longer see function 1 (count stays at 4) */
	if (trace_selftest_test_probe1_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 5)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
35595950c2eSSteven Rostedt
35677a2b37dSSteven Rostedt /* Test dynamic code modification and ftrace filters */
/*
 * Test dynamic code modification and ftrace filters.
 *
 * @trace: tracer under test (its init/reset callbacks are used)
 * @tr:    trace array to run the tracer on
 * @func:  DYN_FTRACE_TEST_NAME passed by pointer, so the compiler
 *         cannot optimize the call away
 *
 * With a filter set on DYN_FTRACE_TEST_NAME, verifies that nothing is
 * traced until the filtered function is called, and that exactly one
 * entry lands in the buffer afterward.  Finishes by running the
 * trace_selftest_ops() passes with global tracing on and off.
 * Returns 0 on success, non-zero on failure.
 */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to fool gcc from optimizing */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}
447ea701f11SSteven Rostedt
static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct ftrace_regs *fregs)
{
	/*
	 * This callback is registered without the recursion safe flag,
	 * so the ftrace infrastructure itself must supply the recursion
	 * protection.  If it does not, this will crash the kernel!
	 * The counter bound is only a safety net against runaway loops.
	 */
	if (trace_selftest_recursion_cnt++ <= 10)
		DYN_FTRACE_TEST_NAME();
}
463ea701f11SSteven Rostedt
trace_selftest_test_recursion_safe_func(unsigned long ip,unsigned long pip,struct ftrace_ops * op,struct ftrace_regs * fregs)464ea701f11SSteven Rostedt static void trace_selftest_test_recursion_safe_func(unsigned long ip,
465ea701f11SSteven Rostedt unsigned long pip,
466ea701f11SSteven Rostedt struct ftrace_ops *op,
467d19ad077SSteven Rostedt (VMware) struct ftrace_regs *fregs)
468ea701f11SSteven Rostedt {
469ea701f11SSteven Rostedt /*
470ea701f11SSteven Rostedt * We said we would provide our own recursion. By calling
471ea701f11SSteven Rostedt * this function again, we should recurse back into this function
472ea701f11SSteven Rostedt * and count again. But this only happens if the arch supports
473ea701f11SSteven Rostedt * all of ftrace features and nothing else is using the function
474ea701f11SSteven Rostedt * tracing utility.
475ea701f11SSteven Rostedt */
476ea701f11SSteven Rostedt if (trace_selftest_recursion_cnt++)
477ea701f11SSteven Rostedt return;
478ea701f11SSteven Rostedt DYN_FTRACE_TEST_NAME();
479ea701f11SSteven Rostedt }
480ea701f11SSteven Rostedt
/* Ops relying on ftrace's built-in recursion protection. */
static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
	.flags = FTRACE_OPS_FL_RECURSION,
};

/* Ops that handles recursion itself (no RECURSION flag). */
static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
};
489ea701f11SSteven Rostedt
/*
 * Verify ftrace's recursion handling.
 *
 * First registers test_rec_probe (which recurses on purpose without
 * marking itself recursion-safe) and checks that ftrace's protection
 * limits the callback to one or two invocations.  Then registers
 * test_recsafe_probe (recursion handled by the callback itself) and
 * checks it was entered exactly twice.  Returns 0 on success.
 */
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");


	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	/*
	 * Recursion allows for transitions between context,
	 * and may call the callback twice.
	 */
	if (trace_selftest_recursion_cnt != 1 &&
	    trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called once (or twice) (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	/* Pre-set to 1 so the safe callback's recursion bumps it to 2 */
	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
57277a2b37dSSteven Rostedt #else
57377a2b37dSSteven Rostedt # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
574ea701f11SSteven Rostedt # define trace_selftest_function_recursion() ({ 0; })
57577a2b37dSSteven Rostedt #endif /* CONFIG_DYNAMIC_FTRACE */
576e9a22d1fSIngo Molnar
/* Records whether the SAVE_REGS test callback actually received regs. */
static enum {
	TRACE_SELFTEST_REGS_START,	/* callback has not run yet */
	TRACE_SELFTEST_REGS_FOUND,	/* callback ran with pt_regs */
	TRACE_SELFTEST_REGS_NOT_FOUND,	/* callback ran without pt_regs */
} trace_selftest_regs_stat;
582ad97772aSSteven Rostedt
trace_selftest_test_regs_func(unsigned long ip,unsigned long pip,struct ftrace_ops * op,struct ftrace_regs * fregs)583ad97772aSSteven Rostedt static void trace_selftest_test_regs_func(unsigned long ip,
584ad97772aSSteven Rostedt unsigned long pip,
585ad97772aSSteven Rostedt struct ftrace_ops *op,
586d19ad077SSteven Rostedt (VMware) struct ftrace_regs *fregs)
587ad97772aSSteven Rostedt {
588d19ad077SSteven Rostedt (VMware) struct pt_regs *regs = ftrace_get_regs(fregs);
589d19ad077SSteven Rostedt (VMware)
590d19ad077SSteven Rostedt (VMware) if (regs)
591ad97772aSSteven Rostedt trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
592ad97772aSSteven Rostedt else
593ad97772aSSteven Rostedt trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
594ad97772aSSteven Rostedt }
595ad97772aSSteven Rostedt
/* Ops requesting full pt_regs on every callback invocation. */
static struct ftrace_ops test_regs_probe = {
	.func = trace_selftest_test_regs_func,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};
600ad97772aSSteven Rostedt
/*
 * Test FTRACE_OPS_FL_SAVE_REGS handling: with arch support the callback
 * must receive pt_regs; without it, registration must fail unless
 * SAVE_REGS_IF_SUPPORTED is also set, in which case the callback must
 * see NULL regs.
 */
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		/* Retry in best-effort mode: NULL regs are then acceptable. */
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}


	/* Run the traced function once so the callback fires. */
	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	/* Compare what the callback observed against the arch capability. */
	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
685ad97772aSSteven Rostedt
68660a11774SSteven Rostedt /*
68760a11774SSteven Rostedt * Simple verification test of ftrace function tracer.
68860a11774SSteven Rostedt * Enable ftrace, sleep 1/10 second, and then read the trace
68960a11774SSteven Rostedt * buffer to see if all is in order.
69060a11774SSteven Rostedt */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	/* A boot-time filter would skew the results; skip but report PASS. */
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Chain into the dynamic-tracing, recursion and regs sub-tests. */
	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
755606576ceSSteven Rostedt #endif /* CONFIG_FUNCTION_TRACER */
75660a11774SSteven Rostedt
7577447dce9SFrederic Weisbecker
7587447dce9SFrederic Weisbecker #ifdef CONFIG_FUNCTION_GRAPH_TRACER
759cf586b61SFrederic Weisbecker
76047c3c70aSSteven Rostedt (VMware) #ifdef CONFIG_DYNAMIC_FTRACE
76147c3c70aSSteven Rostedt (VMware)
7622f6b884dSSteven Rostedt (Google) #define CHAR_NUMBER 123
76347c3c70aSSteven Rostedt (VMware) #define SHORT_NUMBER 12345
76447c3c70aSSteven Rostedt (VMware) #define WORD_NUMBER 1234567890
76547c3c70aSSteven Rostedt (VMware) #define LONG_NUMBER 1234567890123456789LL
766dd120af2SMasami Hiramatsu (Google) #define ERRSTR_BUFLEN 128
76747c3c70aSSteven Rostedt (VMware)
/*
 * Per-width test fixture for the fgraph shadow-stack storage selftests.
 * One instance exists for each supported storage size (1/2/4/8 bytes).
 */
struct fgraph_fixture {
	struct fgraph_ops gops;			/* graph ops registered for this test */
	int store_size;				/* bytes to reserve per function entry */
	const char *store_type_name;		/* human-readable name for messages */
	char error_str_buf[ERRSTR_BUFLEN];	/* buffer holding a failure message */
	char *error_str;	/* NULL on success, else points at error_str_buf */
};
77547c3c70aSSteven Rostedt (VMware)
store_entry(struct ftrace_graph_ent * trace,struct fgraph_ops * gops,struct ftrace_regs * fregs)77647c3c70aSSteven Rostedt (VMware) static __init int store_entry(struct ftrace_graph_ent *trace,
77741705c42SMasami Hiramatsu (Google) struct fgraph_ops *gops,
77841705c42SMasami Hiramatsu (Google) struct ftrace_regs *fregs)
77947c3c70aSSteven Rostedt (VMware) {
780dd120af2SMasami Hiramatsu (Google) struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
781dd120af2SMasami Hiramatsu (Google) const char *type = fixture->store_type_name;
782dd120af2SMasami Hiramatsu (Google) int size = fixture->store_size;
78347c3c70aSSteven Rostedt (VMware) void *p;
78447c3c70aSSteven Rostedt (VMware)
78547c3c70aSSteven Rostedt (VMware) p = fgraph_reserve_data(gops->idx, size);
78647c3c70aSSteven Rostedt (VMware) if (!p) {
787dd120af2SMasami Hiramatsu (Google) snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
78847c3c70aSSteven Rostedt (VMware) "Failed to reserve %s\n", type);
78947c3c70aSSteven Rostedt (VMware) return 0;
79047c3c70aSSteven Rostedt (VMware) }
79147c3c70aSSteven Rostedt (VMware)
792dd120af2SMasami Hiramatsu (Google) switch (size) {
79347c3c70aSSteven Rostedt (VMware) case 1:
7942f6b884dSSteven Rostedt (Google) *(char *)p = CHAR_NUMBER;
79547c3c70aSSteven Rostedt (VMware) break;
79647c3c70aSSteven Rostedt (VMware) case 2:
79747c3c70aSSteven Rostedt (VMware) *(short *)p = SHORT_NUMBER;
79847c3c70aSSteven Rostedt (VMware) break;
79947c3c70aSSteven Rostedt (VMware) case 4:
80047c3c70aSSteven Rostedt (VMware) *(int *)p = WORD_NUMBER;
80147c3c70aSSteven Rostedt (VMware) break;
80247c3c70aSSteven Rostedt (VMware) case 8:
80347c3c70aSSteven Rostedt (VMware) *(long long *)p = LONG_NUMBER;
80447c3c70aSSteven Rostedt (VMware) break;
80547c3c70aSSteven Rostedt (VMware) }
80647c3c70aSSteven Rostedt (VMware)
80747c3c70aSSteven Rostedt (VMware) return 1;
80847c3c70aSSteven Rostedt (VMware) }
80947c3c70aSSteven Rostedt (VMware)
/*
 * fgraph return handler: retrieve the data reserved at function entry and
 * verify it still holds the marker value written by store_entry().
 */
static __init void store_return(struct ftrace_graph_ret *trace,
				struct fgraph_ops *gops,
				struct ftrace_regs *fregs)
{
	struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops);
	const char *type = fixture->store_type_name;
	long long expect = 0;
	long long found = -1;
	int size;
	char *p;

	p = fgraph_retrieve_data(gops->idx, &size);
	if (!p) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "Failed to retrieve %s\n", type);
		return;
	}
	/* The retrieved slot must be at least as large as what was stored. */
	if (fixture->store_size > size) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "Retrieved size %d is smaller than expected %d\n",
			 size, (int)fixture->store_size);
		return;
	}

	/* Read the marker back using the width that was stored. */
	switch (fixture->store_size) {
	case 1:
		expect = CHAR_NUMBER;
		found = *(char *)p;
		break;
	case 2:
		expect = SHORT_NUMBER;
		found = *(short *)p;
		break;
	case 4:
		expect = WORD_NUMBER;
		found = *(int *)p;
		break;
	case 8:
		expect = LONG_NUMBER;
		found = *(long long *)p;
		break;
	}

	if (found != expect) {
		snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
			 "%s returned not %lld but %lld\n", type, expect, found);
		return;
	}
	/* Success: clear the failure message preloaded by init_fgraph_fixture(). */
	fixture->error_str = NULL;
}
86047c3c70aSSteven Rostedt (VMware)
init_fgraph_fixture(struct fgraph_fixture * fixture)861dd120af2SMasami Hiramatsu (Google) static int __init init_fgraph_fixture(struct fgraph_fixture *fixture)
86247c3c70aSSteven Rostedt (VMware) {
86347c3c70aSSteven Rostedt (VMware) char *func_name;
86447c3c70aSSteven Rostedt (VMware) int len;
86547c3c70aSSteven Rostedt (VMware)
866dd120af2SMasami Hiramatsu (Google) snprintf(fixture->error_str_buf, ERRSTR_BUFLEN,
867dd120af2SMasami Hiramatsu (Google) "Failed to execute storage %s\n", fixture->store_type_name);
868dd120af2SMasami Hiramatsu (Google) fixture->error_str = fixture->error_str_buf;
86947c3c70aSSteven Rostedt (VMware)
87047c3c70aSSteven Rostedt (VMware) func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
87147c3c70aSSteven Rostedt (VMware) len = strlen(func_name);
87247c3c70aSSteven Rostedt (VMware)
873dd120af2SMasami Hiramatsu (Google) return ftrace_set_filter(&fixture->gops.ops, func_name, len, 1);
874dd120af2SMasami Hiramatsu (Google) }
875dd120af2SMasami Hiramatsu (Google)
876dd120af2SMasami Hiramatsu (Google) /* Test fgraph storage for each size */
/* Test fgraph storage for each size */
static int __init test_graph_storage_single(struct fgraph_fixture *fixture)
{
	int size = fixture->store_size;
	int ret;

	pr_cont("PASSED\n");
	pr_info("Testing fgraph storage of %d byte%s: ", size, str_plural(size));

	ret = init_fgraph_fixture(fixture);
	/* -ENODEV means no DYNAMIC_FTRACE: no filtering, but test still runs. */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		return -1;
	}

	ret = register_ftrace_graph(&fixture->gops);
	if (ret) {
		pr_warn("Failed to init store_bytes fgraph tracing\n");
		return -1;
	}

	/* Trigger one entry/return pair through the storage callbacks. */
	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_graph(&fixture->gops);

	/* store_return() clears error_str when the round trip succeeded. */
	if (fixture->error_str) {
		pr_cont("*** %s ***", fixture->error_str);
		return -1;
	}

	return 0;
}
908dd120af2SMasami Hiramatsu (Google)
/* Fixtures covering every supported storage width: 1, 2, 4 and 8 bytes. */
static struct fgraph_fixture store_bytes[4] __initdata = {
	[0] = {
		.gops = {
			.entryfunc		= store_entry,
			.retfunc		= store_return,
		},
		.store_size = 1,
		.store_type_name = "byte",
	},
	[1] = {
		.gops = {
			.entryfunc		= store_entry,
			.retfunc		= store_return,
		},
		.store_size = 2,
		.store_type_name = "short",
	},
	[2] = {
		.gops = {
			.entryfunc		= store_entry,
			.retfunc		= store_return,
		},
		.store_size = 4,
		.store_type_name = "word",
	},
	[3] = {
		.gops = {
			.entryfunc		= store_entry,
			.retfunc		= store_return,
		},
		.store_size = 8,
		.store_type_name = "long long",
	},
};
943dd120af2SMasami Hiramatsu (Google)
/* Register all of the store_bytes fixtures on the same function at once. */
static __init int test_graph_storage_multi(void)
{
	struct fgraph_fixture *fixture;
	bool printed = false;
	int i, j, ret;

	pr_cont("PASSED\n");
	pr_info("Testing multiple fgraph storage on a function: ");

	/* First pass: set up the filter for every fixture. */
	for (i = 0; i < ARRAY_SIZE(store_bytes); i++) {
		fixture = &store_bytes[i];
		ret = init_fgraph_fixture(fixture);
		if (ret && ret != -ENODEV) {
			pr_cont("*Could not set filter* ");
			printed = true;
			goto out2;
		}
	}

	/* Second pass: register them all; j counts how many succeeded. */
	for (j = 0; j < ARRAY_SIZE(store_bytes); j++) {
		fixture = &store_bytes[j];
		ret = register_ftrace_graph(&fixture->gops);
		if (ret) {
			pr_warn("Failed to init store_bytes fgraph tracing\n");
			printed = true;
			goto out1;
		}
	}

	DYN_FTRACE_TEST_NAME();
out1:
	/* Unwind only the first j fixtures that were actually registered. */
	while (--j >= 0) {
		fixture = &store_bytes[j];
		unregister_ftrace_graph(&fixture->gops);

		/* Report only the first recorded error. */
		if (fixture->error_str && !printed) {
			pr_cont("*** %s ***", fixture->error_str);
			printed = true;
		}
	}
out2:
	/* Free the filters set up for the first i fixtures. */
	while (--i >= 0) {
		fixture = &store_bytes[i];
		ftrace_free_filter(&fixture->gops.ops);

		if (fixture->error_str && !printed) {
			pr_cont("*** %s ***", fixture->error_str);
			printed = true;
		}
	}
	return printed ? -1 : 0;
}
996dd120af2SMasami Hiramatsu (Google)
99747c3c70aSSteven Rostedt (VMware) /* Test the storage passed across function_graph entry and return */
test_graph_storage(void)99847c3c70aSSteven Rostedt (VMware) static __init int test_graph_storage(void)
99947c3c70aSSteven Rostedt (VMware) {
100047c3c70aSSteven Rostedt (VMware) int ret;
100147c3c70aSSteven Rostedt (VMware)
1002dd120af2SMasami Hiramatsu (Google) ret = test_graph_storage_single(&store_bytes[0]);
100347c3c70aSSteven Rostedt (VMware) if (ret)
100447c3c70aSSteven Rostedt (VMware) return ret;
1005dd120af2SMasami Hiramatsu (Google) ret = test_graph_storage_single(&store_bytes[1]);
100647c3c70aSSteven Rostedt (VMware) if (ret)
100747c3c70aSSteven Rostedt (VMware) return ret;
1008dd120af2SMasami Hiramatsu (Google) ret = test_graph_storage_single(&store_bytes[2]);
100947c3c70aSSteven Rostedt (VMware) if (ret)
101047c3c70aSSteven Rostedt (VMware) return ret;
1011dd120af2SMasami Hiramatsu (Google) ret = test_graph_storage_single(&store_bytes[3]);
1012dd120af2SMasami Hiramatsu (Google) if (ret)
1013dd120af2SMasami Hiramatsu (Google) return ret;
1014dd120af2SMasami Hiramatsu (Google) ret = test_graph_storage_multi();
101547c3c70aSSteven Rostedt (VMware) if (ret)
101647c3c70aSSteven Rostedt (VMware) return ret;
101747c3c70aSSteven Rostedt (VMware) return 0;
101847c3c70aSSteven Rostedt (VMware) }
101947c3c70aSSteven Rostedt (VMware) #else
/* Without DYNAMIC_FTRACE the storage tests cannot filter; report success. */
static inline int test_graph_storage(void) { return 0; }
102147c3c70aSSteven Rostedt (VMware) #endif /* CONFIG_DYNAMIC_FTRACE */
102247c3c70aSSteven Rostedt (VMware)
1023cf586b61SFrederic Weisbecker /* Maximum number of functions to trace before diagnosing a hang */
1024cf586b61SFrederic Weisbecker #define GRAPH_MAX_FUNC_TEST 100000000
1025cf586b61SFrederic Weisbecker
1026cf586b61SFrederic Weisbecker static unsigned int graph_hang_thresh;
1027cf586b61SFrederic Weisbecker
1028cf586b61SFrederic Weisbecker /* Wrap the real function entry probe to avoid possible hanging */
/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace,
				      struct fgraph_ops *gops,
				      struct ftrace_regs *fregs)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops_enabled()) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	/* No hang detected: defer to the real entry handler. */
	return trace_graph_entry(trace, gops, fregs);
}
1047cf586b61SFrederic Weisbecker
/* fgraph ops for the selftest, using the hang-detecting entry callback. */
static struct fgraph_ops fgraph_ops __initdata  = {
	.entryfunc		= &trace_graph_entry_watchdog,
	.retfunc		= &trace_graph_return,
};
1052688f7089SSteven Rostedt (VMware)
1053c5229a0bSChristophe Leroy #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
105423edf483SFlorent Revest static struct ftrace_ops direct;
10554e341cadSSteven Rostedt (VMware) #endif
1056130c0806SJiri Olsa
10577447dce9SFrederic Weisbecker /*
10587447dce9SFrederic Weisbecker * Pretty much the same than for the function tracer from which the selftest
10597447dce9SFrederic Weisbecker * has been borrowed.
10607447dce9SFrederic Weisbecker */
/*
 * Pretty much the same than for the function tracer from which the selftest
 * has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;
	char *func_name __maybe_unused;

#ifdef CONFIG_DYNAMIC_FTRACE
	/* A boot-time filter would skew the results; skip but report PASS. */
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->array_buffer);
	fgraph_ops.private = tr;
	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		disable_tracing_selftest("recovering from a hang");
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	/* Need to also simulate the tr->reset to remove this fgraph_ops */
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(&fgraph_ops);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	/*
	 * These tests can take some time to run. Make sure on non PREEMPT
	 * kernels, we do not trigger the softlockup detector.
	 */
	cond_resched();

	tracing_reset_online_cpus(&tr->array_buffer);
	fgraph_ops.private = tr;

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/*
	 * Register direct function together with graph tracer
	 * and make sure we get graph trace.
	 */
	ftrace_set_filter_ip(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, 0, 0);
	ret = register_ftrace_direct(&direct,
				     (unsigned long)ftrace_stub_direct_tramp);
	if (ret)
		goto out;

	cond_resched();

	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Call the traced function: it should appear as a graph entry. */
	DYN_FTRACE_TEST_NAME();

	count = 0;

	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	unregister_ftrace_graph(&fgraph_ops);

	ret = unregister_ftrace_direct(&direct,
				       (unsigned long)ftrace_stub_direct_tramp,
				       true);
	if (ret)
		goto out;

	cond_resched();

	tracing_start();

	if (!ret && !count) {
		ret = -1;
		goto out;
	}

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);
#endif

	ret = test_graph_storage();

	/* Don't test dynamic tracing, the function tracer already did */
out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
11917447dce9SFrederic Weisbecker #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
11927447dce9SFrederic Weisbecker
11937447dce9SFrederic Weisbecker
119460a11774SSteven Rostedt #ifdef CONFIG_IRQSOFF_TRACER
/*
 * Verify the irqsoff tracer: create a short irqs-off section and check
 * that both the live buffer and the max-latency snapshot recorded it.
 */
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallels max irqs off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
124160a11774SSteven Rostedt #endif /* CONFIG_IRQSOFF_TRACER */
124260a11774SSteven Rostedt
124360a11774SSteven Rostedt #ifdef CONFIG_PREEMPT_TRACER
124460a11774SSteven Rostedt int
trace_selftest_startup_preemptoff(struct tracer * trace,struct trace_array * tr)124560a11774SSteven Rostedt trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
124660a11774SSteven Rostedt {
12476d9b3fa5SSteven Rostedt (Red Hat) unsigned long save_max = tr->max_latency;
124860a11774SSteven Rostedt unsigned long count;
124960a11774SSteven Rostedt int ret;
125060a11774SSteven Rostedt
1251769c48ebSSteven Rostedt /*
1252f2cc020dSIngo Molnar * Now that the big kernel lock is no longer preemptible,
1253769c48ebSSteven Rostedt * and this is called with the BKL held, it will always
1254769c48ebSSteven Rostedt * fail. If preemption is already disabled, simply
1255769c48ebSSteven Rostedt * pass the test. When the BKL is removed, or becomes
1256769c48ebSSteven Rostedt * preemptible again, we will once again test this,
1257769c48ebSSteven Rostedt * so keep it in.
1258769c48ebSSteven Rostedt */
1259769c48ebSSteven Rostedt if (preempt_count()) {
1260769c48ebSSteven Rostedt printk(KERN_CONT "can not test ... force ");
1261769c48ebSSteven Rostedt return 0;
1262769c48ebSSteven Rostedt }
1263769c48ebSSteven Rostedt
126460a11774SSteven Rostedt /* start the tracing */
1265b6f11df2SArnaldo Carvalho de Melo ret = tracer_init(trace, tr);
12661c80025aSFrederic Weisbecker if (ret) {
12671c80025aSFrederic Weisbecker warn_failed_init_tracer(trace, ret);
12681c80025aSFrederic Weisbecker return ret;
12691c80025aSFrederic Weisbecker }
12701c80025aSFrederic Weisbecker
127160a11774SSteven Rostedt /* reset the max latency */
12726d9b3fa5SSteven Rostedt (Red Hat) tr->max_latency = 0;
127360a11774SSteven Rostedt /* disable preemption for a bit */
127460a11774SSteven Rostedt preempt_disable();
127560a11774SSteven Rostedt udelay(100);
127660a11774SSteven Rostedt preempt_enable();
127749036200SFrederic Weisbecker
127849036200SFrederic Weisbecker /*
127949036200SFrederic Weisbecker * Stop the tracer to avoid a warning subsequent
128049036200SFrederic Weisbecker * to buffer flipping failure because tracing_stop()
128149036200SFrederic Weisbecker * disables the tr and max buffers, making flipping impossible
128249036200SFrederic Weisbecker * in case of parallels max preempt off latencies.
128349036200SFrederic Weisbecker */
128449036200SFrederic Weisbecker trace->stop(tr);
128560a11774SSteven Rostedt /* stop the tracing. */
1286bbf5b1a0SSteven Rostedt tracing_stop();
128760a11774SSteven Rostedt /* check both trace buffers */
12881c5eb448SSteven Rostedt (VMware) ret = trace_test_buffer(&tr->array_buffer, NULL);
128960a11774SSteven Rostedt if (!ret)
129012883efbSSteven Rostedt (Red Hat) ret = trace_test_buffer(&tr->max_buffer, &count);
129160a11774SSteven Rostedt trace->reset(tr);
1292bbf5b1a0SSteven Rostedt tracing_start();
129360a11774SSteven Rostedt
129460a11774SSteven Rostedt if (!ret && !count) {
129560a11774SSteven Rostedt printk(KERN_CONT ".. no entries found ..");
129660a11774SSteven Rostedt ret = -1;
129760a11774SSteven Rostedt }
129860a11774SSteven Rostedt
12996d9b3fa5SSteven Rostedt (Red Hat) tr->max_latency = save_max;
130060a11774SSteven Rostedt
130160a11774SSteven Rostedt return ret;
130260a11774SSteven Rostedt }
130360a11774SSteven Rostedt #endif /* CONFIG_PREEMPT_TRACER */
130460a11774SSteven Rostedt
130560a11774SSteven Rostedt #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
130660a11774SSteven Rostedt int
trace_selftest_startup_preemptirqsoff(struct tracer * trace,struct trace_array * tr)130760a11774SSteven Rostedt trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
130860a11774SSteven Rostedt {
13096d9b3fa5SSteven Rostedt (Red Hat) unsigned long save_max = tr->max_latency;
131060a11774SSteven Rostedt unsigned long count;
131160a11774SSteven Rostedt int ret;
131260a11774SSteven Rostedt
1313769c48ebSSteven Rostedt /*
1314f2cc020dSIngo Molnar * Now that the big kernel lock is no longer preemptible,
1315769c48ebSSteven Rostedt * and this is called with the BKL held, it will always
1316769c48ebSSteven Rostedt * fail. If preemption is already disabled, simply
1317769c48ebSSteven Rostedt * pass the test. When the BKL is removed, or becomes
1318769c48ebSSteven Rostedt * preemptible again, we will once again test this,
1319769c48ebSSteven Rostedt * so keep it in.
1320769c48ebSSteven Rostedt */
1321769c48ebSSteven Rostedt if (preempt_count()) {
1322769c48ebSSteven Rostedt printk(KERN_CONT "can not test ... force ");
1323769c48ebSSteven Rostedt return 0;
1324769c48ebSSteven Rostedt }
1325769c48ebSSteven Rostedt
132660a11774SSteven Rostedt /* start the tracing */
1327b6f11df2SArnaldo Carvalho de Melo ret = tracer_init(trace, tr);
13281c80025aSFrederic Weisbecker if (ret) {
13291c80025aSFrederic Weisbecker warn_failed_init_tracer(trace, ret);
1330ac1d52d0SFrederic Weisbecker goto out_no_start;
13311c80025aSFrederic Weisbecker }
133260a11774SSteven Rostedt
133360a11774SSteven Rostedt /* reset the max latency */
13346d9b3fa5SSteven Rostedt (Red Hat) tr->max_latency = 0;
133560a11774SSteven Rostedt
133660a11774SSteven Rostedt /* disable preemption and interrupts for a bit */
133760a11774SSteven Rostedt preempt_disable();
133860a11774SSteven Rostedt local_irq_disable();
133960a11774SSteven Rostedt udelay(100);
134060a11774SSteven Rostedt preempt_enable();
134160a11774SSteven Rostedt /* reverse the order of preempt vs irqs */
134260a11774SSteven Rostedt local_irq_enable();
134360a11774SSteven Rostedt
134449036200SFrederic Weisbecker /*
134549036200SFrederic Weisbecker * Stop the tracer to avoid a warning subsequent
134649036200SFrederic Weisbecker * to buffer flipping failure because tracing_stop()
134749036200SFrederic Weisbecker * disables the tr and max buffers, making flipping impossible
134849036200SFrederic Weisbecker * in case of parallels max irqs/preempt off latencies.
134949036200SFrederic Weisbecker */
135049036200SFrederic Weisbecker trace->stop(tr);
135160a11774SSteven Rostedt /* stop the tracing. */
1352bbf5b1a0SSteven Rostedt tracing_stop();
135360a11774SSteven Rostedt /* check both trace buffers */
13541c5eb448SSteven Rostedt (VMware) ret = trace_test_buffer(&tr->array_buffer, NULL);
1355ac1d52d0SFrederic Weisbecker if (ret)
135660a11774SSteven Rostedt goto out;
135760a11774SSteven Rostedt
135812883efbSSteven Rostedt (Red Hat) ret = trace_test_buffer(&tr->max_buffer, &count);
1359ac1d52d0SFrederic Weisbecker if (ret)
136060a11774SSteven Rostedt goto out;
136160a11774SSteven Rostedt
136260a11774SSteven Rostedt if (!ret && !count) {
136360a11774SSteven Rostedt printk(KERN_CONT ".. no entries found ..");
136460a11774SSteven Rostedt ret = -1;
136560a11774SSteven Rostedt goto out;
136660a11774SSteven Rostedt }
136760a11774SSteven Rostedt
136860a11774SSteven Rostedt /* do the test by disabling interrupts first this time */
13696d9b3fa5SSteven Rostedt (Red Hat) tr->max_latency = 0;
1370bbf5b1a0SSteven Rostedt tracing_start();
137149036200SFrederic Weisbecker trace->start(tr);
137249036200SFrederic Weisbecker
137360a11774SSteven Rostedt preempt_disable();
137460a11774SSteven Rostedt local_irq_disable();
137560a11774SSteven Rostedt udelay(100);
137660a11774SSteven Rostedt preempt_enable();
137760a11774SSteven Rostedt /* reverse the order of preempt vs irqs */
137860a11774SSteven Rostedt local_irq_enable();
137960a11774SSteven Rostedt
138049036200SFrederic Weisbecker trace->stop(tr);
138160a11774SSteven Rostedt /* stop the tracing. */
1382bbf5b1a0SSteven Rostedt tracing_stop();
138360a11774SSteven Rostedt /* check both trace buffers */
13841c5eb448SSteven Rostedt (VMware) ret = trace_test_buffer(&tr->array_buffer, NULL);
138560a11774SSteven Rostedt if (ret)
138660a11774SSteven Rostedt goto out;
138760a11774SSteven Rostedt
138812883efbSSteven Rostedt (Red Hat) ret = trace_test_buffer(&tr->max_buffer, &count);
138960a11774SSteven Rostedt
139060a11774SSteven Rostedt if (!ret && !count) {
139160a11774SSteven Rostedt printk(KERN_CONT ".. no entries found ..");
139260a11774SSteven Rostedt ret = -1;
139360a11774SSteven Rostedt goto out;
139460a11774SSteven Rostedt }
139560a11774SSteven Rostedt
139660a11774SSteven Rostedt out:
1397bbf5b1a0SSteven Rostedt tracing_start();
1398ac1d52d0SFrederic Weisbecker out_no_start:
1399ac1d52d0SFrederic Weisbecker trace->reset(tr);
14006d9b3fa5SSteven Rostedt (Red Hat) tr->max_latency = save_max;
140160a11774SSteven Rostedt
140260a11774SSteven Rostedt return ret;
140360a11774SSteven Rostedt }
140460a11774SSteven Rostedt #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
140560a11774SSteven Rostedt
1406fb1b6d8bSSteven Noonan #ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/*
	 * The nop tracer records nothing, so there is nothing to
	 * verify: unconditionally report success.
	 */
	return 0;
}
1413fb1b6d8bSSteven Noonan #endif
1414fb1b6d8bSSteven Noonan
141560a11774SSteven Rostedt #ifdef CONFIG_SCHED_TRACER
1416addff1feSSteven Rostedt
/*
 * Rendezvous data shared between trace_selftest_startup_wakeup() and
 * the -deadline kthread it spawns (trace_wakeup_test_thread).
 */
struct wakeup_test_data {
	struct completion is_ready;	/* thread signals this at each handshake point */
	int go;				/* test sets this to release the thread from its sleep loop */
};
1421addff1feSSteven Rostedt
/*
 * Kthread body for the wakeup-latency selftest: promote itself to
 * SCHED_DEADLINE, then sleep until the test pokes data->go so that the
 * test can measure the wakeup latency of a -deadline task.
 */
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Make it know we have a new prio */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	/* state is set before re-checking ->go so a wakeup is never lost */
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	/* second handshake: tell the test we actually woke up */
	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
145960a11774SSteven Rostedt int
trace_selftest_startup_wakeup(struct tracer * trace,struct trace_array * tr)146060a11774SSteven Rostedt trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
146160a11774SSteven Rostedt {
14626d9b3fa5SSteven Rostedt (Red Hat) unsigned long save_max = tr->max_latency;
146360a11774SSteven Rostedt struct task_struct *p;
1464addff1feSSteven Rostedt struct wakeup_test_data data;
146560a11774SSteven Rostedt unsigned long count;
146660a11774SSteven Rostedt int ret;
146760a11774SSteven Rostedt
1468addff1feSSteven Rostedt memset(&data, 0, sizeof(data));
1469addff1feSSteven Rostedt
1470addff1feSSteven Rostedt init_completion(&data.is_ready);
147160a11774SSteven Rostedt
1472af6ace76SDario Faggioli /* create a -deadline thread */
1473addff1feSSteven Rostedt p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
1474c7aafc54SIngo Molnar if (IS_ERR(p)) {
147560a11774SSteven Rostedt printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
147660a11774SSteven Rostedt return -1;
147760a11774SSteven Rostedt }
147860a11774SSteven Rostedt
1479af6ace76SDario Faggioli /* make sure the thread is running at -deadline policy */
1480addff1feSSteven Rostedt wait_for_completion(&data.is_ready);
148160a11774SSteven Rostedt
148260a11774SSteven Rostedt /* start the tracing */
1483b6f11df2SArnaldo Carvalho de Melo ret = tracer_init(trace, tr);
14841c80025aSFrederic Weisbecker if (ret) {
14851c80025aSFrederic Weisbecker warn_failed_init_tracer(trace, ret);
14861c80025aSFrederic Weisbecker return ret;
14871c80025aSFrederic Weisbecker }
14881c80025aSFrederic Weisbecker
148960a11774SSteven Rostedt /* reset the max latency */
14906d9b3fa5SSteven Rostedt (Red Hat) tr->max_latency = 0;
149160a11774SSteven Rostedt
1492cd9626e9SPeter Zijlstra while (task_is_runnable(p)) {
149360a11774SSteven Rostedt /*
1494af6ace76SDario Faggioli * Sleep to make sure the -deadline thread is asleep too.
14953c18c10bSSteven Rostedt * On virtual machines we can't rely on timings,
14963c18c10bSSteven Rostedt * but we want to make sure this test still works.
149760a11774SSteven Rostedt */
14983c18c10bSSteven Rostedt msleep(100);
14993c18c10bSSteven Rostedt }
15003c18c10bSSteven Rostedt
1501addff1feSSteven Rostedt init_completion(&data.is_ready);
1502addff1feSSteven Rostedt
1503addff1feSSteven Rostedt data.go = 1;
1504addff1feSSteven Rostedt /* memory barrier is in the wake_up_process() */
150560a11774SSteven Rostedt
150660a11774SSteven Rostedt wake_up_process(p);
150760a11774SSteven Rostedt
15083c18c10bSSteven Rostedt /* Wait for the task to wake up */
1509addff1feSSteven Rostedt wait_for_completion(&data.is_ready);
15105aa60c60SSteven Rostedt
151160a11774SSteven Rostedt /* stop the tracing. */
1512bbf5b1a0SSteven Rostedt tracing_stop();
151360a11774SSteven Rostedt /* check both trace buffers */
15141c5eb448SSteven Rostedt (VMware) ret = trace_test_buffer(&tr->array_buffer, NULL);
151560a11774SSteven Rostedt if (!ret)
151612883efbSSteven Rostedt (Red Hat) ret = trace_test_buffer(&tr->max_buffer, &count);
151760a11774SSteven Rostedt
151860a11774SSteven Rostedt
151960a11774SSteven Rostedt trace->reset(tr);
1520bbf5b1a0SSteven Rostedt tracing_start();
152160a11774SSteven Rostedt
15226d9b3fa5SSteven Rostedt (Red Hat) tr->max_latency = save_max;
152360a11774SSteven Rostedt
152460a11774SSteven Rostedt /* kill the thread */
152560a11774SSteven Rostedt kthread_stop(p);
152660a11774SSteven Rostedt
152760a11774SSteven Rostedt if (!ret && !count) {
152860a11774SSteven Rostedt printk(KERN_CONT ".. no entries found ..");
152960a11774SSteven Rostedt ret = -1;
153060a11774SSteven Rostedt }
153160a11774SSteven Rostedt
153260a11774SSteven Rostedt return ret;
153360a11774SSteven Rostedt }
153460a11774SSteven Rostedt #endif /* CONFIG_SCHED_TRACER */
153560a11774SSteven Rostedt
153680e5ea45SSteven Rostedt #ifdef CONFIG_BRANCH_TRACER
153780e5ea45SSteven Rostedt int
trace_selftest_startup_branch(struct tracer * trace,struct trace_array * tr)153880e5ea45SSteven Rostedt trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
153980e5ea45SSteven Rostedt {
154080e5ea45SSteven Rostedt unsigned long count;
154180e5ea45SSteven Rostedt int ret;
154280e5ea45SSteven Rostedt
154380e5ea45SSteven Rostedt /* start the tracing */
1544b6f11df2SArnaldo Carvalho de Melo ret = tracer_init(trace, tr);
15451c80025aSFrederic Weisbecker if (ret) {
15461c80025aSFrederic Weisbecker warn_failed_init_tracer(trace, ret);
15471c80025aSFrederic Weisbecker return ret;
15481c80025aSFrederic Weisbecker }
15491c80025aSFrederic Weisbecker
155080e5ea45SSteven Rostedt /* Sleep for a 1/10 of a second */
155180e5ea45SSteven Rostedt msleep(100);
155280e5ea45SSteven Rostedt /* stop the tracing. */
155380e5ea45SSteven Rostedt tracing_stop();
155480e5ea45SSteven Rostedt /* check the trace buffer */
15551c5eb448SSteven Rostedt (VMware) ret = trace_test_buffer(&tr->array_buffer, &count);
155680e5ea45SSteven Rostedt trace->reset(tr);
155780e5ea45SSteven Rostedt tracing_start();
155880e5ea45SSteven Rostedt
1559d2ef7c2fSWenji Huang if (!ret && !count) {
1560d2ef7c2fSWenji Huang printk(KERN_CONT ".. no entries found ..");
1561d2ef7c2fSWenji Huang ret = -1;
1562d2ef7c2fSWenji Huang }
1563d2ef7c2fSWenji Huang
156480e5ea45SSteven Rostedt return ret;
156580e5ea45SSteven Rostedt }
156680e5ea45SSteven Rostedt #endif /* CONFIG_BRANCH_TRACER */
1567321bb5e1SMarkus Metzger
1568