1bb730b58SSteven Rostedt (VMware) // SPDX-License-Identifier: GPL-2.0
2bac5fb97STom Zanussi
3bc0c38d1SSteven Rostedt #ifndef _LINUX_KERNEL_TRACE_H
4bc0c38d1SSteven Rostedt #define _LINUX_KERNEL_TRACE_H
5bc0c38d1SSteven Rostedt
6bc0c38d1SSteven Rostedt #include <linux/fs.h>
760063497SArun Sharma #include <linux/atomic.h>
8bc0c38d1SSteven Rostedt #include <linux/sched.h>
9bc0c38d1SSteven Rostedt #include <linux/clocksource.h>
103928a8a2SSteven Rostedt #include <linux/ring_buffer.h>
11bd8ac686SPekka Paalanen #include <linux/mmiotrace.h>
124e5292eaSSteven Rostedt #include <linux/tracepoint.h>
13d13744cdSFrédéric Weisbecker #include <linux/ftrace.h>
142d6425afSDivya Indi #include <linux/trace.h>
1524f1e32cSFrederic Weisbecker #include <linux/hw_breakpoint.h>
169504504cSSteven Rostedt #include <linux/trace_seq.h>
17af658dcaSSteven Rostedt (Red Hat) #include <linux/trace_events.h>
1852f5684cSGideon Israel Dsouza #include <linux/compiler.h>
1960f1d5e3SMasami Hiramatsu #include <linux/glob.h>
2091edde2eSViktor Rosendahl (BMW) #include <linux/irq_work.h>
2191edde2eSViktor Rosendahl (BMW) #include <linux/workqueue.h>
2242d120e2STom Zanussi #include <linux/ctype.h>
23a358f406STanner Love #include <linux/once_lite.h>
249504504cSSteven Rostedt #include <linux/ftrace_regs.h>
256954e415SSteven Rostedt (VMware)
266954e415SSteven Rostedt (VMware) #include "pid_list.h"
2712ab74eeSSteven Rostedt
28d6e59579SYang Yang #ifdef CONFIG_FTRACE_SYSCALLS
2912ab74eeSSteven Rostedt #include <asm/unistd.h> /* For NR_syscalls */
3012ab74eeSSteven Rostedt #include <asm/syscall.h> /* some archs define it here */
3112ab74eeSSteven Rostedt #endif
3221ccc9cdSSteven Rostedt (VMware)
3321ccc9cdSSteven Rostedt (VMware) #define TRACE_MODE_WRITE 0640
3421ccc9cdSSteven Rostedt (VMware) #define TRACE_MODE_READ 0440
3572829bc3SThomas Gleixner
3672829bc3SThomas Gleixner enum trace_type {
3772829bc3SThomas Gleixner __TRACE_FIRST_TYPE = 0,
3872829bc3SThomas Gleixner
3972829bc3SThomas Gleixner TRACE_FN,
4072829bc3SThomas Gleixner TRACE_CTX,
4172829bc3SThomas Gleixner TRACE_WAKE,
42dd0e545fSSteven Rostedt TRACE_STACK,
4348ead020SFrederic Weisbecker TRACE_PRINT,
44bd8ac686SPekka Paalanen TRACE_BPRINT,
45bd8ac686SPekka Paalanen TRACE_MMIO_RW,
469f029e83SSteven Rostedt TRACE_MMIO_MAP,
47287b6e68SFrederic Weisbecker TRACE_BRANCH,
48287b6e68SFrederic Weisbecker TRACE_GRAPH_RET,
4921e92806SDonglin Peng TRACE_GRAPH_ENT,
5002b67518STörök Edwin TRACE_GRAPH_RETADDR_ENT,
51c71a8961SArnaldo Carvalho de Melo TRACE_USER_STACK,
5209ae7234SSteven Rostedt (Red Hat) TRACE_BLK,
53e7c15cd8SSteven Rostedt (Red Hat) TRACE_BPUTS,
54bce29ac9SDaniel Bristot de Oliveira TRACE_HWLAT,
55a955d7eaSDaniel Bristot de Oliveira TRACE_OSNOISE,
56fa32e855SSteven Rostedt TRACE_TIMERLAT,
57f689e4f2SYordan Karadzhov (VMware) TRACE_RAW_DATA,
5872829bc3SThomas Gleixner TRACE_FUNC_REPEATS,
59f0868d1eSSteven Rostedt
6072829bc3SThomas Gleixner __TRACE_LAST_TYPE,
6172829bc3SThomas Gleixner };
6236994e58SFrederic Weisbecker
630a1c49dbSSteven Rostedt
640a1c49dbSSteven Rostedt #undef __field
6536994e58SFrederic Weisbecker #define __field(type, item) type item;
6604ae87a5SPeter Zijlstra
6704ae87a5SPeter Zijlstra #undef __field_fn
6804ae87a5SPeter Zijlstra #define __field_fn(type, item) type item;
69d7315094SSteven Rostedt
70d7315094SSteven Rostedt #undef __field_struct
71d7315094SSteven Rostedt #define __field_struct(type, item) __field(type, item)
72d7315094SSteven Rostedt
73d7315094SSteven Rostedt #undef __field_desc
74d7315094SSteven Rostedt #define __field_desc(type, container, item)
754649079bSSteven Rostedt (VMware)
764649079bSSteven Rostedt (VMware) #undef __field_packed
774649079bSSteven Rostedt (VMware) #define __field_packed(type, container, item)
780a1c49dbSSteven Rostedt
790a1c49dbSSteven Rostedt #undef __array
800a1c49dbSSteven Rostedt #define __array(type, item, size) type item[size];
81e7186af7SSteven Rostedt (Google)
82e7186af7SSteven Rostedt (Google) /*
83e7186af7SSteven Rostedt (Google) * For backward compatibility, older user space expects to see the
84e7186af7SSteven Rostedt (Google) * kernel_stack event with a fixed size caller field. But today the fix
85e7186af7SSteven Rostedt (Google) * size is ignored by the kernel, and the real structure is dynamic.
86e7186af7SSteven Rostedt (Google) * Expose to user space: "unsigned long caller[8];" but the real structure
87e7186af7SSteven Rostedt (Google) * will be "unsigned long caller[] __counted_by(size)"
88e7186af7SSteven Rostedt (Google) */
89e7186af7SSteven Rostedt (Google) #undef __stack_array
90e7186af7SSteven Rostedt (Google) #define __stack_array(type, item, size, field) type item[] __counted_by(field);
91d7315094SSteven Rostedt
92d7315094SSteven Rostedt #undef __array_desc
93d7315094SSteven Rostedt #define __array_desc(type, container, item, size)
940a1c49dbSSteven Rostedt
950a1c49dbSSteven Rostedt #undef __dynamic_array
960a1c49dbSSteven Rostedt #define __dynamic_array(type, item) type item[];
9755de2c0bSMasami Hiramatsu
9855de2c0bSMasami Hiramatsu #undef __rel_dynamic_array
9955de2c0bSMasami Hiramatsu #define __rel_dynamic_array(type, item) type item[];
1000a1c49dbSSteven Rostedt
1010a1c49dbSSteven Rostedt #undef F_STRUCT
1020a1c49dbSSteven Rostedt #define F_STRUCT(args...) args
1030a1c49dbSSteven Rostedt
10404ae87a5SPeter Zijlstra #undef FTRACE_ENTRY
1050a1c49dbSSteven Rostedt #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \
1060a1c49dbSSteven Rostedt struct struct_name { \
1070a1c49dbSSteven Rostedt struct trace_entry ent; \
1080a1c49dbSSteven Rostedt tstruct \
1090a1c49dbSSteven Rostedt }
1100a1c49dbSSteven Rostedt
11104ae87a5SPeter Zijlstra #undef FTRACE_ENTRY_DUP
1120a1c49dbSSteven Rostedt #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
113e59a0bffSJiri Olsa
11404ae87a5SPeter Zijlstra #undef FTRACE_ENTRY_REG
11504ae87a5SPeter Zijlstra #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \
116e59a0bffSJiri Olsa FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
117a4a551b8SNamhyung Kim
11804ae87a5SPeter Zijlstra #undef FTRACE_ENTRY_PACKED
11904ae87a5SPeter Zijlstra #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print) \
120a4a551b8SNamhyung Kim FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
1210a1c49dbSSteven Rostedt
1220a1c49dbSSteven Rostedt #include "trace_entries.h"
12324589e3aSSteven Rostedt (VMware)
124a358f406STanner Love /* Use this for memory failure errors */
125a358f406STanner Love #define MEM_FAIL(condition, fmt, ...) \
12624589e3aSSteven Rostedt (VMware) DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)
1274ed8f337SMasami Hiramatsu (Google)
1284ed8f337SMasami Hiramatsu (Google) #define FAULT_STRING "(fault)"
12900cf3d67SSteven Rostedt (Google)
13000cf3d67SSteven Rostedt (Google) #define HIST_STACKTRACE_DEPTH 16
13100cf3d67SSteven Rostedt (Google) #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
13200cf3d67SSteven Rostedt (Google) #define HIST_STACKTRACE_SKIP 5
1330a1c49dbSSteven Rostedt
1340a1c49dbSSteven Rostedt /*
1350a1c49dbSSteven Rostedt * syscalls are special, and need special handling, this is why
1360a1c49dbSSteven Rostedt * they are not included in trace_entries.h
137bed1ffcaSFrederic Weisbecker */
138bed1ffcaSFrederic Weisbecker struct syscall_trace_enter {
139bed1ffcaSFrederic Weisbecker struct trace_entry ent;
140bed1ffcaSFrederic Weisbecker int nr;
141bed1ffcaSFrederic Weisbecker unsigned long args[];
142bed1ffcaSFrederic Weisbecker };
143bed1ffcaSFrederic Weisbecker
144bed1ffcaSFrederic Weisbecker struct syscall_trace_exit {
145bed1ffcaSFrederic Weisbecker struct trace_entry ent;
14699df5a6aSTom Zanussi int nr;
147bed1ffcaSFrederic Weisbecker long ret;
148bed1ffcaSFrederic Weisbecker };
14993ccae7aSMasami Hiramatsu
150413d37d1SMasami Hiramatsu struct kprobe_trace_entry_head {
151413d37d1SMasami Hiramatsu struct trace_entry ent;
152413d37d1SMasami Hiramatsu unsigned long ip;
153413d37d1SMasami Hiramatsu };
1547491e2c4STzvetomir Stoyanov (VMware)
1557491e2c4STzvetomir Stoyanov (VMware) struct eprobe_trace_entry_head {
1567491e2c4STzvetomir Stoyanov (VMware) struct trace_entry ent;
1577491e2c4STzvetomir Stoyanov (VMware) };
15893ccae7aSMasami Hiramatsu
159413d37d1SMasami Hiramatsu struct kretprobe_trace_entry_head {
160413d37d1SMasami Hiramatsu struct trace_entry ent;
161413d37d1SMasami Hiramatsu unsigned long func;
162413d37d1SMasami Hiramatsu unsigned long ret_ip;
163413d37d1SMasami Hiramatsu };
164334e5519SMasami Hiramatsu (Google)
165334e5519SMasami Hiramatsu (Google) struct fentry_trace_entry_head {
166334e5519SMasami Hiramatsu (Google) struct trace_entry ent;
167334e5519SMasami Hiramatsu (Google) unsigned long ip;
168334e5519SMasami Hiramatsu (Google) };
169334e5519SMasami Hiramatsu (Google)
170334e5519SMasami Hiramatsu (Google) struct fexit_trace_entry_head {
171334e5519SMasami Hiramatsu (Google) struct trace_entry ent;
172334e5519SMasami Hiramatsu (Google) unsigned long func;
173334e5519SMasami Hiramatsu (Google) unsigned long ret_ip;
174334e5519SMasami Hiramatsu (Google) };
1755bf9a1eeSPekka Paalanen
176bc0c38d1SSteven Rostedt #define TRACE_BUF_SIZE 1024
1772b6080f2SSteven Rostedt
1782b6080f2SSteven Rostedt struct trace_array;
179bc0c38d1SSteven Rostedt
180bc0c38d1SSteven Rostedt /*
181bc0c38d1SSteven Rostedt * The CPU trace array - it consists of thousands of trace entries
182bc0c38d1SSteven Rostedt * plus some other descriptor data: (for example which task started
183bc0c38d1SSteven Rostedt * the trace, etc.)
184bc0c38d1SSteven Rostedt */
185bc0c38d1SSteven Rostedt struct trace_array_cpu {
1862cadf913SSteven Rostedt atomic_t disabled;
1874e3c3333SIngo Molnar void *buffer_page; /* ring buffer spare */
188438ced17SVaibhav Nagarnaik
189bc0c38d1SSteven Rostedt unsigned long entries;
190bc0c38d1SSteven Rostedt unsigned long saved_latency;
191bc0c38d1SSteven Rostedt unsigned long critical_start;
192bc0c38d1SSteven Rostedt unsigned long critical_end;
193bc0c38d1SSteven Rostedt unsigned long critical_sequence;
194bc0c38d1SSteven Rostedt unsigned long nice;
195bc0c38d1SSteven Rostedt unsigned long policy;
1962f26ebd5SSteven Rostedt unsigned long rt_priority;
197a5a1d1c2SThomas Gleixner unsigned long skipped_entries;
198bc0c38d1SSteven Rostedt u64 preempt_timestamp;
199d20b92abSEric W. Biederman pid_t pid;
200bc0c38d1SSteven Rostedt kuid_t uid;
2013fdaf80fSSteven Rostedt (Red Hat) char comm[TASK_COMM_LEN];
202345ddcc8SSteven Rostedt (Red Hat)
203717e3f5eSSteven Rostedt (VMware) #ifdef CONFIG_FUNCTION_TRACER
204345ddcc8SSteven Rostedt (Red Hat) int ftrace_ignore_pid;
205717e3f5eSSteven Rostedt (VMware) #endif
206bc0c38d1SSteven Rostedt bool ignore_pid;
207bc0c38d1SSteven Rostedt };
2082b6080f2SSteven Rostedt
20937aea98bSSteven Rostedt (Red Hat) struct tracer;
2102b6080f2SSteven Rostedt struct trace_option_dentry;
2111c5eb448SSteven Rostedt (VMware)
21212883efbSSteven Rostedt (Red Hat) struct array_buffer {
21313292494SSteven Rostedt (VMware) struct trace_array *tr;
21412883efbSSteven Rostedt (Red Hat) struct trace_buffer *buffer;
215a5a1d1c2SThomas Gleixner struct trace_array_cpu __percpu *data;
21612883efbSSteven Rostedt (Red Hat) u64 time_start;
21712883efbSSteven Rostedt (Red Hat) int cpu;
21812883efbSSteven Rostedt (Red Hat) };
2199a38a885SSteven Rostedt (Red Hat)
2209a38a885SSteven Rostedt (Red Hat) #define TRACE_FLAGS_MAX_SIZE 32
22137aea98bSSteven Rostedt (Red Hat)
22237aea98bSSteven Rostedt (Red Hat) struct trace_options {
22337aea98bSSteven Rostedt (Red Hat) struct tracer *tracer;
22437aea98bSSteven Rostedt (Red Hat) struct trace_option_dentry *topts;
22537aea98bSSteven Rostedt (Red Hat) };
2266954e415SSteven Rostedt (VMware)
2276954e415SSteven Rostedt (VMware) struct trace_pid_list *trace_pid_list_alloc(void);
2286954e415SSteven Rostedt (VMware) void trace_pid_list_free(struct trace_pid_list *pid_list);
2296954e415SSteven Rostedt (VMware) bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid);
2306954e415SSteven Rostedt (VMware) int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid);
2316954e415SSteven Rostedt (VMware) int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid);
2326954e415SSteven Rostedt (VMware) int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid);
2336954e415SSteven Rostedt (VMware) int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid,
23449090107SSteven Rostedt (Red Hat) unsigned int *next);
23527683626SSteven Rostedt (VMware)
23627683626SSteven Rostedt (VMware) enum {
23727683626SSteven Rostedt (VMware) TRACE_PIDS = BIT(0),
23827683626SSteven Rostedt (VMware) TRACE_NO_PIDS = BIT(1),
23927683626SSteven Rostedt (VMware) };
24027683626SSteven Rostedt (VMware)
pid_type_enabled(int type,struct trace_pid_list * pid_list,struct trace_pid_list * no_pid_list)24127683626SSteven Rostedt (VMware) static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
24227683626SSteven Rostedt (VMware) struct trace_pid_list *no_pid_list)
24327683626SSteven Rostedt (VMware) {
24427683626SSteven Rostedt (VMware) /* Return true if the pid list in type has pids */
24527683626SSteven Rostedt (VMware) return ((type & TRACE_PIDS) && pid_list) ||
24627683626SSteven Rostedt (VMware) ((type & TRACE_NO_PIDS) && no_pid_list);
24727683626SSteven Rostedt (VMware) }
24827683626SSteven Rostedt (VMware)
still_need_pid_events(int type,struct trace_pid_list * pid_list,struct trace_pid_list * no_pid_list)24927683626SSteven Rostedt (VMware) static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
25027683626SSteven Rostedt (VMware) struct trace_pid_list *no_pid_list)
25127683626SSteven Rostedt (VMware) {
25227683626SSteven Rostedt (VMware) /*
25327683626SSteven Rostedt (VMware) * Turning off what is in @type, return true if the "other"
25427683626SSteven Rostedt (VMware) * pid list, still has pids in it.
25527683626SSteven Rostedt (VMware) */
25627683626SSteven Rostedt (VMware) return (!(type & TRACE_PIDS) && pid_list) ||
25727683626SSteven Rostedt (VMware) (!(type & TRACE_NO_PIDS) && no_pid_list);
25827683626SSteven Rostedt (VMware) }
259a35873a0STom Zanussi
260a35873a0STom Zanussi typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
261a35873a0STom Zanussi
262a35873a0STom Zanussi /**
263a35873a0STom Zanussi * struct cond_snapshot - conditional snapshot data and callback
264a35873a0STom Zanussi *
265a35873a0STom Zanussi * The cond_snapshot structure encapsulates a callback function and
266a35873a0STom Zanussi * data associated with the snapshot for a given tracing instance.
267a35873a0STom Zanussi *
268a35873a0STom Zanussi * When a snapshot is taken conditionally, by invoking
269a35873a0STom Zanussi * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
270a35873a0STom Zanussi * passed in turn to the cond_snapshot.update() function. That data
271499f7bb0SQiujun Huang * can be compared by the update() implementation with the cond_data
272a35873a0STom Zanussi * contained within the struct cond_snapshot instance associated with
273a35873a0STom Zanussi * the trace_array. Because the tr->max_lock is held throughout the
274a35873a0STom Zanussi * update() call, the update() function can directly retrieve the
275a35873a0STom Zanussi * cond_snapshot and cond_data associated with the per-instance
276a35873a0STom Zanussi * snapshot associated with the trace_array.
277a35873a0STom Zanussi *
278a35873a0STom Zanussi * The cond_snapshot.update() implementation can save data to be
279a35873a0STom Zanussi * associated with the snapshot if it decides to, and returns 'true'
280a35873a0STom Zanussi * in that case, or it returns 'false' if the conditional snapshot
281a35873a0STom Zanussi * shouldn't be taken.
282a35873a0STom Zanussi *
283a35873a0STom Zanussi * The cond_snapshot instance is created and associated with the
284a35873a0STom Zanussi * user-defined cond_data by tracing_cond_snapshot_enable().
285a35873a0STom Zanussi * Likewise, the cond_snapshot instance is destroyed and is no longer
286a35873a0STom Zanussi * associated with the trace instance by
287a35873a0STom Zanussi * tracing_cond_snapshot_disable().
288a35873a0STom Zanussi *
289a35873a0STom Zanussi * The method below is required.
290a35873a0STom Zanussi *
291a35873a0STom Zanussi * @update: When a conditional snapshot is invoked, the update()
292a35873a0STom Zanussi * callback function is invoked with the tr->max_lock held. The
293a35873a0STom Zanussi * update() implementation signals whether or not to actually
294a35873a0STom Zanussi * take the snapshot, by returning 'true' if so, 'false' if no
295a35873a0STom Zanussi * snapshot should be taken. Because the max_lock is held for
296499f7bb0SQiujun Huang * the duration of update(), the implementation is safe to
297a35873a0STom Zanussi * directly retrieved and save any implementation data it needs
298a35873a0STom Zanussi * to in association with the snapshot.
299a35873a0STom Zanussi */
300a35873a0STom Zanussi struct cond_snapshot {
301a35873a0STom Zanussi void *cond_data;
302a35873a0STom Zanussi cond_update_fn_t update;
303a35873a0STom Zanussi };
304bc0c38d1SSteven Rostedt
30520344c54SYordan Karadzhov (VMware) /*
30620344c54SYordan Karadzhov (VMware) * struct trace_func_repeats - used to keep track of the consecutive
30720344c54SYordan Karadzhov (VMware) * (on the same CPU) calls of a single function.
30820344c54SYordan Karadzhov (VMware) */
30920344c54SYordan Karadzhov (VMware) struct trace_func_repeats {
31020344c54SYordan Karadzhov (VMware) unsigned long ip;
31120344c54SYordan Karadzhov (VMware) unsigned long parent_ip;
31220344c54SYordan Karadzhov (VMware) unsigned long count;
31320344c54SYordan Karadzhov (VMware) u64 ts_last_call;
31420344c54SYordan Karadzhov (VMware) };
31535a380ddSMasami Hiramatsu (Google)
31635a380ddSMasami Hiramatsu (Google) struct trace_module_delta {
31735a380ddSMasami Hiramatsu (Google) struct rcu_head rcu;
31835a380ddSMasami Hiramatsu (Google) long delta[];
31935a380ddSMasami Hiramatsu (Google) };
32020344c54SYordan Karadzhov (VMware)
321bc0c38d1SSteven Rostedt /*
322bc0c38d1SSteven Rostedt * The trace array - an array of per-CPU trace arrays. This is the
323bc0c38d1SSteven Rostedt * highest level data structure that individual tracers deal with.
324bc0c38d1SSteven Rostedt * They have on/off state as well:
325bc0c38d1SSteven Rostedt */
326ae63b31eSSteven Rostedt struct trace_array {
327277ba044SSteven Rostedt struct list_head list;
3281c5eb448SSteven Rostedt (VMware) char *name;
32912883efbSSteven Rostedt (Red Hat) struct array_buffer array_buffer;
33012883efbSSteven Rostedt (Red Hat) #ifdef CONFIG_TRACER_MAX_TRACE
33112883efbSSteven Rostedt (Red Hat) /*
33212883efbSSteven Rostedt (Red Hat) * The max_buffer is used to snapshot the trace when a maximum
33312883efbSSteven Rostedt (Red Hat) * latency is reached, or when the user initiates a snapshot.
33412883efbSSteven Rostedt (Red Hat) * Some tracers will use this to store a maximum trace while
33512883efbSSteven Rostedt (Red Hat) * it continues examining live traces.
3361c5eb448SSteven Rostedt (VMware) *
33712883efbSSteven Rostedt (Red Hat) * The buffers for the max_buffer are set up the same as the array_buffer
3381c5eb448SSteven Rostedt (VMware) * When a snapshot is taken, the buffer of the max_buffer is swapped
3391c5eb448SSteven Rostedt (VMware) * with the buffer of the array_buffer and the buffers are reset for
34012883efbSSteven Rostedt (Red Hat) * the array_buffer so the tracing can continue.
3411c5eb448SSteven Rostedt (VMware) */
34245ad21caSSteven Rostedt (Red Hat) struct array_buffer max_buffer;
343180e4e39SVincent Donnefort bool allocated_snapshot;
344180e4e39SVincent Donnefort spinlock_t snapshot_trigger_lock;
3456d9b3fa5SSteven Rostedt (Red Hat) unsigned int snapshot;
34691edde2eSViktor Rosendahl (BMW) unsigned long max_latency;
34791edde2eSViktor Rosendahl (BMW) #ifdef CONFIG_FSNOTIFY
34891edde2eSViktor Rosendahl (BMW) struct dentry *d_max_latency;
34991edde2eSViktor Rosendahl (BMW) struct work_struct fsnotify_work;
35091edde2eSViktor Rosendahl (BMW) struct irq_work fsnotify_irqwork;
35112883efbSSteven Rostedt (Red Hat) #endif
3522124de79SSteven Rostedt (Google) #endif
3532124de79SSteven Rostedt (Google) /* The below is for memory mapped ring buffer */
3542124de79SSteven Rostedt (Google) unsigned int mapped;
3552124de79SSteven Rostedt (Google) unsigned long range_addr_start;
356fb6d0323SMasami Hiramatsu (Google) unsigned long range_addr_size;
3577a1d1e4bSSteven Rostedt (Google) char *range_name;
35835a380ddSMasami Hiramatsu (Google) long text_delta;
359b6533482SSteven Rostedt struct trace_module_delta *module_delta;
360b6533482SSteven Rostedt void *scratch; /* pointer in persistent memory */
361b6533482SSteven Rostedt int scratch_size;
362b6533482SSteven Rostedt
3632124de79SSteven Rostedt (Google) int buffer_disabled;
36449090107SSteven Rostedt (Red Hat)
36527683626SSteven Rostedt (VMware) struct trace_pid_list __rcu *filtered_pids;
3660b9b12c1SSteven Rostedt (Red Hat) struct trace_pid_list __rcu *filtered_no_pids;
3670b9b12c1SSteven Rostedt (Red Hat) /*
3680b9b12c1SSteven Rostedt (Red Hat) * max_lock is used to protect the swapping of buffers
3690b9b12c1SSteven Rostedt (Red Hat) * when taking a max snapshot. The buffers themselves are
3700b9b12c1SSteven Rostedt (Red Hat) * protected by per_cpu spinlocks. But the action of the swap
3710b9b12c1SSteven Rostedt (Red Hat) * needs its own lock.
3720b9b12c1SSteven Rostedt (Red Hat) *
3730b9b12c1SSteven Rostedt (Red Hat) * This is defined as a arch_spinlock_t in order to help
3740b9b12c1SSteven Rostedt (Red Hat) * with performance when lockdep debugging is enabled.
3750b9b12c1SSteven Rostedt (Red Hat) *
3760b9b12c1SSteven Rostedt (Red Hat) * It is also used in other places outside the update_max_tr
3770b9b12c1SSteven Rostedt (Red Hat) * so it needs to be defined outside of the
3780b9b12c1SSteven Rostedt (Red Hat) * CONFIG_TRACER_MAX_TRACE.
3790b9b12c1SSteven Rostedt (Red Hat) */
38012ab74eeSSteven Rostedt arch_spinlock_t max_lock;
38112ab74eeSSteven Rostedt #ifdef CONFIG_FTRACE_SYSCALLS
38212ab74eeSSteven Rostedt int sys_refcount_enter;
3837f1d2f82SSteven Rostedt (Red Hat) int sys_refcount_exit;
3847f1d2f82SSteven Rostedt (Red Hat) struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
38512ab74eeSSteven Rostedt struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
3862b6080f2SSteven Rostedt #endif
3872b6080f2SSteven Rostedt int stop_count;
38837aea98bSSteven Rostedt (Red Hat) int clock_id;
389065e63f9SSteven Rostedt (VMware) int nr_topts;
39003329f99SSteven Rostedt (VMware) bool clear_trace;
3912f754e77SSteven Rostedt (VMware) int buffer_percent;
3922b6080f2SSteven Rostedt unsigned int n_err_log_entries;
393983f938aSSteven Rostedt (Red Hat) struct tracer *current_trace;
3949a38a885SSteven Rostedt (Red Hat) unsigned int trace_flags;
395ae63b31eSSteven Rostedt unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
3962b6080f2SSteven Rostedt unsigned int flags;
397d2356997SSteven Rostedt (Google) raw_spinlock_t start_lock;
3982f754e77SSteven Rostedt (VMware) const char *system_names;
399ae63b31eSSteven Rostedt struct list_head err_log;
4002b6080f2SSteven Rostedt struct dentry *dir;
4012b6080f2SSteven Rostedt struct dentry *options;
4025790b1fbSSteven Rostedt (Google) struct dentry *percpu_dir;
40337aea98bSSteven Rostedt (Red Hat) struct eventfs_inode *event_dir;
404ae63b31eSSteven Rostedt struct trace_options *topts;
405ae63b31eSSteven Rostedt struct list_head systems;
4063dd80953SSteven Rostedt (VMware) struct list_head events;
407ccfe9e42SAlexander Z Lam struct trace_event_file *trace_marker_file;
408c2489bb7SZheng Yejian cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
409c2489bb7SZheng Yejian /* one per_cpu trace_pipe can be opened by only one user */
410a695cb58SSteven Rostedt (Red Hat) cpumask_var_t pipe_cpumask;
4117ef282e0SSteven Rostedt (VMware) int ref;
412b355247dSSteven Rostedt int trace_ref;
413b355247dSSteven Rostedt #ifdef CONFIG_MODULES
414b355247dSSteven Rostedt struct list_head mod_events;
415f20a5806SSteven Rostedt (Red Hat) #endif
416f20a5806SSteven Rostedt (Red Hat) #ifdef CONFIG_FUNCTION_TRACER
417345ddcc8SSteven Rostedt (Red Hat) struct ftrace_ops *ops;
418b3b1e6edSSteven Rostedt (VMware) struct trace_pid_list __rcu *function_pids;
41926dda563SSteven Rostedt (VMware) struct trace_pid_list __rcu *function_no_pids;
42026dda563SSteven Rostedt (VMware) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
42126dda563SSteven Rostedt (VMware) struct fgraph_ops *gops;
42204ec7bb6SSteven Rostedt (VMware) #endif
423673feb9dSSteven Rostedt (VMware) #ifdef CONFIG_DYNAMIC_FTRACE
42404ec7bb6SSteven Rostedt (VMware) /* All of these are protected by the ftrace_lock */
425673feb9dSSteven Rostedt (VMware) struct list_head func_probes;
426673feb9dSSteven Rostedt (VMware) struct list_head mod_trace;
42704ec7bb6SSteven Rostedt (VMware) struct list_head mod_notrace;
428f20a5806SSteven Rostedt (Red Hat) #endif
429f20a5806SSteven Rostedt (Red Hat) /* function tracing enabled */
430f20a5806SSteven Rostedt (Red Hat) int function_enabled;
431b94bc80dSSteven Rostedt (VMware) #endif
432067fe038STom Zanussi int no_filter_buffering_ref;
433a35873a0STom Zanussi struct list_head hist_vars;
434a35873a0STom Zanussi #ifdef CONFIG_TRACER_SNAPSHOT
435a35873a0STom Zanussi struct cond_snapshot *cond_snapshot;
43620344c54SYordan Karadzhov (VMware) #endif
437a1f157c7SZheng Yejian struct trace_func_repeats __percpu *last_func_repeats;
438a1f157c7SZheng Yejian /*
439a1f157c7SZheng Yejian * On boot up, the ring buffer is set to the minimum size, so that
440a1f157c7SZheng Yejian * we do not waste memory on systems that are not using tracing.
441a1f157c7SZheng Yejian */
442bc0c38d1SSteven Rostedt bool ring_buffer_expanded;
443bc0c38d1SSteven Rostedt };
444ae63b31eSSteven Rostedt
4459b7bdf6fSSteven Rostedt enum {
4469b7bdf6fSSteven Rostedt TRACE_ARRAY_FL_GLOBAL = BIT(0),
447bcba8d4dSSteven Rostedt TRACE_ARRAY_FL_BOOT = BIT(1),
448bcba8d4dSSteven Rostedt TRACE_ARRAY_FL_LAST_BOOT = BIT(2),
449*34ea8fa0SSteven Rostedt TRACE_ARRAY_FL_MOD_INIT = BIT(3),
450ae63b31eSSteven Rostedt TRACE_ARRAY_FL_MEMMAP = BIT(4),
451ae63b31eSSteven Rostedt };
452b355247dSSteven Rostedt
453b355247dSSteven Rostedt #ifdef CONFIG_MODULES
454b355247dSSteven Rostedt bool module_exists(const char *module);
455b355247dSSteven Rostedt #else
module_exists(const char * module)456b355247dSSteven Rostedt static inline bool module_exists(const char *module)
457b355247dSSteven Rostedt {
458b355247dSSteven Rostedt return false;
459b355247dSSteven Rostedt }
460b355247dSSteven Rostedt #endif
461ae63b31eSSteven Rostedt
462ae63b31eSSteven Rostedt extern struct list_head ftrace_trace_arrays;
463a8227415SAlexander Z Lam
464a8227415SAlexander Z Lam extern struct mutex trace_types_lock;
4658e2e2fa4SSteven Rostedt (Red Hat)
4668530dec6SSteven Rostedt (VMware) extern int trace_array_get(struct trace_array *tr);
46789c95fceSTom Zanussi extern int tracing_check_open_get_tr(struct trace_array *tr);
46889c95fceSTom Zanussi extern struct trace_array *trace_array_find(const char *instance);
4698e2e2fa4SSteven Rostedt (Red Hat) extern struct trace_array *trace_array_find_get(const char *instance);
470d8279bfcSSteven Rostedt (VMware)
471b94bc80dSSteven Rostedt (VMware) extern u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe);
472d71bd34dSTom Zanussi extern int tracing_set_filter_buffering(struct trace_array *tr, bool set);
47300b41452STom Zanussi extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
474860f9f6bSTom Zanussi
475860f9f6bSTom Zanussi extern bool trace_clock_in_ns(struct trace_array *tr);
47635a380ddSMasami Hiramatsu (Google)
47735a380ddSMasami Hiramatsu (Google) extern unsigned long trace_adjust_address(struct trace_array *tr, unsigned long addr);
478ae63b31eSSteven Rostedt
479ae63b31eSSteven Rostedt /*
480ae63b31eSSteven Rostedt * The global tracer (top) should be the first trace array added,
481ae63b31eSSteven Rostedt * but we check the flag anyway.
482ae63b31eSSteven Rostedt */
top_trace_array(void)483ae63b31eSSteven Rostedt static inline struct trace_array *top_trace_array(void)
484ae63b31eSSteven Rostedt {
485ae63b31eSSteven Rostedt struct trace_array *tr;
486da9c3413SSteven Rostedt (Red Hat)
487dc81e5e3SYoshihiro YUNOMAE if (list_empty(&ftrace_trace_arrays))
488dc81e5e3SYoshihiro YUNOMAE return NULL;
489ae63b31eSSteven Rostedt
490ae63b31eSSteven Rostedt tr = list_entry(ftrace_trace_arrays.prev,
491ae63b31eSSteven Rostedt typeof(*tr), list);
492ae63b31eSSteven Rostedt WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
493ae63b31eSSteven Rostedt return tr;
494ae63b31eSSteven Rostedt }
4957104f300SSteven Rostedt
4967104f300SSteven Rostedt #define FTRACE_CMP_TYPE(var, type) \
4977104f300SSteven Rostedt __builtin_types_compatible_p(typeof(var), type *)
4987104f300SSteven Rostedt
4997104f300SSteven Rostedt #undef IF_ASSIGN
5007104f300SSteven Rostedt #define IF_ASSIGN(var, entry, etype, id) \
5017104f300SSteven Rostedt if (FTRACE_CMP_TYPE(var, etype)) { \
502968e5170SNathan Chancellor var = (typeof(var))(entry); \
5037104f300SSteven Rostedt WARN_ON(id != 0 && (entry)->type != id); \
5047104f300SSteven Rostedt break; \
5057104f300SSteven Rostedt }
5067104f300SSteven Rostedt
5077104f300SSteven Rostedt /* Will cause compile errors if type is not found. */
5087104f300SSteven Rostedt extern void __ftrace_bad_type(void);
5097104f300SSteven Rostedt
5107104f300SSteven Rostedt /*
5117104f300SSteven Rostedt * The trace_assign_type is a verifier that the entry type is
5127104f300SSteven Rostedt * the same as the type being assigned. To add new types simply
5137104f300SSteven Rostedt * add a line with the following format:
5147104f300SSteven Rostedt *
5157104f300SSteven Rostedt * IF_ASSIGN(var, ent, type, id);
5167104f300SSteven Rostedt *
5177104f300SSteven Rostedt * Where "type" is the trace type that includes the trace_entry
5187104f300SSteven Rostedt * as the "ent" item. And "id" is the trace identifier that is
5197104f300SSteven Rostedt * used in the trace_type enum.
5207104f300SSteven Rostedt *
5217104f300SSteven Rostedt * If the type can have more than one id, then use zero.
5227104f300SSteven Rostedt */
5237104f300SSteven Rostedt #define trace_assign_type(var, ent) \
5247104f300SSteven Rostedt do { \
5257104f300SSteven Rostedt IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
5267104f300SSteven Rostedt IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
52702b67518STörök Edwin IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
5287104f300SSteven Rostedt IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
52948ead020SFrederic Weisbecker IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
53009ae7234SSteven Rostedt (Red Hat) IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
531e7c15cd8SSteven Rostedt (Red Hat) IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
532bce29ac9SDaniel Bristot de Oliveira IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \
533a955d7eaSDaniel Bristot de Oliveira IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
534fa32e855SSteven Rostedt IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
5357104f300SSteven Rostedt IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
5367104f300SSteven Rostedt IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
5377104f300SSteven Rostedt TRACE_MMIO_RW); \
5387104f300SSteven Rostedt IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
5399f029e83SSteven Rostedt TRACE_MMIO_MAP); \
540287b6e68SFrederic Weisbecker IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
541287b6e68SFrederic Weisbecker IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
54221e92806SDonglin Peng TRACE_GRAPH_ENT); \
54321e92806SDonglin Peng IF_ASSIGN(var, ent, struct fgraph_retaddr_ent_entry,\
544287b6e68SFrederic Weisbecker TRACE_GRAPH_RETADDR_ENT); \
545287b6e68SFrederic Weisbecker IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
546f689e4f2SYordan Karadzhov (VMware) TRACE_GRAPH_RET); \
547f689e4f2SYordan Karadzhov (VMware) IF_ASSIGN(var, ent, struct func_repeats_entry, \
5487104f300SSteven Rostedt TRACE_FUNC_REPEATS); \
5497104f300SSteven Rostedt __ftrace_bad_type(); \
5502c4f035fSFrederic Weisbecker } while (0)
551adf9f195SFrederic Weisbecker
/*
 * An option specific to a tracer.  This is a boolean value whose
 * current state is kept in the corresponding bit of the flags value
 * in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};
561adf9f195SFrederic Weisbecker
/*
 * The set of specific options for a tracer.  Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;	/* current value of all the option bits */
	struct tracer_opt	*opts;	/* the options (see TRACER_OPT) */
	struct tracer		*trace;	/* tracer these flags belong to */
};
571adf9f195SFrederic Weisbecker
572adf9f195SFrederic Weisbecker /* Makes more easy to define a tracer opt */
573adf9f195SFrederic Weisbecker #define TRACER_OPT(s, b) .name = #s, .bit = b
574034939b6SFrederic Weisbecker
57541d9c0beSSteven Rostedt (Red Hat)
/*
 * Associates one tracer option with the tracefs dentry that exposes
 * it for a given trace_array instance.
 */
struct trace_option_dentry {
	struct tracer_opt		*opt;	/* option this file controls */
	struct tracer_flags		*flags;	/* flag set the option lives in */
	struct trace_array		*tr;	/* instance the file belongs to */
	struct dentry			*entry;	/* tracefs dentry of the file */
};
5826eaaa5d5SFrederic Weisbecker
/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flag_changed: called before a flag is changed; return non-zero to
 *	reject the change
 * @next: link in the list of registered tracers
 * @flags: your private flags
 * @enabled: non-zero while this tracer is in use
 * @print_max: true if output should come from the max (snapshot) buffer
 * @allow_instances: true if this tracer may be used in an instance
 * @use_max_tr: true if this tracer uses the snapshot buffer (max latency)
 * @noboot: true if tracer cannot be enabled via kernel command line
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
	/* True if tracer cannot be enabled in kernel param */
	bool			noboot;
};
6466d158a81SSteven Rostedt
6476d158a81SSteven Rostedt static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator * iter,int cpu)6486d158a81SSteven Rostedt trace_buffer_iter(struct trace_iterator *iter, int cpu)
649f26808baSyuan linyu {
6506d158a81SSteven Rostedt return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
6516d158a81SSteven Rostedt }
652b6f11df2SArnaldo Carvalho de Melo
6539036990dSSteven Rostedt int tracer_init(struct tracer *t, struct trace_array *tr);
6541c5eb448SSteven Rostedt (VMware) int tracing_is_enabled(void);
655873c642fSSteven Rostedt (Red Hat) void tracing_reset_online_cpus(struct array_buffer *buf);
656e18eb878SSteven Rostedt (Google) void tracing_reset_all_online_cpus(void);
657bc0c38d1SSteven Rostedt void tracing_reset_all_online_cpus_unlocked(void);
658aa07d71fSSteven Rostedt (VMware) int tracing_open_generic(struct inode *inode, struct file *filp);
659139f8400STzvetomir Stoyanov (VMware) int tracing_open_generic_tr(struct inode *inode, struct file *filp);
660f5ca233eSSteven Rostedt (Google) int tracing_release_generic_tr(struct inode *inode, struct file *file);
661f5ca233eSSteven Rostedt (Google) int tracing_open_file_tr(struct inode *inode, struct file *filp);
6621cc111b9SZheng Yejian int tracing_release_file_tr(struct inode *inode, struct file *filp);
6632e86421dSGeyslan G. Bem int tracing_single_release_file_tr(struct inode *inode, struct file *filp);
664ec573508SSteven Rostedt (VMware) bool tracing_is_disabled(void);
6652290f2c5SSteven Rostedt (VMware) bool tracer_tracing_is_on(struct trace_array *tr);
6662290f2c5SSteven Rostedt (VMware) void tracer_tracing_on(struct trace_array *tr);
6675452af66SFrederic Weisbecker void tracer_tracing_off(struct trace_array *tr);
668f4ae40a6SAl Viro struct dentry *trace_create_file(const char *name,
6695452af66SFrederic Weisbecker umode_t mode,
6705452af66SFrederic Weisbecker struct dentry *parent,
6715452af66SFrederic Weisbecker void *data,
6725452af66SFrederic Weisbecker const struct file_operations *fops);
67322c36b18SWei Yang
674d618b3e6SIngo Molnar int tracing_init_dentry(void);
67551a763ddSArnaldo Carvalho de Melo
67651a763ddSArnaldo Carvalho de Melo struct ring_buffer_event;
677e77405adSSteven Rostedt
67813292494SSteven Rostedt (VMware) struct ring_buffer_event *
6797a4f453bSLi Zefan trace_buffer_lock_reserve(struct trace_buffer *buffer,
68051a763ddSArnaldo Carvalho de Melo int type,
68136590c50SSebastian Andrzej Siewior unsigned long len,
68251a763ddSArnaldo Carvalho de Melo unsigned int trace_ctx);
683950032ffSSteven Rostedt (Google)
684950032ffSSteven Rostedt (Google) int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu);
68545dcd8b8SPekka Paalanen
68645dcd8b8SPekka Paalanen struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
687c4a8e8beSFrederic Weisbecker struct trace_array_cpu *data);
688c4a8e8beSFrederic Weisbecker
689c4a8e8beSFrederic Weisbecker struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
690c4a8e8beSFrederic Weisbecker int *ent_cpu, u64 *ent_ts);
69113292494SSteven Rostedt (VMware)
6927ffbd48dSSteven Rostedt void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
6937ffbd48dSSteven Rostedt struct ring_buffer_event *event);
6949a6944feSSteven Rostedt (VMware)
695efbbdaa2SMasami Hiramatsu bool trace_is_tracepoint_string(const char *str);
69680a76994SSteven Rostedt (Google) const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
697afd2627fSSteven Rostedt char *trace_iter_expand_format(struct trace_iterator *iter);
698efbbdaa2SMasami Hiramatsu bool ignore_event(struct trace_iterator *iter);
699955b61e5SJason Wessel
700955b61e5SJason Wessel int trace_empty(struct trace_iterator *iter);
701955b61e5SJason Wessel
702955b61e5SJason Wessel void *trace_find_next_entry_inc(struct trace_iterator *iter);
703955b61e5SJason Wessel
704955b61e5SJason Wessel void trace_init_global_iter(struct trace_iterator *iter);
705955b61e5SJason Wessel
706955b61e5SJason Wessel void tracing_iter_reset(struct trace_iterator *iter, int cpu);
707ecffc8a8SDouglas Anderson
708ecffc8a8SDouglas Anderson unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
709ecffc8a8SDouglas Anderson unsigned long trace_total_entries(struct trace_array *tr);
7106fb44b71SSteven Rostedt
7116fb44b71SSteven Rostedt void trace_function(struct trace_array *tr,
7126fb44b71SSteven Rostedt unsigned long ip,
71336590c50SSebastian Andrzej Siewior unsigned long parent_ip,
7140a772620SJiri Olsa unsigned int trace_ctx,
7150a772620SJiri Olsa struct ftrace_regs *regs);
7160a772620SJiri Olsa void trace_graph_function(struct trace_array *tr,
71736590c50SSebastian Andrzej Siewior unsigned long ip,
7187e9a49efSJiri Olsa unsigned long parent_ip,
71962b915f1SJiri Olsa unsigned int trace_ctx);
72062b915f1SJiri Olsa void trace_latency_header(struct seq_file *m);
721bc0c38d1SSteven Rostedt void trace_default_header(struct seq_file *m);
7222ca8c112SMasami Hiramatsu (Google) void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
7232ca8c112SMasami Hiramatsu (Google)
72441705c42SMasami Hiramatsu (Google) void trace_graph_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops,
72541705c42SMasami Hiramatsu (Google) struct ftrace_regs *fregs);
7261e9b51c2SMarkus Metzger int trace_graph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops,
72741bc8144SSteven Rostedt struct ftrace_regs *fregs);
72841bc8144SSteven Rostedt
729d914ba37SJoel Fernandes void tracing_start_cmdline_record(void);
730d914ba37SJoel Fernandes void tracing_stop_cmdline_record(void);
731d914ba37SJoel Fernandes void tracing_start_tgid_record(void);
732bc0c38d1SSteven Rostedt void tracing_stop_tgid_record(void);
733b5130b1eSCarsten Emde
734955b61e5SJason Wessel int register_tracer(struct tracer *type);
735098c879eSSteven Rostedt (Red Hat) int is_tracing_stopped(void);
736098c879eSSteven Rostedt (Red Hat)
737955b61e5SJason Wessel loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
738955b61e5SJason Wessel
739955b61e5SJason Wessel extern cpumask_var_t __read_mostly tracing_buffer_mask;
740955b61e5SJason Wessel
741bc0c38d1SSteven Rostedt #define for_each_tracing_cpu(cpu) \
742bc0c38d1SSteven Rostedt for_each_cpu(cpu, tracing_buffer_mask)
743bc0c38d1SSteven Rostedt
7440e950173STim Bird extern unsigned long nsecs_to_usecs(unsigned long nsecs);
7450e950173STim Bird
7464e267db1SSteven Rostedt extern unsigned long tracing_thresh;
74776c813e2SSteven Rostedt (Red Hat)
7484e267db1SSteven Rostedt /* PID filtering */
7494e267db1SSteven Rostedt
7504e267db1SSteven Rostedt bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
751b3b1e6edSSteven Rostedt (VMware) pid_t search_pid);
7524e267db1SSteven Rostedt bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
7534e267db1SSteven Rostedt struct trace_pid_list *filtered_no_pids,
7544e267db1SSteven Rostedt struct task_struct *task);
7554e267db1SSteven Rostedt void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
7565cc8976bSSteven Rostedt (Red Hat) struct task_struct *self,
7575cc8976bSSteven Rostedt (Red Hat) struct task_struct *task);
7585cc8976bSSteven Rostedt (Red Hat) void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
75976c813e2SSteven Rostedt (Red Hat) void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
76076c813e2SSteven Rostedt (Red Hat) int trace_pid_show(struct seq_file *m, void *v);
76176c813e2SSteven Rostedt (Red Hat) int trace_pid_write(struct trace_pid_list *filtered_pids,
7624e267db1SSteven Rostedt struct trace_pid_list **new_pid_list,
7635d4a9dbaSSteven Rostedt const char __user *ubuf, size_t cnt);
764a35873a0STom Zanussi
765a35873a0STom Zanussi #ifdef CONFIG_TRACER_MAX_TRACE
766bc0c38d1SSteven Rostedt void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
767bc0c38d1SSteven Rostedt void *cond_data);
768bc0c38d1SSteven Rostedt void update_max_tr_single(struct trace_array *tr,
769e25e43a4SMasami Hiramatsu (Google) struct task_struct *tsk, int cpu);
7706880c987SSteven Rostedt (VMware)
7716880c987SSteven Rostedt (VMware) #ifdef CONFIG_FSNOTIFY
772e25e43a4SMasami Hiramatsu (Google) #define LATENCY_FS_NOTIFY
77391edde2eSViktor Rosendahl (BMW) #endif
7746880c987SSteven Rostedt (VMware) #endif /* CONFIG_TRACER_MAX_TRACE */
77591edde2eSViktor Rosendahl (BMW)
77691edde2eSViktor Rosendahl (BMW) #ifdef LATENCY_FS_NOTIFY
77736b3615dSSteven Rostedt (VMware) void latency_fsnotify(struct trace_array *tr);
77891edde2eSViktor Rosendahl (BMW) #else
/* No-op stub used when LATENCY_FS_NOTIFY is not configured */
static inline void latency_fsnotify(struct trace_array *tr) { }
780c0a0d0d3SFrederic Weisbecker #endif
78136590c50SSebastian Andrzej Siewior
782c0a0d0d3SFrederic Weisbecker #ifdef CONFIG_STACKTRACE
78336590c50SSebastian Andrzej Siewior void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
78436590c50SSebastian Andrzej Siewior #else
/* No-op stub used when CONFIG_STACKTRACE is not enabled */
static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
				 int skip)
{
}
789c658797fSYordan Karadzhov (VMware) #endif /* CONFIG_STACKTRACE */
790c658797fSYordan Karadzhov (VMware)
791c658797fSYordan Karadzhov (VMware) void trace_last_func_repeats(struct trace_array *tr,
792c658797fSYordan Karadzhov (VMware) struct trace_func_repeats *last_info,
793a5a1d1c2SThomas Gleixner unsigned int trace_ctx);
794bc0c38d1SSteven Rostedt
7954ca53085SSteven Rostedt extern u64 ftrace_now(int cpu);
796d914ba37SJoel Fernandes
797c37775d5SSteven Rostedt extern void trace_find_cmdline(int pid, char comm[]);
798f7d48cbdSIngo Molnar extern int trace_find_tgid(int pid);
7995f3719f6SSteven Rostedt extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
8005f3719f6SSteven Rostedt
801bc0c38d1SSteven Rostedt extern int trace_events_enabled(struct trace_array *tr, const char *system);
802bc0c38d1SSteven Rostedt
803da537f0aSSteven Rostedt (VMware) #ifdef CONFIG_DYNAMIC_FTRACE
804da537f0aSSteven Rostedt (VMware) extern unsigned long ftrace_update_tot_cnt;
80536a367b8SSteven Rostedt extern unsigned long ftrace_number_of_pages;
80636a367b8SSteven Rostedt extern unsigned long ftrace_number_of_groups;
80704ec7bb6SSteven Rostedt (VMware) extern u64 ftrace_update_time;
80804ec7bb6SSteven Rostedt (VMware) extern u64 ftrace_total_mod_time;
80904ec7bb6SSteven Rostedt (VMware) void ftrace_init_trace_array(struct trace_array *tr);
810ad97772aSSteven Rostedt #else
/* No-op stub used when CONFIG_DYNAMIC_FTRACE is not enabled */
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
812d05cdb25SSteven Rostedt #endif
81395950c2eSSteven Rostedt #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
81495950c2eSSteven Rostedt extern int DYN_FTRACE_TEST_NAME(void);
815bc0c38d1SSteven Rostedt #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
816a1f157c7SZheng Yejian extern int DYN_FTRACE_TEST_NAME2(void);
817020e5f85SLi Zefan
818020e5f85SLi Zefan extern void trace_set_ring_buffer_expanded(struct trace_array *tr);
81960a11774SSteven Rostedt extern bool tracing_selftest_disabled;
82060efe21eSMasami Hiramatsu
82160efe21eSMasami Hiramatsu #ifdef CONFIG_FTRACE_STARTUP_TEST
82260a11774SSteven Rostedt extern void __init disable_tracing_selftest(const char *reason);
82360a11774SSteven Rostedt
8247447dce9SFrederic Weisbecker extern int trace_selftest_startup_function(struct tracer *trace,
8257447dce9SFrederic Weisbecker struct trace_array *tr);
82660a11774SSteven Rostedt extern int trace_selftest_startup_function_graph(struct tracer *trace,
82760a11774SSteven Rostedt struct trace_array *tr);
82860a11774SSteven Rostedt extern int trace_selftest_startup_irqsoff(struct tracer *trace,
82960a11774SSteven Rostedt struct trace_array *tr);
83060a11774SSteven Rostedt extern int trace_selftest_startup_preemptoff(struct tracer *trace,
83160a11774SSteven Rostedt struct trace_array *tr);
83260a11774SSteven Rostedt extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
83360a11774SSteven Rostedt struct trace_array *tr);
834fb1b6d8bSSteven Noonan extern int trace_selftest_startup_wakeup(struct tracer *trace,
835fb1b6d8bSSteven Noonan struct trace_array *tr);
83680e5ea45SSteven Rostedt extern int trace_selftest_startup_nop(struct tracer *trace,
83780e5ea45SSteven Rostedt struct trace_array *tr);
8388f768993SSteven Rostedt (Red Hat) extern int trace_selftest_startup_branch(struct tracer *trace,
8398f768993SSteven Rostedt (Red Hat) struct trace_array *tr);
8408f768993SSteven Rostedt (Red Hat) /*
8418f768993SSteven Rostedt (Red Hat) * Tracer data references selftest functions that only occur
8428f768993SSteven Rostedt (Red Hat) * on boot up. These can be __init functions. Thus, when selftests
8438f768993SSteven Rostedt (Red Hat) * are enabled, then the tracers need to reference __init functions.
8448f768993SSteven Rostedt (Red Hat) */
84560efe21eSMasami Hiramatsu #define __tracer_data __refdata
84660efe21eSMasami Hiramatsu #else
/* No-op stub used when CONFIG_FTRACE_STARTUP_TEST is not enabled */
static inline void __init disable_tracing_selftest(const char *reason)
{
}
85060a11774SSteven Rostedt /* Tracers are seldom changed. Optimize when selftests are disabled. */
85160a11774SSteven Rostedt #define __tracer_data __read_mostly
852c7aafc54SIngo Molnar #endif /* CONFIG_FTRACE_STARTUP_TEST */
853a5a1d1c2SThomas Gleixner
8541fd8f2a3SFrederic Weisbecker extern void *head_page(struct trace_array_cpu *data);
85540ce74f1SSteven Rostedt extern unsigned long long ns2usecs(u64 nsec);
85648ead020SFrederic Weisbecker
85740ce74f1SSteven Rostedt __printf(2, 0)
858659372d3SSteven Rostedt int trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
859659372d3SSteven Rostedt __printf(2, 0)
860659372d3SSteven Rostedt int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
86113292494SSteven Rostedt (VMware) __printf(3, 0)
86212883efbSSteven Rostedt (Red Hat) int trace_array_vprintk(struct trace_array *tr,
863955b61e5SJason Wessel unsigned long ip, const char *fmt, va_list args);
864955b61e5SJason Wessel __printf(3, 4)
865c7aafc54SIngo Molnar int trace_array_printk_buf(struct trace_buffer *buffer,
8668e1e1df2SByungchul Park unsigned long ip, const char *fmt, ...);
8678e1e1df2SByungchul Park void trace_printk_seq(struct trace_seq *s);
868673feb9dSSteven Rostedt (VMware) enum print_line_t print_trace_line(struct trace_iterator *iter);
869673feb9dSSteven Rostedt (VMware)
870673feb9dSSteven Rostedt (VMware) extern char trace_find_mark(unsigned long long duration);
871673feb9dSSteven Rostedt (VMware)
872673feb9dSSteven Rostedt (VMware) struct ftrace_hash;
873673feb9dSSteven Rostedt (VMware)
/*
 * A per-module function filter request kept on a list.
 * NOTE(review): exact semantics of @enable (filter vs notrace) should
 * be confirmed against the users in ftrace.c.
 */
struct ftrace_mod_load {
	struct list_head	list;	/* node in the module-load list */
	char			*func;	/* function name/pattern to match */
	char			*module;	/* module the filter applies to */
	int			 enable;	/* whether to enable the match */
};
8808c08f0d5SSteven Rostedt (VMware)
enum {
	/*
	 * Hash carries module filter state; such a hash is not considered
	 * empty even when count == 0 (see ftrace_hash_empty()).
	 */
	FTRACE_HASH_FL_MOD	= (1 << 0),
};
8844046bf02SNamhyung Kim
/* A hash of function addresses, looked up via ftrace_lookup_ip() */
struct ftrace_hash {
	unsigned long		size_bits;	/* log2 of the number of buckets */
	struct hlist_head	*buckets;	/* bucket array of hlist heads */
	unsigned long		count;		/* number of entries in the hash */
	unsigned long		flags;		/* FTRACE_HASH_FL_* bits */
	struct rcu_head		rcu;		/* for RCU-deferred freeing */
};
892eb583cd4SArnd Bergmann
8934046bf02SNamhyung Kim struct ftrace_func_entry *
8948c08f0d5SSteven Rostedt (VMware) ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
8954046bf02SNamhyung Kim
ftrace_hash_empty(struct ftrace_hash * hash)8964046bf02SNamhyung Kim static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
89715e6cb36SFrederic Weisbecker {
898fb52607aSFrederic Weisbecker return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
89962b915f1SJiri Olsa }
90062b915f1SJiri Olsa
90162b915f1SJiri Olsa /* Standard output formatting function used for function return traces */
90262b915f1SJiri Olsa #ifdef CONFIG_FUNCTION_GRAPH_TRACER
90362b915f1SJiri Olsa
90462b915f1SJiri Olsa /* Flag options */
90562b915f1SJiri Olsa #define TRACE_GRAPH_PRINT_OVERRUN 0x1
90662b915f1SJiri Olsa #define TRACE_GRAPH_PRINT_CPU 0x2
9079acd8de6SChangbin Du #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
9089acd8de6SChangbin Du #define TRACE_GRAPH_PRINT_PROC 0x8
9099acd8de6SChangbin Du #define TRACE_GRAPH_PRINT_DURATION 0x10
9109acd8de6SChangbin Du #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
9119acd8de6SChangbin Du #define TRACE_GRAPH_PRINT_REL_TIME 0x40
912a1be9cccSDonglin Peng #define TRACE_GRAPH_PRINT_IRQS 0x80
913a1be9cccSDonglin Peng #define TRACE_GRAPH_PRINT_TAIL 0x100
91421e92806SDonglin Peng #define TRACE_GRAPH_SLEEP_TIME 0x200
9156fc84ea7SSteven Rostedt (Red Hat) #define TRACE_GRAPH_GRAPH_TIME 0x400
9166fc84ea7SSteven Rostedt (Red Hat) #define TRACE_GRAPH_PRINT_RETVAL 0x800
91762b915f1SJiri Olsa #define TRACE_GRAPH_PRINT_RETVAL_HEX 0x1000
91855577204SSteven Rostedt (Red Hat) #define TRACE_GRAPH_PRINT_RETADDR 0x2000
919c8dd0f45SSteven Rostedt (VMware) #define TRACE_GRAPH_ARGS 0x4000
920c8dd0f45SSteven Rostedt (VMware) #define TRACE_GRAPH_PRINT_FILL_SHIFT 28
92155577204SSteven Rostedt (Red Hat) #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
922c8dd0f45SSteven Rostedt (VMware)
923c8dd0f45SSteven Rostedt (VMware) extern void ftrace_graph_sleep_time_control(bool enable);
924c8dd0f45SSteven Rostedt (VMware)
92555577204SSteven Rostedt (Red Hat) #ifdef CONFIG_FUNCTION_PROFILER
926d7a8d9e9SJiri Olsa extern void ftrace_graph_graph_time_control(bool enable);
927d7a8d9e9SJiri Olsa #else
/* No-op stub used when CONFIG_FUNCTION_PROFILER is not enabled */
static inline void ftrace_graph_graph_time_control(bool enable) { }
9299d9add34SSteven Rostedt (Red Hat) #endif
9300706f1c4SSteven Rostedt
93162b915f1SJiri Olsa extern enum print_line_t
93262b915f1SJiri Olsa print_graph_function_flags(struct trace_iterator *iter, u32 flags);
93362b915f1SJiri Olsa extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
93462b915f1SJiri Olsa extern void
93536590c50SSebastian Andrzej Siewior trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
93621e92806SDonglin Peng extern void graph_trace_open(struct trace_iterator *iter);
93721e92806SDonglin Peng extern void graph_trace_close(struct trace_iterator *iter);
93821e92806SDonglin Peng extern int __trace_graph_entry(struct trace_array *tr,
93921e92806SDonglin Peng struct ftrace_graph_ent *trace,
94062b915f1SJiri Olsa unsigned int trace_ctx);
94162b915f1SJiri Olsa extern int __trace_graph_retaddr_entry(struct trace_array *tr,
94266611c04SSteven Rostedt struct ftrace_graph_ent *trace,
94366611c04SSteven Rostedt unsigned int trace_ctx,
94466611c04SSteven Rostedt unsigned long retaddr);
945c132be2cSSteven Rostedt (VMware) extern void __trace_graph_return(struct trace_array *tr,
946c132be2cSSteven Rostedt (VMware) struct ftrace_graph_ret *trace,
94726dda563SSteven Rostedt (VMware) unsigned int trace_ctx,
94862b915f1SJiri Olsa u64 calltime, u64 rettime);
94912117f33SSteven Rostedt (VMware)
95012117f33SSteven Rostedt (VMware) extern void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
951068da098SSteven Rostedt (VMware) extern int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
952068da098SSteven Rostedt (VMware) extern void free_fgraph_ops(struct trace_array *tr);
953068da098SSteven Rostedt (VMware)
enum {
	/*
	 * Set in the task's graph-trace variable when a function in the
	 * set_graph_function hash starts being traced; cleared on the
	 * return side (see ftrace_graph_addr()/ftrace_graph_addr_finish()).
	 */
	TRACE_GRAPH_FL		= 1,

	/*
	 * In the very unlikely case that an interrupt came in
	 * at a start of graph tracing, and we want to trace
	 * the function in that interrupt, the depth can be greater
	 * than zero, because of the preempted start of a previous
	 * trace. In an even more unlikely case, depth could be 2
	 * if a softirq interrupted the start of graph tracing,
	 * followed by an interrupt preempting a start of graph
	 * tracing in the softirq, and depth can even be 3
	 * if an NMI came in at the start of an interrupt function
	 * that preempted a softirq start of a function that
	 * preempted normal context!!!! Luckily, it can't be
	 * greater than 3, so the next two bits are a mask
	 * of what the depth is when we set TRACE_GRAPH_FL
	 */

	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * To implement set_graph_notrace, if this bit is set, we ignore
	 * function graph tracing of called functions, until the return
	 * function is called to clear it.
	 */
	TRACE_GRAPH_NOTRACE_BIT,
};
983068da098SSteven Rostedt (VMware)
984068da098SSteven Rostedt (VMware) #define TRACE_GRAPH_NOTRACE (1 << TRACE_GRAPH_NOTRACE_BIT)
985068da098SSteven Rostedt (VMware)
ftrace_graph_depth(unsigned long * task_var)986068da098SSteven Rostedt (VMware) static inline unsigned long ftrace_graph_depth(unsigned long *task_var)
987068da098SSteven Rostedt (VMware) {
988068da098SSteven Rostedt (VMware) return (*task_var >> TRACE_GRAPH_DEPTH_START_BIT) & 3;
989068da098SSteven Rostedt (VMware) }
990068da098SSteven Rostedt (VMware)
ftrace_graph_set_depth(unsigned long * task_var,int depth)991ea4e2bc4SSteven Rostedt static inline void ftrace_graph_set_depth(unsigned long *task_var, int depth)
992ea4e2bc4SSteven Rostedt {
99324a9729fSAmol Grover *task_var &= ~(3 << TRACE_GRAPH_DEPTH_START_BIT);
994fd0e6852SAmol Grover *task_var |= (depth & 3) << TRACE_GRAPH_DEPTH_START_BIT;
995ea4e2bc4SSteven Rostedt }
99612117f33SSteven Rostedt (VMware)
99712117f33SSteven Rostedt (VMware) #ifdef CONFIG_DYNAMIC_FTRACE
998ea4e2bc4SSteven Rostedt extern struct ftrace_hash __rcu *ftrace_graph_hash;
9995cf99a0fSSteven Rostedt (VMware) extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
1000b9b0c831SNamhyung Kim
/*
 * Return 1 if @trace->func should be graph traced by this task: either
 * the set_graph_function hash is empty, or it contains the function's
 * address.  On a hash match, TRACE_GRAPH_FL and the start depth are
 * recorded in @task_var so the return side can clear them
 * (see ftrace_graph_addr_finish()).  Returns 0 otherwise.
 */
static inline int
ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
{
	unsigned long addr = trace->func;
	int ret = 0;
	struct ftrace_hash *hash;

	/* Keep the hash stable while we look it up */
	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());

	/* An empty hash means "trace all functions" */
	if (ftrace_hash_empty(hash)) {
		ret = 1;
		goto out;
	}

	if (ftrace_lookup_ip(hash, addr)) {
		/*
		 * This needs to be cleared on the return functions
		 * when the depth is zero.
		 */
		*task_var |= TRACE_GRAPH_FL;
		ftrace_graph_set_depth(task_var, trace->depth);

		/*
		 * If no irqs are to be traced, but a set_graph_function
		 * is set, and called by an interrupt handler, we still
		 * want to trace it.
		 */
		if (in_hardirq())
			trace_recursion_set(TRACE_IRQ_BIT);
		else
			trace_recursion_clear(TRACE_IRQ_BIT);
		ret = 1;
	}

out:
	preempt_enable_notrace();
	return ret;
}
104712117f33SSteven Rostedt (VMware)
104812117f33SSteven Rostedt (VMware) static inline void
ftrace_graph_addr_finish(struct fgraph_ops * gops,struct ftrace_graph_ret * trace)1049068da098SSteven Rostedt (VMware) ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace)
105012117f33SSteven Rostedt (VMware) {
10515cf99a0fSSteven Rostedt (VMware) unsigned long *task_var = fgraph_get_task_var(gops);
10525cf99a0fSSteven Rostedt (VMware)
105329ad23b0SNamhyung Kim if ((*task_var & TRACE_GRAPH_FL) &&
105429ad23b0SNamhyung Kim trace->depth == ftrace_graph_depth(task_var))
1055b9b0c831SNamhyung Kim *task_var &= ~TRACE_GRAPH_FL;
1056fd0e6852SAmol Grover }
105729ad23b0SNamhyung Kim
ftrace_graph_notrace_addr(unsigned long addr)1058b9b0c831SNamhyung Kim static inline int ftrace_graph_notrace_addr(unsigned long addr)
105929ad23b0SNamhyung Kim {
106016052dd5SSteven Rostedt (VMware) int ret = 0;
106116052dd5SSteven Rostedt (VMware) struct ftrace_hash *notrace_hash;
106216052dd5SSteven Rostedt (VMware)
106316052dd5SSteven Rostedt (VMware) preempt_disable_notrace();
106454a16ff6SSteven Rostedt (VMware)
106516052dd5SSteven Rostedt (VMware) /*
1066fd0e6852SAmol Grover * Have to open code "rcu_dereference_sched()" because the
1067fd0e6852SAmol Grover * function graph tracer can be called when RCU is not
1068fd0e6852SAmol Grover * "watching".
1069fd0e6852SAmol Grover * Protected with schedule_on_each_cpu(ftrace_sync)
1070b9b0c831SNamhyung Kim */
107129ad23b0SNamhyung Kim notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
1072b9b0c831SNamhyung Kim !preemptible());
1073b9b0c831SNamhyung Kim
107429ad23b0SNamhyung Kim if (ftrace_lookup_ip(notrace_hash, addr))
107515e6cb36SFrederic Weisbecker ret = 1;
107612117f33SSteven Rostedt (VMware)
10776b253930SIngo Molnar preempt_enable_notrace();
10786b253930SIngo Molnar return ret;
1079ea4e2bc4SSteven Rostedt }
108029ad23b0SNamhyung Kim #else
/*
 * !CONFIG_DYNAMIC_FTRACE stubs: with no filter hashes available,
 * every function is graph traced and nothing is excluded.
 */
static inline int ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
static inline void ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace)
{ }
109212117f33SSteven Rostedt (VMware) #endif /* CONFIG_DYNAMIC_FTRACE */
109312117f33SSteven Rostedt (VMware)
10941a414428SSteven Rostedt (Red Hat) extern unsigned int fgraph_max_depth;
109512117f33SSteven Rostedt (VMware) extern bool fgraph_sleep_time;
109612117f33SSteven Rostedt (VMware)
/*
 * Return true if this function entry should NOT be graph traced:
 * no set_graph_function matched (and we are not nested inside one),
 * the depth is negative, or fgraph_max_depth would be exceeded.
 *
 * NOTE: ftrace_graph_addr() has side effects (it sets TRACE_GRAPH_FL
 * and updates the IRQ recursion bit), so the evaluation order of the
 * || operands below must not be rearranged.
 */
static inline bool
ftrace_graph_ignore_func(struct fgraph_ops *gops, struct ftrace_graph_ent *trace)
{
	unsigned long *task_var = fgraph_get_task_var(gops);

	/* trace it when it is-nested-in or is a function enabled. */
	return !((*task_var & TRACE_GRAPH_FL) ||
		 ftrace_graph_addr(task_var, trace)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}
110815e6cb36SFrederic Weisbecker
1109d7a8d9e9SJiri Olsa void fgraph_init_ops(struct ftrace_ops *dst_ops,
111015e6cb36SFrederic Weisbecker struct ftrace_ops *src_ops);
111115e6cb36SFrederic Weisbecker
111215e6cb36SFrederic Weisbecker #else /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
 * !CONFIG_FUNCTION_GRAPH_TRACER stubs: graph output is never handled
 * here, and there are no fgraph ops to free.
 */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
static inline void free_fgraph_ops(struct trace_array *tr) { }
1119756d17eeS[email protected] /* ftrace_ops may not be defined */
1120804a6851SSteven Rostedt #define init_array_fgraph_ops(tr, ops) do { } while (0)
11211155de47SPaul Mundt #define allocate_fgraph_ops(tr, ops) ({ 0; })
1122c58b6b03SJosef Bacik #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1123c58b6b03SJosef Bacik
1124c58b6b03SJosef Bacik extern struct list_head ftrace_pids;
1125c58b6b03SJosef Bacik
112692a68fa0SSteven Rostedt (VMware) #ifdef CONFIG_FUNCTION_TRACER
112792a68fa0SSteven Rostedt (VMware)
112892a68fa0SSteven Rostedt (VMware) #define FTRACE_PID_IGNORE -1
112904ec7bb6SSteven Rostedt (VMware) #define FTRACE_PID_TRACE -2
113004ec7bb6SSteven Rostedt (VMware)
/*
 * A named command that can be registered with register_ftrace_command()
 * and invoked from the ftrace filter files (presumably parsed as
 * "func:cmd:params" — see the func/cmd/params callback arguments).
 */
struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct trace_array *tr,
					struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};
1139804a6851SSteven Rostedt extern bool ftrace_filter_param __initdata;
ftrace_trace_task(struct trace_array * tr)1140e0a413f6SSteven Rostedt static inline int ftrace_trace_task(struct trace_array *tr)
1141591dffdaSSteven Rostedt (Red Hat) {
1142591dffdaSSteven Rostedt (Red Hat) return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
1143591dffdaSSteven Rostedt (Red Hat) FTRACE_PID_IGNORE;
11444114fbfdSMasami Hiramatsu }
11454114fbfdSMasami Hiramatsu extern int ftrace_is_dead(void);
11464104d326SSteven Rostedt (Red Hat) int ftrace_create_function_files(struct trace_array *tr,
114731f505dcSSteven Rostedt struct dentry *parent);
11484104d326SSteven Rostedt (Red Hat) void ftrace_destroy_function_files(struct trace_array *tr);
11494104d326SSteven Rostedt (Red Hat) int ftrace_allocate_ftrace_ops(struct trace_array *tr);
1150345ddcc8SSteven Rostedt (Red Hat) void ftrace_free_ftrace_ops(struct trace_array *tr);
1151501c2375SSteven Rostedt (Red Hat) void ftrace_init_global_array_ops(struct trace_array *tr);
1152501c2375SSteven Rostedt (Red Hat) struct trace_array *trace_get_global_array(void);
1153d879d0b8SNamhyung Kim void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
1154dbeafd0dSSteven Rostedt (VMware) void ftrace_reset_array_ops(struct trace_array *tr);
11551e10486fSNamhyung Kim void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
11561155de47SPaul Mundt void ftrace_init_tracefs_toplevel(struct trace_array *tr,
1157345ddcc8SSteven Rostedt (Red Hat) struct dentry *d_tracer);
11581155de47SPaul Mundt void ftrace_clear_pids(struct trace_array *tr);
11591155de47SPaul Mundt int init_function_trace(void);
11601155de47SPaul Mundt void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
1161e0a413f6SSteven Rostedt #else
/*
 * !CONFIG_FUNCTION_TRACER stubs: every task "is traced", ftrace is
 * never dead, and all setup/teardown entry points are no-ops that
 * report success.
 */
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	return 0;
}
static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
1187ec19b859SSteven Rostedt (VMware) /* ftace_func_t type is not defined, use macro instead of static inline */
1188ec19b859SSteven Rostedt (VMware) #define ftrace_init_array_ops(tr, func) do { } while (0)
1189ec19b859SSteven Rostedt (VMware) #endif /* CONFIG_FUNCTION_TRACER */
1190ec19b859SSteven Rostedt (VMware)
1191b5f081b5SSteven Rostedt (VMware) #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
1192bca6c8d0SSteven Rostedt (VMware)
/*
 * Callbacks for a function probe registered via
 * register_ftrace_function_probe().
 */
struct ftrace_probe_ops {
	/* Called when a probed function executes. */
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					struct trace_array *tr,
					struct ftrace_probe_ops *ops,
					void *data);
	/* Optional setup when the probe is attached to @ip. */
	int			(*init)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *init_data,
					void **data);
	/* Optional teardown when the probe is removed from @ip. */
	void			(*free)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *data);
	/* Optional seq_file output for the probe entry. */
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};
121141794f19SSteven Rostedt (VMware)
121241794f19SSteven Rostedt (VMware) struct ftrace_func_mapper;
121341794f19SSteven Rostedt (VMware) typedef int (*ftrace_mapper_func)(void *data);
121441794f19SSteven Rostedt (VMware)
121541794f19SSteven Rostedt (VMware) struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
121641794f19SSteven Rostedt (VMware) void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
121741794f19SSteven Rostedt (VMware) unsigned long ip);
121841794f19SSteven Rostedt (VMware) int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
121941794f19SSteven Rostedt (VMware) unsigned long ip, void *data);
1220ec19b859SSteven Rostedt (VMware) void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
122104ec7bb6SSteven Rostedt (VMware) unsigned long ip);
122204ec7bb6SSteven Rostedt (VMware) void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
1223d3d532d7SSteven Rostedt (VMware) ftrace_mapper_func free_func);
12247b60f3d8SSteven Rostedt (VMware)
12257b60f3d8SSteven Rostedt (VMware) extern int
1226a0e6369eSNaveen N. Rao register_ftrace_function_probe(char *glob, struct trace_array *tr,
1227ec19b859SSteven Rostedt (VMware) struct ftrace_probe_ops *ops, void *data);
122892a68fa0SSteven Rostedt (VMware) extern int
122992a68fa0SSteven Rostedt (VMware) unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
123092a68fa0SSteven Rostedt (VMware) struct ftrace_probe_ops *ops);
1231591dffdaSSteven Rostedt (Red Hat) extern void clear_ftrace_function_probes(struct trace_array *tr);
1232591dffdaSSteven Rostedt (Red Hat)
1233591dffdaSSteven Rostedt (Red Hat) int register_ftrace_command(struct ftrace_func_command *cmd);
12345c3469cbSMasami Hiramatsu int unregister_ftrace_command(struct ftrace_func_command *cmd);
12355c3469cbSMasami Hiramatsu
12365c3469cbSMasami Hiramatsu void ftrace_create_filter_files(struct ftrace_ops *ops,
12375c3469cbSMasami Hiramatsu struct dentry *parent);
12385c3469cbSMasami Hiramatsu void ftrace_destroy_filter_files(struct ftrace_ops *ops);
1239591dffdaSSteven Rostedt (Red Hat)
124092a68fa0SSteven Rostedt (VMware) extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
124192a68fa0SSteven Rostedt (VMware) int len, int reset);
124292a68fa0SSteven Rostedt (VMware) extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
124392a68fa0SSteven Rostedt (VMware) int len, int reset);
124492a68fa0SSteven Rostedt (VMware) #else
124592a68fa0SSteven Rostedt (VMware) struct ftrace_func_command;
124692a68fa0SSteven Rostedt (VMware)
/*
 * Without CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE, commands
 * cannot be registered and there are no probes to clear.
 */
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}
1258591dffdaSSteven Rostedt (Red Hat)
1259591dffdaSSteven Rostedt (Red Hat) /*
1260591dffdaSSteven Rostedt (Red Hat) * The ops parameter passed in is usually undefined.
1261804a6851SSteven Rostedt * This must be a macro.
1262c6650b2eSYaowei Bai */
1263ced39002SJiri Olsa #define ftrace_create_filter_files(ops, parent) do { } while (0)
12644fcdae83SSteven Rostedt #define ftrace_destroy_filter_files(ops) do { } while (0)
1265b63f39eaS[email protected] #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
1266b63f39eaS[email protected]
1267b63f39eaS[email protected] bool ftrace_event_is_function(struct trace_event_call *call);
12681537a363SDaniel Mack
1269b63f39eaS[email protected] /*
1270b63f39eaS[email protected] * struct trace_parser - servers for reading the user input separated by spaces
1271b63f39eaS[email protected] * @cont: set if the input is not complete - no final space char was found
1272b63f39eaS[email protected] * @buffer: holds the parsed user input
1273b63f39eaS[email protected] * @idx: user input length
1274b63f39eaS[email protected] * @size: buffer size
1275b63f39eaS[email protected] */
struct trace_parser {
	bool		cont;		/* input not complete, more to come */
	char		*buffer;	/* parsed user input */
	unsigned	idx;		/* length of parsed input */
	unsigned	size;		/* allocated size of @buffer */
};
1282b63f39eaS[email protected]
trace_parser_loaded(struct trace_parser * parser)1283b63f39eaS[email protected] static inline bool trace_parser_loaded(struct trace_parser *parser)
1284b63f39eaS[email protected] {
1285b63f39eaS[email protected] return (parser->idx != 0);
1286b63f39eaS[email protected] }
1287b63f39eaS[email protected]
/* Return true if the last input was incomplete (no final space seen). */
static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}
1292b63f39eaS[email protected]
trace_parser_clear(struct trace_parser * parser)1293b63f39eaS[email protected] static inline void trace_parser_clear(struct trace_parser *parser)
1294b63f39eaS[email protected] {
1295b63f39eaS[email protected] parser->cont = false;
1296b63f39eaS[email protected] parser->idx = 0;
1297b63f39eaS[email protected] }
1298b63f39eaS[email protected]
1299b63f39eaS[email protected] extern int trace_parser_get_init(struct trace_parser *parser, int size);
1300729358daSSteven Rostedt (Red Hat) extern void trace_parser_put(struct trace_parser *parser);
1301729358daSSteven Rostedt (Red Hat) extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1302729358daSSteven Rostedt (Red Hat) size_t cnt, loff_t *ppos);
1303729358daSSteven Rostedt (Red Hat)
1304729358daSSteven Rostedt (Red Hat) /*
1305729358daSSteven Rostedt (Red Hat) * Only create function graph options if function graph is configured.
1306729358daSSteven Rostedt (Red Hat) */
1307729358daSSteven Rostedt (Red Hat) #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1308729358daSSteven Rostedt (Red Hat) # define FGRAPH_FLAGS \
13094ee4301cSSteven Rostedt (Red Hat) C(DISPLAY_GRAPH, "display-graph"),
13104ee4301cSSteven Rostedt (Red Hat) #else
13114ee4301cSSteven Rostedt (Red Hat) # define FGRAPH_FLAGS
13124ee4301cSSteven Rostedt (Red Hat) #endif
13134ee4301cSSteven Rostedt (Red Hat)
13144ee4301cSSteven Rostedt (Red Hat) #ifdef CONFIG_BRANCH_TRACER
13154ee4301cSSteven Rostedt (Red Hat) # define BRANCH_FLAGS \
13168179e8a1SSteven Rostedt (Red Hat) C(BRANCH, "branch"),
13178179e8a1SSteven Rostedt (Red Hat) #else
13181e10486fSNamhyung Kim # define BRANCH_FLAGS
13191e10486fSNamhyung Kim #endif
13208179e8a1SSteven Rostedt (Red Hat)
13218179e8a1SSteven Rostedt (Red Hat) #ifdef CONFIG_FUNCTION_TRACER
13228179e8a1SSteven Rostedt (Red Hat) # define FUNCTION_FLAGS \
13238179e8a1SSteven Rostedt (Red Hat) C(FUNCTION, "function-trace"), \
13241e10486fSNamhyung Kim C(FUNC_FORK, "function-fork"),
13258179e8a1SSteven Rostedt (Red Hat) # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
13268179e8a1SSteven Rostedt (Red Hat) #else
132773dddbb5SSteven Rostedt (Red Hat) # define FUNCTION_FLAGS
132873dddbb5SSteven Rostedt (Red Hat) # define FUNCTION_DEFAULT_FLAGS 0UL
132973dddbb5SSteven Rostedt (Red Hat) # define TRACE_ITER_FUNC_FORK 0UL
133073dddbb5SSteven Rostedt (Red Hat) #endif
133173dddbb5SSteven Rostedt (Red Hat)
133273dddbb5SSteven Rostedt (Red Hat) #ifdef CONFIG_STACKTRACE
133373dddbb5SSteven Rostedt (Red Hat) # define STACK_FLAGS \
1334729358daSSteven Rostedt (Red Hat) C(STACKTRACE, "stacktrace"),
13354fcdae83SSteven Rostedt #else
13364fcdae83SSteven Rostedt # define STACK_FLAGS
13374fcdae83SSteven Rostedt #endif
13384fcdae83SSteven Rostedt
1339a3418a36SSteven Rostedt (Red Hat) /*
13404fcdae83SSteven Rostedt * trace_iterator_flags is an enumeration that defines bit
1341a3418a36SSteven Rostedt (Red Hat) * positions into trace_flags that controls the output.
1342a3418a36SSteven Rostedt (Red Hat) *
1343a3418a36SSteven Rostedt (Red Hat) * NOTE: These bits must match the trace_options array in
1344a3418a36SSteven Rostedt (Red Hat) * trace.c (this macro guarantees it).
1345a3418a36SSteven Rostedt (Red Hat) */
1346a3418a36SSteven Rostedt (Red Hat) #define TRACE_FLAGS \
1347a3418a36SSteven Rostedt (Red Hat) C(PRINT_PARENT, "print-parent"), \
1348a3418a36SSteven Rostedt (Red Hat) C(SYM_OFFSET, "sym-offset"), \
1349a3418a36SSteven Rostedt (Red Hat) C(SYM_ADDR, "sym-addr"), \
135080a76994SSteven Rostedt (Google) C(VERBOSE, "verbose"), \
1351a3418a36SSteven Rostedt (Red Hat) C(RAW, "raw"), \
1352a3418a36SSteven Rostedt (Red Hat) C(HEX, "hex"), \
1353a3418a36SSteven Rostedt (Red Hat) C(BIN, "bin"), \
1354a3418a36SSteven Rostedt (Red Hat) C(BLOCK, "block"), \
1355a3418a36SSteven Rostedt (Red Hat) C(FIELDS, "fields"), \
1356a3418a36SSteven Rostedt (Red Hat) C(PRINTK, "trace_printk"), \
1357a3418a36SSteven Rostedt (Red Hat) C(ANNOTATE, "annotate"), \
1358a3418a36SSteven Rostedt (Red Hat) C(USERSTACKTRACE, "userstacktrace"), \
1359d914ba37SJoel Fernandes C(SYM_USEROBJ, "sym-userobj"), \
1360a3418a36SSteven Rostedt (Red Hat) C(PRINTK_MSGONLY, "printk-msg-only"), \
1361a3418a36SSteven Rostedt (Red Hat) C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \
1362a3418a36SSteven Rostedt (Red Hat) C(LATENCY_FMT, "latency-format"), \
1363a3418a36SSteven Rostedt (Red Hat) C(RECORD_CMD, "record-cmd"), \
1364c37775d5SSteven Rostedt C(RECORD_TGID, "record-tgid"), \
1365ef2bd81dSSteven Rostedt C(OVERWRITE, "overwrite"), \
136606e0a548SSteven Rostedt (VMware) C(STOP_ON_FREE, "disable_on_free"), \
1367a345a671SMasami Hiramatsu C(IRQ_INFO, "irq-info"), \
13688179e8a1SSteven Rostedt (Red Hat) C(MARKERS, "markers"), \
13694ee4301cSSteven Rostedt (Red Hat) C(EVENT_FORK, "event-fork"), \
137073dddbb5SSteven Rostedt (Red Hat) C(TRACE_PRINTK, "trace_printk_dest"), \
13714ee4301cSSteven Rostedt (Red Hat) C(PAUSE_ON_TRACE, "pause-on-trace"), \
1372ce3fed62SSteven Rostedt (Red Hat) C(HASH_PTR, "hash-ptr"), /* Print hashed pointer */ \
1373a3418a36SSteven Rostedt (Red Hat) FUNCTION_FLAGS \
1374a3418a36SSteven Rostedt (Red Hat) FGRAPH_FLAGS \
1375a3418a36SSteven Rostedt (Red Hat) STACK_FLAGS \
1376a3418a36SSteven Rostedt (Red Hat) BRANCH_FLAGS
1377a3418a36SSteven Rostedt (Red Hat)
1378a3418a36SSteven Rostedt (Red Hat) /*
1379a3418a36SSteven Rostedt (Red Hat) * By defining C, we can make TRACE_FLAGS a list of bit names
1380b5e87c05SSteven Rostedt (Red Hat) * that will define the bits for the flag masks.
1381b5e87c05SSteven Rostedt (Red Hat) */
1382b5e87c05SSteven Rostedt (Red Hat) #undef C
1383b5e87c05SSteven Rostedt (Red Hat) #define C(a, b) TRACE_ITER_##a##_BIT
1384b5e87c05SSteven Rostedt (Red Hat)
1385a3418a36SSteven Rostedt (Red Hat) enum trace_iterator_bits {
1386a3418a36SSteven Rostedt (Red Hat) TRACE_FLAGS
1387a3418a36SSteven Rostedt (Red Hat) /* Make sure we don't go more than we have bits for */
1388a3418a36SSteven Rostedt (Red Hat) TRACE_ITER_LAST_BIT
1389a3418a36SSteven Rostedt (Red Hat) };
1390a3418a36SSteven Rostedt (Red Hat)
1391a3418a36SSteven Rostedt (Red Hat) /*
1392a3418a36SSteven Rostedt (Red Hat) * By redefining C, we can make TRACE_FLAGS a list of masks that
1393a3418a36SSteven Rostedt (Red Hat) * use the bits as defined above.
13944e655519SIngo Molnar */
139515e6cb36SFrederic Weisbecker #undef C
139615e6cb36SFrederic Weisbecker #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
139715e6cb36SFrederic Weisbecker
139815e6cb36SFrederic Weisbecker enum trace_iterator_flags { TRACE_FLAGS };
139915e6cb36SFrederic Weisbecker
140015e6cb36SFrederic Weisbecker /*
140115e6cb36SFrederic Weisbecker * TRACE_ITER_SYM_MASK masks the options in trace_flags that
140243a15386SFrédéric Weisbecker * control the output of kernel symbols.
140343a15386SFrédéric Weisbecker */
14042ed84eebSSteven Rostedt #define TRACE_ITER_SYM_MASK \
14059f029e83SSteven Rostedt (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
14069f029e83SSteven Rostedt
14079f029e83SSteven Rostedt extern struct tracer nop_trace;
140852f232cbSSteven Rostedt
1409983f938aSSteven Rostedt (Red Hat) #ifdef CONFIG_BRANCH_TRACER
14109f029e83SSteven Rostedt extern int enable_branch_tracing(struct trace_array *tr);
141152f232cbSSteven Rostedt extern void disable_branch_tracing(void);
trace_branch_enable(struct trace_array * tr)141252f232cbSSteven Rostedt static inline int trace_branch_enable(struct trace_array *tr)
14139f029e83SSteven Rostedt {
141452f232cbSSteven Rostedt if (tr->trace_flags & TRACE_ITER_BRANCH)
141552f232cbSSteven Rostedt return enable_branch_tracing(tr);
14169f029e83SSteven Rostedt return 0;
141752f232cbSSteven Rostedt }
/* Unconditionally disable branch tracing. */
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
14239f029e83SSteven Rostedt #else
/* !CONFIG_BRANCH_TRACER stubs: branch tracing is never enabled. */
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
1431ddeea494SSven Schnelle #endif /* CONFIG_BRANCH_TRACER */
1432ddeea494SSven Schnelle
1433ddeea494SSven Schnelle /* set ring buffers to default size if not already done so */
1434ddeea494SSven Schnelle int tracing_update_buffers(struct trace_array *tr);
1435ddeea494SSven Schnelle
/*
 * Overlay used to read a synthetic event field at its declared width,
 * or as dynamic-string info.
 */
union trace_synth_field {
	u8				as_u8;
	u16				as_u16;
	u32				as_u32;
	u64				as_u64;
	struct trace_dynamic_info	as_dynamic;
};
1443aa38e9fcSLi Zefan
/*
 * Describes a single field of a trace event; used by the event filter
 * to locate and compare field values within a recorded entry.
 */
struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;		/* byte offset within the entry */
	int			size;		/* field size in bytes */
	unsigned int		is_signed:1;
	unsigned int		needs_test:1;
	int			len;		/* NOTE(review): presumably array element count — confirm */
};
14558b372562STom Zanussi
145630e673b2STom Zanussi struct prog_entry;
145730e673b2STom Zanussi
/* A compiled event filter plus the string it was compiled from. */
struct event_filter {
	struct prog_entry	__rcu *prog;	/* compiled predicate program */
	char			*filter_string;	/* original user-supplied filter */
};
1462e9dbfae5SSteven Rostedt
/* A group of trace events sharing a name and an optional filter. */
struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;	/* reference count */
};
14695790b1fbSSteven Rostedt (Google)
/* Per-trace-instance directory for an event subsystem in eventfs. */
struct trace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;		/* owning trace instance */
	struct eventfs_inode	*ei;		/* eventfs directory entry */
	int			ref_count;
	int			nr_events;	/* events under this directory */
};
1478fa66ddb8SSteven Rostedt (Red Hat)
/*
 * Commit a reserved @event to @buffer with saved @regs.  Parameter name
 * fixed from the misspelled "trcace_ctx" to match the definition and
 * the trace_buffer_unlock_commit() wrapper below (declaration-only
 * rename; no ABI change).
 */
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned int trace_ctx,
				     struct pt_regs *regs);
148433fddff2SSteven Rostedt (Red Hat)
/*
 * Commit @event to @buffer without register state: convenience wrapper
 * around trace_buffer_unlock_commit_regs() with regs == NULL.
 */
static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct trace_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned int trace_ctx)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
}
14922cc621fdSSteven Rostedt (Google)
14932cc621fdSSteven Rostedt (Google) DECLARE_PER_CPU(bool, trace_taskinfo_save);
14942cc621fdSSteven Rostedt (Google) int trace_save_cmdline(struct task_struct *tsk);
14952cc621fdSSteven Rostedt (Google) int trace_create_savedcmd(void);
14962cc621fdSSteven Rostedt (Google) int trace_alloc_tgid_map(void);
14972cc621fdSSteven Rostedt (Google) void trace_free_saved_cmdlines_buffer(void);
14980fc1b09fSSteven Rostedt (Red Hat)
14990fc1b09fSSteven Rostedt (Red Hat) extern const struct file_operations tracing_saved_cmdlines_fops;
15000fc1b09fSSteven Rostedt (Red Hat) extern const struct file_operations tracing_saved_tgids_fops;
15010fc1b09fSSteven Rostedt (Red Hat) extern const struct file_operations tracing_saved_cmdlines_size_fops;
15020fc1b09fSSteven Rostedt (Red Hat)
1503c4846480SSteven Rostedt (Google) DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1504c4846480SSteven Rostedt (Google) DECLARE_PER_CPU(int, trace_buffered_event_cnt);
15050fc1b09fSSteven Rostedt (Red Hat) void trace_buffered_event_disable(void);
150613292494SSteven Rostedt (VMware) void trace_buffered_event_enable(void);
15070fc1b09fSSteven Rostedt (Red Hat)
15080fc1b09fSSteven Rostedt (Red Hat) void early_enable_events(struct trace_array *tr, char *buf, bool disable_first);
15090fc1b09fSSteven Rostedt (Red Hat)
/*
 * Discard a reserved event instead of committing it.  If @event is the
 * per-CPU buffered event, just drop its use count; otherwise discard it
 * from the ring buffer.  Both paths re-enable the preemption that was
 * disabled when the event was reserved.
 */
static inline void
__trace_event_discard_commit(struct trace_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer and enable preemption */
		this_cpu_dec(trace_buffered_event_cnt);
		preempt_enable_notrace();
		return;
	}
	/* ring_buffer_discard_commit() enables preemption */
	ring_buffer_discard_commit(buffer, event);
}
1523dad56ee7SSteven Rostedt (Red Hat)
1524dad56ee7SSteven Rostedt (Red Hat) /*
1525dad56ee7SSteven Rostedt (Red Hat) * Helper function for event_trigger_unlock_commit{_regs}().
1526dad56ee7SSteven Rostedt (Red Hat) * If there are event triggers attached to this event that requires
1527dad56ee7SSteven Rostedt (Red Hat) * filtering against its fields, then they will be called as the
1528dad56ee7SSteven Rostedt (Red Hat) * entry already holds the field information of the current event.
1529dad56ee7SSteven Rostedt (Red Hat) *
1530dad56ee7SSteven Rostedt (Red Hat) * It also checks if the event should be discarded or not.
1531dad56ee7SSteven Rostedt (Red Hat) * It is to be discarded if the event is soft disabled and the
1532dad56ee7SSteven Rostedt (Red Hat) * event was only recorded to process triggers, or if the event
1533dad56ee7SSteven Rostedt (Red Hat) * filter is active and this event did not match the filters.
153413292494SSteven Rostedt (VMware) *
1535dad56ee7SSteven Rostedt (Red Hat) * Returns true if the event is discarded, false otherwise.
1536dad56ee7SSteven Rostedt (Red Hat) */
1537dad56ee7SSteven Rostedt (Red Hat) static inline bool
__event_trigger_test_discard(struct trace_event_file * file,struct trace_buffer * buffer,struct ring_buffer_event * event,void * entry,enum event_trigger_type * tt)1538dad56ee7SSteven Rostedt (Red Hat) __event_trigger_test_discard(struct trace_event_file *file,
1539dad56ee7SSteven Rostedt (Red Hat) struct trace_buffer *buffer,
1540dad56ee7SSteven Rostedt (Red Hat) struct ring_buffer_event *event,
1541dad56ee7SSteven Rostedt (Red Hat) void *entry,
1542b47e3302SSteven Rostedt (VMware) enum event_trigger_type *tt)
1543dad56ee7SSteven Rostedt (Red Hat) {
1544a55f224fSSteven Rostedt (VMware) unsigned long eflags = file->flags;
1545a55f224fSSteven Rostedt (VMware)
1546a55f224fSSteven Rostedt (VMware) if (eflags & EVENT_FILE_FL_TRIGGER_COND)
1547a55f224fSSteven Rostedt (VMware) *tt = event_triggers_call(file, buffer, entry, event);
1548a55f224fSSteven Rostedt (VMware)
1549a55f224fSSteven Rostedt (VMware) if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
1550a55f224fSSteven Rostedt (VMware) EVENT_FILE_FL_FILTERED |
1551a55f224fSSteven Rostedt (VMware) EVENT_FILE_FL_PID_FILTER))))
1552a55f224fSSteven Rostedt (VMware) return false;
1553a55f224fSSteven Rostedt (VMware)
1554a55f224fSSteven Rostedt (VMware) if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
1555a55f224fSSteven Rostedt (VMware) goto discard;
1556a55f224fSSteven Rostedt (VMware)
1557a55f224fSSteven Rostedt (VMware) if (file->flags & EVENT_FILE_FL_FILTERED &&
1558a55f224fSSteven Rostedt (VMware) !filter_match_preds(file->filter, entry))
1559dad56ee7SSteven Rostedt (Red Hat) goto discard;
15609cbb1506SSteven Rostedt (Red Hat)
1561a55f224fSSteven Rostedt (VMware) if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
1562a55f224fSSteven Rostedt (VMware) trace_event_ignore_this_pid(file))
1563a55f224fSSteven Rostedt (VMware) goto discard;
15649cbb1506SSteven Rostedt (Red Hat)
15659cbb1506SSteven Rostedt (Red Hat) return false;
1566dad56ee7SSteven Rostedt (Red Hat) discard:
1567dad56ee7SSteven Rostedt (Red Hat) __trace_event_discard_commit(buffer, event);
1568f2cc020dSIngo Molnar return true;
1569dad56ee7SSteven Rostedt (Red Hat) }
1570dad56ee7SSteven Rostedt (Red Hat)
1571dad56ee7SSteven Rostedt (Red Hat) /**
157236590c50SSebastian Andrzej Siewior * event_trigger_unlock_commit - handle triggers and finish event commit
1573dad56ee7SSteven Rostedt (Red Hat) * @file: The file pointer associated with the event
1574dad56ee7SSteven Rostedt (Red Hat) * @buffer: The ring buffer that the event is being written to
1575dad56ee7SSteven Rostedt (Red Hat) * @event: The event meta data in the ring buffer
1576dad56ee7SSteven Rostedt (Red Hat) * @entry: The event itself
1577dad56ee7SSteven Rostedt (Red Hat) * @trace_ctx: The tracing context flags.
1578dad56ee7SSteven Rostedt (Red Hat) *
1579dad56ee7SSteven Rostedt (Red Hat) * This is a helper function to handle triggers that require data
158013292494SSteven Rostedt (VMware) * from the event itself. It also tests the event against filters and
1581dad56ee7SSteven Rostedt (Red Hat) * if the event is soft disabled and should be discarded.
158236590c50SSebastian Andrzej Siewior */
1583dad56ee7SSteven Rostedt (Red Hat) static inline void
event_trigger_unlock_commit(struct trace_event_file * file,struct trace_buffer * buffer,struct ring_buffer_event * event,void * entry,unsigned int trace_ctx)1584dad56ee7SSteven Rostedt (Red Hat) event_trigger_unlock_commit(struct trace_event_file *file,
1585dad56ee7SSteven Rostedt (Red Hat) struct trace_buffer *buffer,
1586dad56ee7SSteven Rostedt (Red Hat) struct ring_buffer_event *event,
158736590c50SSebastian Andrzej Siewior void *entry, unsigned int trace_ctx)
1588dad56ee7SSteven Rostedt (Red Hat) {
1589dad56ee7SSteven Rostedt (Red Hat) enum event_trigger_type tt = ETT_NONE;
1590c94e45bcSSteven Rostedt (VMware)
1591dad56ee7SSteven Rostedt (Red Hat) if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1592dad56ee7SSteven Rostedt (Red Hat) trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);
159361e9dea2SSteven Rostedt
159461e9dea2SSteven Rostedt if (tt)
159543cd4145SSteven Rostedt event_triggers_post_call(file, tt);
159661e9dea2SSteven Rostedt }
1597bf93f9edSSteven Rostedt
1598bf93f9edSSteven Rostedt #define FILTER_PRED_INVALID ((unsigned short)-1)
1599bf93f9edSSteven Rostedt #define FILTER_PRED_IS_RIGHT (1 << 15)
1600bf93f9edSSteven Rostedt #define FILTER_PRED_FOLD (1 << 15)
1601bf93f9edSSteven Rostedt
1602bf93f9edSSteven Rostedt /*
1603bf93f9edSSteven Rostedt * The max preds is the size of unsigned short with
1604bf93f9edSSteven Rostedt * two flags at the MSBs. One bit is used for both the IS_RIGHT
16054a3d27e9SSteven Rostedt * and FOLD flags. The other is reserved.
16067ce7e424STom Zanussi *
16071889d209SFrederic Weisbecker * 2^14 preds is way more than enough.
16087ce7e424STom Zanussi */
16091889d209SFrederic Weisbecker #define MAX_FILTER_PRED 16384
16101889d209SFrederic Weisbecker
16113f6fe06dSFrederic Weisbecker struct filter_pred;
1612b0f1a59aSLi Zefan struct regex;
16133f6fe06dSFrederic Weisbecker
16143f6fe06dSFrederic Weisbecker typedef int (*regex_match_func)(char *str, struct regex *r, int len);
16153f6fe06dSFrederic Weisbecker
/*
 * How a filter pattern is matched against a string field.
 * (Semantics inferred from names / filter_parse_regex() usage —
 * NOTE(review): confirm against trace_events_filter.c.)
 */
enum regex_type {
	MATCH_FULL = 0,		/* whole-string comparison */
	MATCH_FRONT_ONLY,	/* anchored at the start of the string */
	MATCH_MIDDLE_ONLY,	/* may occur anywhere in the string */
	MATCH_END_ONLY,		/* anchored at the end of the string */
	MATCH_GLOB,		/* full glob matching (see <linux/glob.h>) */
	MATCH_INDEX,		/* presumably match by numeric index — TODO confirm */
};
16241889d209SFrederic Weisbecker
/* A parsed filter pattern plus the routine used to match it. */
struct regex {
	char			pattern[MAX_FILTER_STR_VAL];	/* the pattern text */
	int			len;		/* presumably length of @pattern — TODO confirm */
	int			field_len;
	regex_match_func	match;		/* comparison callback (see regex_match_func) */
};
16314ef56902STom Zanussi
is_string_field(struct ftrace_event_field * field)16324c738413SSteven Rostedt (VMware) static inline bool is_string_field(struct ftrace_event_field *field)
16334c738413SSteven Rostedt (VMware) {
16344ef56902STom Zanussi return field->filter_type == FILTER_DYN_STRING ||
16354ef56902STom Zanussi field->filter_type == FILTER_RDYN_STRING ||
16364ef56902STom Zanussi field->filter_type == FILTER_STATIC_STRING ||
16374ef56902STom Zanussi field->filter_type == FILTER_PTR_STRING ||
16384ef56902STom Zanussi field->filter_type == FILTER_COMM;
16394ef56902STom Zanussi }
16404ef56902STom Zanussi
/* True if @field is filtered as a traced-function (FILTER_TRACE_FN) field. */
static inline bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}
16457f1d2f82SSteven Rostedt (Red Hat)
16468b372562STom Zanussi extern enum regex_type
16477967b3e0SSteven Rostedt (Red Hat) filter_parse_regex(char *buff, int len, char **search, int *not);
16488b372562STom Zanussi extern void print_event_filter(struct trace_event_file *file,
16498b372562STom Zanussi struct trace_seq *s);
1650ac1adc55STom Zanussi extern int apply_event_filter(struct trace_event_file *file,
1651aa38e9fcSLi Zefan char *filter_string);
16521e144d73SSteven Rostedt (VMware) extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
16531e144d73SSteven Rostedt (VMware) char *filter_string);
1654bac5fb97STom Zanussi extern void print_subsystem_event_filter(struct event_subsystem *system,
1655bac5fb97STom Zanussi struct trace_seq *s);
1656bac5fb97STom Zanussi extern int filter_assign_type(const char *type);
16577ce7e424STom Zanussi extern int create_event_filter(struct trace_array *tr,
1658b3a8c6fdSzhangwei(Jovi) struct trace_event_call *call,
16592425bcb9SSteven Rostedt (Red Hat) char *filter_str, bool set_str,
16602e33af02SSteven Rostedt struct event_filter **filterp);
1661e870e9a1SLi Zefan extern void free_event_filter(struct event_filter *filter);
1662d914ba37SJoel Fernandes
1663d914ba37SJoel Fernandes struct ftrace_event_field *
166458b92547SSteven Rostedt (VMware) trace_find_event_field(struct trace_event_call *call, char *name);
16653bb06eb6SSteven Rostedt (Google)
1666277ba044SSteven Rostedt extern void trace_event_enable_cmd_record(bool enable);
16670c8916c3SSteven Rostedt extern void trace_event_enable_tgid_record(bool enable);
1668720dee53SMasami Hiramatsu
1669e870e9a1SLi Zefan extern int event_trace_init(void);
16703c96529cSSteven Rostedt (VMware) extern int init_events(void);
16713c96529cSSteven Rostedt (VMware) extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
16723c96529cSSteven Rostedt (VMware) extern int event_trace_del_tracer(struct trace_array *tr);
16737f1d2f82SSteven Rostedt (Red Hat) extern void __trace_early_add_events(struct trace_array *tr);
16747862ad18STom Zanussi
16757862ad18STom Zanussi extern struct trace_event_file *__find_event_file(struct trace_array *tr,
16767862ad18STom Zanussi const char *system,
167785f2b082STom Zanussi const char *event);
167885f2b082STom Zanussi extern struct trace_event_file *find_event_file(struct trace_array *tr,
16796aa7de05SMark Rutland const char *system,
168085f2b082STom Zanussi const char *event);
168185f2b082STom Zanussi
/*
 * Return @filp's i_private pointer (the trace_event_file).  READ_ONCE()
 * is used because i_private may be updated concurrently; see the comment
 * below on when event_file_file() must be used instead.
 */
static inline void *event_file_data(struct file *filp)
{
	return READ_ONCE(file_inode(filp)->i_private);
}
1686b1560408SSteven Rostedt
1687b1560408SSteven Rostedt extern struct mutex event_mutex;
1688b1560408SSteven Rostedt extern struct list_head ftrace_events;
1689b1560408SSteven Rostedt
1690b1560408SSteven Rostedt /*
1691b1560408SSteven Rostedt * When the trace_event_file is the filp->i_private pointer,
1692b1560408SSteven Rostedt * it must be taken under the event_mutex lock, and then checked
1693b1560408SSteven Rostedt * if the EVENT_FILE_FL_FREED flag is set. If it is, then the
1694b1560408SSteven Rostedt * data pointed to by the trace_event_file can not be trusted.
1695b1560408SSteven Rostedt *
1696b1560408SSteven Rostedt * Use the event_file_file() to access the trace_event_file from
1697b1560408SSteven Rostedt * the filp the first time under the event_mutex and check for
1698b1560408SSteven Rostedt * NULL. If it is needed to be retrieved again and the event_mutex
1699b1560408SSteven Rostedt * is still held, then the event_file_data() can be used and it
1700b1560408SSteven Rostedt * is guaranteed to be valid.
1701b1560408SSteven Rostedt */
event_file_file(struct file * filp)1702b1560408SSteven Rostedt static inline struct trace_event_file *event_file_file(struct file *filp)
1703b1560408SSteven Rostedt {
1704b1560408SSteven Rostedt struct trace_event_file *file;
1705b1560408SSteven Rostedt
1706b1560408SSteven Rostedt lockdep_assert_held(&event_mutex);
1707b1560408SSteven Rostedt file = READ_ONCE(file_inode(filp)->i_private);
170885f2b082STom Zanussi if (!file || file->flags & EVENT_FILE_FL_FREED)
17097ef224d1STom Zanussi return NULL;
17102d19bd79STom Zanussi return file;
17116c3edaf9SCong Wang }
17127ef224d1STom Zanussi
17137ef224d1STom Zanussi extern const struct file_operations event_trigger_fops;
17147ef224d1STom Zanussi extern const struct file_operations event_hist_fops;
1715d0bad49bSTom Zanussi extern const struct file_operations event_hist_debug_fops;
17167ef224d1STom Zanussi extern const struct file_operations event_inject_fops;
17177ef224d1STom Zanussi
#ifdef CONFIG_HIST_TRIGGERS
extern int register_trigger_hist_cmd(void);
extern int register_trigger_hist_enable_disable_cmds(void);
#else
/* Hist triggers not configured: registration becomes a successful no-op. */
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
#endif
17257491e2c4STzvetomir Stoyanov (VMware)
17267491e2c4STzvetomir Stoyanov (VMware) extern int register_trigger_cmds(void);
17277491e2c4STzvetomir Stoyanov (VMware) extern void clear_event_triggers(struct trace_array *tr);
172885f2b082STom Zanussi
/* Bits for event_trigger_data::flags */
enum {
	/* presumably set when the trigger was attached by an event probe — TODO confirm */
	EVENT_TRIGGER_FL_PROBE		= BIT(0),
};
173285f2b082STom Zanussi
/*
 * Run-time state for one trigger instance attached to a trace event.
 * Member semantics are implemented by the trigger code; notes below are
 * drawn from the accessors declared in this header.
 */
struct event_trigger_data {
	unsigned long			count;		/* presumably remaining fire count — TODO confirm */
	int				ref;		/* reference count (see event_trigger_init()) */
	int				flags;		/* EVENT_TRIGGER_FL_* bits */
	const struct event_trigger_ops	*ops;		/* per-trigger callbacks */
	struct event_command		*cmd_ops;	/* command that created this trigger */
	struct event_filter __rcu	*filter;	/* optional filter, RCU-annotated */
	char				*filter_str;	/* textual form of the filter */
	void				*private_data;
	bool				paused;		/* see pause_named_trigger() */
	bool				paused_tmp;
	struct list_head		list;
	char				*name;		/* set for named triggers (save_named_trigger()) */
	struct list_head		named_list;
	struct event_trigger_data	*named_data;	/* see set_named_trigger_data() */
};
1749d0bad49bSTom Zanussi
1750d0bad49bSTom Zanussi /* Avoid typos */
1751d0bad49bSTom Zanussi #define ENABLE_EVENT_STR "enable_event"
1752d0bad49bSTom Zanussi #define DISABLE_EVENT_STR "disable_event"
1753d0bad49bSTom Zanussi #define ENABLE_HIST_STR "enable_hist"
1754d0bad49bSTom Zanussi #define DISABLE_HIST_STR "disable_hist"
1755d0bad49bSTom Zanussi
/* Parameters for the enable_event/disable_event (and *_hist) triggers. */
struct enable_trigger_data {
	struct trace_event_file	*file;		/* the event to enable/disable */
	bool			enable;		/* true = enable, false = disable */
	bool			hist;		/* operating on the hist variant (ENABLE_HIST_STR) */
};
1761d0bad49bSTom Zanussi
1762e1f187d0STom Zanussi extern int event_enable_trigger_print(struct seq_file *m,
1763e1f187d0STom Zanussi struct event_trigger_data *data);
1764d0bad49bSTom Zanussi extern void event_enable_trigger_free(struct event_trigger_data *data);
1765d0bad49bSTom Zanussi extern int event_enable_trigger_parse(struct event_command *cmd_ops,
1766d0bad49bSTom Zanussi struct trace_event_file *file,
1767d0bad49bSTom Zanussi char *glob, char *cmd,
1768d0bad49bSTom Zanussi char *param_and_filter);
1769d0bad49bSTom Zanussi extern int event_enable_register_trigger(char *glob,
1770ab4bf008STom Zanussi struct event_trigger_data *data,
177147670541STom Zanussi struct trace_event_file *file);
1772ab4bf008STom Zanussi extern void event_enable_unregister_trigger(char *glob,
1773ab4bf008STom Zanussi struct event_trigger_data *test,
1774ab4bf008STom Zanussi struct trace_event_file *file);
1775ab4bf008STom Zanussi extern void trigger_data_free(struct event_trigger_data *data);
1776ab4bf008STom Zanussi extern int event_trigger_init(struct event_trigger_data *data);
1777ab4bf008STom Zanussi extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1778db1388b4STom Zanussi int trigger_enable);
1779db1388b4STom Zanussi extern void update_cond_flag(struct trace_event_file *file);
1780db1388b4STom Zanussi extern int set_trigger_filter(char *filter_str,
1781db1388b4STom Zanussi struct event_trigger_data *trigger_data,
1782db1388b4STom Zanussi struct trace_event_file *file);
1783db1388b4STom Zanussi extern struct event_trigger_data *find_named_trigger(const char *name);
1784db1388b4STom Zanussi extern bool is_named_trigger(struct event_trigger_data *test);
1785db1388b4STom Zanussi extern int save_named_trigger(const char *name,
1786db1388b4STom Zanussi struct event_trigger_data *data);
1787067fe038STom Zanussi extern void del_named_trigger(struct event_trigger_data *data);
1788067fe038STom Zanussi extern void pause_named_trigger(struct event_trigger_data *data);
1789ab4bf008STom Zanussi extern void unpause_named_trigger(struct event_trigger_data *data);
1790d0bad49bSTom Zanussi extern void set_named_trigger_data(struct event_trigger_data *data,
1791d0bad49bSTom Zanussi struct event_trigger_data *named_data);
179286599dbeSTom Zanussi extern struct event_trigger_data *
179386599dbeSTom Zanussi get_named_trigger_data(struct event_trigger_data *data);
179486599dbeSTom Zanussi extern int register_event_command(struct event_command *cmd);
179586599dbeSTom Zanussi extern int unregister_event_command(struct event_command *cmd);
179686599dbeSTom Zanussi extern int register_trigger_hist_enable_disable_cmds(void);
179786599dbeSTom Zanussi extern bool event_trigger_check_remove(const char *glob);
179886599dbeSTom Zanussi extern bool event_trigger_empty_param(const char *param);
179986599dbeSTom Zanussi extern int event_trigger_separate_filter(char *param_and_filter, char **param,
180086599dbeSTom Zanussi char **filter, bool param_required);
180186599dbeSTom Zanussi extern struct event_trigger_data *
180286599dbeSTom Zanussi event_trigger_alloc(struct event_command *cmd_ops,
180386599dbeSTom Zanussi char *cmd,
180486599dbeSTom Zanussi char *param,
180586599dbeSTom Zanussi void *private_data);
180686599dbeSTom Zanussi extern int event_trigger_parse_num(char *trigger,
180786599dbeSTom Zanussi struct event_trigger_data *trigger_data);
180886599dbeSTom Zanussi extern int event_trigger_set_filter(struct event_command *cmd_ops,
180986599dbeSTom Zanussi struct trace_event_file *file,
181086599dbeSTom Zanussi char *param,
181186599dbeSTom Zanussi struct event_trigger_data *trigger_data);
1812b8cc44a4STom Zanussi extern void event_trigger_reset_filter(struct event_command *cmd_ops,
1813b8cc44a4STom Zanussi struct event_trigger_data *trigger_data);
1814b8cc44a4STom Zanussi extern int event_trigger_register(struct event_command *cmd_ops,
1815b8cc44a4STom Zanussi struct trace_event_file *file,
1816b8cc44a4STom Zanussi char *glob,
1817ab4bf008STom Zanussi struct event_trigger_data *trigger_data);
1818bb32500fSSteven Rostedt (Google) extern void event_trigger_unregister(struct event_command *cmd_ops,
1819bb32500fSSteven Rostedt (Google) struct trace_event_file *file,
1820bb32500fSSteven Rostedt (Google) char *glob,
182185f2b082STom Zanussi struct event_trigger_data *trigger_data);
182285f2b082STom Zanussi
182385f2b082STom Zanussi extern void event_file_get(struct trace_event_file *file);
182485f2b082STom Zanussi extern void event_file_put(struct trace_event_file *file);
182585f2b082STom Zanussi
182685f2b082STom Zanussi /**
1827fb339e53STom Zanussi * struct event_trigger_ops - callbacks for trace event triggers
1828fb339e53STom Zanussi *
1829fb339e53STom Zanussi * The methods in this structure provide per-event trigger hooks for
1830fb339e53STom Zanussi * various trigger operations.
1831fb339e53STom Zanussi *
1832fb339e53STom Zanussi * The @init and @free methods are used during trigger setup and
1833fb339e53STom Zanussi * teardown, typically called from an event_command's @parse()
1834fb339e53STom Zanussi * function implementation.
1835fb339e53STom Zanussi *
1836fb339e53STom Zanussi * The @print method is used to print the trigger spec.
183785f2b082STom Zanussi *
183885f2b082STom Zanussi * The @trigger method is the function that actually implements the
183985f2b082STom Zanussi * trigger and is called in the context of the triggering event
1840fb339e53STom Zanussi * whenever that event occurs.
184185f2b082STom Zanussi *
184285f2b082STom Zanussi * All the methods below, except for @init() and @free(), must be
1843c4a59230STom Zanussi * implemented.
1844c4a59230STom Zanussi *
184585f2b082STom Zanussi * @trigger: The trigger 'probe' function called when the triggering
184685f2b082STom Zanussi * event occurs. The data passed into this callback is the data
184785f2b082STom Zanussi * that was supplied to the event_command @reg() function that
184885f2b082STom Zanussi * registered the trigger (see struct event_command) along with
184985f2b082STom Zanussi * the trace record, rec.
185085f2b082STom Zanussi *
185185f2b082STom Zanussi * @init: An optional initialization function called for the trigger
185285f2b082STom Zanussi * when the trigger is registered (via the event_command reg()
185385f2b082STom Zanussi * function). This can be used to perform per-trigger
185485f2b082STom Zanussi * initialization such as incrementing a per-trigger reference
185585f2b082STom Zanussi * count, for instance. This is usually implemented by the
185685f2b082STom Zanussi * generic utility function @event_trigger_init() (see
185785f2b082STom Zanussi * trace_event_triggers.c).
185885f2b082STom Zanussi *
185985f2b082STom Zanussi * @free: An optional de-initialization function called for the
186085f2b082STom Zanussi * trigger when the trigger is unregistered (via the
186185f2b082STom Zanussi * event_command @reg() function). This can be used to perform
186285f2b082STom Zanussi * per-trigger de-initialization such as decrementing a
186385f2b082STom Zanussi * per-trigger reference count and freeing corresponding trigger
186485f2b082STom Zanussi * data, for instance. This is usually implemented by the
186585f2b082STom Zanussi * generic utility function @event_trigger_free() (see
186685f2b082STom Zanussi * trace_event_triggers.c).
186785f2b082STom Zanussi *
186885f2b082STom Zanussi * @print: The callback function invoked to have the trigger print
1869fb339e53STom Zanussi * itself. This is usually implemented by a wrapper function
1870fb339e53STom Zanussi * that calls the generic utility function @event_trigger_print()
1871fb339e53STom Zanussi * (see trace_event_triggers.c).
18721ac4f51cSTom Zanussi */
struct event_trigger_ops {
	/* called in the context of the triggering event; see kernel-doc above */
	void			(*trigger)(struct event_trigger_data *data,
					   struct trace_buffer *buffer,
					   void *rec,
					   struct ring_buffer_event *rbe);
	/* optional: per-trigger setup at registration */
	int			(*init)(struct event_trigger_data *data);
	/* optional: per-trigger teardown at unregistration */
	void			(*free)(struct event_trigger_data *data);
	/* print the trigger spec to @m */
	int			(*print)(struct seq_file *m,
					 struct event_trigger_data *data);
};
188385f2b082STom Zanussi
188485f2b082STom Zanussi /**
188585f2b082STom Zanussi * struct event_command - callbacks and data members for event commands
188685f2b082STom Zanussi *
188785f2b082STom Zanussi * Event commands are invoked by users by writing the command name
188885f2b082STom Zanussi * into the 'trigger' file associated with a trace event. The
188985f2b082STom Zanussi * parameters associated with a specific invocation of an event
189085f2b082STom Zanussi * command are used to create an event trigger instance, which is
189185f2b082STom Zanussi * added to the list of trigger instances associated with that trace
189285f2b082STom Zanussi * event. When the event is hit, the set of triggers associated with
189385f2b082STom Zanussi * that event is invoked.
189485f2b082STom Zanussi *
189585f2b082STom Zanussi * The data members in this structure provide per-event command data
189685f2b082STom Zanussi * for various event commands.
189785f2b082STom Zanussi *
189885f2b082STom Zanussi * All the data members below, except for @post_trigger, must be set
189985f2b082STom Zanussi * for each event command.
190085f2b082STom Zanussi *
190185f2b082STom Zanussi * @name: The unique name that identifies the event command. This is
190285f2b082STom Zanussi * the name used when setting triggers via trigger files.
190385f2b082STom Zanussi *
190485f2b082STom Zanussi * @trigger_type: A unique id that identifies the event command
190585f2b082STom Zanussi * 'type'. This value has two purposes, the first to ensure that
190685f2b082STom Zanussi * only one trigger of the same type can be set at a given time
190785f2b082STom Zanussi * for a particular event e.g. it doesn't make sense to have both
190885f2b082STom Zanussi * a traceon and traceoff trigger attached to a single event at
190985f2b082STom Zanussi * the same time, so traceon and traceoff have the same type
191085f2b082STom Zanussi * though they have different names. The @trigger_type value is
1911af658dcaSSteven Rostedt (Red Hat) * also used as a bit value for deferring the actual trigger
191285f2b082STom Zanussi * action until after the current event is finished. Some
1913353206f5SSteven Rostedt (Red Hat) * commands need to do this if they themselves log to the trace
1914a5863daeSTom Zanussi * buffer (see the @post_trigger() member below). @trigger_type
1915a88e1cfbSTom Zanussi * values are defined by adding new values to the trigger_type
1916a88e1cfbSTom Zanussi * enum in include/linux/trace_events.h.
191785f2b082STom Zanussi *
19189ec5a7d1STom Zanussi * @flags: See the enum event_command_flags below.
191985f2b082STom Zanussi *
192085f2b082STom Zanussi * All the methods below, except for @set_filter() and @unreg_all(),
192185f2b082STom Zanussi * must be implemented.
192285f2b082STom Zanussi *
192385f2b082STom Zanussi * @parse: The callback function responsible for parsing and
192485f2b082STom Zanussi * registering the trigger written to the 'trigger' file by the
192585f2b082STom Zanussi * user. It allocates the trigger instance and registers it with
192685f2b082STom Zanussi * the appropriate trace event. It makes use of the other
192785f2b082STom Zanussi * event_command callback functions to orchestrate this, and is
192885f2b082STom Zanussi * usually implemented by the generic utility function
192985f2b082STom Zanussi * @event_trigger_callback() (see trace_event_triggers.c).
193085f2b082STom Zanussi *
193185f2b082STom Zanussi * @reg: Adds the trigger to the list of triggers associated with the
193285f2b082STom Zanussi * event, and enables the event trigger itself, after
193385f2b082STom Zanussi * initializing it (via the event_trigger_ops @init() function).
193485f2b082STom Zanussi * This is also where commands can use the @trigger_type value to
193585f2b082STom Zanussi * make the decision as to whether or not multiple instances of
193685f2b082STom Zanussi * the trigger should be allowed. This is usually implemented by
193785f2b082STom Zanussi * the generic utility function @register_trigger() (see
193885f2b082STom Zanussi * trace_event_triggers.c).
193985f2b082STom Zanussi *
194085f2b082STom Zanussi * @unreg: Removes the trigger from the list of triggers associated
1941a88e1cfbSTom Zanussi * with the event, and disables the event trigger itself, after
1942a88e1cfbSTom Zanussi * initializing it (via the event_trigger_ops @free() function).
1943a88e1cfbSTom Zanussi * This is usually implemented by the generic utility function
1944a88e1cfbSTom Zanussi * @unregister_trigger() (see trace_event_triggers.c).
194585f2b082STom Zanussi *
194685f2b082STom Zanussi * @unreg_all: An optional function called to remove all the triggers
194785f2b082STom Zanussi * from the list of triggers associated with the event. Called
194885f2b082STom Zanussi * when a trigger file is opened in truncate mode.
194985f2b082STom Zanussi *
195085f2b082STom Zanussi * @set_filter: An optional function called to parse and set a filter
195185f2b082STom Zanussi * for the trigger. If no @set_filter() method is set for the
195285f2b082STom Zanussi * event command, filters set by the user for the command will be
19539ec5a7d1STom Zanussi * ignored. This is usually implemented by the generic utility
19549ec5a7d1STom Zanussi * function @set_trigger_filter() (see trace_event_triggers.c).
19559ec5a7d1STom Zanussi *
19569ec5a7d1STom Zanussi * @get_trigger_ops: The callback function invoked to retrieve the
195785f2b082STom Zanussi * event_trigger_ops implementation associated with the command.
195885f2b082STom Zanussi * This callback function allows a single event_command to
195985f2b082STom Zanussi * support multiple trigger implementations via different sets of
196085f2b082STom Zanussi * event_trigger_ops, depending on the value of the @param
196185f2b082STom Zanussi * string.
1962353206f5SSteven Rostedt (Red Hat) */
struct event_command {
	struct list_head	list;		/* links into the registered-commands list */
	char			*name;		/* command name written to the 'trigger' file */
	enum event_trigger_type	trigger_type;	/* unique type id, also the deferral bit */
	int			flags;		/* enum event_command_flags */
	/* parse the user string and register the trigger; see kernel-doc above */
	int			(*parse)(struct event_command *cmd_ops,
					 struct trace_event_file *file,
					 char *glob, char *cmd,
					 char *param_and_filter);
	/* add the trigger to the event and enable it */
	int			(*reg)(char *glob,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	/* remove the trigger from the event and disable it */
	void			(*unreg)(char *glob,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	/* optional: drop all triggers (trigger file opened with O_TRUNC) */
	void			(*unreg_all)(struct trace_event_file *file);
	/* optional: parse and attach a filter to the trigger */
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	/* select the event_trigger_ops implementation for @cmd/@param */
	const struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
1984353206f5SSteven Rostedt (Red Hat)
1985353206f5SSteven Rostedt (Red Hat) /**
1986353206f5SSteven Rostedt (Red Hat) * enum event_command_flags - flags for struct event_command
1987353206f5SSteven Rostedt (Red Hat) *
1988353206f5SSteven Rostedt (Red Hat) * @POST_TRIGGER: A flag that says whether or not this command needs
1989353206f5SSteven Rostedt (Red Hat) * to have its action delayed until after the current event has
1990353206f5SSteven Rostedt (Red Hat) * been closed. Some triggers need to avoid being invoked while
1991353206f5SSteven Rostedt (Red Hat) * an event is currently in the process of being logged, since
1992353206f5SSteven Rostedt (Red Hat) * the trigger may itself log data into the trace buffer. Thus
1993353206f5SSteven Rostedt (Red Hat) * we make sure the current event is committed before invoking
1994353206f5SSteven Rostedt (Red Hat) * those triggers. To do that, the trigger invocation is split
1995353206f5SSteven Rostedt (Red Hat) * in two - the first part checks the filter using the current
1996353206f5SSteven Rostedt (Red Hat) * trace record; if a command has the @post_trigger flag set, it
1997353206f5SSteven Rostedt (Red Hat) * sets a bit for itself in the return value, otherwise it
1998353206f5SSteven Rostedt (Red Hat) * directly invokes the trigger. Once all commands have been
1999353206f5SSteven Rostedt (Red Hat) * either invoked or set their return flag, the current record is
2000353206f5SSteven Rostedt (Red Hat) * either committed or discarded. At that point, if any commands
2001353206f5SSteven Rostedt (Red Hat) * have deferred their triggers, those commands are finally
2002353206f5SSteven Rostedt (Red Hat) * invoked following the close of the current event. In other
2003353206f5SSteven Rostedt (Red Hat) * words, if the event_trigger_ops @func() probe implementation
2004353206f5SSteven Rostedt (Red Hat) * itself logs to the trace buffer, this flag should be set,
2005353206f5SSteven Rostedt (Red Hat) * otherwise it can be left unspecified.
2006353206f5SSteven Rostedt (Red Hat) *
2007353206f5SSteven Rostedt (Red Hat) * @NEEDS_REC: A flag that says whether or not this command needs
2008353206f5SSteven Rostedt (Red Hat) * access to the trace record in order to perform its function,
2009353206f5SSteven Rostedt (Red Hat) * regardless of whether or not it has a filter associated with
2010353206f5SSteven Rostedt (Red Hat) * it (filters make a trigger require access to the trace record
2011353206f5SSteven Rostedt (Red Hat) * but are not always present).
2012353206f5SSteven Rostedt (Red Hat) */
/*
 * Flag bits for struct event_command::flags; see the kernel-doc
 * comment above for the meaning of POST_TRIGGER and NEEDS_REC.
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER	= (1 << 0),
	EVENT_CMD_FL_NEEDS_REC		= (1 << 1),
};
2017353206f5SSteven Rostedt (Red Hat)
event_command_post_trigger(struct event_command * cmd_ops)2018353206f5SSteven Rostedt (Red Hat) static inline bool event_command_post_trigger(struct event_command *cmd_ops)
2019353206f5SSteven Rostedt (Red Hat) {
2020353206f5SSteven Rostedt (Red Hat) return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
2021353206f5SSteven Rostedt (Red Hat) }
2022353206f5SSteven Rostedt (Red Hat)
event_command_needs_rec(struct event_command * cmd_ops)20237f1d2f82SSteven Rostedt (Red Hat) static inline bool event_command_needs_rec(struct event_command *cmd_ops)
202485f2b082STom Zanussi {
202593e31ffbSTom Zanussi return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
2026a35873a0STom Zanussi }
2027a35873a0STom Zanussi
2028a35873a0STom Zanussi extern int trace_event_enable_disable(struct trace_event_file *file,
2029a35873a0STom Zanussi int enable, int soft_disable);
2030a35873a0STom Zanussi extern int tracing_alloc_snapshot(void);
203185f2b082STom Zanussi extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
2032e9fb2b6dSSteven Rostedt extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
2033e9fb2b6dSSteven Rostedt
2034e9fb2b6dSSteven Rostedt extern int tracing_snapshot_cond_disable(struct trace_array *tr);
2035102c9323SSteven Rostedt (Red Hat) extern void *tracing_cond_snapshot_data(struct trace_array *tr);
2036102c9323SSteven Rostedt (Red Hat)
2037102c9323SSteven Rostedt (Red Hat) extern const char *__start___trace_bprintk_fmt[];
2038b9f9108cSSteven Rostedt (Red Hat) extern const char *__stop___trace_bprintk_fmt[];
203981698831SSteven Rostedt
2040613f04a0SSteven Rostedt (Red Hat) extern const char *__start___tracepoint_str[];
20412b6080f2SSteven Rostedt extern const char *__stop___tracepoint_str[];
204207d777feSSteven Rostedt
20435c3469cbSMasami Hiramatsu void trace_printk_control(bool enabled);
20445c3469cbSMasami Hiramatsu void trace_printk_start_comm(void);
20455c3469cbSMasami Hiramatsu int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
20465c3469cbSMasami Hiramatsu int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
20475c3469cbSMasami Hiramatsu
20485c3469cbSMasami Hiramatsu /* Used from boot time tracer */
20495c3469cbSMasami Hiramatsu extern int trace_set_options(struct trace_array *tr, char *option);
20505c3469cbSMasami Hiramatsu extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
20515c3469cbSMasami Hiramatsu extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
20527e465baaSTom Zanussi unsigned long size, int cpu_id);
20537e465baaSTom Zanussi extern int tracing_set_cpumask(struct trace_array *tr,
20547e465baaSTom Zanussi cpumask_var_t tracing_cpumask_new);
20557e465baaSTom Zanussi
2056d262271dSMasami Hiramatsu
20577e465baaSTom Zanussi #define MAX_EVENT_NAME_LEN 64
20588a062902STom Zanussi
20592f754e77SSteven Rostedt (VMware) extern ssize_t trace_parse_run_command(struct file *file,
20602f754e77SSteven Rostedt (VMware) const char __user *buffer, size_t count, loff_t *ppos,
20611581a884STom Zanussi int (*createfn)(const char *));
20628a062902STom Zanussi
2063ca268da6SSteven Rostedt (Red Hat) extern unsigned int err_pos(char *cmd, const char *str);
2064ca268da6SSteven Rostedt (Red Hat) extern void tracing_log_err(struct trace_array *tr,
2065ca268da6SSteven Rostedt (Red Hat) const char *loc, const char *cmd,
2066ca268da6SSteven Rostedt (Red Hat) const char **errs, u8 type, u16 pos);
2067ca268da6SSteven Rostedt (Red Hat)
/*
 * Normal trace_printk() and friends allocates special buffers
 * to do the manipulation, as well as saves the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 *
 * Note: @str is evaluated twice (once for strlen()), so pass only
 * side-effect-free expressions.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
20784e5292eaSSteven Rostedt
/*
 * Declare an extern trace_event_call for every ftrace-internal event
 * listed in trace_entries.h.  That header (included just below) is
 * re-read with these macro definitions in place, so each FTRACE_ENTRY()
 * entry expands to a declaration of the matching event_<call> object.
 */
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)		\
	extern struct trace_event_call					\
	__aligned(4) event_##call;

/* The DUP and PACKED variants declare the same thing as FTRACE_ENTRY. */
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
2089ced39002SJiri Olsa
2090ced39002SJiri Olsa #include "trace_entries.h"
2091ced39002SJiri Olsa
20926e48b550SMark Brown #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
2093ced39002SJiri Olsa int perf_ftrace_event_register(struct trace_event_call *call,
20945f893b26SSteven Rostedt (Red Hat) enum trace_reg type, void *data);
20955f893b26SSteven Rostedt (Red Hat) #else
2096dbfeaa7aSTom Zanussi #define perf_ftrace_event_register NULL
20975f893b26SSteven Rostedt (Red Hat) #endif
20985f893b26SSteven Rostedt (Red Hat)
2099dbfeaa7aSTom Zanussi #ifdef CONFIG_FTRACE_SYSCALLS
2100dbfeaa7aSTom Zanussi void init_ftrace_syscalls(void);
2101dbfeaa7aSTom Zanussi const char *get_syscall_name(int syscall);
2102dbfeaa7aSTom Zanussi #else
/* Stubs used when CONFIG_FTRACE_SYSCALLS is not enabled. */
static inline void init_ftrace_syscalls(void)
{
}

static inline const char *get_syscall_name(int syscall)
{
	/* No syscall metadata is available without CONFIG_FTRACE_SYSCALLS. */
	return NULL;
}
21085c3469cbSMasami Hiramatsu #endif
21095c3469cbSMasami Hiramatsu
21105c3469cbSMasami Hiramatsu #ifdef CONFIG_EVENT_TRACING
21115f893b26SSteven Rostedt (Red Hat) void trace_event_init(void);
21125f893b26SSteven Rostedt (Red Hat) void trace_event_eval_update(struct trace_eval_map **map, int len);
2113f57a4143SJeremy Linton /* Used from boot time tracer */
21145f893b26SSteven Rostedt (Red Hat) extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
21155f893b26SSteven Rostedt (Red Hat) extern int trigger_process_regex(struct trace_event_file *file, char *buff);
21162824f503SSteven Rostedt (VMware) #else
trace_event_init(void)21172824f503SSteven Rostedt (VMware) static inline void __init trace_event_init(void) { }
trace_event_eval_update(struct trace_eval_map ** map,int len)21182824f503SSteven Rostedt (VMware) static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
2119180e4e39SVincent Donnefort #endif
2120180e4e39SVincent Donnefort
21212824f503SSteven Rostedt (VMware) #ifdef CONFIG_TRACER_SNAPSHOT
21222824f503SSteven Rostedt (VMware) void tracing_snapshot_instance(struct trace_array *tr);
21232824f503SSteven Rostedt (VMware) int tracing_alloc_snapshot_instance(struct trace_array *tr);
21242824f503SSteven Rostedt (VMware) int tracing_arm_snapshot(struct trace_array *tr);
21252824f503SSteven Rostedt (VMware) void tracing_disarm_snapshot(struct trace_array *tr);
21262824f503SSteven Rostedt (VMware) #else
/* Stubs used when CONFIG_TRACER_SNAPSHOT is not enabled. */
static inline void tracing_snapshot_instance(struct trace_array *tr)
{
}

static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	/* Nothing to allocate; report success. */
	return 0;
}

static inline int tracing_arm_snapshot(struct trace_array *tr)
{
	return 0;
}

static inline void tracing_disarm_snapshot(struct trace_array *tr)
{
}
21343f1756dcSSteven Rostedt (VMware) #endif
21353f1756dcSSteven Rostedt (VMware)
21363f1756dcSSteven Rostedt (VMware) #ifdef CONFIG_PREEMPT_TRACER
21373f1756dcSSteven Rostedt (VMware) void tracer_preempt_on(unsigned long a0, unsigned long a1);
21383f1756dcSSteven Rostedt (VMware) void tracer_preempt_off(unsigned long a0, unsigned long a1);
21393f1756dcSSteven Rostedt (VMware) #else
/* No-op stubs used when CONFIG_PREEMPT_TRACER is not enabled. */
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
}

static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
}
21423f1756dcSSteven Rostedt (VMware) #endif
21433f1756dcSSteven Rostedt (VMware) #ifdef CONFIG_IRQSOFF_TRACER
21443f1756dcSSteven Rostedt (VMware) void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
21453f1756dcSSteven Rostedt (VMware) void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
21460c97bf86SMiguel Ojeda #else
/* No-op stubs used when CONFIG_IRQSOFF_TRACER is not enabled. */
static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
}

static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
}
21490c97bf86SMiguel Ojeda #endif
21500c97bf86SMiguel Ojeda
/*
 * Reset the state of the trace_iterator so that it can read consumed data.
 * Normally, the trace_iterator is used for reading the data when it is not
 * consumed, and must retain state.
 */
static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
{
	/*
	 * memset_startat() zeroes every member from 'seq' to the end of
	 * the struct, preserving the members that precede 'seq'.
	 */
	memset_startat(iter, 0, seq);
	/* pos of -1 marks the iterator position as not yet set. */
	iter->pos = -1;
}
216142d120e2STom Zanussi
/*
 * Check the name is good for event/group/fields.
 * @hash_ok: when true, '-' is also accepted in any position.
 */
static inline bool __is_good_name(const char *name, bool hash_ok)
{
	const char *p = name;

	/* The first character must be alphabetic, '_', or '-' (if allowed). */
	if (!isalpha(*p) && *p != '_' && !(hash_ok && *p == '-'))
		return false;

	/* The remaining characters may additionally be digits. */
	for (p++; *p != '\0'; p++) {
		if (!isalpha(*p) && !isdigit(*p) && *p != '_' &&
		    !(hash_ok && *p == '-'))
			return false;
	}

	return true;
}
2174575b76cbSSteven Rostedt (Google)
/* Check the name is good for event/group/fields ('-' is not allowed). */
static inline bool is_good_name(const char *name)
{
	return __is_good_name(name, false);
}
2180575b76cbSSteven Rostedt (Google)
/* Check the name is good for system (same rules, but '-' is allowed). */
static inline bool is_good_system_name(const char *name)
{
	return __is_good_name(name, true);
}
21867491e2c4STzvetomir Stoyanov (VMware)
/*
 * Convert certain expected symbols into '_' when generating event names.
 * Note: like the historical implementation, the very first character is
 * never examined; replacement starts at the second character.
 */
static inline void sanitize_event_name(char *name)
{
	for (char *p = name + 1; p[-1] != '\0'; p++) {
		if (*p == ':' || *p == '.')
			*p = '_';
	}
}
2194bc87cf0aSDaniel Bristot de Oliveira
/*
 * This is a generic way to read and write a u64 value from a file in tracefs.
 *
 * The value is stored on the variable pointed by *val. The value needs
 * to be at least *min and at most *max. The write is protected by an
 * existing *lock.
 */
struct trace_min_max_param {
	struct mutex	*lock;	/* serializes writes; supplied by the owner */
	u64		*val;	/* current value */
	u64		*min;	/* inclusive lower bound for writes */
	u64		*max;	/* inclusive upper bound for writes */
};
2208102227b9SDaniel Bristot de Oliveira
2209102227b9SDaniel Bristot de Oliveira #define U64_STR_SIZE 24 /* 20 digits max */
2210102227b9SDaniel Bristot de Oliveira
2211102227b9SDaniel Bristot de Oliveira extern const struct file_operations trace_min_max_fops;
2212102227b9SDaniel Bristot de Oliveira
2213102227b9SDaniel Bristot de Oliveira #ifdef CONFIG_RV
2214102227b9SDaniel Bristot de Oliveira extern int rv_init_interface(void);
2215102227b9SDaniel Bristot de Oliveira #else
/* Stub used when CONFIG_RV (runtime verification) is not enabled. */
static inline int rv_init_interface(void)
{
	/* Nothing to initialize; report success. */
	return 0;
}
22206ce5a6f0STatsuya S #endif
22216ce5a6f0STatsuya S
/*
 * Dummy marker used only to distinguish a real function address from
 * trampoline code; the specific value carries no meaning.
 */
#define FTRACE_TRAMPOLINE_MARKER ((unsigned long) INT_MAX)
2228
2229 #endif /* _LINUX_KERNEL_TRACE_H */
2230