xref: /linux-6.15/include/trace/perf.h (revision cdb537ac)
/* SPDX-License-Identifier: GPL-2.0 */

#undef TRACE_SYSTEM_VAR

#ifdef CONFIG_PERF_EVENTS

#include "stages/stage6_event_callback.h"

#undef __perf_count
#define __perf_count(c)	(__count = (c))

#undef __perf_task
#define __perf_task(t)	(__task = (t))
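
/*
 * Illustrative usage sketch (added for exposition; the event and fields
 * below are hypothetical, loosely modelled on the scheduler stat events):
 * an event class can wrap one of its TP_ARGS in __perf_count() or
 * __perf_task() so that the perf handler below receives a sample weight
 * or a target task:
 *
 *	DECLARE_EVENT_CLASS(foo_stat,
 *		TP_PROTO(struct task_struct *tsk, u64 delta),
 *		TP_ARGS(tsk, __perf_count(delta)),
 *		...
 *	);
 *
 * __perf_count(delta) expands to (__count = (delta)): it assigns the
 * value to the local __count wherever @args is evaluated, while the
 * expression itself still yields delta, so the argument is forwarded
 * unchanged (__perf_task() does the same for __task).
 */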

#undef __DECLARE_EVENT_CLASS
#define __DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void							\
do_perf_trace_##call(void *__data, proto)				\
{									\
	struct trace_event_call *event_call = __data;			\
	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
	struct trace_event_raw_##call *entry;				\
	struct pt_regs *__regs;						\
	u64 __count = 1;						\
	struct task_struct *__task = NULL;				\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	if (!bpf_prog_array_valid(event_call) &&			\
	    __builtin_constant_p(!__task) && !__task &&			\
	    hlist_empty(head))						\
		return;							\
									\
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	perf_fetch_caller_regs(__regs);					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_trace_run_bpf_submit(entry, __entry_size, rctx,		\
				  event_call, __count, __regs,		\
				  head, __task);			\
}
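
/*
 * Sizing note (explanatory comment, relying on the assumption that perf
 * raw sample payloads are preceded by a u32 size field): the ALIGN()
 * arithmetic above pads the record so that the u32 size plus the payload
 * ends up u64 aligned.  Worked example with
 * __data_size + sizeof(*entry) == 22:
 *
 *	__entry_size = ALIGN(22 + 4, 8) - 4 = 32 - 4 = 28
 *
 * and 28 + 4 == 32 is a multiple of sizeof(u64).
 */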

/*
 * Define unused __count and __task variables so that @args can be used
 * to pass arguments to do_perf_trace_##call. This is needed because the
 * __perf_count and __perf_task macros have the side effect of storing
 * copies into those local variables.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
		      PARAMS(assign), PARAMS(print))			\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	u64 __count __attribute__((unused));				\
	struct task_struct *__task __attribute__((unused));		\
									\
	do_perf_trace_##call(__data, args);				\
}

#undef DECLARE_EVENT_SYSCALL_CLASS
#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \
__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
		      PARAMS(assign), PARAMS(print))			\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	u64 __count __attribute__((unused));				\
	struct task_struct *__task __attribute__((unused));		\
									\
	might_fault();							\
	preempt_disable_notrace();					\
	do_perf_trace_##call(__data, args);				\
	preempt_enable_notrace();					\
}
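
/*
 * Note on the syscall variant above (a likely rationale, inferred from
 * the annotations): syscall tracepoints can be invoked from a context
 * where faulting is allowed, hence the might_fault() assertion, while
 * the perf handler relies on per-CPU data, so preemption is disabled
 * around the call into do_perf_trace_##call().
 */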

/*
 * This part is compiled out; it is only here as a build-time check to
 * make sure that if the tracepoint handling changes, the perf probe
 * will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}
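
/*
 * Roughly how the check works (illustrative; "foo" and "proto" are
 * placeholders, and the helper itself comes from the tracepoint
 * machinery): check_trace_callback_type_##call() accepts a function
 * pointer with the tracepoint's callback prototype, e.g.
 *
 *	static inline void
 *	check_trace_callback_type_foo(void (*cb)(void *__data, proto)) { }
 *
 * so passing perf_trace_##template compiles only while its signature
 * still matches the tracepoint's.
 */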


#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

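/*
 * Re-including the trace header below expands the event definitions one
 * more time, now with the perf-specific macros above in effect; this is
 * part of the usual multi-pass expansion driven by define_trace.h.
 */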
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __DECLARE_EVENT_CLASS

#endif /* CONFIG_PERF_EVENTS */