/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;
struct bpf_prog;

const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
				  unsigned long flags,
				  const struct trace_print_flags *flag_array);

const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				    const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
		      unsigned long long flags,
		      const struct trace_print_flags_u64 *flag_array);

const char *trace_print_symbols_seq_u64(struct trace_seq *p,
					unsigned long long val,
					const struct trace_print_flags_u64
								 *symbol_array);
#endif

const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
				    unsigned int bitmask_size);

const char *trace_print_hex_seq(struct trace_seq *p,
				const unsigned char *buf, int len,
				bool concatenate);

const char *trace_print_array_seq(struct trace_seq *p,
				   const void *buf, int count,
				   size_t el_size);

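/*
 * These helpers back the __print_flags()/__print_symbolic() style macros
 * used in TRACE_EVENT() print formats. A minimal sketch of direct use
 * from an output callback (the flag table and values are illustrative,
 * not from this header):
 *
 *	static const struct trace_print_flags my_flag_names[] = {
 *		{ 0x01,	"WAIT"	},
 *		{ 0x02,	"IO"	},
 *		{ -1UL,	NULL	},
 *	};
 *
 *	trace_seq_printf(&iter->seq, "flags=%s\n",
 *			 trace_print_flags_seq(&iter->tmp_seq, "|",
 *					       entry->flags, my_flag_names));
 */
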
struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *event);

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

#define TRACE_EVENT_TYPE_MAX						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)

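/*
 * Every event record in the ring buffer begins with this header; the
 * event-specific fields follow it directly. An illustrative (hypothetical)
 * record layout:
 *
 *	struct trace_event_raw_my_event {
 *		struct trace_entry	ent;	// the header above
 *		unsigned long		ip;	// event-specific payload
 *	};
 *
 * The "type" member is looked up among registered struct trace_event
 * instances (see register_trace_event() below) to find the matching
 * output functions.
 */
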
/*
 * Trace iterator - used by printout routines that present trace
 * results to users; such routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	struct trace_buffer	*trace_buffer;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	cpumask_var_t		started;

	/* set when the currently open file is a snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			ent_size;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;

	/* All new fields here will be zeroed out in pipe_read */
};

enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};


typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
				      int flags, struct trace_event *event);

struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

struct trace_event {
	struct hlist_node		node;
	struct list_head		list;
	int				type;
	struct trace_event_functions	*funcs;
};

extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};

enum print_line_t trace_handle_return(struct trace_seq *s);

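/*
 * trace_handle_return() maps the state of a trace_seq to a print_line_t:
 * TRACE_TYPE_PARTIAL_LINE when the seq has overflowed (so the core can
 * flush and retry), TRACE_TYPE_HANDLED otherwise. A minimal sketch of an
 * output callback using it (names are illustrative):
 *
 *	static enum print_line_t
 *	my_event_output(struct trace_iterator *iter, int flags,
 *			struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "my_event: pid=%d\n",
 *				 iter->ent->pid);
 *		return trace_handle_return(&iter->seq);
 *	}
 */
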
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);
struct trace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned long flags, int pc);

#define TRACE_RECORD_CMDLINE	BIT(0)
#define TRACE_RECORD_TGID	BIT(1)

void tracing_record_taskinfo(struct task_struct *task, int flags);
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags);

void tracing_record_cmdline(struct task_struct *task);
void tracing_record_tgid(struct task_struct *task);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);

struct event_filter;

enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct trace_event_call;

struct trace_event_class {
	const char		*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct trace_event_call *event,
				       enum trace_reg type, void *data);
	int			(*define_fields)(struct trace_event_call *);
	struct list_head	*(*get_fields)(struct trace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct trace_event_call *);
};

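/*
 * The class's ->reg() callback multiplexes the enum trace_reg requests
 * above. Most events simply point ->reg at trace_event_reg() below; a
 * hedged sketch of what a custom implementation looks like (my_enable()
 * and my_disable() are hypothetical helpers):
 *
 *	static int my_event_reg(struct trace_event_call *call,
 *				enum trace_reg type, void *data)
 *	{
 *		switch (type) {
 *		case TRACE_REG_REGISTER:
 *			return my_enable(call);
 *		case TRACE_REG_UNREGISTER:
 *			my_disable(call);
 *			return 0;
 *		default:
 *			return 0;
 *		}
 *	}
 */
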
extern int trace_event_reg(struct trace_event_call *event,
			    enum trace_reg type, void *data);

struct trace_event_buffer {
	struct ring_buffer		*buffer;
	struct ring_buffer_event	*event;
	struct trace_event_file		*trace_file;
	void				*entry;
	unsigned long			flags;
	int				pc;
};

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				  struct trace_event_file *trace_file,
				  unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);

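/*
 * The usual tracing-side probe pattern: reserve room for the entry, fill
 * in the fields, then commit. A minimal sketch, reusing the hypothetical
 * trace_event_raw_my_event record from the earlier example:
 *
 *	struct trace_event_buffer fbuffer;
 *	struct trace_event_raw_my_event *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->ip = ip;
 *	trace_event_buffer_commit(&fbuffer);
 */
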
enum {
	TRACE_EVENT_FL_FILTERED_BIT,
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
	TRACE_EVENT_FL_TRACEPOINT_BIT,
	TRACE_EVENT_FL_KPROBE_BIT,
	TRACE_EVENT_FL_UPROBE_BIT,
};

/*
 * Event flags:
 *  FILTERED	  - The event has a filter attached
 *  CAP_ANY	  - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  TRACEPOINT    - Event is a tracepoint
 *  KPROBE        - Event is a kprobe
 *  UPROBE        - Event is a uprobe
 */
enum {
	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
};

#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)

struct trace_event_call {
	struct list_head	list;
	struct trace_event_class *class;
	union {
		char			*name;
		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
		struct tracepoint	*tp;
	};
	struct trace_event	event;
	char			*print_fmt;
	struct event_filter	*filter;
	void			*mod;
	void			*data;
	/*
	 * Bit layout, matching the TRACE_EVENT_FL_*_BIT enum above:
	 *   bit 0:		filter_active
	 *   bit 1:		allow trace by non root (cap any)
	 *   bit 2:		failed to apply filter
	 *   bit 3:		trace internal event (do not enable)
	 *   bit 4:		Event is a tracepoint
	 *   bit 5:		Event is a kprobe
	 *   bit 6:		Event is a uprobe
	 */
	int			flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
	int				perf_refcount;
	struct hlist_head __percpu	*perf_events;
	struct bpf_prog_array __rcu	*prog_array;

	int	(*perf_perm)(struct trace_event_call *,
			     struct perf_event *);
#endif
};

#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
	/*
	 * This inline function checks whether call->prog_array
	 * is valid or not. The function is called in various places,
	 * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
	 *
	 * If this function returns true, and later call->prog_array
	 * becomes NULL inside the rcu_read_lock/unlock region,
	 * we bail out then. If this function returns false,
	 * there is a risk that we might miss a few events if the checking
	 * were delayed until inside the rcu_read_lock/unlock region and
	 * call->prog_array happened to become non-NULL then.
	 *
	 * Here, READ_ONCE() is used instead of rcu_access_pointer().
	 * rcu_access_pointer() requires the actual definition of
	 * "struct bpf_prog_array" while READ_ONCE() only needs
	 * a declaration of the same type.
	 */
	return !!READ_ONCE(call->prog_array);
}
#endif

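/*
 * Typical use of bpf_prog_array_valid() is as the cheap pre-check before
 * invoking BPF programs, roughly as the kprobe/uprobe perf handlers do
 * (a sketch, not a verbatim caller):
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
 *		return;		// a BPF program filtered the event out
 */
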
static inline const char *
trace_event_name(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
		return call->tp ? call->tp->name : NULL;
	else
		return call->name;
}

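/*
 * Example: trace_event_name() hides the name/tp union in
 * struct trace_event_call, so callers need not care whether the event
 * is tracepoint based:
 *
 *	pr_debug("enabling event %s\n", trace_event_name(call));
 */
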
struct trace_array;
struct trace_subsystem_dir;

enum {
	EVENT_FILE_FL_ENABLED_BIT,
	EVENT_FILE_FL_RECORDED_CMD_BIT,
	EVENT_FILE_FL_RECORDED_TGID_BIT,
	EVENT_FILE_FL_FILTERED_BIT,
	EVENT_FILE_FL_NO_SET_FILTER_BIT,
	EVENT_FILE_FL_SOFT_MODE_BIT,
	EVENT_FILE_FL_SOFT_DISABLED_BIT,
	EVENT_FILE_FL_TRIGGER_MODE_BIT,
	EVENT_FILE_FL_TRIGGER_COND_BIT,
	EVENT_FILE_FL_PID_FILTER_BIT,
	EVENT_FILE_FL_WAS_ENABLED_BIT,
};

/*
 * Event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  RECORDED_TGID - The tgids should be recorded at sched_switch
 *  FILTERED	  - The event has a filter attached
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *                   tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 *  PID_FILTER    - When set, the event is filtered based on pid
 *  WAS_ENABLED   - Set when enabled to know to clear trace on module removal
 */
enum {
	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
	EVENT_FILE_FL_RECORDED_CMD	= (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
	EVENT_FILE_FL_RECORDED_TGID	= (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
	EVENT_FILE_FL_FILTERED		= (1 << EVENT_FILE_FL_FILTERED_BIT),
	EVENT_FILE_FL_NO_SET_FILTER	= (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
	EVENT_FILE_FL_SOFT_MODE		= (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
	EVENT_FILE_FL_WAS_ENABLED	= (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
};

struct trace_event_file {
	struct list_head		list;
	struct trace_event_call		*event_call;
	struct event_filter __rcu	*filter;
	struct dentry			*dir;
	struct trace_array		*tr;
	struct trace_subsystem_dir	*system;
	struct list_head		triggers;

	/*
	 * 32 bit flags; see the EVENT_FILE_FL_*_BIT enum above for
	 * the bit layout.
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating to other CPUs due to caching and
	 * such, which is mostly OK ;-)
	 */
	unsigned long		flags;
	atomic_t		sm_ref;	/* soft-mode reference counter */
	atomic_t		tm_ref;	/* trigger-mode reference counter */
};

#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags |= value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);

#define __TRACE_EVENT_PERF_PERM(name, expr...)				\
	static int perf_perm_##name(struct trace_event_call *tp_event, \
				    struct perf_event *p_event)		\
	{								\
		return ({ expr; });					\
	}								\
	static int __init trace_init_perf_perm_##name(void)		\
	{								\
		event_##name.perf_perm = &perf_perm_##name;		\
		return 0;						\
	}								\
	early_initcall(trace_init_perf_perm_##name);

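/*
 * __TRACE_EVENT_PERF_PERM() installs a permission hook that perf consults
 * before attaching to the event; expr is evaluated with tp_event and
 * p_event in scope and should yield 0 or a negative errno. An illustrative
 * (hypothetical) use that rejects sampling:
 *
 *	__TRACE_EVENT_PERF_PERM(my_event,
 *		is_sampling_event(p_event) ? -EPERM : 0);
 */
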
#define PERF_MAX_TRACE_SIZE	2048

#define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */

enum event_trigger_type {
	ETT_NONE		= (0),
	ETT_TRACE_ONOFF		= (1 << 0),
	ETT_SNAPSHOT		= (1 << 1),
	ETT_STACKTRACE		= (1 << 2),
	ETT_EVENT_ENABLE	= (1 << 3),
	ETT_EVENT_HIST		= (1 << 4),
	ETT_HIST_ENABLE		= (1 << 5),
};

extern int filter_match_preds(struct event_filter *filter, void *rec);

extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
						   void *rec);
extern void event_triggers_post_call(struct trace_event_file *file,
				     enum event_trigger_type tt,
				     void *rec);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);

/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
	unsigned long eflags = file->flags;

	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
			event_triggers_call(file, NULL);
		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
			return true;
		if (eflags & EVENT_FILE_FL_PID_FILTER)
			return trace_event_ignore_this_pid(file);
	}
	return false;
}

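/*
 * Generated probe functions call trace_trigger_soft_disabled() before
 * reserving ring buffer space, roughly like the sketch below, so a
 * soft-disabled event still fires its unconditional triggers and then
 * bails out cheaply (the probe name and arguments are illustrative):
 *
 *	static void my_event_probe(void *data, unsigned long ip)
 *	{
 *		struct trace_event_file *trace_file = data;
 *
 *		if (trace_trigger_soft_disabled(trace_file))
 *			return;
 *		// ... reserve, fill in and commit the entry ...
 *	}
 */
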
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
void perf_event_detach_bpf_prog(struct perf_event *event);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	return 1;
}

static inline int
perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}

static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }

#endif

enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
	FILTER_COMM,
	FILTER_CPU,
};

extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);

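/*
 * trace_define_field() is what a class's ->define_fields() callback uses
 * to describe each field for filtering. A hedged sketch for one field of
 * the hypothetical record from the earlier examples (filter_type is one
 * of the FILTER_* values above):
 *
 *	static int my_event_define_fields(struct trace_event_call *call)
 *	{
 *		struct trace_event_raw_my_event field;
 *
 *		return trace_define_field(call, "unsigned long", "ip",
 *					  offsetof(typeof(field), ip),
 *					  sizeof(field.ip), 0, FILTER_OTHER);
 *	}
 */
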
#define is_signed_type(type)	(((type)(-1)) < (type)1)

int trace_set_clr_event(const char *system, const char *event, int set);

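/*
 * Example: enable or disable one event from kernel code, equivalent to
 * writing "sched:sched_switch" to the tracefs set_event file (set is
 * 1 to enable, 0 to disable):
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 */
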
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to initialize the static variable with fmt when fmt is not
 * a constant, even though the outer if statement would optimize it out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		  __attribute__((section("__trace_printk_fmt"))) =	\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)

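/*
 * Example use (the call site is hypothetical); with a constant format
 * string the format pointer is placed in the __trace_printk_fmt section
 * and the cheaper __trace_bprintk() binary path is taken:
 *
 *	event_trace_printk(_THIS_IP_, "oversized read: %zu\n", count);
 */
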
#ifdef CONFIG_PERF_EVENTS
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
				     char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);

void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
			       struct trace_event_call *call, u64 count,
			       struct pt_regs *regs, struct hlist_head *head,
			       struct task_struct *task);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
		       u64 count, struct pt_regs *regs, void *head,
		       struct task_struct *task, struct perf_event *event)
{
	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task, event);
}

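/*
 * The usual perf probe sequence built from the helpers above: allocate a
 * per-CPU buffer, fill in the record, then submit. A sketch using the
 * hypothetical entry type from earlier examples (real callers also apply
 * extra size/alignment fixups):
 *
 *	struct trace_event_raw_my_event *entry;
 *	struct pt_regs *regs;
 *	int rctx;
 *	int size = ALIGN(sizeof(*entry), sizeof(u64));
 *
 *	entry = perf_trace_buf_alloc(size, &regs, &rctx);
 *	if (!entry)
 *		return;
 *	entry->ip = ip;
 *	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
 *			      head, NULL, NULL);
 */
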
#endif

#endif /* _LINUX_TRACE_EVENT_H */