xref: /linux-6.15/include/linux/trace_events.h (revision bfca85fa)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 
3 #ifndef _LINUX_TRACE_EVENT_H
4 #define _LINUX_TRACE_EVENT_H
5 
6 #include <linux/ring_buffer.h>
7 #include <linux/trace_seq.h>
8 #include <linux/percpu.h>
9 #include <linux/hardirq.h>
10 #include <linux/perf_event.h>
11 #include <linux/tracepoint.h>
12 
13 struct trace_array;
14 struct array_buffer;
15 struct tracer;
16 struct dentry;
17 struct bpf_prog;
18 union bpf_attr;
19 
20 /* Used for event string fields when they are NULL */
21 #define EVENT_NULL_STR		"(null)"
22 
23 const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
24 				  unsigned long flags,
25 				  const struct trace_print_flags *flag_array);
26 
27 const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
28 				    const struct trace_print_flags *symbol_array);
29 
30 #if BITS_PER_LONG == 32
31 const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
32 		      unsigned long long flags,
33 		      const struct trace_print_flags_u64 *flag_array);
34 
35 const char *trace_print_symbols_seq_u64(struct trace_seq *p,
36 					unsigned long long val,
37 					const struct trace_print_flags_u64
38 								 *symbol_array);
39 #endif
40 
41 const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
42 				    unsigned int bitmask_size);
43 
44 const char *trace_print_hex_seq(struct trace_seq *p,
45 				const unsigned char *buf, int len,
46 				bool concatenate);
47 
48 const char *trace_print_array_seq(struct trace_seq *p,
49 				   const void *buf, int count,
50 				   size_t el_size);
51 
52 const char *
53 trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
54 			 int prefix_type, int rowsize, int groupsize,
55 			 const void *buf, size_t len, bool ascii);
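
/*
 * Illustrative sketch: the helpers above back the __print_flags() and
 * __print_symbolic() output macros. A symbol table is an array of
 * { value, name } pairs (struct trace_print_flags, from
 * <linux/tracepoint-defs.h>) with a NULL-name terminator. The states and
 * the wrapper below are hypothetical.
 */
static inline const char *example_print_state(struct trace_seq *p,
					      unsigned long state)
{
	static const struct trace_print_flags example_states[] = {
		{ 0,	"IDLE"		},
		{ 1,	"RUNNING"	},
		{ -1,	NULL		}
	};

	return trace_print_symbols_seq(p, state, example_states);
}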
56 
57 struct trace_iterator;
58 struct trace_event;
59 
60 int trace_raw_output_prep(struct trace_iterator *iter,
61 			  struct trace_event *event);
62 extern __printf(2, 3)
63 void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
64 
65 /* Used to find the offset and length of dynamic fields in trace events */
66 struct trace_dynamic_info {
67 #ifdef CONFIG_CPU_BIG_ENDIAN
68 	u16	len;
69 	u16	offset;
70 #else
71 	u16	offset;
72 	u16	len;
73 #endif
74 } __packed;
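
/*
 * Illustrative sketch: a dynamic field is stored in the entry as a 32-bit
 * word holding the data offset in the low 16 bits and the length in the
 * high 16 bits; reading it through struct trace_dynamic_info hides the
 * endian difference. The helper and its arguments below are hypothetical.
 */
static inline void *example_dynamic_field(void *rec, int field_offset,
					  u16 *len)
{
	struct trace_dynamic_info *di = rec + field_offset;

	*len = di->len;
	return rec + di->offset;
}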
75 
76 /*
77  * The trace entry - the most basic unit of tracing. This is what
78  * is printed in the end as a single line in the trace output, such as:
79  *
80  *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
81  */
82 struct trace_entry {
83 	unsigned short		type;
84 	unsigned char		flags;
85 	unsigned char		preempt_count;
86 	int			pid;
87 };
88 
89 #define TRACE_EVENT_TYPE_MAX						\
90 	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
91 
92 /*
93  * Trace iterator - used by the printout routines that present trace
94  * results to users; these routines might sleep, etc.:
95  */
96 struct trace_iterator {
97 	struct trace_array	*tr;
98 	struct tracer		*trace;
99 	struct array_buffer	*array_buffer;
100 	void			*private;
101 	int			cpu_file;
102 	struct mutex		mutex;
103 	struct ring_buffer_iter	**buffer_iter;
104 	unsigned long		iter_flags;
105 	void			*temp;	/* temp holder */
106 	unsigned int		temp_size;
107 	char			*fmt;	/* modified format holder */
108 	unsigned int		fmt_size;
109 	atomic_t		wait_index;
110 
111 	/* trace_seq for __print_flags() and __print_symbolic() etc. */
112 	struct trace_seq	tmp_seq;
113 
114 	cpumask_var_t		started;
115 
116 	/* Set when the file is closed to prevent new waiters */
117 	bool			closed;
118 
119 	/* set when the currently open file is a snapshot */
120 	bool			snapshot;
121 
122 	/* The below is zeroed out in pipe_read */
123 	struct trace_seq	seq;
124 	struct trace_entry	*ent;
125 	unsigned long		lost_events;
126 	int			leftover;
127 	int			ent_size;
128 	int			cpu;
129 	u64			ts;
130 
131 	loff_t			pos;
132 	long			idx;
133 
134 	/* All new fields here will be zeroed out in pipe_read */
135 };
136 
137 enum trace_iter_flags {
138 	TRACE_FILE_LAT_FMT	= 1,
139 	TRACE_FILE_ANNOTATE	= 2,
140 	TRACE_FILE_TIME_IN_NS	= 4,
141 };
142 
143 
144 typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
145 				      int flags, struct trace_event *event);
146 
147 struct trace_event_functions {
148 	trace_print_func	trace;
149 	trace_print_func	raw;
150 	trace_print_func	hex;
151 	trace_print_func	binary;
152 };
153 
154 struct trace_event {
155 	struct hlist_node		node;
156 	int				type;
157 	struct trace_event_functions	*funcs;
158 };
159 
160 extern int register_trace_event(struct trace_event *event);
161 extern int unregister_trace_event(struct trace_event *event);
162 
163 /* Return values for print_line callback */
164 enum print_line_t {
165 	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
166 	TRACE_TYPE_HANDLED	= 1,
167 	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
168 	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
169 };
170 
171 enum print_line_t trace_handle_return(struct trace_seq *s);
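
/*
 * Illustrative sketch: a trace_event_functions .trace callback typically
 * writes to iter->seq and finishes with trace_handle_return(), which maps
 * the trace_seq overflow state onto a print_line_t value. The callback
 * below is hypothetical.
 */
static inline enum print_line_t
example_trace_output(struct trace_iterator *iter, int flags,
		     struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "example event (type %d)\n", event->type);

	return trace_handle_return(s);
}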
172 
173 static inline void tracing_generic_entry_update(struct trace_entry *entry,
174 						unsigned short type,
175 						unsigned int trace_ctx)
176 {
177 	entry->preempt_count		= trace_ctx & 0xff;
178 	entry->pid			= current->pid;
179 	entry->type			= type;
180 	entry->flags			= trace_ctx >> 16;
181 }
182 
183 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
184 
185 enum trace_flag_type {
186 	TRACE_FLAG_IRQS_OFF		= 0x01,
187 	TRACE_FLAG_NEED_RESCHED_LAZY	= 0x02,
188 	TRACE_FLAG_NEED_RESCHED		= 0x04,
189 	TRACE_FLAG_HARDIRQ		= 0x08,
190 	TRACE_FLAG_SOFTIRQ		= 0x10,
191 	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
192 	TRACE_FLAG_NMI			= 0x40,
193 	TRACE_FLAG_BH_OFF		= 0x80,
194 };
195 
196 static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
197 {
198 	unsigned int irq_status = irqs_disabled_flags(irqflags) ?
199 		TRACE_FLAG_IRQS_OFF : 0;
200 	return tracing_gen_ctx_irq_test(irq_status);
201 }
202 static inline unsigned int tracing_gen_ctx(void)
203 {
204 	unsigned long irqflags;
205 
206 	local_save_flags(irqflags);
207 	return tracing_gen_ctx_flags(irqflags);
208 }
209 
210 static inline unsigned int tracing_gen_ctx_dec(void)
211 {
212 	unsigned int trace_ctx;
213 
214 	trace_ctx = tracing_gen_ctx();
215 	/*
216 	 * Subtract one from the preemption counter if preemption is enabled,
217 	 * see trace_event_buffer_reserve() for details.
218 	 */
219 	if (IS_ENABLED(CONFIG_PREEMPTION))
220 		trace_ctx--;
221 	return trace_ctx;
222 }
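
/*
 * Illustrative sketch: how the helpers above combine. The trace_ctx word
 * packs the preempt count in its low byte and the trace_flag_type bits in
 * bits 16..23, so the context can be captured once and applied to the
 * entry later. The helper below is hypothetical; real callers go through
 * trace_event_buffer_lock_reserve() and friends.
 */
static inline void example_init_entry(struct trace_entry *ent,
				      unsigned short type)
{
	unsigned int trace_ctx = tracing_gen_ctx();

	tracing_generic_entry_update(ent, type, trace_ctx);
}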
223 
224 struct trace_event_file;
225 
226 struct ring_buffer_event *
227 trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
228 				struct trace_event_file *trace_file,
229 				int type, unsigned long len,
230 				unsigned int trace_ctx);
231 
232 #define TRACE_RECORD_CMDLINE	BIT(0)
233 #define TRACE_RECORD_TGID	BIT(1)
234 
235 void tracing_record_taskinfo(struct task_struct *task, int flags);
236 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
237 					  struct task_struct *next, int flags);
238 
239 void tracing_record_cmdline(struct task_struct *task);
240 void tracing_record_tgid(struct task_struct *task);
241 
242 int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
243 	 __printf(3, 4);
244 
245 struct event_filter;
246 
247 enum trace_reg {
248 	TRACE_REG_REGISTER,
249 	TRACE_REG_UNREGISTER,
250 #ifdef CONFIG_PERF_EVENTS
251 	TRACE_REG_PERF_REGISTER,
252 	TRACE_REG_PERF_UNREGISTER,
253 	TRACE_REG_PERF_OPEN,
254 	TRACE_REG_PERF_CLOSE,
255 	/*
256 	 * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
257 	 * custom action was taken and the default action is not to be
258 	 * performed.
259 	 */
260 	TRACE_REG_PERF_ADD,
261 	TRACE_REG_PERF_DEL,
262 #endif
263 };
264 
265 struct trace_event_call;
266 
267 #define TRACE_FUNCTION_TYPE ((const char *)~0UL)
268 
269 struct trace_event_fields {
270 	const char *type;
271 	union {
272 		struct {
273 			const char *name;
274 			const int  size;
275 			const int  align;
276 			const int  is_signed;
277 			const int  filter_type;
278 			const int  len;
279 		};
280 		int (*define_fields)(struct trace_event_call *);
281 	};
282 };
283 
284 struct trace_event_class {
285 	const char		*system;
286 	void			*probe;
287 #ifdef CONFIG_PERF_EVENTS
288 	void			*perf_probe;
289 #endif
290 	int			(*reg)(struct trace_event_call *event,
291 				       enum trace_reg type, void *data);
292 	struct trace_event_fields *fields_array;
293 	struct list_head	*(*get_fields)(struct trace_event_call *);
294 	struct list_head	fields;
295 	int			(*raw_init)(struct trace_event_call *);
296 };
297 
298 extern int trace_event_reg(struct trace_event_call *event,
299 			    enum trace_reg type, void *data);
300 
301 struct trace_event_buffer {
302 	struct trace_buffer		*buffer;
303 	struct ring_buffer_event	*event;
304 	struct trace_event_file		*trace_file;
305 	void				*entry;
306 	unsigned int			trace_ctx;
307 	struct pt_regs			*regs;
308 };
309 
310 void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
311 				  struct trace_event_file *trace_file,
312 				  unsigned long len);
313 
314 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
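
/*
 * Illustrative sketch of the reserve/fill/commit sequence that generated
 * TRACE_EVENT() probes follow. The entry struct and probe below are
 * hypothetical; real events lay out their entry via TP_STRUCT__entry().
 */
struct example_entry {
	struct trace_entry	ent;
	unsigned long		value;
};

static inline void example_probe_body(struct trace_event_file *trace_file,
				      unsigned long value)
{
	struct trace_event_buffer fbuffer;
	struct example_entry *entry;

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry));
	if (!entry)
		return;

	entry->value = value;
	trace_event_buffer_commit(&fbuffer);
}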
315 
316 enum {
317 	TRACE_EVENT_FL_CAP_ANY_BIT,
318 	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
319 	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
320 	TRACE_EVENT_FL_TRACEPOINT_BIT,
321 	TRACE_EVENT_FL_DYNAMIC_BIT,
322 	TRACE_EVENT_FL_KPROBE_BIT,
323 	TRACE_EVENT_FL_UPROBE_BIT,
324 	TRACE_EVENT_FL_EPROBE_BIT,
325 	TRACE_EVENT_FL_FPROBE_BIT,
326 	TRACE_EVENT_FL_CUSTOM_BIT,
327 };
328 
329 /*
330  * Event flags:
331  *  CAP_ANY	  - Any user can enable for perf
332  *  NO_SET_FILTER - Set when the filter has an error and is to be ignored
333  *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
334  *  TRACEPOINT    - Event is a tracepoint
335  *  DYNAMIC       - Event is a dynamic event (created at run time)
336  *  KPROBE        - Event is a kprobe
337  *  UPROBE        - Event is a uprobe
338  *  EPROBE        - Event is an event probe
339  *  FPROBE        - Event is a function probe
340  *  CUSTOM        - Event is a custom event (to be attached to an existing tracepoint)
341  *                   This is set while the custom event has not yet been attached
342  *                   to a tracepoint, and is cleared once it has been.
343  */
344 enum {
345 	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
346 	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
347 	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
348 	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
349 	TRACE_EVENT_FL_DYNAMIC		= (1 << TRACE_EVENT_FL_DYNAMIC_BIT),
350 	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
351 	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
352 	TRACE_EVENT_FL_EPROBE		= (1 << TRACE_EVENT_FL_EPROBE_BIT),
353 	TRACE_EVENT_FL_FPROBE		= (1 << TRACE_EVENT_FL_FPROBE_BIT),
354 	TRACE_EVENT_FL_CUSTOM		= (1 << TRACE_EVENT_FL_CUSTOM_BIT),
355 };
356 
357 #define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
358 
359 struct trace_event_call {
360 	struct list_head	list;
361 	struct trace_event_class *class;
362 	union {
363 		char			*name;
364 		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
365 		struct tracepoint	*tp;
366 	};
367 	struct trace_event	event;
368 	char			*print_fmt;
369 	/*
370 	 * Static events can disappear with modules,
371 	 * whereas dynamic ones need their own ref count.
372 	 */
373 	union {
374 		void				*module;
375 		atomic_t			refcnt;
376 	};
377 	void			*data;
378 
379 	/* See the TRACE_EVENT_FL_* flags above */
380 	int			flags; /* static flags of different events */
381 
382 #ifdef CONFIG_PERF_EVENTS
383 	int				perf_refcount;
384 	struct hlist_head __percpu	*perf_events;
385 	struct bpf_prog_array __rcu	*prog_array;
386 
387 	int	(*perf_perm)(struct trace_event_call *,
388 			     struct perf_event *);
389 #endif
390 };
391 
392 #ifdef CONFIG_DYNAMIC_EVENTS
393 bool trace_event_dyn_try_get_ref(struct trace_event_call *call);
394 void trace_event_dyn_put_ref(struct trace_event_call *call);
395 bool trace_event_dyn_busy(struct trace_event_call *call);
396 #else
397 static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call)
398 {
399 	/* Without DYNAMIC_EVENTS configured, nothing should be calling this */
400 	return false;
401 }
402 static inline void trace_event_dyn_put_ref(struct trace_event_call *call)
403 {
404 }
405 static inline bool trace_event_dyn_busy(struct trace_event_call *call)
406 {
407 	/* Nothing should call this without DYNAMIC_EVENTS configured. */
408 	return true;
409 }
410 #endif
411 
412 static inline bool trace_event_try_get_ref(struct trace_event_call *call)
413 {
414 	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
415 		return trace_event_dyn_try_get_ref(call);
416 	else
417 		return try_module_get(call->module);
418 }
419 
420 static inline void trace_event_put_ref(struct trace_event_call *call)
421 {
422 	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
423 		trace_event_dyn_put_ref(call);
424 	else
425 		module_put(call->module);
426 }
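
/*
 * Illustrative sketch: code that holds on to a trace_event_call across a
 * section where it may sleep pins the event with the helpers above, so
 * neither the owning module nor a dynamic event can go away underneath
 * it. The function below is hypothetical.
 */
static inline bool example_use_event(struct trace_event_call *call)
{
	if (!trace_event_try_get_ref(call))
		return false;

	/* ... 'call' can safely be used here ... */

	trace_event_put_ref(call);
	return true;
}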
427 
428 #ifdef CONFIG_PERF_EVENTS
429 static inline bool bpf_prog_array_valid(struct trace_event_call *call)
430 {
431 	/*
432 	 * This inline function checks whether call->prog_array
433 	 * is valid or not. The function is called in various places,
434 	 * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
435 	 *
436 	 * If this function returns true, and later call->prog_array
437 	 * becomes NULL inside the rcu_read_lock/unlock region,
438 	 * we bail out then. If this function returns false,
439 	 * there is a risk that we might miss a few events if the checking
440 	 * were delayed until inside rcu_read_lock/unlock region and
441 	 * call->prog_array happened to become non-NULL then.
442 	 *
443 	 * Here, READ_ONCE() is used instead of rcu_access_pointer().
444 	 * rcu_access_pointer() requires the actual definition of
445 	 * "struct bpf_prog_array" while READ_ONCE() only needs
446 	 * a declaration of the same type.
447 	 */
448 	return !!READ_ONCE(call->prog_array);
449 }
450 #endif
451 
452 static inline const char *
453 trace_event_name(struct trace_event_call *call)
454 {
455 	if (call->flags & TRACE_EVENT_FL_CUSTOM)
456 		return call->name;
457 	else if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
458 		return call->tp ? call->tp->name : NULL;
459 	else
460 		return call->name;
461 }
462 
463 static inline struct list_head *
464 trace_get_fields(struct trace_event_call *event_call)
465 {
466 	if (!event_call->class->get_fields)
467 		return &event_call->class->fields;
468 	return event_call->class->get_fields(event_call);
469 }
470 
471 struct trace_subsystem_dir;
472 
473 enum {
474 	EVENT_FILE_FL_ENABLED_BIT,
475 	EVENT_FILE_FL_RECORDED_CMD_BIT,
476 	EVENT_FILE_FL_RECORDED_TGID_BIT,
477 	EVENT_FILE_FL_FILTERED_BIT,
478 	EVENT_FILE_FL_NO_SET_FILTER_BIT,
479 	EVENT_FILE_FL_SOFT_MODE_BIT,
480 	EVENT_FILE_FL_SOFT_DISABLED_BIT,
481 	EVENT_FILE_FL_TRIGGER_MODE_BIT,
482 	EVENT_FILE_FL_TRIGGER_COND_BIT,
483 	EVENT_FILE_FL_PID_FILTER_BIT,
484 	EVENT_FILE_FL_WAS_ENABLED_BIT,
485 	EVENT_FILE_FL_FREED_BIT,
486 };
487 
488 extern struct trace_event_file *trace_get_event_file(const char *instance,
489 						     const char *system,
490 						     const char *event);
491 extern void trace_put_event_file(struct trace_event_file *file);
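
/*
 * Illustrative sketch: looking an event file up in the top level trace
 * instance (NULL) and dropping the reference again. The system/event
 * names are only examples; trace_get_event_file() returns an ERR_PTR()
 * value on failure, so this assumes <linux/err.h> is available.
 */
static inline int example_get_event_file(void)
{
	struct trace_event_file *file;

	file = trace_get_event_file(NULL, "sched", "sched_switch");
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* ... the file stays pinned until trace_put_event_file() ... */

	trace_put_event_file(file);
	return 0;
}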
492 
493 #define MAX_DYNEVENT_CMD_LEN	(2048)
494 
495 enum dynevent_type {
496 	DYNEVENT_TYPE_SYNTH = 1,
497 	DYNEVENT_TYPE_KPROBE,
498 	DYNEVENT_TYPE_NONE,
499 };
500 
501 struct dynevent_cmd;
502 
503 typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);
504 
505 struct dynevent_cmd {
506 	struct seq_buf		seq;
507 	const char		*event_name;
508 	unsigned int		n_fields;
509 	enum dynevent_type	type;
510 	dynevent_create_fn_t	run_command;
511 	void			*private_data;
512 };
513 
514 extern int dynevent_create(struct dynevent_cmd *cmd);
515 
516 extern int synth_event_delete(const char *name);
517 
518 extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
519 				 char *buf, int maxlen);
520 
521 extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
522 				       const char *name,
523 				       struct module *mod, ...);
524 
525 #define synth_event_gen_cmd_start(cmd, name, mod, ...)	\
526 	__synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL)
527 
528 struct synth_field_desc {
529 	const char *type;
530 	const char *name;
531 };
532 
533 extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
534 					   const char *name,
535 					   struct module *mod,
536 					   struct synth_field_desc *fields,
537 					   unsigned int n_fields);
538 extern int synth_event_create(const char *name,
539 			      struct synth_field_desc *fields,
540 			      unsigned int n_fields, struct module *mod);
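
/*
 * Illustrative sketch: creating a synthetic event from a static field
 * description in one call. The event and field names are hypothetical,
 * a NULL module is passed as built-in code would, and ARRAY_SIZE() is
 * assumed to be available.
 */
static inline int example_create_synth_event(void)
{
	static struct synth_field_desc example_synth_fields[] = {
		{ .type = "u64",	.name = "lat" },
		{ .type = "pid_t",	.name = "pid" },
	};

	return synth_event_create("example_synth", example_synth_fields,
				  ARRAY_SIZE(example_synth_fields), NULL);
}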
541 
542 extern int synth_event_add_field(struct dynevent_cmd *cmd,
543 				 const char *type,
544 				 const char *name);
545 extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
546 				     const char *type_name);
547 extern int synth_event_add_fields(struct dynevent_cmd *cmd,
548 				  struct synth_field_desc *fields,
549 				  unsigned int n_fields);
550 
551 #define synth_event_gen_cmd_end(cmd)	\
552 	dynevent_create(cmd)
553 
554 struct synth_event;
555 
556 struct synth_event_trace_state {
557 	struct trace_event_buffer fbuffer;
558 	struct synth_trace_event *entry;
559 	struct trace_buffer *buffer;
560 	struct synth_event *event;
561 	unsigned int cur_field;
562 	unsigned int n_u64;
563 	bool disabled;
564 	bool add_next;
565 	bool add_name;
566 };
567 
568 extern int synth_event_trace(struct trace_event_file *file,
569 			     unsigned int n_vals, ...);
570 extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
571 				   unsigned int n_vals);
572 extern int synth_event_trace_start(struct trace_event_file *file,
573 				   struct synth_event_trace_state *trace_state);
574 extern int synth_event_add_next_val(u64 val,
575 				    struct synth_event_trace_state *trace_state);
576 extern int synth_event_add_val(const char *field_name, u64 val,
577 			       struct synth_event_trace_state *trace_state);
578 extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);
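
/*
 * Illustrative sketch: the "one shot" synth_event_trace() call above
 * records a complete synthetic event at once. 'file' would come from
 * trace_get_event_file(); the value count must match the number of
 * fields in the event. The helper below is hypothetical.
 */
static inline int example_trace_synth(struct trace_event_file *file)
{
	/* two fields (e.g. lat and pid), hence n_vals == 2 */
	return synth_event_trace(file, 2, (u64)1000, (u64)current->pid);
}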
579 
580 extern int kprobe_event_delete(const char *name);
581 
582 extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
583 				  char *buf, int maxlen);
584 
585 #define kprobe_event_gen_cmd_start(cmd, name, loc, ...)			\
586 	__kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL)
587 
588 #define kretprobe_event_gen_cmd_start(cmd, name, loc, ...)		\
589 	__kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL)
590 
591 extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
592 					bool kretprobe,
593 					const char *name,
594 					const char *loc, ...);
595 
596 #define kprobe_event_add_fields(cmd, ...)	\
597 	__kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL)
598 
599 #define kprobe_event_add_field(cmd, field)	\
600 	__kprobe_event_add_fields(cmd, field, NULL)
601 
602 extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
603 
604 #define kprobe_event_gen_cmd_end(cmd)		\
605 	dynevent_create(cmd)
606 
607 #define kretprobe_event_gen_cmd_end(cmd)	\
608 	dynevent_create(cmd)
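
/*
 * Illustrative sketch of the kprobe command generation flow. The probe
 * name, target symbol and fetch arguments are hypothetical (the register
 * names are x86-64 specific), and 'buf' must point to
 * MAX_DYNEVENT_CMD_LEN bytes.
 */
static inline int example_gen_kprobe_cmd(char *buf)
{
	struct dynevent_cmd cmd;
	int ret;

	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	ret = kprobe_event_gen_cmd_start(&cmd, "example_kprobe",
					 "do_sys_openat2",
					 "dfd=%di", "filename=%si");
	if (ret)
		return ret;

	ret = kprobe_event_add_fields(&cmd, "flags=%dx");
	if (ret)
		return ret;

	return kprobe_event_gen_cmd_end(&cmd);
}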
609 
610 /*
611  * Event file flags:
612  *  ENABLED	  - The event is enabled
613  *  RECORDED_CMD  - The comms should be recorded at sched_switch
614  *  RECORDED_TGID - The tgids should be recorded at sched_switch
615  *  FILTERED	  - The event has a filter attached
616  *  NO_SET_FILTER - Set when the filter has an error and is to be ignored
617  *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
618  *  SOFT_DISABLED - When set, do not trace the event (even though its
619  *                   tracepoint may be enabled)
620  *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
621  *  TRIGGER_COND  - When set, one or more triggers have an associated filter
622  *  PID_FILTER    - When set, the event is filtered based on pid
623  *  WAS_ENABLED   - Set once the event is enabled, so the trace is known to need clearing on module removal
624  *  FREED         - File descriptor is freed, all fields should be considered invalid
625  */
626 enum {
627 	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
628 	EVENT_FILE_FL_RECORDED_CMD	= (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
629 	EVENT_FILE_FL_RECORDED_TGID	= (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
630 	EVENT_FILE_FL_FILTERED		= (1 << EVENT_FILE_FL_FILTERED_BIT),
631 	EVENT_FILE_FL_NO_SET_FILTER	= (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
632 	EVENT_FILE_FL_SOFT_MODE		= (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
633 	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
634 	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
635 	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
636 	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
637 	EVENT_FILE_FL_WAS_ENABLED	= (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
638 	EVENT_FILE_FL_FREED		= (1 << EVENT_FILE_FL_FREED_BIT),
639 };
640 
641 struct trace_event_file {
642 	struct list_head		list;
643 	struct trace_event_call		*event_call;
644 	struct event_filter __rcu	*filter;
645 	struct eventfs_inode		*ei;
646 	struct trace_array		*tr;
647 	struct trace_subsystem_dir	*system;
648 	struct list_head		triggers;
649 
650 	/*
651 	 * 32 bit flags:
652 	 *   bit 0:		enabled
653 	 *   bit 2:		enabled tgid record
654 	 *   bit 3:		filtered
655 	 *   (see the EVENT_FILE_FL_*_BIT enum above for the remaining bits)
656 	 *   bit 4:		trigger enabled
657 	 *
658 	 * Note: The bits must be set atomically to prevent races
659 	 * from other writers. Reads of flags do not need to be in
660 	 * sync as they occur in critical sections. But the way flags
661 	 * is currently used, these changes do not affect the code
662 	 * except that when a change is made, it may have a slight
663 	 * delay in propagating the changes to other CPUs due to
664 	 * caching and such. Which is mostly OK ;-)
665 	 */
666 	unsigned long		flags;
667 	refcount_t		ref;	/* ref count for opened files */
668 	atomic_t		sm_ref;	/* soft-mode reference counter */
669 	atomic_t		tm_ref;	/* trigger-mode reference counter */
670 };
671 
672 #define __TRACE_EVENT_FLAGS(name, value)				\
673 	static int __init trace_init_flags_##name(void)			\
674 	{								\
675 		event_##name.flags |= value;				\
676 		return 0;						\
677 	}								\
678 	early_initcall(trace_init_flags_##name);
679 
680 #define __TRACE_EVENT_PERF_PERM(name, expr...)				\
681 	static int perf_perm_##name(struct trace_event_call *tp_event, \
682 				    struct perf_event *p_event)		\
683 	{								\
684 		return ({ expr; });					\
685 	}								\
686 	static int __init trace_init_perf_perm_##name(void)		\
687 	{								\
688 		event_##name.perf_perm = &perf_perm_##name;		\
689 		return 0;						\
690 	}								\
691 	early_initcall(trace_init_perf_perm_##name);
692 
693 #define PERF_MAX_TRACE_SIZE	8192
694 
695 #define MAX_FILTER_STR_VAL	256U	/* Should handle KSYM_SYMBOL_LEN */
696 
697 enum event_trigger_type {
698 	ETT_NONE		= (0),
699 	ETT_TRACE_ONOFF		= (1 << 0),
700 	ETT_SNAPSHOT		= (1 << 1),
701 	ETT_STACKTRACE		= (1 << 2),
702 	ETT_EVENT_ENABLE	= (1 << 3),
703 	ETT_EVENT_HIST		= (1 << 4),
704 	ETT_HIST_ENABLE		= (1 << 5),
705 	ETT_EVENT_EPROBE	= (1 << 6),
706 };
707 
708 extern int filter_match_preds(struct event_filter *filter, void *rec);
709 
710 extern enum event_trigger_type
711 event_triggers_call(struct trace_event_file *file,
712 		    struct trace_buffer *buffer, void *rec,
713 		    struct ring_buffer_event *event);
714 extern void
715 event_triggers_post_call(struct trace_event_file *file,
716 			 enum event_trigger_type tt);
717 
718 bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
719 
720 bool __trace_trigger_soft_disabled(struct trace_event_file *file);
721 
722 /**
723  * trace_trigger_soft_disabled - do triggers and test if soft disabled
724  * @file: The file pointer of the event to test
725  *
726  * If any triggers without filters are attached to this event, they
727  * will be called here. If the event is soft disabled and has no
728  * triggers that require testing the fields, it will return true,
729  * otherwise false.
730  */
731 static __always_inline bool
732 trace_trigger_soft_disabled(struct trace_event_file *file)
733 {
734 	unsigned long eflags = file->flags;
735 
736 	if (likely(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE |
737 			       EVENT_FILE_FL_SOFT_DISABLED |
738 			       EVENT_FILE_FL_PID_FILTER))))
739 		return false;
740 
741 	if (likely(eflags & EVENT_FILE_FL_TRIGGER_COND))
742 		return false;
743 
744 	return __trace_trigger_soft_disabled(file);
745 }
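
/*
 * Illustrative sketch: generated probes call trace_trigger_soft_disabled()
 * first and return immediately when the event is soft disabled with no
 * triggers left to run, avoiding any ring buffer work. The probe below is
 * hypothetical.
 */
static inline void example_probe_check(struct trace_event_file *trace_file)
{
	if (trace_trigger_soft_disabled(trace_file))
		return;

	/* ... reserve, fill and commit the event as usual ... */
}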
746 
747 #ifdef CONFIG_BPF_EVENTS
748 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
749 int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
750 void perf_event_detach_bpf_prog(struct perf_event *event);
751 int perf_event_query_prog_array(struct perf_event *event, void __user *info);
752 
753 struct bpf_raw_tp_link;
754 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link);
755 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link);
756 
757 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
758 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
759 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
760 			    u32 *fd_type, const char **buf,
761 			    u64 *probe_offset, u64 *probe_addr,
762 			    unsigned long *missed);
763 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
764 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
765 #else
766 static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
767 {
768 	return 1;
769 }
770 
771 static inline int
772 perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie)
773 {
774 	return -EOPNOTSUPP;
775 }
776 
777 static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }
778 
779 static inline int
780 perf_event_query_prog_array(struct perf_event *event, void __user *info)
781 {
782 	return -EOPNOTSUPP;
783 }
784 struct bpf_raw_tp_link;
785 static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
786 {
787 	return -EOPNOTSUPP;
788 }
789 static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
790 {
791 	return -EOPNOTSUPP;
792 }
793 static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
794 {
795 	return NULL;
796 }
797 static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
798 {
799 }
800 static inline int bpf_get_perf_event_info(const struct perf_event *event,
801 					  u32 *prog_id, u32 *fd_type,
802 					  const char **buf, u64 *probe_offset,
803 					  u64 *probe_addr, unsigned long *missed)
804 {
805 	return -EOPNOTSUPP;
806 }
807 static inline int
808 bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
809 {
810 	return -EOPNOTSUPP;
811 }
812 static inline int
813 bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
814 {
815 	return -EOPNOTSUPP;
816 }
817 #endif
818 
819 enum {
820 	FILTER_OTHER = 0,
821 	FILTER_STATIC_STRING,
822 	FILTER_DYN_STRING,
823 	FILTER_RDYN_STRING,
824 	FILTER_PTR_STRING,
825 	FILTER_TRACE_FN,
826 	FILTER_CPUMASK,
827 	FILTER_COMM,
828 	FILTER_CPU,
829 	FILTER_STACKTRACE,
830 };
831 
832 extern int trace_event_raw_init(struct trace_event_call *call);
833 extern int trace_define_field(struct trace_event_call *call, const char *type,
834 			      const char *name, int offset, int size,
835 			      int is_signed, int filter_type);
836 extern int trace_add_event_call(struct trace_event_call *call);
837 extern int trace_remove_event_call(struct trace_event_call *call);
838 extern int trace_event_get_offsets(struct trace_event_call *call);
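
/*
 * Illustrative sketch: defining a single field by hand, the way a class's
 * define_fields() callback would. The entry struct and field are
 * hypothetical; offsetof() is assumed to be available.
 */
struct example_trace_entry {
	struct trace_entry	ent;
	unsigned long		value;
};

static inline int example_define_fields(struct trace_event_call *call)
{
	return trace_define_field(call, "unsigned long", "value",
				  offsetof(struct example_trace_entry, value),
				  sizeof(unsigned long), 0, FILTER_OTHER);
}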
839 
840 int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
841 int trace_set_clr_event(const char *system, const char *event, int set);
842 int trace_array_set_clr_event(struct trace_array *tr, const char *system,
843 		const char *event, bool enable);
844 /*
845  * The double __builtin_constant_p is because gcc will give us an error
846  * if we try to initialize the static variable with fmt when it is not a
847  * constant, even with the outer if statement optimized out.
848  */
849 #define event_trace_printk(ip, fmt, args...)				\
850 do {									\
851 	__trace_printk_check_format(fmt, ##args);			\
852 	tracing_record_cmdline(current);				\
853 	if (__builtin_constant_p(fmt)) {				\
854 		static const char *trace_printk_fmt			\
855 		  __section("__trace_printk_fmt") =			\
856 			__builtin_constant_p(fmt) ? fmt : NULL;		\
857 									\
858 		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
859 	} else								\
860 		__trace_printk(ip, fmt, ##args);			\
861 } while (0)
862 
863 #ifdef CONFIG_PERF_EVENTS
864 struct perf_event;
865 
866 DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
867 
868 extern int  perf_trace_init(struct perf_event *event);
869 extern void perf_trace_destroy(struct perf_event *event);
870 extern int  perf_trace_add(struct perf_event *event, int flags);
871 extern void perf_trace_del(struct perf_event *event, int flags);
872 #ifdef CONFIG_KPROBE_EVENTS
873 extern int  perf_kprobe_init(struct perf_event *event, bool is_retprobe);
874 extern void perf_kprobe_destroy(struct perf_event *event);
875 extern int bpf_get_kprobe_info(const struct perf_event *event,
876 			       u32 *fd_type, const char **symbol,
877 			       u64 *probe_offset, u64 *probe_addr,
878 			       unsigned long *missed,
879 			       bool perf_type_tracepoint);
880 #endif
881 #ifdef CONFIG_UPROBE_EVENTS
882 extern int  perf_uprobe_init(struct perf_event *event,
883 			     unsigned long ref_ctr_offset, bool is_retprobe);
884 extern void perf_uprobe_destroy(struct perf_event *event);
885 extern int bpf_get_uprobe_info(const struct perf_event *event,
886 			       u32 *fd_type, const char **filename,
887 			       u64 *probe_offset, u64 *probe_addr,
888 			       bool perf_type_tracepoint);
889 #endif
890 extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
891 				     char *filter_str);
892 extern void ftrace_profile_free_filter(struct perf_event *event);
893 void perf_trace_buf_update(void *record, u16 type);
894 void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
895 
896 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
897 void perf_event_free_bpf_prog(struct perf_event *event);
898 
899 void bpf_trace_run1(struct bpf_raw_tp_link *link, u64 arg1);
900 void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2);
901 void bpf_trace_run3(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
902 		    u64 arg3);
903 void bpf_trace_run4(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
904 		    u64 arg3, u64 arg4);
905 void bpf_trace_run5(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
906 		    u64 arg3, u64 arg4, u64 arg5);
907 void bpf_trace_run6(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
908 		    u64 arg3, u64 arg4, u64 arg5, u64 arg6);
909 void bpf_trace_run7(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
910 		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
911 void bpf_trace_run8(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
912 		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
913 		    u64 arg8);
914 void bpf_trace_run9(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
915 		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
916 		    u64 arg8, u64 arg9);
917 void bpf_trace_run10(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
918 		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
919 		     u64 arg8, u64 arg9, u64 arg10);
920 void bpf_trace_run11(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
921 		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
922 		     u64 arg8, u64 arg9, u64 arg10, u64 arg11);
923 void bpf_trace_run12(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
924 		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
925 		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
926 void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
927 			       struct trace_event_call *call, u64 count,
928 			       struct pt_regs *regs, struct hlist_head *head,
929 			       struct task_struct *task);
930 
931 static inline void
932 perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
933 		       u64 count, struct pt_regs *regs, void *head,
934 		       struct task_struct *task)
935 {
936 	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
937 }
938 
939 #endif
940 
941 #define TRACE_EVENT_STR_MAX	512
942 
943 /*
944  * gcc warns that you cannot use a va_list in an inlined
945  * function, so make it into a macro instead :-/
946  */
947 #define __trace_event_vstr_len(fmt, va)			\
948 ({							\
949 	va_list __ap;					\
950 	int __ret;					\
951 							\
952 	va_copy(__ap, *(va));				\
953 	__ret = vsnprintf(NULL, 0, fmt, __ap) + 1;	\
954 	va_end(__ap);					\
955 							\
956 	min(__ret, TRACE_EVENT_STR_MAX);		\
957 })
958 
959 #endif /* _LINUX_TRACE_EVENT_H */
960 
961 /*
962  * Note: we keep the TRACE_CUSTOM_EVENT outside the include file ifdef protection.
963  *  This is due to the way trace custom events work. If a file includes two
964  *  trace event headers under one "CREATE_CUSTOM_TRACE_EVENTS" the first include
965  *  will override the TRACE_CUSTOM_EVENT and break the second include.
966  */
967 
968 #ifndef TRACE_CUSTOM_EVENT
969 
970 #define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print)
971 #define DEFINE_CUSTOM_EVENT(template, name, proto, args)
972 #define TRACE_CUSTOM_EVENT(name, proto, args, struct, assign, print)
973 
974 #endif /* ifndef TRACE_CUSTOM_EVENT (see note above) */
975