1 /*
2  * Performance events:
3  *
4  *    Copyright (C) 2008-2009, Thomas Gleixner <[email protected]>
5  *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
6  *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
7  *
8  * Data type definitions, declarations, prototypes.
9  *
10  *    Started by: Thomas Gleixner and Ingo Molnar
11  *
12  * For licensing details see kernel-base/COPYING
13  */
14 #ifndef _LINUX_PERF_EVENT_H
15 #define _LINUX_PERF_EVENT_H
16 
17 #include <uapi/linux/perf_event.h>
18 #include <uapi/linux/bpf_perf_event.h>
19 
20 /*
21  * Kernel-internal data types and definitions:
22  */
23 
24 #ifdef CONFIG_PERF_EVENTS
25 # include <asm/perf_event.h>
26 # include <asm/local64.h>
27 #endif
28 
29 struct perf_guest_info_callbacks {
30 	int				(*is_in_guest)(void);
31 	int				(*is_user_mode)(void);
32 	unsigned long			(*get_guest_ip)(void);
33 	void				(*handle_intel_pt_intr)(void);
34 };
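
/*
 * Illustrative only: a hypervisor (KVM is the in-tree user) fills in these
 * callbacks and hands them to perf via perf_register_guest_info_callbacks(),
 * declared further down in this header; the my_* names are made up:
 *
 *	static struct perf_guest_info_callbacks my_guest_cbs = {
 *		.is_in_guest		= my_is_in_guest,
 *		.is_user_mode		= my_is_user_mode,
 *		.get_guest_ip		= my_get_guest_ip,
 *		.handle_intel_pt_intr	= my_handle_intel_pt_intr,
 *	};
 *
 *	perf_register_guest_info_callbacks(&my_guest_cbs);
 */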
35 
36 #ifdef CONFIG_HAVE_HW_BREAKPOINT
37 #include <asm/hw_breakpoint.h>
38 #endif
39 
40 #include <linux/list.h>
41 #include <linux/mutex.h>
42 #include <linux/rculist.h>
43 #include <linux/rcupdate.h>
44 #include <linux/spinlock.h>
45 #include <linux/hrtimer.h>
46 #include <linux/fs.h>
47 #include <linux/pid_namespace.h>
48 #include <linux/workqueue.h>
49 #include <linux/ftrace.h>
50 #include <linux/cpu.h>
51 #include <linux/irq_work.h>
52 #include <linux/static_key.h>
53 #include <linux/jump_label_ratelimit.h>
54 #include <linux/atomic.h>
55 #include <linux/sysfs.h>
56 #include <linux/perf_regs.h>
57 #include <linux/cgroup.h>
58 #include <linux/refcount.h>
59 #include <asm/local.h>
60 
61 struct perf_callchain_entry {
62 	__u64				nr;
63 	__u64				ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
64 };
65 
66 struct perf_callchain_entry_ctx {
67 	struct perf_callchain_entry *entry;
68 	u32			    max_stack;
69 	u32			    nr;
70 	short			    contexts;
71 	bool			    contexts_maxed;
72 };
73 
74 typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
75 				     unsigned long off, unsigned long len);
76 
77 struct perf_raw_frag {
78 	union {
79 		struct perf_raw_frag	*next;
80 		unsigned long		pad;
81 	};
82 	perf_copy_f			copy;
83 	void				*data;
84 	u32				size;
85 } __packed;
86 
87 struct perf_raw_record {
88 	struct perf_raw_frag		frag;
89 	u32				size;
90 };
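
/*
 * Illustrative only: the tracepoint/BPF output paths build a single-fragment
 * raw record roughly like this (record/record_size are the caller's data;
 * leaving .copy NULL means the fragment data is copied directly):
 *
 *	struct perf_raw_record raw = {
 *		.frag = {
 *			.data = record,
 *			.size = record_size,
 *		},
 *	};
 *
 * The outer size field is filled in while the sample is being prepared.
 */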
91 
92 /*
93  * branch stack layout:
94  *  nr: number of taken branches stored in entries[]
95  *
96  * Note that nr can vary from sample to sample.
97  * Branches (to, from) are stored from most recent
98  * to least recent, i.e., entries[0] contains the most
99  * recent branch.
100  */
101 struct perf_branch_stack {
102 	__u64				nr;
103 	struct perf_branch_entry	entries[0];
104 };
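
/*
 * A minimal sketch of consuming a branch stack, assuming @bs came from a
 * sample with PERF_SAMPLE_BRANCH_STACK; entries[0] is the most recent
 * branch and each entry carries ->from and ->to addresses
 * (process_branch() is a hypothetical consumer):
 *
 *	u64 i;
 *
 *	for (i = 0; i < bs->nr; i++)
 *		process_branch(bs->entries[i].from, bs->entries[i].to);
 */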
105 
106 struct task_struct;
107 
108 /*
109  * extra PMU register associated with an event
110  */
111 struct hw_perf_event_extra {
112 	u64		config;	/* register value */
113 	unsigned int	reg;	/* register address or index */
114 	int		alloc;	/* extra register already allocated */
115 	int		idx;	/* index in shared_regs->regs[] */
116 };
117 
118 /**
119  * struct hw_perf_event - performance event hardware details:
120  */
121 struct hw_perf_event {
122 #ifdef CONFIG_PERF_EVENTS
123 	union {
124 		struct { /* hardware */
125 			u64		config;
126 			u64		last_tag;
127 			unsigned long	config_base;
128 			unsigned long	event_base;
129 			int		event_base_rdpmc;
130 			int		idx;
131 			int		last_cpu;
132 			int		flags;
133 
134 			struct hw_perf_event_extra extra_reg;
135 			struct hw_perf_event_extra branch_reg;
136 		};
137 		struct { /* software */
138 			struct hrtimer	hrtimer;
139 		};
140 		struct { /* tracepoint */
141 			/* for tp_event->class */
142 			struct list_head	tp_list;
143 		};
144 		struct { /* amd_power */
145 			u64	pwr_acc;
146 			u64	ptsc;
147 		};
148 #ifdef CONFIG_HAVE_HW_BREAKPOINT
149 		struct { /* breakpoint */
150 			/*
151 			 * Crufty hack to avoid the chicken and egg
152 			 * problem hw_breakpoint has with context
153 			 * creation and event initialization.
154 			 */
155 			struct arch_hw_breakpoint	info;
156 			struct list_head		bp_list;
157 		};
158 #endif
159 		struct { /* amd_iommu */
160 			u8	iommu_bank;
161 			u8	iommu_cntr;
162 			u16	padding;
163 			u64	conf;
164 			u64	conf1;
165 		};
166 	};
167 	/*
168 	 * If the event is a per-task event, this will point to the task in
169 	 * question. See the comment in perf_event_alloc().
170 	 */
171 	struct task_struct		*target;
172 
173 	/*
174 	 * The PMU stores its hardware address filter configuration
175 	 * here.
176 	 */
177 	void				*addr_filters;
178 
179 	/* Last sync'ed generation of filters */
180 	unsigned long			addr_filters_gen;
181 
182 /*
183  * hw_perf_event::state flags; used to track the PERF_EF_* state.
184  */
185 #define PERF_HES_STOPPED	0x01 /* the counter is stopped */
186 #define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
187 #define PERF_HES_ARCH		0x04
188 
189 	int				state;
190 
191 	/*
192 	 * The last observed hardware counter value, updated with a
193 	 * local64_cmpxchg() such that pmu::read() can be called nested.
194 	 */
195 	local64_t			prev_count;
196 
197 	/*
198 	 * The period to start the next sample with.
199 	 */
200 	u64				sample_period;
201 
202 	/*
203 	 * The period we started this sample with.
204 	 */
205 	u64				last_period;
206 
207 	/*
208 	 * How much is left of the current period; note that this is
209 	 * a full 64-bit value and allows for generation of periods longer
210 	 * than the hardware might allow.
211 	 */
212 	local64_t			period_left;
213 
214 	/*
215 	 * State for throttling the event, see __perf_event_overflow() and
216 	 * perf_adjust_freq_unthr_context().
217 	 */
218 	u64                             interrupts_seq;
219 	u64				interrupts;
220 
221 	/*
222 	 * State for freq target events, see __perf_event_overflow() and
223 	 * perf_adjust_freq_unthr_context().
224 	 */
225 	u64				freq_time_stamp;
226 	u64				freq_count_stamp;
227 #endif
228 };
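
/*
 * The prev_count/period_left fields above are usually maintained by a
 * driver's pmu::read() with a cmpxchg loop so that nested reads are safe.
 * A sketch modelled on the x86 update path; here hwc stands for &event->hw,
 * and read_hw_counter() plus the shift-based width handling are
 * illustrative, not part of this header:
 *
 *	u64 prev, now;
 *	s64 delta;
 *
 *	do {
 *		prev = local64_read(&hwc->prev_count);
 *		now  = read_hw_counter(hwc->idx);
 *	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
 *
 *	delta = (now << shift) - (prev << shift);
 *	delta >>= shift;
 *	local64_add(delta, &event->count);
 *	local64_sub(delta, &hwc->period_left);
 *
 * The shift pair sign-extends the delta to the hardware counter width.
 */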
229 
230 struct perf_event;
231 
232 /*
233  * Common implementation detail of pmu::{start,commit,cancel}_txn
234  */
235 #define PERF_PMU_TXN_ADD  0x1		/* txn to add/schedule event on PMU */
236 #define PERF_PMU_TXN_READ 0x2		/* txn to read event group from PMU */
237 
238 /**
239  * pmu::capabilities flags
240  */
241 #define PERF_PMU_CAP_NO_INTERRUPT		0x01
242 #define PERF_PMU_CAP_NO_NMI			0x02
243 #define PERF_PMU_CAP_AUX_NO_SG			0x04
244 #define PERF_PMU_CAP_EXCLUSIVE			0x10
245 #define PERF_PMU_CAP_ITRACE			0x20
246 #define PERF_PMU_CAP_HETEROGENEOUS_CPUS		0x40
247 #define PERF_PMU_CAP_NO_EXCLUDE			0x80
248 
249 /**
250  * struct pmu - generic performance monitoring unit
251  */
252 struct pmu {
253 	struct list_head		entry;
254 
255 	struct module			*module;
256 	struct device			*dev;
257 	const struct attribute_group	**attr_groups;
258 	const struct attribute_group	**attr_update;
259 	const char			*name;
260 	int				type;
261 
262 	/*
263 	 * various common per-pmu feature flags
264 	 */
265 	int				capabilities;
266 
267 	int __percpu			*pmu_disable_count;
268 	struct perf_cpu_context __percpu *pmu_cpu_context;
269 	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
270 	int				task_ctx_nr;
271 	int				hrtimer_interval_ms;
272 
273 	/* number of address filters this PMU can do */
274 	unsigned int			nr_addr_filters;
275 
276 	/*
277 	 * Fully disable/enable this PMU; can be used to protect from the PMI
278 	 * as well as for lazy/batch writing of the MSRs.
279 	 */
280 	void (*pmu_enable)		(struct pmu *pmu); /* optional */
281 	void (*pmu_disable)		(struct pmu *pmu); /* optional */
282 
283 	/*
284 	 * Try and initialize the event for this PMU.
285 	 *
286 	 * Returns:
287 	 *  -ENOENT	-- @event is not for this PMU
288 	 *
289 	 *  -ENODEV	-- @event is for this PMU but PMU not present
290 	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
291 	 *  -EINVAL	-- @event is for this PMU but @event is not valid
292 	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
293 	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
294 	 *
295 	 *  0		-- @event is for this PMU and valid
296 	 *
297 	 * Other error return values are allowed.
298 	 */
299 	int (*event_init)		(struct perf_event *event);
300 
301 	/*
302 	 * Notification that the event was mapped or unmapped.  Called
303 	 * in the context of the mapping task.
304 	 */
305 	void (*event_mapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
306 	void (*event_unmapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
307 
308 	/*
309 	 * Flags for ->add()/->del()/->start()/->stop(). There are
310 	 * matching hw_perf_event::state flags.
311 	 */
312 #define PERF_EF_START	0x01		/* start the counter when adding    */
313 #define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
314 #define PERF_EF_UPDATE	0x04		/* update the counter when stopping */
315 
316 	/*
317 	 * Adds/Removes a counter to/from the PMU; this can be done inside a
318 	 * transaction, see the ->*_txn() methods.
319 	 *
320 	 * The add/del callbacks will reserve all hardware resources required
321 	 * to service the event; this includes any counter constraint
322 	 * scheduling etc. (A callback skeleton is sketched after this struct.)
323 	 *
324 	 * Called with IRQs disabled and the PMU disabled on the CPU the event
325 	 * is on.
326 	 *
327 	 * ->add() called without PERF_EF_START should result in the same state
328 	 *  as ->add() followed by ->stop().
329 	 *
330 	 * ->del() must always PERF_EF_UPDATE stop an event. If it calls
331 	 *  ->stop() that must deal with already being stopped without
332 	 *  PERF_EF_UPDATE.
333 	 */
334 	int  (*add)			(struct perf_event *event, int flags);
335 	void (*del)			(struct perf_event *event, int flags);
336 
337 	/*
338 	 * Starts/Stops a counter present on the PMU.
339 	 *
340 	 * The PMI handler should stop the counter when perf_event_overflow()
341 	 * returns !0. ->start() will be used to continue.
342 	 *
343 	 * Also used to change the sample period.
344 	 *
345 	 * Called with IRQs disabled and the PMU disabled on the CPU the event
346 	 * is on -- will be called from NMI context when the PMU generates
347 	 * NMIs.
348 	 *
349 	 * ->stop() with PERF_EF_UPDATE will read the counter and update
350 	 *  period/count values like ->read() would.
351 	 *
352 	 * ->start() with PERF_EF_RELOAD will reprogram the counter
353 	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
354 	 */
355 	void (*start)			(struct perf_event *event, int flags);
356 	void (*stop)			(struct perf_event *event, int flags);
357 
358 	/*
359 	 * Updates the counter value of the event.
360 	 *
361 	 * For sampling capable PMUs this will also update the software period
362 	 * hw_perf_event::period_left field.
363 	 */
364 	void (*read)			(struct perf_event *event);
365 
366 	/*
367 	 * Group event scheduling is treated as a transaction: add
368 	 * group events as a whole and perform one schedulability test.
369 	 * If the test fails, roll back the whole group.
370 	 *
371 	 * Start the transaction, after this ->add() doesn't need to
372 	 * do schedulability tests.
373 	 *
374 	 * Optional.
375 	 */
376 	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
377 	/*
378 	 * If ->start_txn() disabled the ->add() schedulability test
379 	 * then ->commit_txn() is required to perform one. On success
380 	 * the transaction is closed. On error the transaction is kept
381 	 * open until ->cancel_txn() is called.
382 	 *
383 	 * Optional.
384 	 */
385 	int  (*commit_txn)		(struct pmu *pmu);
386 	/*
387 	 * Will cancel the transaction, assumes ->del() is called
388 	 * for each successful ->add() during the transaction.
389 	 *
390 	 * Optional.
391 	 */
392 	void (*cancel_txn)		(struct pmu *pmu);
393 
394 	/*
395 	 * Will return the value for perf_event_mmap_page::index for this event;
396 	 * if no implementation is provided it will default to: event->hw.idx + 1.
397 	 */
398 	int (*event_idx)		(struct perf_event *event); /* optional */
399 
400 	/*
401 	 * context-switches callback
402 	 */
403 	void (*sched_task)		(struct perf_event_context *ctx,
404 					bool sched_in);
405 	/*
406 	 * PMU specific data size
407 	 */
408 	size_t				task_ctx_size;
409 
410 
411 	/*
412 	 * Set up pmu-private data structures for an AUX area
413 	 */
414 	void *(*setup_aux)		(struct perf_event *event, void **pages,
415 					 int nr_pages, bool overwrite);
416 					/* optional */
417 
418 	/*
419 	 * Free pmu-private AUX data structures
420 	 */
421 	void (*free_aux)		(void *aux); /* optional */
422 
423 	/*
424 	 * Validate address range filters: make sure the HW supports the
425 	 * requested configuration and number of filters; return 0 if the
426 	 * supplied filters are valid, -errno otherwise.
427 	 *
428 	 * Runs in the context of the ioctl()ing process and is not serialized
429 	 * with the rest of the PMU callbacks.
430 	 */
431 	int (*addr_filters_validate)	(struct list_head *filters);
432 					/* optional */
433 
434 	/*
435 	 * Synchronize address range filter configuration:
436 	 * translate hw-agnostic filters into hardware configuration in
437 	 * event::hw::addr_filters.
438 	 *
439 	 * Runs as part of the filter sync sequence that is done in the ->start()
440 	 * callback by calling perf_event_addr_filters_sync().
441 	 *
442 	 * May (and should) traverse event::addr_filters::list, for which the
443 	 * caller provides the necessary serialization.
444 	 */
445 	void (*addr_filters_sync)	(struct perf_event *event);
446 					/* optional */
447 
448 	/*
449 	 * Filter events for PMU-specific reasons.
450 	 */
451 	int (*filter_match)		(struct perf_event *event); /* optional */
452 
453 	/*
454 	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
455 	 */
456 	int (*check_period)		(struct perf_event *event, u64 value); /* optional */
457 };
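
/*
 * A minimal registration sketch, for illustration only; the my_* callbacks
 * are stubs, and real drivers add counter scheduling, overflow handling and
 * capability flags as needed (see the in-tree PMUs for complete examples):
 *
 *	static int my_event_init(struct perf_event *event)
 *	{
 *		if (event->attr.type != event->pmu->type)
 *			return -ENOENT;
 *		return 0;
 *	}
 *
 *	static int my_add(struct perf_event *event, int flags)
 *	{
 *		if (flags & PERF_EF_START)
 *			my_start(event, PERF_EF_RELOAD);
 *		return 0;
 *	}
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_sw_context,
 *		.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 */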
458 
459 enum perf_addr_filter_action_t {
460 	PERF_ADDR_FILTER_ACTION_STOP = 0,
461 	PERF_ADDR_FILTER_ACTION_START,
462 	PERF_ADDR_FILTER_ACTION_FILTER,
463 };
464 
465 /**
466  * struct perf_addr_filter - address range filter definition
467  * @entry:	event's filter list linkage
468  * @path:	object file's path for file-based filters
469  * @offset:	filter range offset
470  * @size:	filter range size (size==0 means single address trigger)
471  * @action:	filter/start/stop
472  *
473  * This is a hardware-agnostic filter configuration as specified by the user.
474  */
475 struct perf_addr_filter {
476 	struct list_head	entry;
477 	struct path		path;
478 	unsigned long		offset;
479 	unsigned long		size;
480 	enum perf_addr_filter_action_t	action;
481 };
482 
483 /**
484  * struct perf_addr_filters_head - container for address range filters
485  * @list:	list of filters for this event
486  * @lock:	spinlock that serializes accesses to the @list and event's
487  *		(and its children's) filter generations.
488  * @nr_file_filters:	number of file-based filters
489  *
490  * A child event will use parent's @list (and therefore @lock), so they are
491  * bundled together; see perf_event_addr_filters().
492  */
493 struct perf_addr_filters_head {
494 	struct list_head	list;
495 	raw_spinlock_t		lock;
496 	unsigned int		nr_file_filters;
497 };
498 
499 struct perf_addr_filter_range {
500 	unsigned long		start;
501 	unsigned long		size;
502 };
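
/*
 * Sketch of how a PMU's ->addr_filters_sync() path typically walks the
 * hardware-agnostic filter list and programs the ranges the core resolved
 * into event->addr_filter_ranges[]; program_hw_range() is hypothetical and
 * the required serialization is provided by the caller (see
 * perf_event_addr_filters() further down):
 *
 *	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
 *	struct perf_addr_filter *filter;
 *	int i = 0;
 *
 *	list_for_each_entry(filter, &head->list, entry) {
 *		program_hw_range(i, event->addr_filter_ranges[i].start,
 *				 event->addr_filter_ranges[i].size,
 *				 filter->action);
 *		i++;
 *	}
 */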
503 
504 /**
505  * enum perf_event_state - the states of an event:
506  */
507 enum perf_event_state {
508 	PERF_EVENT_STATE_DEAD		= -4,
509 	PERF_EVENT_STATE_EXIT		= -3,
510 	PERF_EVENT_STATE_ERROR		= -2,
511 	PERF_EVENT_STATE_OFF		= -1,
512 	PERF_EVENT_STATE_INACTIVE	=  0,
513 	PERF_EVENT_STATE_ACTIVE		=  1,
514 };
515 
516 struct file;
517 struct perf_sample_data;
518 
519 typedef void (*perf_overflow_handler_t)(struct perf_event *,
520 					struct perf_sample_data *,
521 					struct pt_regs *regs);
522 
523 /*
524  * Event capabilities. For event_caps and groups caps.
525  *
526  * PERF_EV_CAP_SOFTWARE: Is a software event.
527  * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
528  * from any CPU in the package where it is active.
529  */
530 #define PERF_EV_CAP_SOFTWARE		BIT(0)
531 #define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)
532 
533 #define SWEVENT_HLIST_BITS		8
534 #define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)
535 
536 struct swevent_hlist {
537 	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
538 	struct rcu_head			rcu_head;
539 };
540 
541 #define PERF_ATTACH_CONTEXT	0x01
542 #define PERF_ATTACH_GROUP	0x02
543 #define PERF_ATTACH_TASK	0x04
544 #define PERF_ATTACH_TASK_DATA	0x08
545 #define PERF_ATTACH_ITRACE	0x10
546 
547 struct perf_cgroup;
548 struct ring_buffer;
549 
550 struct pmu_event_list {
551 	raw_spinlock_t		lock;
552 	struct list_head	list;
553 };
554 
555 #define for_each_sibling_event(sibling, event)			\
556 	if ((event)->group_leader == (event))			\
557 		list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
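
/*
 * Usage sketch: visit every sibling of a group leader (the loop body only
 * runs when @event really is its own group leader); enabled/running/total
 * are the caller's locals:
 *
 *	struct perf_event *sibling;
 *
 *	for_each_sibling_event(sibling, leader)
 *		total += perf_event_read_value(sibling, &enabled, &running);
 */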
558 
559 /**
560  * struct perf_event - performance event kernel representation:
561  */
562 struct perf_event {
563 #ifdef CONFIG_PERF_EVENTS
564 	/*
565 	 * entry onto perf_event_context::event_list;
566 	 *   modifications require ctx->lock,
567 	 *   iterations are RCU safe.
568 	 */
569 	struct list_head		event_entry;
570 
571 	/*
572 	 * Locked for modification by both ctx->mutex and ctx->lock; holding
573 	 * either suffices for read.
574 	 */
575 	struct list_head		sibling_list;
576 	struct list_head		active_list;
577 	/*
578 	 * Node on the pinned or flexible tree located in the event context.
579 	 */
580 	struct rb_node			group_node;
581 	u64				group_index;
582 	/*
583 	 * We need storage to track the entries in perf_pmu_migrate_context; we
584 	 * cannot use the event_entry because of RCU and we want to keep the
585 	 * group intact, which avoids us using the other two entries.
586 	 */
587 	struct list_head		migrate_entry;
588 
589 	struct hlist_node		hlist_entry;
590 	struct list_head		active_entry;
591 	int				nr_siblings;
592 
593 	/* Not serialized. Only written during event initialization. */
594 	int				event_caps;
595 	/* The cumulative AND of all event_caps for events in this group. */
596 	int				group_caps;
597 
598 	struct perf_event		*group_leader;
599 	struct pmu			*pmu;
600 	void				*pmu_private;
601 
602 	enum perf_event_state		state;
603 	unsigned int			attach_state;
604 	local64_t			count;
605 	atomic64_t			child_count;
606 
607 	/*
608 	 * These are the total time in nanoseconds that the event
609 	 * has been enabled (i.e. eligible to run, and the task has
610 	 * been scheduled in, if this is a per-task event)
611 	 * and running (scheduled onto the CPU), respectively.
612 	 */
613 	u64				total_time_enabled;
614 	u64				total_time_running;
615 	u64				tstamp;
616 
617 	/*
618 	 * timestamp shadows the actual context timing but it can
619 	 * be safely used in NMI interrupt context. It reflects the
620 	 * context time as it was when the event was last scheduled in.
621 	 *
622 	 * ctx_time already accounts for ctx->timestamp. Therefore to
623 	 * compute ctx_time for a sample, simply add perf_clock().
624 	 */
625 	u64				shadow_ctx_time;
626 
627 	struct perf_event_attr		attr;
628 	u16				header_size;
629 	u16				id_header_size;
630 	u16				read_size;
631 	struct hw_perf_event		hw;
632 
633 	struct perf_event_context	*ctx;
634 	atomic_long_t			refcount;
635 
636 	/*
637 	 * These accumulate total time (in nanoseconds) that children
638 	 * events have been enabled and running, respectively.
639 	 */
640 	atomic64_t			child_total_time_enabled;
641 	atomic64_t			child_total_time_running;
642 
643 	/*
644 	 * Protect attach/detach and child_list:
645 	 */
646 	struct mutex			child_mutex;
647 	struct list_head		child_list;
648 	struct perf_event		*parent;
649 
650 	int				oncpu;
651 	int				cpu;
652 
653 	struct list_head		owner_entry;
654 	struct task_struct		*owner;
655 
656 	/* mmap bits */
657 	struct mutex			mmap_mutex;
658 	atomic_t			mmap_count;
659 
660 	struct ring_buffer		*rb;
661 	struct list_head		rb_entry;
662 	unsigned long			rcu_batches;
663 	int				rcu_pending;
664 
665 	/* poll related */
666 	wait_queue_head_t		waitq;
667 	struct fasync_struct		*fasync;
668 
669 	/* delayed work for NMIs and such */
670 	int				pending_wakeup;
671 	int				pending_kill;
672 	int				pending_disable;
673 	struct irq_work			pending;
674 
675 	atomic_t			event_limit;
676 
677 	/* address range filters */
678 	struct perf_addr_filters_head	addr_filters;
679 	/* vma address array for file-based filters */
680 	struct perf_addr_filter_range	*addr_filter_ranges;
681 	unsigned long			addr_filters_gen;
682 
683 	void (*destroy)(struct perf_event *);
684 	struct rcu_head			rcu_head;
685 
686 	struct pid_namespace		*ns;
687 	u64				id;
688 
689 	u64				(*clock)(void);
690 	perf_overflow_handler_t		overflow_handler;
691 	void				*overflow_handler_context;
692 #ifdef CONFIG_BPF_SYSCALL
693 	perf_overflow_handler_t		orig_overflow_handler;
694 	struct bpf_prog			*prog;
695 #endif
696 
697 #ifdef CONFIG_EVENT_TRACING
698 	struct trace_event_call		*tp_event;
699 	struct event_filter		*filter;
700 #ifdef CONFIG_FUNCTION_TRACER
701 	struct ftrace_ops               ftrace_ops;
702 #endif
703 #endif
704 
705 #ifdef CONFIG_CGROUP_PERF
706 	struct perf_cgroup		*cgrp; /* cgroup the event is attached to */
707 #endif
708 
709 	struct list_head		sb_list;
710 #endif /* CONFIG_PERF_EVENTS */
711 };
712 
713 
714 struct perf_event_groups {
715 	struct rb_root	tree;
716 	u64		index;
717 };
718 
719 /**
720  * struct perf_event_context - event context structure
721  *
722  * Used as a container for task events and CPU events as well:
723  */
724 struct perf_event_context {
725 	struct pmu			*pmu;
726 	/*
727 	 * Protect the states of the events in the list,
728 	 * nr_active, and the list:
729 	 */
730 	raw_spinlock_t			lock;
731 	/*
732 	 * Protect the list of events.  Locking either mutex or lock
733 	 * is sufficient to ensure the list doesn't change; to change
734 	 * the list you need to lock both the mutex and the spinlock.
735 	 */
736 	struct mutex			mutex;
737 
738 	struct list_head		active_ctx_list;
739 	struct perf_event_groups	pinned_groups;
740 	struct perf_event_groups	flexible_groups;
741 	struct list_head		event_list;
742 
743 	struct list_head		pinned_active;
744 	struct list_head		flexible_active;
745 
746 	int				nr_events;
747 	int				nr_active;
748 	int				is_active;
749 	int				nr_stat;
750 	int				nr_freq;
751 	int				rotate_disable;
752 	refcount_t			refcount;
753 	struct task_struct		*task;
754 
755 	/*
756 	 * Context clock, runs when context enabled.
757 	 */
758 	u64				time;
759 	u64				timestamp;
760 
761 	/*
762 	 * These fields let us detect when two contexts have both
763 	 * been cloned (inherited) from a common ancestor.
764 	 */
765 	struct perf_event_context	*parent_ctx;
766 	u64				parent_gen;
767 	u64				generation;
768 	int				pin_count;
769 #ifdef CONFIG_CGROUP_PERF
770 	int				nr_cgroups;	 /* cgroup evts */
771 #endif
772 	void				*task_ctx_data; /* pmu specific data */
773 	struct rcu_head			rcu_head;
774 };
775 
776 /*
777  * Number of contexts where an event can trigger:
778  *	task, softirq, hardirq, nmi.
779  */
780 #define PERF_NR_CONTEXTS	4
781 
782 /**
783  * struct perf_cpu_context - per-CPU event context structure
784  */
785 struct perf_cpu_context {
786 	struct perf_event_context	ctx;
787 	struct perf_event_context	*task_ctx;
788 	int				active_oncpu;
789 	int				exclusive;
790 
791 	raw_spinlock_t			hrtimer_lock;
792 	struct hrtimer			hrtimer;
793 	ktime_t				hrtimer_interval;
794 	unsigned int			hrtimer_active;
795 
796 #ifdef CONFIG_CGROUP_PERF
797 	struct perf_cgroup		*cgrp;
798 	struct list_head		cgrp_cpuctx_entry;
799 #endif
800 
801 	struct list_head		sched_cb_entry;
802 	int				sched_cb_usage;
803 
804 	int				online;
805 };
806 
807 struct perf_output_handle {
808 	struct perf_event		*event;
809 	struct ring_buffer		*rb;
810 	unsigned long			wakeup;
811 	unsigned long			size;
812 	u64				aux_flags;
813 	union {
814 		void			*addr;
815 		unsigned long		head;
816 	};
817 	int				page;
818 };
819 
820 struct bpf_perf_event_data_kern {
821 	bpf_user_pt_regs_t *regs;
822 	struct perf_sample_data *data;
823 	struct perf_event *event;
824 };
825 
826 #ifdef CONFIG_CGROUP_PERF
827 
828 /*
829  * perf_cgroup_info keeps track of time_enabled for a cgroup.
830  * This is a per-cpu dynamically allocated data structure.
831  */
832 struct perf_cgroup_info {
833 	u64				time;
834 	u64				timestamp;
835 };
836 
837 struct perf_cgroup {
838 	struct cgroup_subsys_state	css;
839 	struct perf_cgroup_info	__percpu *info;
840 };
841 
842 /*
843  * Must ensure cgroup is pinned (css_get) before calling
844  * this function. In other words, we cannot call this function
845  * if there is no cgroup event for the current CPU context.
846  */
847 static inline struct perf_cgroup *
848 perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
849 {
850 	return container_of(task_css_check(task, perf_event_cgrp_id,
851 					   ctx ? lockdep_is_held(&ctx->lock)
852 					       : true),
853 			    struct perf_cgroup, css);
854 }
855 #endif /* CONFIG_CGROUP_PERF */
856 
857 #ifdef CONFIG_PERF_EVENTS
858 
859 extern void *perf_aux_output_begin(struct perf_output_handle *handle,
860 				   struct perf_event *event);
861 extern void perf_aux_output_end(struct perf_output_handle *handle,
862 				unsigned long size);
863 extern int perf_aux_output_skip(struct perf_output_handle *handle,
864 				unsigned long size);
865 extern void *perf_get_aux(struct perf_output_handle *handle);
866 extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
867 extern void perf_event_itrace_started(struct perf_event *event);
868 
869 extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
870 extern void perf_pmu_unregister(struct pmu *pmu);
871 
872 extern int perf_num_counters(void);
873 extern const char *perf_pmu_name(void);
874 extern void __perf_event_task_sched_in(struct task_struct *prev,
875 				       struct task_struct *task);
876 extern void __perf_event_task_sched_out(struct task_struct *prev,
877 					struct task_struct *next);
878 extern int perf_event_init_task(struct task_struct *child);
879 extern void perf_event_exit_task(struct task_struct *child);
880 extern void perf_event_free_task(struct task_struct *task);
881 extern void perf_event_delayed_put(struct task_struct *task);
882 extern struct file *perf_event_get(unsigned int fd);
883 extern const struct perf_event *perf_get_event(struct file *file);
884 extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
885 extern void perf_event_print_debug(void);
886 extern void perf_pmu_disable(struct pmu *pmu);
887 extern void perf_pmu_enable(struct pmu *pmu);
888 extern void perf_sched_cb_dec(struct pmu *pmu);
889 extern void perf_sched_cb_inc(struct pmu *pmu);
890 extern int perf_event_task_disable(void);
891 extern int perf_event_task_enable(void);
892 
893 extern void perf_pmu_resched(struct pmu *pmu);
894 
895 extern int perf_event_refresh(struct perf_event *event, int refresh);
896 extern void perf_event_update_userpage(struct perf_event *event);
897 extern int perf_event_release_kernel(struct perf_event *event);
898 extern struct perf_event *
899 perf_event_create_kernel_counter(struct perf_event_attr *attr,
900 				int cpu,
901 				struct task_struct *task,
902 				perf_overflow_handler_t callback,
903 				void *context);
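
/*
 * Sketch of in-kernel counter creation, in the style of users such as the
 * hardlockup watchdog; the attribute values and my_overflow_handler are
 * illustrative only:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.pinned		= 1,
 *		.disabled	= 1,
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow_handler, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	perf_event_enable(event);
 */
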
904 extern void perf_pmu_migrate_context(struct pmu *pmu,
905 				int src_cpu, int dst_cpu);
906 int perf_event_read_local(struct perf_event *event, u64 *value,
907 			  u64 *enabled, u64 *running);
908 extern u64 perf_event_read_value(struct perf_event *event,
909 				 u64 *enabled, u64 *running);
910 
911 
912 struct perf_sample_data {
913 	/*
914 	 * Fields set by perf_sample_data_init(), grouped so as to
915 	 * minimize the cachelines touched.
916 	 */
917 	u64				addr;
918 	struct perf_raw_record		*raw;
919 	struct perf_branch_stack	*br_stack;
920 	u64				period;
921 	u64				weight;
922 	u64				txn;
923 	union  perf_mem_data_src	data_src;
924 
925 	/*
926 	 * The other fields, optionally {set,used} by
927 	 * perf_{prepare,output}_sample().
928 	 */
929 	u64				type;
930 	u64				ip;
931 	struct {
932 		u32	pid;
933 		u32	tid;
934 	}				tid_entry;
935 	u64				time;
936 	u64				id;
937 	u64				stream_id;
938 	struct {
939 		u32	cpu;
940 		u32	reserved;
941 	}				cpu_entry;
942 	struct perf_callchain_entry	*callchain;
943 
944 	/*
945 	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
946 	 * on arch details.
947 	 */
948 	struct perf_regs		regs_user;
949 	struct pt_regs			regs_user_copy;
950 
951 	struct perf_regs		regs_intr;
952 	u64				stack_user_size;
953 
954 	u64				phys_addr;
955 } ____cacheline_aligned;
956 
957 /* default value for data source */
958 #define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
959 		    PERF_MEM_S(LVL, NA)   |\
960 		    PERF_MEM_S(SNOOP, NA) |\
961 		    PERF_MEM_S(LOCK, NA)  |\
962 		    PERF_MEM_S(TLB, NA))
963 
964 static inline void perf_sample_data_init(struct perf_sample_data *data,
965 					 u64 addr, u64 period)
966 {
967 	/* remaining struct members initialized in perf_prepare_sample() */
968 	data->addr = addr;
969 	data->raw  = NULL;
970 	data->br_stack = NULL;
971 	data->period = period;
972 	data->weight = 0;
973 	data->data_src.val = PERF_MEM_NA;
974 	data->txn = 0;
975 }
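
/*
 * Sketch of the common PMI-handler pattern built on top of these helpers;
 * it mirrors what many drivers do, though throttling/stop details vary by
 * PMU (regs are the interrupt registers):
 *
 *	struct perf_sample_data data;
 *
 *	perf_sample_data_init(&data, 0, event->hw.last_period);
 *	if (perf_event_overflow(event, &data, regs))
 *		event->pmu->stop(event, 0);
 */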
976 
977 extern void perf_output_sample(struct perf_output_handle *handle,
978 			       struct perf_event_header *header,
979 			       struct perf_sample_data *data,
980 			       struct perf_event *event);
981 extern void perf_prepare_sample(struct perf_event_header *header,
982 				struct perf_sample_data *data,
983 				struct perf_event *event,
984 				struct pt_regs *regs);
985 
986 extern int perf_event_overflow(struct perf_event *event,
987 				 struct perf_sample_data *data,
988 				 struct pt_regs *regs);
989 
990 extern void perf_event_output_forward(struct perf_event *event,
991 				     struct perf_sample_data *data,
992 				     struct pt_regs *regs);
993 extern void perf_event_output_backward(struct perf_event *event,
994 				       struct perf_sample_data *data,
995 				       struct pt_regs *regs);
996 extern int perf_event_output(struct perf_event *event,
997 			     struct perf_sample_data *data,
998 			     struct pt_regs *regs);
999 
1000 static inline bool
1001 is_default_overflow_handler(struct perf_event *event)
1002 {
1003 	if (likely(event->overflow_handler == perf_event_output_forward))
1004 		return true;
1005 	if (unlikely(event->overflow_handler == perf_event_output_backward))
1006 		return true;
1007 	return false;
1008 }
1009 
1010 extern void
1011 perf_event_header__init_id(struct perf_event_header *header,
1012 			   struct perf_sample_data *data,
1013 			   struct perf_event *event);
1014 extern void
1015 perf_event__output_id_sample(struct perf_event *event,
1016 			     struct perf_output_handle *handle,
1017 			     struct perf_sample_data *sample);
1018 
1019 extern void
1020 perf_log_lost_samples(struct perf_event *event, u64 lost);
1021 
1022 static inline bool event_has_any_exclude_flag(struct perf_event *event)
1023 {
1024 	struct perf_event_attr *attr = &event->attr;
1025 
1026 	return attr->exclude_idle || attr->exclude_user ||
1027 	       attr->exclude_kernel || attr->exclude_hv ||
1028 	       attr->exclude_guest || attr->exclude_host;
1029 }
1030 
1031 static inline bool is_sampling_event(struct perf_event *event)
1032 {
1033 	return event->attr.sample_period != 0;
1034 }
1035 
1036 /*
1037  * Return 1 for a software event, 0 for a hardware event
1038  */
1039 static inline int is_software_event(struct perf_event *event)
1040 {
1041 	return event->event_caps & PERF_EV_CAP_SOFTWARE;
1042 }
1043 
1044 /*
1045  * Return 1 for event in sw context, 0 for event in hw context
1046  */
1047 static inline int in_software_context(struct perf_event *event)
1048 {
1049 	return event->ctx->pmu->task_ctx_nr == perf_sw_context;
1050 }
1051 
1052 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
1053 
1054 extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
1055 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
1056 
1057 #ifndef perf_arch_fetch_caller_regs
1058 static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
1059 #endif
1060 
1061 /*
1062  * When generating a perf sample in-line, instead of from an interrupt /
1063  * exception, we lack a pt_regs. This is typically used from software events
1064  * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
1065  *
1066  * We typically don't need a full set, but (for x86) do require:
1067  * - ip for PERF_SAMPLE_IP
1068  * - cs for user_mode() tests
1069  * - sp for PERF_SAMPLE_CALLCHAIN
1070  * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
1071  *
1072  * NOTE: assumes @regs is otherwise already 0 filled; this is important for
1073  * things like PERF_SAMPLE_REGS_INTR.
1074  */
1075 static inline void perf_fetch_caller_regs(struct pt_regs *regs)
1076 {
1077 	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
1078 }
1079 
1080 static __always_inline void
1081 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
1082 {
1083 	if (static_key_false(&perf_swevent_enabled[event_id]))
1084 		__perf_sw_event(event_id, nr, regs, addr);
1085 }
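
/*
 * Typical call site (sketch): the page-fault path counts a software event
 * against the faulting context, e.g.
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */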
1086 
1087 DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
1088 
1089 /*
1090  * 'Special' version for the scheduler; it hard-assumes no recursion,
1091  * which is guaranteed by us not actually scheduling inside other swevents
1092  * because those disable preemption.
1093  */
1094 static __always_inline void
1095 perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
1096 {
1097 	if (static_key_false(&perf_swevent_enabled[event_id])) {
1098 		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
1099 
1100 		perf_fetch_caller_regs(regs);
1101 		___perf_sw_event(event_id, nr, regs, addr);
1102 	}
1103 }
1104 
1105 extern struct static_key_false perf_sched_events;
1106 
1107 static __always_inline bool
1108 perf_sw_migrate_enabled(void)
1109 {
1110 	if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
1111 		return true;
1112 	return false;
1113 }
1114 
1115 static inline void perf_event_task_migrate(struct task_struct *task)
1116 {
1117 	if (perf_sw_migrate_enabled())
1118 		task->sched_migrated = 1;
1119 }
1120 
1121 static inline void perf_event_task_sched_in(struct task_struct *prev,
1122 					    struct task_struct *task)
1123 {
1124 	if (static_branch_unlikely(&perf_sched_events))
1125 		__perf_event_task_sched_in(prev, task);
1126 
1127 	if (perf_sw_migrate_enabled() && task->sched_migrated) {
1128 		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
1129 
1130 		perf_fetch_caller_regs(regs);
1131 		___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
1132 		task->sched_migrated = 0;
1133 	}
1134 }
1135 
1136 static inline void perf_event_task_sched_out(struct task_struct *prev,
1137 					     struct task_struct *next)
1138 {
1139 	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
1140 
1141 	if (static_branch_unlikely(&perf_sched_events))
1142 		__perf_event_task_sched_out(prev, next);
1143 }
1144 
1145 extern void perf_event_mmap(struct vm_area_struct *vma);
1146 
1147 extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
1148 			       bool unregister, const char *sym);
1149 extern void perf_event_bpf_event(struct bpf_prog *prog,
1150 				 enum perf_bpf_event_type type,
1151 				 u16 flags);
1152 
1153 extern struct perf_guest_info_callbacks *perf_guest_cbs;
1154 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1155 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1156 
1157 extern void perf_event_exec(void);
1158 extern void perf_event_comm(struct task_struct *tsk, bool exec);
1159 extern void perf_event_namespaces(struct task_struct *tsk);
1160 extern void perf_event_fork(struct task_struct *tsk);
1161 
1162 /* Callchains */
1163 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
1164 
1165 extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
1166 extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
1167 extern struct perf_callchain_entry *
1168 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
1169 		   u32 max_stack, bool crosstask, bool add_mark);
1170 extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
1171 extern int get_callchain_buffers(int max_stack);
1172 extern void put_callchain_buffers(void);
1173 
1174 extern int sysctl_perf_event_max_stack;
1175 extern int sysctl_perf_event_max_contexts_per_stack;
1176 
1177 static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
1178 {
1179 	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
1180 		struct perf_callchain_entry *entry = ctx->entry;
1181 		entry->ip[entry->nr++] = ip;
1182 		++ctx->contexts;
1183 		return 0;
1184 	} else {
1185 		ctx->contexts_maxed = true;
1186 		return -1; /* no more room, stop walking the stack */
1187 	}
1188 }
1189 
1190 static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
1191 {
1192 	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
1193 		struct perf_callchain_entry *entry = ctx->entry;
1194 		entry->ip[entry->nr++] = ip;
1195 		++ctx->nr;
1196 		return 0;
1197 	} else {
1198 		return -1; /* no more room, stop walking the stack */
1199 	}
1200 }
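
/*
 * Sketch of how an architecture's callchain walker feeds frames into these
 * helpers; the frame/unwind step is a hypothetical placeholder:
 *
 *	perf_callchain_store(ctx, instruction_pointer(regs));
 *	while (frame) {
 *		if (perf_callchain_store(ctx, frame->return_address))
 *			break;
 *		frame = unwind_next_frame(frame);
 *	}
 */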
1201 
1202 extern int sysctl_perf_event_paranoid;
1203 extern int sysctl_perf_event_mlock;
1204 extern int sysctl_perf_event_sample_rate;
1205 extern int sysctl_perf_cpu_time_max_percent;
1206 
1207 extern void perf_sample_event_took(u64 sample_len_ns);
1208 
1209 extern int perf_proc_update_handler(struct ctl_table *table, int write,
1210 		void __user *buffer, size_t *lenp,
1211 		loff_t *ppos);
1212 extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
1213 		void __user *buffer, size_t *lenp,
1214 		loff_t *ppos);
1215 
1216 int perf_event_max_stack_handler(struct ctl_table *table, int write,
1217 				 void __user *buffer, size_t *lenp, loff_t *ppos);
1218 
1219 static inline bool perf_paranoid_tracepoint_raw(void)
1220 {
1221 	return sysctl_perf_event_paranoid > -1;
1222 }
1223 
1224 static inline bool perf_paranoid_cpu(void)
1225 {
1226 	return sysctl_perf_event_paranoid > 0;
1227 }
1228 
1229 static inline bool perf_paranoid_kernel(void)
1230 {
1231 	return sysctl_perf_event_paranoid > 1;
1232 }
1233 
1234 extern void perf_event_init(void);
1235 extern void perf_tp_event(u16 event_type, u64 count, void *record,
1236 			  int entry_size, struct pt_regs *regs,
1237 			  struct hlist_head *head, int rctx,
1238 			  struct task_struct *task);
1239 extern void perf_bp_event(struct perf_event *event, void *data);
1240 
1241 #ifndef perf_misc_flags
1242 # define perf_misc_flags(regs) \
1243 		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
1244 # define perf_instruction_pointer(regs)	instruction_pointer(regs)
1245 #endif
1246 #ifndef perf_arch_bpf_user_pt_regs
1247 # define perf_arch_bpf_user_pt_regs(regs) regs
1248 #endif
1249 
1250 static inline bool has_branch_stack(struct perf_event *event)
1251 {
1252 	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
1253 }
1254 
1255 static inline bool needs_branch_stack(struct perf_event *event)
1256 {
1257 	return event->attr.branch_sample_type != 0;
1258 }
1259 
1260 static inline bool has_aux(struct perf_event *event)
1261 {
1262 	return event->pmu->setup_aux;
1263 }
1264 
1265 static inline bool is_write_backward(struct perf_event *event)
1266 {
1267 	return !!event->attr.write_backward;
1268 }
1269 
1270 static inline bool has_addr_filter(struct perf_event *event)
1271 {
1272 	return event->pmu->nr_addr_filters;
1273 }
1274 
1275 /*
1276  * An inherited event uses its parent's filters.
1277  */
1278 static inline struct perf_addr_filters_head *
1279 perf_event_addr_filters(struct perf_event *event)
1280 {
1281 	struct perf_addr_filters_head *ifh = &event->addr_filters;
1282 
1283 	if (event->parent)
1284 		ifh = &event->parent->addr_filters;
1285 
1286 	return ifh;
1287 }
1288 
1289 extern void perf_event_addr_filters_sync(struct perf_event *event);
1290 
1291 extern int perf_output_begin(struct perf_output_handle *handle,
1292 			     struct perf_event *event, unsigned int size);
1293 extern int perf_output_begin_forward(struct perf_output_handle *handle,
1294 				    struct perf_event *event,
1295 				    unsigned int size);
1296 extern int perf_output_begin_backward(struct perf_output_handle *handle,
1297 				      struct perf_event *event,
1298 				      unsigned int size);
1299 
1300 extern void perf_output_end(struct perf_output_handle *handle);
1301 extern unsigned int perf_output_copy(struct perf_output_handle *handle,
1302 			     const void *buf, unsigned int len);
1303 extern unsigned int perf_output_skip(struct perf_output_handle *handle,
1304 				     unsigned int len);
1305 extern int perf_swevent_get_recursion_context(void);
1306 extern void perf_swevent_put_recursion_context(int rctx);
1307 extern u64 perf_swevent_set_period(struct perf_event *event);
1308 extern void perf_event_enable(struct perf_event *event);
1309 extern void perf_event_disable(struct perf_event *event);
1310 extern void perf_event_disable_local(struct perf_event *event);
1311 extern void perf_event_disable_inatomic(struct perf_event *event);
1312 extern void perf_event_task_tick(void);
1313 extern int perf_event_account_interrupt(struct perf_event *event);
1314 #else /* !CONFIG_PERF_EVENTS: */
1315 static inline void *
1316 perf_aux_output_begin(struct perf_output_handle *handle,
1317 		      struct perf_event *event)				{ return NULL; }
1318 static inline void
1319 perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
1320 									{ }
1321 static inline int
1322 perf_aux_output_skip(struct perf_output_handle *handle,
1323 		     unsigned long size)				{ return -EINVAL; }
1324 static inline void *
1325 perf_get_aux(struct perf_output_handle *handle)				{ return NULL; }
1326 static inline void
1327 perf_event_task_migrate(struct task_struct *task)			{ }
1328 static inline void
1329 perf_event_task_sched_in(struct task_struct *prev,
1330 			 struct task_struct *task)			{ }
1331 static inline void
1332 perf_event_task_sched_out(struct task_struct *prev,
1333 			  struct task_struct *next)			{ }
1334 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
1335 static inline void perf_event_exit_task(struct task_struct *child)	{ }
1336 static inline void perf_event_free_task(struct task_struct *task)	{ }
1337 static inline void perf_event_delayed_put(struct task_struct *task)	{ }
1338 static inline struct file *perf_event_get(unsigned int fd)	{ return ERR_PTR(-EINVAL); }
1339 static inline const struct perf_event *perf_get_event(struct file *file)
1340 {
1341 	return ERR_PTR(-EINVAL);
1342 }
1343 static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
1344 {
1345 	return ERR_PTR(-EINVAL);
1346 }
1347 static inline int perf_event_read_local(struct perf_event *event, u64 *value,
1348 					u64 *enabled, u64 *running)
1349 {
1350 	return -EINVAL;
1351 }
1352 static inline void perf_event_print_debug(void)				{ }
1353 static inline int perf_event_task_disable(void)				{ return -EINVAL; }
1354 static inline int perf_event_task_enable(void)				{ return -EINVAL; }
1355 static inline int perf_event_refresh(struct perf_event *event, int refresh)
1356 {
1357 	return -EINVAL;
1358 }
1359 
1360 static inline void
1361 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
1362 static inline void
1363 perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
1364 static inline void
1365 perf_bp_event(struct perf_event *event, void *data)			{ }
1366 
1367 static inline int perf_register_guest_info_callbacks
1368 (struct perf_guest_info_callbacks *callbacks)				{ return 0; }
1369 static inline int perf_unregister_guest_info_callbacks
1370 (struct perf_guest_info_callbacks *callbacks)				{ return 0; }
1371 
1372 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
1373 
1374 typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
1375 static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
1376 				      bool unregister, const char *sym)	{ }
1377 static inline void perf_event_bpf_event(struct bpf_prog *prog,
1378 					enum perf_bpf_event_type type,
1379 					u16 flags)			{ }
1380 static inline void perf_event_exec(void)				{ }
1381 static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
1382 static inline void perf_event_namespaces(struct task_struct *tsk)	{ }
1383 static inline void perf_event_fork(struct task_struct *tsk)		{ }
1384 static inline void perf_event_init(void)				{ }
1385 static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
1386 static inline void perf_swevent_put_recursion_context(int rctx)		{ }
1387 static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
1388 static inline void perf_event_enable(struct perf_event *event)		{ }
1389 static inline void perf_event_disable(struct perf_event *event)		{ }
1390 static inline int __perf_event_disable(void *info)			{ return -1; }
1391 static inline void perf_event_task_tick(void)				{ }
1392 static inline int perf_event_release_kernel(struct perf_event *event)	{ return 0; }
1393 #endif
1394 
1395 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
1396 extern void perf_restore_debug_store(void);
1397 #else
1398 static inline void perf_restore_debug_store(void)			{ }
1399 #endif
1400 
1401 static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
1402 {
1403 	return frag->pad < sizeof(u64);
1404 }
1405 
1406 #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
1407 
1408 struct perf_pmu_events_attr {
1409 	struct device_attribute attr;
1410 	u64 id;
1411 	const char *event_str;
1412 };
1413 
1414 struct perf_pmu_events_ht_attr {
1415 	struct device_attribute			attr;
1416 	u64					id;
1417 	const char				*event_str_ht;
1418 	const char				*event_str_noht;
1419 };
1420 
1421 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
1422 			      char *page);
1423 
1424 #define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
1425 static struct perf_pmu_events_attr _var = {				\
1426 	.attr = __ATTR(_name, 0444, _show, NULL),			\
1427 	.id   =  _id,							\
1428 };
1429 
1430 #define PMU_EVENT_ATTR_STRING(_name, _var, _str)			    \
1431 static struct perf_pmu_events_attr _var = {				    \
1432 	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
1433 	.id		= 0,						    \
1434 	.event_str	= _str,						    \
1435 };
1436 
1437 #define PMU_FORMAT_ATTR(_name, _format)					\
1438 static ssize_t								\
1439 _name##_show(struct device *dev,					\
1440 			       struct device_attribute *attr,		\
1441 			       char *page)				\
1442 {									\
1443 	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
1444 	return sprintf(page, _format "\n");				\
1445 }									\
1446 									\
1447 static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
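
/*
 * Typical usage (sketch): a driver exposes an event alias and the layout of
 * perf_event_attr::config bits through sysfs; the values are illustrative:
 *
 *	PMU_EVENT_ATTR_STRING(cpu-cycles, evattr_cpu_cycles, "event=0x3c");
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *
 * The resulting attributes are then listed in the pmu::attr_groups arrays.
 */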
1448 
1449 /* Performance counter hotplug functions */
1450 #ifdef CONFIG_PERF_EVENTS
1451 int perf_event_init_cpu(unsigned int cpu);
1452 int perf_event_exit_cpu(unsigned int cpu);
1453 #else
1454 #define perf_event_init_cpu	NULL
1455 #define perf_event_exit_cpu	NULL
1456 #endif
1457 
1458 #endif /* _LINUX_PERF_EVENT_H */
1459