/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <[email protected]>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * Branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[];
};
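
/*
 * Illustrative sketch (not part of this header): walking a branch stack
 * delivered with a sample; entries[0] is the most recent branch. The
 * helper name below is hypothetical.
 */
#if 0
static void sketch_dump_branch_stack(const struct perf_branch_stack *bs)
{
	u64 i;

	for (i = 0; i < bs->nr; i++)	/* most recent first */
		pr_info("branch %llu: 0x%llx -> 0x%llx\n",
			i, bs->entries[i].from, bs->entries[i].to);
}
#endif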

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

struct event_constraint;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;

			struct event_constraint *constraint;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			struct task_struct	*tp_target;
			/* for tp_event->class */
			struct list_head	tp_list;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct task_struct		*bp_target;
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
	};
	int				state;
	local64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	local64_t			period_left;
	u64				interrupts_seq;
	u64				interrupts;

	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04 /* reserved for arch-specific use */
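
/*
 * Illustrative sketch (not part of this header): how a hypothetical
 * driver's pmu::stop() typically manages these flags; PERF_EF_UPDATE is
 * defined with struct pmu below.
 */
#if 0
static void sketch_pmu_stop(struct perf_event *event, int flags)
{
	/* ... stop the hardware counter ... */
	event->hw.state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
		/* ... fold the hardware count into event->count ... */
		event->hw.state |= PERF_HES_UPTODATE;
	}
}
#endif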

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT		0x01

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/*
	 * Fully disable/enable this PMU; can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try to initialize the event for this PMU.
	 * Should return -ENOENT when @event doesn't match this PMU.
	 */
	int (*event_init)		(struct perf_event *event);

#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/removes a counter to/from the PMU; can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0; ->start() will be used to continue.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group event scheduling is treated as a transaction: add the
	 * group's events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction; after this, ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)		(struct pmu *pmu); /* optional */
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)		(struct pmu *pmu); /* optional */
	/*
	 * Will cancel the transaction; assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)		(struct pmu *pmu); /* optional */

	/*
	 * Will return the value for perf_event_mmap_page::index for this
	 * event; if no implementation is provided it will default to
	 * event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * flush branch stack on context-switches (needed in cpu-wide mode)
	 */
	void (*flush_branch_stack)	(void);
};
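
/*
 * Illustrative sketch (not part of this header): how the generic code is
 * expected to drive the transaction interface when scheduling an event
 * group; the function is hypothetical and the error handling simplified.
 */
#if 0
static int sketch_group_sched_in(struct pmu *pmu, struct perf_event *leader)
{
	struct perf_event *event, *partial = NULL;

	if (pmu->start_txn)
		pmu->start_txn(pmu);	/* ->add() may skip its schedulability test */

	if (pmu->add(leader, PERF_EF_START))
		goto cancel;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (pmu->add(event, PERF_EF_START)) {
			partial = event;
			goto undo;
		}
	}

	if (!pmu->commit_txn || !pmu->commit_txn(pmu))
		return 0;		/* one test covered the whole group */

undo:
	/* ->cancel_txn() assumes ->del() for each successful ->add() */
	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (event == partial)
			break;
		pmu->del(event, 0);
	}
	pmu->del(leader, 0);
cancel:
	if (pmu->cancel_txn)
		pmu->cancel_txn(pmu);
	return -EAGAIN;
}
#endif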

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE		= 0x1,
};

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04

struct perf_cgroup;
struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock;
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * XXX: group_entry and sibling_list should be mutually exclusive;
	 * either you're a sibling in a group, or you're the group leader.
	 * Rework the code to always use the same list element.
	 *
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		group_entry;
	struct list_head		sibling_list;

	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group intact, which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct ring_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup this event is attached to */
	int				cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	int				nr_cgroups;	 /* cgroup evts */
	int				nr_branch_stack; /* branch_stack evt */
	struct rcu_head			rcu_head;

	struct delayed_work		orphans_remove;
	bool				orphans_remove_sched;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	struct pmu			*unique_pmu;
	struct perf_cgroup		*cgrp;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	void				*addr;
	int				page;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				struct task_struct *task,
				perf_overflow_handler_t callback,
				void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				int src_cpu, int dst_cpu);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
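
/*
 * Illustrative sketch (not part of this header): creating a kernel-side
 * cycle counter with an overflow callback via the API above. The names
 * and the period are hypothetical; the call returns an ERR_PTR() on
 * failure.
 */
#if 0
static void sketch_overflow(struct perf_event *event,
			    struct perf_sample_data *data,
			    struct pt_regs *regs)
{
	pr_info("event %llu overflowed\n", event->id);
}

static struct perf_event *sketch_cycle_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.sample_period	= 1000000,
	};

	/* task == NULL: count on @cpu regardless of which task runs */
	return perf_event_create_kernel_counter(&attr, cpu, NULL,
						sketch_overflow, NULL);
}
#endif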

struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), grouped so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	u64				weight;
	u64				txn;
	union  perf_mem_data_src	data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	struct perf_callchain_entry	*callchain;

	/*
	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
	 * on arch details.
	 */
	struct perf_regs		regs_user;
	struct pt_regs			regs_user_copy;

	struct perf_regs		regs_intr;
	u64				stack_user_size;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		    PERF_MEM_S(LVL, NA)   |\
		    PERF_MEM_S(SNOOP, NA) |\
		    PERF_MEM_S(LOCK, NA)  |\
		    PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
				 struct perf_sample_data *data,
				 struct pt_regs *regs);
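
/*
 * Illustrative sketch (not part of this header): a hypothetical PMU
 * interrupt handler feeding a sample into the generic overflow path.
 */
#if 0
static void sketch_handle_pmi(struct perf_event *event, struct pt_regs *regs)
{
	struct perf_sample_data data;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/* a !0 return asks the driver to stop the event */
	if (perf_event_overflow(event, &data, regs))
		event->pmu->stop(event, 0);
}
#endif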

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}
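
/*
 * Illustrative sketch (not part of this header): emitting a software
 * event from a fault path; the call compiles to a NOP until the
 * corresponding static key is enabled by the first user.
 */
#if 0
static void sketch_count_page_fault(struct pt_regs *regs, unsigned long address)
{
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
}
#endif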

DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler; it hard-assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id])) {
		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

		perf_fetch_caller_regs(regs);
		___perf_sw_event(event_id, nr, regs, addr);
	}
}

extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}
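
/*
 * Illustrative sketch (not part of this header): the shape of an arch's
 * perf_callchain_kernel(), storing one ip per unwound frame; the unwind
 * loop itself is elided.
 */
#if 0
void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	perf_callchain_store(entry, instruction_pointer(regs));
	/* ... unwind the stack, calling perf_callchain_store() per frame ... */
}
#endif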

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}
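
/*
 * Illustrative sketch (not part of this header): a hypothetical
 * event_init-style permission check built on the helpers above.
 */
#if 0
static int sketch_check_paranoid(struct perf_event *event)
{
	/* e.g. gate kernel-mode profiling behind CAP_SYS_ADMIN */
	if (!event->attr.exclude_kernel && perf_paranoid_kernel() &&
	    !capable(CAP_SYS_ADMIN))
		return -EACCES;

	return 0;
}
#endif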

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
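
/*
 * Illustrative sketch (not part of this header): emitting a record
 * through an output handle; the record layout here is hypothetical.
 */
#if 0
static int sketch_emit_record(struct perf_event *event, u64 payload)
{
	struct perf_output_handle handle;
	struct perf_event_header header = {
		.type = PERF_RECORD_SAMPLE,
		.size = sizeof(header) + sizeof(payload),
	};
	int ret;

	ret = perf_output_begin(&handle, event, header.size);
	if (ret)
		return ret;	/* no room in the ring buffer */

	perf_output_copy(&handle, &header, sizeof(header));
	perf_output_copy(&handle, &payload, sizeof(payload));
	perf_output_end(&handle);
	return 0;
}
#endif
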
#else /* !CONFIG_PERF_EVENTS: */
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_exec(void)				{ }
static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline int __perf_event_disable(void *info)			{ return -1; }
static inline void perf_event_task_tick(void)				{ }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
extern bool perf_event_can_stop_tick(void);
#else
static inline bool perf_event_can_stop_tick(void)			{ return true; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)			{ }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched/core.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	unsigned long cpu = smp_processor_id();				\
	unsigned long flags;						\
									\
	cpu_notifier_register_begin();					\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)cpu);				\
	local_irq_save(flags);						\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)cpu);				\
	local_irq_restore(flags);					\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)cpu);				\
	__register_cpu_notifier(&fn##_nb);				\
	cpu_notifier_register_done();					\
} while (0)

/*
 * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
 * callback for already online CPUs.
 */
#define __perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
									\
	__register_cpu_notifier(&fn##_nb);				\
} while (0)
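
/*
 * Illustrative sketch (not part of this header): a hypothetical driver
 * registering a hotplug notifier; perf_cpu_notifier() replays
 * CPU_UP_PREPARE/CPU_STARTING/CPU_ONLINE for the current CPU.
 */
#if 0
static int sketch_cpu_notify(struct notifier_block *nb,
			     unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* allocate per-cpu state for (long)hcpu */
		break;
	case CPU_ONLINE:
		/* start counting on the new cpu */
		break;
	}
	return NOTIFY_OK;
}

static void __init sketch_init(void)
{
	perf_cpu_notifier(sketch_cpu_notify);
}
#endif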

struct perf_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
	const char *event_str;
};

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   =  _id,							\
};

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
			       struct device_attribute *attr,		\
			       char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
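
/*
 * Illustrative sketch (not part of this header): how a hypothetical PMU
 * driver uses these helpers for its sysfs "format" and "events" files.
 */
#if 0
PMU_FORMAT_ATTR(event, "config:0-7");	/* .../<pmu>/format/event */

static ssize_t sketch_event_show(struct device *dev,
				 struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *ea =
		container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%02llx\n", ea->id);
}

PMU_EVENT_ATTR(cycles, sketch_attr_cycles, 0x3c, sketch_event_show);
#endif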

#endif /* _LINUX_PERF_EVENT_H */