1 /*
2  * Performance events:
3  *
4  *    Copyright (C) 2008-2009, Thomas Gleixner <[email protected]>
5  *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
6  *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
7  *
8  * Data type definitions, declarations, prototypes.
9  *
10  *    Started by: Thomas Gleixner and Ingo Molnar
11  *
12  * For licencing details see kernel-base/COPYING
13  */
14 #ifndef _LINUX_PERF_EVENT_H
15 #define _LINUX_PERF_EVENT_H
16 
17 #include <uapi/linux/perf_event.h>
18 
19 /*
20  * Kernel-internal data types and definitions:
21  */
22 
23 #ifdef CONFIG_PERF_EVENTS
24 # include <asm/perf_event.h>
25 # include <asm/local64.h>
26 #endif
27 
28 struct perf_guest_info_callbacks {
29 	int				(*is_in_guest)(void);
30 	int				(*is_user_mode)(void);
31 	unsigned long			(*get_guest_ip)(void);
32 };
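
/*
 * Illustrative sketch (an assumption, not part of the original header): a
 * hypervisor such as KVM would typically fill in one of these structures and
 * hand it to perf_register_guest_info_callbacks(), declared further down.
 * The kvm_*() helpers below are hypothetical names used only for
 * illustration:
 *
 *	static struct perf_guest_info_callbacks kvm_guest_cbs = {
 *		.is_in_guest	= kvm_is_in_guest,
 *		.is_user_mode	= kvm_is_user_mode,
 *		.get_guest_ip	= kvm_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&kvm_guest_cbs);
 */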
33 
34 #ifdef CONFIG_HAVE_HW_BREAKPOINT
35 #include <asm/hw_breakpoint.h>
36 #endif
37 
38 #include <linux/list.h>
39 #include <linux/mutex.h>
40 #include <linux/rculist.h>
41 #include <linux/rcupdate.h>
42 #include <linux/spinlock.h>
43 #include <linux/hrtimer.h>
44 #include <linux/fs.h>
45 #include <linux/pid_namespace.h>
46 #include <linux/workqueue.h>
47 #include <linux/ftrace.h>
48 #include <linux/cpu.h>
49 #include <linux/irq_work.h>
50 #include <linux/static_key.h>
51 #include <linux/jump_label_ratelimit.h>
52 #include <linux/atomic.h>
53 #include <linux/sysfs.h>
54 #include <linux/perf_regs.h>
56 #include <linux/cgroup.h>
57 #include <asm/local.h>
58 
59 struct perf_callchain_entry {
60 	__u64				nr;
61 	__u64				ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
62 };
63 
64 struct perf_callchain_entry_ctx {
65 	struct perf_callchain_entry *entry;
66 	u32			    max_stack;
67 	u32			    nr;
68 	short			    contexts;
69 	bool			    contexts_maxed;
70 };
71 
72 typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
73 				     unsigned long off, unsigned long len);
74 
75 struct perf_raw_frag {
76 	union {
77 		struct perf_raw_frag	*next;
78 		unsigned long		pad;
79 	};
80 	perf_copy_f			copy;
81 	void				*data;
82 	u32				size;
83 } __packed;
84 
85 struct perf_raw_record {
86 	struct perf_raw_frag		frag;
87 	u32				size;
88 };
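
/*
 * Illustrative sketch (an assumption, not part of the original header): a
 * caller attaching raw data to a sample typically builds a single-fragment
 * record (a NULL ->copy means the fragment data is copied verbatim) and
 * points perf_sample_data::raw at it; "buf" and "len" are hypothetical:
 *
 *	struct perf_sample_data data;
 *	struct perf_raw_record raw = {
 *		.frag = {
 *			.copy	= NULL,
 *			.data	= buf,
 *			.size	= len,
 *		},
 *	};
 *
 *	perf_sample_data_init(&data, 0, 0);
 *	data.raw = &raw;
 */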
89 
90 /*
91  * branch stack layout:
92  *  nr: number of taken branches stored in entries[]
93  *
94  * Note that nr can vary from sample to sample.
95  * Branches (to, from) are stored from most recent
96  * to least recent, i.e., entries[0] contains the most
97  * recent branch.
98  */
99 struct perf_branch_stack {
100 	__u64				nr;
101 	struct perf_branch_entry	entries[0];
102 };
103 
104 struct task_struct;
105 
106 /*
107  * extra PMU register associated with an event
108  */
109 struct hw_perf_event_extra {
110 	u64		config;	/* register value */
111 	unsigned int	reg;	/* register address or index */
112 	int		alloc;	/* extra register already allocated */
113 	int		idx;	/* index in shared_regs->regs[] */
114 };
115 
116 /**
117  * struct hw_perf_event - performance event hardware details:
118  */
119 struct hw_perf_event {
120 #ifdef CONFIG_PERF_EVENTS
121 	union {
122 		struct { /* hardware */
123 			u64		config;
124 			u64		last_tag;
125 			unsigned long	config_base;
126 			unsigned long	event_base;
127 			int		event_base_rdpmc;
128 			int		idx;
129 			int		last_cpu;
130 			int		flags;
131 
132 			struct hw_perf_event_extra extra_reg;
133 			struct hw_perf_event_extra branch_reg;
134 		};
135 		struct { /* software */
136 			struct hrtimer	hrtimer;
137 		};
138 		struct { /* tracepoint */
139 			/* for tp_event->class */
140 			struct list_head	tp_list;
141 		};
142 		struct { /* amd_power */
143 			u64	pwr_acc;
144 			u64	ptsc;
145 		};
146 #ifdef CONFIG_HAVE_HW_BREAKPOINT
147 		struct { /* breakpoint */
148 			/*
149 			 * Crufty hack to avoid the chicken and egg
150 			 * problem hw_breakpoint has with context
151 			 * creation and event initialization.
152 			 */
153 			struct arch_hw_breakpoint	info;
154 			struct list_head		bp_list;
155 		};
156 #endif
157 		struct { /* amd_iommu */
158 			u8	iommu_bank;
159 			u8	iommu_cntr;
160 			u16	padding;
161 			u64	conf;
162 			u64	conf1;
163 		};
164 	};
165 	/*
166 	 * If the event is a per task event, this will point to the task in
167 	 * question. See the comment in perf_event_alloc().
168 	 */
169 	struct task_struct		*target;
170 
171 	/*
172 	 * PMU would store hardware filter configuration
173 	 * here.
174 	 */
175 	void				*addr_filters;
176 
177 	/* Last sync'ed generation of filters */
178 	unsigned long			addr_filters_gen;
179 
180 /*
181  * hw_perf_event::state flags; used to track the PERF_EF_* state.
182  */
183 #define PERF_HES_STOPPED	0x01 /* the counter is stopped */
184 #define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
185 #define PERF_HES_ARCH		0x04
186 
187 	int				state;
188 
189 	/*
190 	 * The last observed hardware counter value, updated with a
191 	 * local64_cmpxchg() such that pmu::read() can be called nested.
192 	 */
193 	local64_t			prev_count;
194 
195 	/*
196 	 * The period to start the next sample with.
197 	 */
198 	u64				sample_period;
199 
200 	/*
201 	 * The period we started this sample with.
202 	 */
203 	u64				last_period;
204 
205 	/*
206 	 * However much is left of the current period; note that this is
207 	 * a full 64bit value and allows for generation of periods longer
208 	 * than hardware might allow.
209 	 */
210 	local64_t			period_left;
211 
212 	/*
213 	 * State for throttling the event, see __perf_event_overflow() and
214 	 * perf_adjust_freq_unthr_context().
215 	 */
216 	u64                             interrupts_seq;
217 	u64				interrupts;
218 
219 	/*
220 	 * State for freq target events, see __perf_event_overflow() and
221 	 * perf_adjust_freq_unthr_context().
222 	 */
223 	u64				freq_time_stamp;
224 	u64				freq_count_stamp;
225 #endif
226 };
227 
228 struct perf_event;
229 
230 /*
231  * Common implementation detail of pmu::{start,commit,cancel}_txn
232  */
233 #define PERF_PMU_TXN_ADD  0x1		/* txn to add/schedule event on PMU */
234 #define PERF_PMU_TXN_READ 0x2		/* txn to read event group from PMU */
235 
236 /**
237  * pmu::capabilities flags
238  */
239 #define PERF_PMU_CAP_NO_INTERRUPT		0x01
240 #define PERF_PMU_CAP_NO_NMI			0x02
241 #define PERF_PMU_CAP_AUX_NO_SG			0x04
242 #define PERF_PMU_CAP_AUX_SW_DOUBLEBUF		0x08
243 #define PERF_PMU_CAP_EXCLUSIVE			0x10
244 #define PERF_PMU_CAP_ITRACE			0x20
245 #define PERF_PMU_CAP_HETEROGENEOUS_CPUS		0x40
246 
247 /**
248  * struct pmu - generic performance monitoring unit
249  */
250 struct pmu {
251 	struct list_head		entry;
252 
253 	struct module			*module;
254 	struct device			*dev;
255 	const struct attribute_group	**attr_groups;
256 	const char			*name;
257 	int				type;
258 
259 	/*
260 	 * various common per-pmu feature flags
261 	 */
262 	int				capabilities;
263 
264 	int * __percpu			pmu_disable_count;
265 	struct perf_cpu_context * __percpu pmu_cpu_context;
266 	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
267 	int				task_ctx_nr;
268 	int				hrtimer_interval_ms;
269 
270 	/* number of address filters this PMU can do */
271 	unsigned int			nr_addr_filters;
272 
273 	/*
274 	 * Fully disable/enable this PMU, can be used to protect from the PMI
275 	 * as well as for lazy/batch writing of the MSRs.
276 	 */
277 	void (*pmu_enable)		(struct pmu *pmu); /* optional */
278 	void (*pmu_disable)		(struct pmu *pmu); /* optional */
279 
280 	/*
281 	 * Try and initialize the event for this PMU.
282 	 *
283 	 * Returns:
284 	 *  -ENOENT	-- @event is not for this PMU
285 	 *
286 	 *  -ENODEV	-- @event is for this PMU but PMU not present
287 	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
288 	 *  -EINVAL	-- @event is for this PMU but @event is not valid
289 	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
290 	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
291 	 *
292 	 *  0		-- @event is for this PMU and valid
293 	 *
294 	 * Other error return values are allowed.
295 	 */
296 	int (*event_init)		(struct perf_event *event);
297 
298 	/*
299 	 * Notification that the event was mapped or unmapped.  Called
300 	 * in the context of the mapping task.
301 	 */
302 	void (*event_mapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
303 	void (*event_unmapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
304 
305 	/*
306 	 * Flags for ->add()/->del()/->start()/->stop(). There are
307 	 * matching hw_perf_event::state flags.
308 	 */
309 #define PERF_EF_START	0x01		/* start the counter when adding    */
310 #define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
311 #define PERF_EF_UPDATE	0x04		/* update the counter when stopping */
312 
313 	/*
314 	 * Adds/Removes a counter to/from the PMU; this can be done inside a
315 	 * transaction, see the ->*_txn() methods.
316 	 *
317 	 * The add/del callbacks will reserve all hardware resources required
318 	 * to service the event; this includes any counter constraint
319 	 * scheduling, etc.
320 	 *
321 	 * Called with IRQs disabled and the PMU disabled on the CPU the event
322 	 * is on.
323 	 *
324 	 * ->add() called without PERF_EF_START should result in the same state
325 	 *  as ->add() followed by ->stop().
326 	 *
327 	 * ->del() must always stop the event as if PERF_EF_UPDATE had been
328 	 *  passed. If it calls ->stop(), that ->stop() must deal with already
329 	 *  being stopped without PERF_EF_UPDATE.
330 	 */
331 	int  (*add)			(struct perf_event *event, int flags);
332 	void (*del)			(struct perf_event *event, int flags);
333 
334 	/*
335 	 * Starts/Stops a counter present on the PMU.
336 	 *
337 	 * The PMI handler should stop the counter when perf_event_overflow()
338 	 * returns !0. ->start() will be used to continue.
339 	 *
340 	 * Also used to change the sample period.
341 	 *
342 	 * Called with IRQs disabled and the PMU disabled on the CPU the event
343 	 * is on -- will be called from NMI context when the PMU generates
344 	 * NMIs.
345 	 *
346 	 * ->stop() with PERF_EF_UPDATE will read the counter and update
347 	 *  period/count values like ->read() would.
348 	 *
349 	 * ->start() with PERF_EF_RELOAD will reprogram the counter
350 	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
351 	 */
352 	void (*start)			(struct perf_event *event, int flags);
353 	void (*stop)			(struct perf_event *event, int flags);
354 
355 	/*
356 	 * Updates the counter value of the event.
357 	 *
358 	 * For sampling capable PMUs this will also update the software period
359 	 * hw_perf_event::period_left field.
360 	 */
361 	void (*read)			(struct perf_event *event);
362 
363 	/*
364 	 * Group event scheduling is treated as a transaction: add the
365 	 * group's events as a whole and perform one schedulability test.
366 	 * If the test fails, roll back the whole group.
367 	 *
368 	 * Start the transaction, after this ->add() doesn't need to
369 	 * do schedulability tests.
370 	 *
371 	 * Optional.
372 	 */
373 	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
374 	/*
375 	 * If ->start_txn() disabled the ->add() schedulability test
376 	 * then ->commit_txn() is required to perform one. On success
377 	 * the transaction is closed. On error the transaction is kept
378 	 * open until ->cancel_txn() is called.
379 	 *
380 	 * Optional.
381 	 */
382 	int  (*commit_txn)		(struct pmu *pmu);
383 	/*
384 	 * Will cancel the transaction, assumes ->del() is called
385 	 * for each successful ->add() during the transaction.
386 	 *
387 	 * Optional.
388 	 */
389 	void (*cancel_txn)		(struct pmu *pmu);
390 
391 	/*
392 	 * Will return the value for perf_event_mmap_page::index for this event,
393 	 * if no implementation is provided it will default to: event->hw.idx + 1.
394 	 */
395 	int (*event_idx)		(struct perf_event *event); /* optional */
396 
397 	/*
398 	 * context-switches callback
399 	 */
400 	void (*sched_task)		(struct perf_event_context *ctx,
401 					bool sched_in);
402 	/*
403 	 * PMU specific data size
404 	 */
405 	size_t				task_ctx_size;
406 
407 
408 	/*
409 	 * Set up pmu-private data structures for an AUX area
410 	 */
411 	void *(*setup_aux)		(int cpu, void **pages,
412 					 int nr_pages, bool overwrite);
413 					/* optional */
414 
415 	/*
416 	 * Free pmu-private AUX data structures
417 	 */
418 	void (*free_aux)		(void *aux); /* optional */
419 
420 	/*
421 	 * Validate address range filters: make sure the HW supports the
422 	 * requested configuration and number of filters; return 0 if the
423 	 * supplied filters are valid, -errno otherwise.
424 	 *
425 	 * Runs in the context of the ioctl()ing process and is not serialized
426 	 * with the rest of the PMU callbacks.
427 	 */
428 	int (*addr_filters_validate)	(struct list_head *filters);
429 					/* optional */
430 
431 	/*
432 	 * Synchronize address range filter configuration:
433 	 * translate hw-agnostic filters into hardware configuration in
434 	 * event::hw::addr_filters.
435 	 *
436 	 * Runs as a part of filter sync sequence that is done in ->start()
437 	 * callback by calling perf_event_addr_filters_sync().
438 	 *
439 	 * May (and should) traverse event::addr_filters::list, for which its
440 	 * caller provides necessary serialization.
441 	 */
442 	void (*addr_filters_sync)	(struct perf_event *event);
443 					/* optional */
444 
445 	/*
446 	 * Filter events for PMU-specific reasons.
447 	 */
448 	int (*filter_match)		(struct perf_event *event); /* optional */
449 };
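
/*
 * Illustrative sketch (an assumption, not part of the original header): a
 * minimal counting-only PMU driver would fill in the mandatory callbacks
 * above and register itself.  The my_pmu_*() helpers are hypothetical;
 * perf_invalid_context requests a per-CPU (non-task) context:
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
 *		.event_init	= my_pmu_event_init,
 *		.add		= my_pmu_add,
 *		.del		= my_pmu_del,
 *		.start		= my_pmu_start,
 *		.stop		= my_pmu_stop,
 *		.read		= my_pmu_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 *
 * Passing -1 as the type asks the core to allocate a dynamic pmu->type.
 */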
450 
451 /**
452  * struct perf_addr_filter - address range filter definition
453  * @entry:	event's filter list linkage
454  * @inode:	object file's inode for file-based filters
455  * @offset:	filter range offset
456  * @size:	filter range size
457  * @range:	1: range, 0: address
458  * @filter:	1: filter/start, 0: stop
459  *
460  * This is a hardware-agnostic filter configuration as specified by the user.
461  */
462 struct perf_addr_filter {
463 	struct list_head	entry;
464 	struct inode		*inode;
465 	unsigned long		offset;
466 	unsigned long		size;
467 	unsigned int		range	: 1,
468 				filter	: 1;
469 };
470 
471 /**
472  * struct perf_addr_filters_head - container for address range filters
473  * @list:	list of filters for this event
474  * @lock:	spinlock that serializes accesses to the @list and event's
475  *		(and its children's) filter generations.
476  * @nr_file_filters:	number of file-based filters
477  *
478  * A child event will use parent's @list (and therefore @lock), so they are
479  * bundled together; see perf_event_addr_filters().
480  */
481 struct perf_addr_filters_head {
482 	struct list_head	list;
483 	raw_spinlock_t		lock;
484 	unsigned int		nr_file_filters;
485 };
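
/*
 * Illustrative example (an assumption, not part of the original header):
 * from user space such filters are usually installed with the
 * PERF_EVENT_IOC_SET_FILTER ioctl using a textual specification, e.g. to
 * trace only 0x2000 bytes starting at offset 0x1000 of an object file:
 *
 *	ioctl(event_fd, PERF_EVENT_IOC_SET_FILTER,
 *	      "filter 0x1000/0x2000@/path/to/binary");
 */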
486 
487 /**
488  * enum perf_event_state - the states of an event
489  */
490 enum perf_event_state {
491 	PERF_EVENT_STATE_DEAD		= -4,
492 	PERF_EVENT_STATE_EXIT		= -3,
493 	PERF_EVENT_STATE_ERROR		= -2,
494 	PERF_EVENT_STATE_OFF		= -1,
495 	PERF_EVENT_STATE_INACTIVE	=  0,
496 	PERF_EVENT_STATE_ACTIVE		=  1,
497 };
498 
499 struct file;
500 struct perf_sample_data;
501 
502 typedef void (*perf_overflow_handler_t)(struct perf_event *,
503 					struct perf_sample_data *,
504 					struct pt_regs *regs);
505 
506 /*
507  * Event capabilities. For event_caps and group_caps.
508  *
509  * PERF_EV_CAP_SOFTWARE: Is a software event.
510  * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
511  * from any CPU in the package where it is active.
512  */
513 #define PERF_EV_CAP_SOFTWARE		BIT(0)
514 #define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)
515 
516 #define SWEVENT_HLIST_BITS		8
517 #define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)
518 
519 struct swevent_hlist {
520 	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
521 	struct rcu_head			rcu_head;
522 };
523 
524 #define PERF_ATTACH_CONTEXT	0x01
525 #define PERF_ATTACH_GROUP	0x02
526 #define PERF_ATTACH_TASK	0x04
527 #define PERF_ATTACH_TASK_DATA	0x08
528 #define PERF_ATTACH_ITRACE	0x10
529 
530 struct perf_cgroup;
531 struct ring_buffer;
532 
533 struct pmu_event_list {
534 	raw_spinlock_t		lock;
535 	struct list_head	list;
536 };
537 
538 /**
539  * struct perf_event - performance event kernel representation:
540  */
541 struct perf_event {
542 #ifdef CONFIG_PERF_EVENTS
543 	/*
544 	 * entry onto perf_event_context::event_list;
545 	 *   modifications require ctx->lock
546 	 *   RCU safe iterations.
547 	 */
548 	struct list_head		event_entry;
549 
550 	/*
551 	 * XXX: group_entry and sibling_list should be mutually exclusive;
552 	 * either you're a sibling on a group, or you're the group leader.
553 	 * Rework the code to always use the same list element.
554 	 *
555 	 * Locked for modification by both ctx->mutex and ctx->lock; holding
556 	 * either suffices for read.
557 	 */
558 	struct list_head		group_entry;
559 	struct list_head		sibling_list;
560 
561 	/*
562 	 * We need storage to track the entries in perf_pmu_migrate_context; we
563 	 * cannot use the event_entry because of RCU and we want to keep the
564 	 * group intact, which avoids using the other two entries.
565 	 */
566 	struct list_head		migrate_entry;
567 
568 	struct hlist_node		hlist_entry;
569 	struct list_head		active_entry;
570 	int				nr_siblings;
571 
572 	/* Not serialized. Only written during event initialization. */
573 	int				event_caps;
574 	/* The cumulative AND of all event_caps for events in this group. */
575 	int				group_caps;
576 
577 	struct perf_event		*group_leader;
578 	struct pmu			*pmu;
579 	void				*pmu_private;
580 
581 	enum perf_event_state		state;
582 	unsigned int			attach_state;
583 	local64_t			count;
584 	atomic64_t			child_count;
585 
586 	/*
587 	 * These are the total time in nanoseconds that the event
588 	 * has been enabled (i.e. eligible to run, and the task has
589 	 * been scheduled in, if this is a per-task event)
590 	 * and running (scheduled onto the CPU), respectively.
591 	 */
592 	u64				total_time_enabled;
593 	u64				total_time_running;
594 	u64				tstamp;
595 
596 	/*
597 	 * timestamp shadows the actual context timing but it can
598 	 * be safely used in NMI interrupt context. It reflects the
599 	 * context time as it was when the event was last scheduled in.
600 	 *
601 	 * ctx_time already accounts for ctx->timestamp. Therefore to
602 	 * compute ctx_time for a sample, simply add perf_clock().
603 	 */
604 	u64				shadow_ctx_time;
605 
606 	struct perf_event_attr		attr;
607 	u16				header_size;
608 	u16				id_header_size;
609 	u16				read_size;
610 	struct hw_perf_event		hw;
611 
612 	struct perf_event_context	*ctx;
613 	atomic_long_t			refcount;
614 
615 	/*
616 	 * These accumulate total time (in nanoseconds) that children
617 	 * events have been enabled and running, respectively.
618 	 */
619 	atomic64_t			child_total_time_enabled;
620 	atomic64_t			child_total_time_running;
621 
622 	/*
623 	 * Protect attach/detach and child_list:
624 	 */
625 	struct mutex			child_mutex;
626 	struct list_head		child_list;
627 	struct perf_event		*parent;
628 
629 	int				oncpu;
630 	int				cpu;
631 
632 	struct list_head		owner_entry;
633 	struct task_struct		*owner;
634 
635 	/* mmap bits */
636 	struct mutex			mmap_mutex;
637 	atomic_t			mmap_count;
638 
639 	struct ring_buffer		*rb;
640 	struct list_head		rb_entry;
641 	unsigned long			rcu_batches;
642 	int				rcu_pending;
643 
644 	/* poll related */
645 	wait_queue_head_t		waitq;
646 	struct fasync_struct		*fasync;
647 
648 	/* delayed work for NMIs and such */
649 	int				pending_wakeup;
650 	int				pending_kill;
651 	int				pending_disable;
652 	struct irq_work			pending;
653 
654 	atomic_t			event_limit;
655 
656 	/* address range filters */
657 	struct perf_addr_filters_head	addr_filters;
658 	/* vma address array for file-based filters */
659 	unsigned long			*addr_filters_offs;
660 	unsigned long			addr_filters_gen;
661 
662 	void (*destroy)(struct perf_event *);
663 	struct rcu_head			rcu_head;
664 
665 	struct pid_namespace		*ns;
666 	u64				id;
667 
668 	u64				(*clock)(void);
669 	perf_overflow_handler_t		overflow_handler;
670 	void				*overflow_handler_context;
671 #ifdef CONFIG_BPF_SYSCALL
672 	perf_overflow_handler_t		orig_overflow_handler;
673 	struct bpf_prog			*prog;
674 #endif
675 
676 #ifdef CONFIG_EVENT_TRACING
677 	struct trace_event_call		*tp_event;
678 	struct event_filter		*filter;
679 #ifdef CONFIG_FUNCTION_TRACER
680 	struct ftrace_ops               ftrace_ops;
681 #endif
682 #endif
683 
684 #ifdef CONFIG_CGROUP_PERF
685 	struct perf_cgroup		*cgrp; /* cgroup the event is attached to */
686 #endif
687 
688 	struct list_head		sb_list;
689 #endif /* CONFIG_PERF_EVENTS */
690 };
691 
692 /**
693  * struct perf_event_context - event context structure
694  *
695  * Used as a container for task events and CPU events as well:
696  */
697 struct perf_event_context {
698 	struct pmu			*pmu;
699 	/*
700 	 * Protect the states of the events in the list,
701 	 * nr_active, and the list:
702 	 */
703 	raw_spinlock_t			lock;
704 	/*
705 	 * Protect the list of events.  Locking either mutex or lock
706 	 * is sufficient to ensure the list doesn't change; to change
707 	 * the list you need to lock both the mutex and the spinlock.
708 	 */
709 	struct mutex			mutex;
710 
711 	struct list_head		active_ctx_list;
712 	struct list_head		pinned_groups;
713 	struct list_head		flexible_groups;
714 	struct list_head		event_list;
715 	int				nr_events;
716 	int				nr_active;
717 	int				is_active;
718 	int				nr_stat;
719 	int				nr_freq;
720 	int				rotate_disable;
721 	atomic_t			refcount;
722 	struct task_struct		*task;
723 
724 	/*
725 	 * Context clock, runs when context enabled.
726 	 */
727 	u64				time;
728 	u64				timestamp;
729 
730 	/*
731 	 * These fields let us detect when two contexts have both
732 	 * been cloned (inherited) from a common ancestor.
733 	 */
734 	struct perf_event_context	*parent_ctx;
735 	u64				parent_gen;
736 	u64				generation;
737 	int				pin_count;
738 #ifdef CONFIG_CGROUP_PERF
739 	int				nr_cgroups;	 /* cgroup evts */
740 #endif
741 	void				*task_ctx_data; /* pmu specific data */
742 	struct rcu_head			rcu_head;
743 };
744 
745 /*
746  * Number of contexts where an event can trigger:
747  *	task, softirq, hardirq, nmi.
748  */
749 #define PERF_NR_CONTEXTS	4
750 
751 /**
752  * struct perf_cpu_context - per-CPU event context structure
753  */
754 struct perf_cpu_context {
755 	struct perf_event_context	ctx;
756 	struct perf_event_context	*task_ctx;
757 	int				active_oncpu;
758 	int				exclusive;
759 
760 	raw_spinlock_t			hrtimer_lock;
761 	struct hrtimer			hrtimer;
762 	ktime_t				hrtimer_interval;
763 	unsigned int			hrtimer_active;
764 
765 #ifdef CONFIG_CGROUP_PERF
766 	struct perf_cgroup		*cgrp;
767 	struct list_head		cgrp_cpuctx_entry;
768 #endif
769 
770 	struct list_head		sched_cb_entry;
771 	int				sched_cb_usage;
772 
773 	int				online;
774 };
775 
776 struct perf_output_handle {
777 	struct perf_event		*event;
778 	struct ring_buffer		*rb;
779 	unsigned long			wakeup;
780 	unsigned long			size;
781 	u64				aux_flags;
782 	union {
783 		void			*addr;
784 		unsigned long		head;
785 	};
786 	int				page;
787 };
788 
789 struct bpf_perf_event_data_kern {
790 	struct pt_regs *regs;
791 	struct perf_sample_data *data;
792 	struct perf_event *event;
793 };
794 
795 #ifdef CONFIG_CGROUP_PERF
796 
797 /*
798  * perf_cgroup_info keeps track of time_enabled for a cgroup.
799  * This is a per-cpu dynamically allocated data structure.
800  */
801 struct perf_cgroup_info {
802 	u64				time;
803 	u64				timestamp;
804 };
805 
806 struct perf_cgroup {
807 	struct cgroup_subsys_state	css;
808 	struct perf_cgroup_info	__percpu *info;
809 };
810 
811 /*
812  * Must ensure cgroup is pinned (css_get) before calling
813  * this function. In other words, we cannot call this function
814  * if there is no cgroup event for the current CPU context.
815  */
816 static inline struct perf_cgroup *
817 perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
818 {
819 	return container_of(task_css_check(task, perf_event_cgrp_id,
820 					   ctx ? lockdep_is_held(&ctx->lock)
821 					       : true),
822 			    struct perf_cgroup, css);
823 }
824 #endif /* CONFIG_CGROUP_PERF */
825 
826 #ifdef CONFIG_PERF_EVENTS
827 
828 extern void *perf_aux_output_begin(struct perf_output_handle *handle,
829 				   struct perf_event *event);
830 extern void perf_aux_output_end(struct perf_output_handle *handle,
831 				unsigned long size);
832 extern int perf_aux_output_skip(struct perf_output_handle *handle,
833 				unsigned long size);
834 extern void *perf_get_aux(struct perf_output_handle *handle);
835 extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
836 extern void perf_event_itrace_started(struct perf_event *event);
837 
838 extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
839 extern void perf_pmu_unregister(struct pmu *pmu);
840 
841 extern int perf_num_counters(void);
842 extern const char *perf_pmu_name(void);
843 extern void __perf_event_task_sched_in(struct task_struct *prev,
844 				       struct task_struct *task);
845 extern void __perf_event_task_sched_out(struct task_struct *prev,
846 					struct task_struct *next);
847 extern int perf_event_init_task(struct task_struct *child);
848 extern void perf_event_exit_task(struct task_struct *child);
849 extern void perf_event_free_task(struct task_struct *task);
850 extern void perf_event_delayed_put(struct task_struct *task);
851 extern struct file *perf_event_get(unsigned int fd);
852 extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
853 extern void perf_event_print_debug(void);
854 extern void perf_pmu_disable(struct pmu *pmu);
855 extern void perf_pmu_enable(struct pmu *pmu);
856 extern void perf_sched_cb_dec(struct pmu *pmu);
857 extern void perf_sched_cb_inc(struct pmu *pmu);
858 extern int perf_event_task_disable(void);
859 extern int perf_event_task_enable(void);
860 extern int perf_event_refresh(struct perf_event *event, int refresh);
861 extern void perf_event_update_userpage(struct perf_event *event);
862 extern int perf_event_release_kernel(struct perf_event *event);
863 extern struct perf_event *
864 perf_event_create_kernel_counter(struct perf_event_attr *attr,
865 				int cpu,
866 				struct task_struct *task,
867 				perf_overflow_handler_t callback,
868 				void *context);
869 extern void perf_pmu_migrate_context(struct pmu *pmu,
870 				int src_cpu, int dst_cpu);
871 int perf_event_read_local(struct perf_event *event, u64 *value,
872 			  u64 *enabled, u64 *running);
873 extern u64 perf_event_read_value(struct perf_event *event,
874 				 u64 *enabled, u64 *running);
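
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * counting CPU cycles from kernel code with the API declared above, on the
 * current CPU and without an overflow callback:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.pinned		= 1,
 *	};
 *	struct perf_event *event;
 *	u64 enabled, running, count;
 *
 *	event = perf_event_create_kernel_counter(&attr, smp_processor_id(),
 *						 NULL, NULL, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *
 *	count = perf_event_read_value(event, &enabled, &running);
 *	perf_event_release_kernel(event);
 */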
875 
876 
877 struct perf_sample_data {
878 	/*
879 	 * Fields set by perf_sample_data_init(), grouped so as to
880 	 * minimize the cachelines touched.
881 	 */
882 	u64				addr;
883 	struct perf_raw_record		*raw;
884 	struct perf_branch_stack	*br_stack;
885 	u64				period;
886 	u64				weight;
887 	u64				txn;
888 	union  perf_mem_data_src	data_src;
889 
890 	/*
891 	 * The other fields, optionally {set,used} by
892 	 * perf_{prepare,output}_sample().
893 	 */
894 	u64				type;
895 	u64				ip;
896 	struct {
897 		u32	pid;
898 		u32	tid;
899 	}				tid_entry;
900 	u64				time;
901 	u64				id;
902 	u64				stream_id;
903 	struct {
904 		u32	cpu;
905 		u32	reserved;
906 	}				cpu_entry;
907 	struct perf_callchain_entry	*callchain;
908 
909 	/*
910 	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
911 	 * on arch details.
912 	 */
913 	struct perf_regs		regs_user;
914 	struct pt_regs			regs_user_copy;
915 
916 	struct perf_regs		regs_intr;
917 	u64				stack_user_size;
918 
919 	u64				phys_addr;
920 } ____cacheline_aligned;
921 
922 /* default value for data source */
923 #define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
924 		    PERF_MEM_S(LVL, NA)   |\
925 		    PERF_MEM_S(SNOOP, NA) |\
926 		    PERF_MEM_S(LOCK, NA)  |\
927 		    PERF_MEM_S(TLB, NA))
928 
929 static inline void perf_sample_data_init(struct perf_sample_data *data,
930 					 u64 addr, u64 period)
931 {
932 	/* remaining struct members initialized in perf_prepare_sample() */
933 	data->addr = addr;
934 	data->raw  = NULL;
935 	data->br_stack = NULL;
936 	data->period = period;
937 	data->weight = 0;
938 	data->data_src.val = PERF_MEM_NA;
939 	data->txn = 0;
940 }
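
/*
 * Illustrative sketch (an assumption, not part of the original header): the
 * typical overflow path in a PMU interrupt handler initializes the sample
 * data and lets the core decide whether the event must be stopped; "hwc" is
 * the event's hw_perf_event and my_pmu_stop() is hypothetical:
 *
 *	struct perf_sample_data data;
 *
 *	perf_sample_data_init(&data, 0, hwc->last_period);
 *	if (perf_event_overflow(event, &data, regs))
 *		my_pmu_stop(event, 0);
 */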
941 
942 extern void perf_output_sample(struct perf_output_handle *handle,
943 			       struct perf_event_header *header,
944 			       struct perf_sample_data *data,
945 			       struct perf_event *event);
946 extern void perf_prepare_sample(struct perf_event_header *header,
947 				struct perf_sample_data *data,
948 				struct perf_event *event,
949 				struct pt_regs *regs);
950 
951 extern int perf_event_overflow(struct perf_event *event,
952 				 struct perf_sample_data *data,
953 				 struct pt_regs *regs);
954 
955 extern void perf_event_output_forward(struct perf_event *event,
956 				     struct perf_sample_data *data,
957 				     struct pt_regs *regs);
958 extern void perf_event_output_backward(struct perf_event *event,
959 				       struct perf_sample_data *data,
960 				       struct pt_regs *regs);
961 extern void perf_event_output(struct perf_event *event,
962 			      struct perf_sample_data *data,
963 			      struct pt_regs *regs);
964 
965 static inline bool
966 is_default_overflow_handler(struct perf_event *event)
967 {
968 	if (likely(event->overflow_handler == perf_event_output_forward))
969 		return true;
970 	if (unlikely(event->overflow_handler == perf_event_output_backward))
971 		return true;
972 	return false;
973 }
974 
975 extern void
976 perf_event_header__init_id(struct perf_event_header *header,
977 			   struct perf_sample_data *data,
978 			   struct perf_event *event);
979 extern void
980 perf_event__output_id_sample(struct perf_event *event,
981 			     struct perf_output_handle *handle,
982 			     struct perf_sample_data *sample);
983 
984 extern void
985 perf_log_lost_samples(struct perf_event *event, u64 lost);
986 
987 static inline bool is_sampling_event(struct perf_event *event)
988 {
989 	return event->attr.sample_period != 0;
990 }
991 
992 /*
993  * Return 1 for a software event, 0 for a hardware event
994  */
995 static inline int is_software_event(struct perf_event *event)
996 {
997 	return event->event_caps & PERF_EV_CAP_SOFTWARE;
998 }
999 
1000 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
1001 
1002 extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
1003 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
1004 
1005 #ifndef perf_arch_fetch_caller_regs
1006 static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
1007 #endif
1008 
1009 /*
1010  * Take a snapshot of the regs. Skip ip and frame pointer to
1011  * the nth caller. We only need a few of the regs:
1012  * - ip for PERF_SAMPLE_IP
1013  * - cs for user_mode() tests
1014  * - bp for callchains
1015  * - eflags, for future purposes, just in case
1016  */
1017 static inline void perf_fetch_caller_regs(struct pt_regs *regs)
1018 {
1019 	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
1020 }
1021 
1022 static __always_inline void
1023 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
1024 {
1025 	if (static_key_false(&perf_swevent_enabled[event_id]))
1026 		__perf_sw_event(event_id, nr, regs, addr);
1027 }
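
/*
 * Illustrative example (an assumption, not part of the original header):
 * generic code emits a software event like this, e.g. accounting a page
 * fault from a fault handler, with "regs" and "address" taken from the
 * fault context:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */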
1028 
1029 DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
1030 
1031 /*
1032  * 'Special' version for the scheduler, it hard assumes no recursion,
1033  * which is guaranteed by us not actually scheduling inside other swevents
1034  * because those disable preemption.
1035  */
1036 static __always_inline void
1037 perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
1038 {
1039 	if (static_key_false(&perf_swevent_enabled[event_id])) {
1040 		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
1041 
1042 		perf_fetch_caller_regs(regs);
1043 		___perf_sw_event(event_id, nr, regs, addr);
1044 	}
1045 }
1046 
1047 extern struct static_key_false perf_sched_events;
1048 
1049 static __always_inline bool
1050 perf_sw_migrate_enabled(void)
1051 {
1052 	if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
1053 		return true;
1054 	return false;
1055 }
1056 
1057 static inline void perf_event_task_migrate(struct task_struct *task)
1058 {
1059 	if (perf_sw_migrate_enabled())
1060 		task->sched_migrated = 1;
1061 }
1062 
1063 static inline void perf_event_task_sched_in(struct task_struct *prev,
1064 					    struct task_struct *task)
1065 {
1066 	if (static_branch_unlikely(&perf_sched_events))
1067 		__perf_event_task_sched_in(prev, task);
1068 
1069 	if (perf_sw_migrate_enabled() && task->sched_migrated) {
1070 		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
1071 
1072 		perf_fetch_caller_regs(regs);
1073 		___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
1074 		task->sched_migrated = 0;
1075 	}
1076 }
1077 
1078 static inline void perf_event_task_sched_out(struct task_struct *prev,
1079 					     struct task_struct *next)
1080 {
1081 	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
1082 
1083 	if (static_branch_unlikely(&perf_sched_events))
1084 		__perf_event_task_sched_out(prev, next);
1085 }
1086 
1087 extern void perf_event_mmap(struct vm_area_struct *vma);
1088 extern struct perf_guest_info_callbacks *perf_guest_cbs;
1089 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1090 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1091 
1092 extern void perf_event_exec(void);
1093 extern void perf_event_comm(struct task_struct *tsk, bool exec);
1094 extern void perf_event_namespaces(struct task_struct *tsk);
1095 extern void perf_event_fork(struct task_struct *tsk);
1096 
1097 /* Callchains */
1098 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
1099 
1100 extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
1101 extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
1102 extern struct perf_callchain_entry *
1103 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
1104 		   u32 max_stack, bool crosstask, bool add_mark);
1105 extern int get_callchain_buffers(int max_stack);
1106 extern void put_callchain_buffers(void);
1107 
1108 extern int sysctl_perf_event_max_stack;
1109 extern int sysctl_perf_event_max_contexts_per_stack;
1110 
1111 static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
1112 {
1113 	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
1114 		struct perf_callchain_entry *entry = ctx->entry;
1115 		entry->ip[entry->nr++] = ip;
1116 		++ctx->contexts;
1117 		return 0;
1118 	} else {
1119 		ctx->contexts_maxed = true;
1120 		return -1; /* no more room, stop walking the stack */
1121 	}
1122 }
1123 
1124 static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
1125 {
1126 	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
1127 		struct perf_callchain_entry *entry = ctx->entry;
1128 		entry->ip[entry->nr++] = ip;
1129 		++ctx->nr;
1130 		return 0;
1131 	} else {
1132 		return -1; /* no more room, stop walking the stack */
1133 	}
1134 }
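
/*
 * Illustrative sketch (an assumption, not part of the original header): an
 * architecture's kernel callchain walker stores one entry per frame until
 * it runs out of frames or perf_callchain_store() reports the entry is
 * full.  frame_pointer(), frame_is_valid(), frame_return_address() and
 * next_frame() stand in for arch-specific helpers:
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 *				   struct pt_regs *regs)
 *	{
 *		unsigned long fp = frame_pointer(regs);
 *
 *		perf_callchain_store(entry, instruction_pointer(regs));
 *		while (frame_is_valid(fp)) {
 *			if (perf_callchain_store(entry, frame_return_address(fp)))
 *				break;
 *			fp = next_frame(fp);
 *		}
 *	}
 */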
1135 
1136 extern int sysctl_perf_event_paranoid;
1137 extern int sysctl_perf_event_mlock;
1138 extern int sysctl_perf_event_sample_rate;
1139 extern int sysctl_perf_cpu_time_max_percent;
1140 
1141 extern void perf_sample_event_took(u64 sample_len_ns);
1142 
1143 extern int perf_proc_update_handler(struct ctl_table *table, int write,
1144 		void __user *buffer, size_t *lenp,
1145 		loff_t *ppos);
1146 extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
1147 		void __user *buffer, size_t *lenp,
1148 		loff_t *ppos);
1149 
1150 int perf_event_max_stack_handler(struct ctl_table *table, int write,
1151 				 void __user *buffer, size_t *lenp, loff_t *ppos);
1152 
1153 static inline bool perf_paranoid_tracepoint_raw(void)
1154 {
1155 	return sysctl_perf_event_paranoid > -1;
1156 }
1157 
1158 static inline bool perf_paranoid_cpu(void)
1159 {
1160 	return sysctl_perf_event_paranoid > 0;
1161 }
1162 
1163 static inline bool perf_paranoid_kernel(void)
1164 {
1165 	return sysctl_perf_event_paranoid > 1;
1166 }
1167 
1168 extern void perf_event_init(void);
1169 extern void perf_tp_event(u16 event_type, u64 count, void *record,
1170 			  int entry_size, struct pt_regs *regs,
1171 			  struct hlist_head *head, int rctx,
1172 			  struct task_struct *task);
1173 extern void perf_bp_event(struct perf_event *event, void *data);
1174 
1175 #ifndef perf_misc_flags
1176 # define perf_misc_flags(regs) \
1177 		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
1178 # define perf_instruction_pointer(regs)	instruction_pointer(regs)
1179 #endif
1180 
1181 static inline bool has_branch_stack(struct perf_event *event)
1182 {
1183 	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
1184 }
1185 
1186 static inline bool needs_branch_stack(struct perf_event *event)
1187 {
1188 	return event->attr.branch_sample_type != 0;
1189 }
1190 
1191 static inline bool has_aux(struct perf_event *event)
1192 {
1193 	return event->pmu->setup_aux;
1194 }
1195 
1196 static inline bool is_write_backward(struct perf_event *event)
1197 {
1198 	return !!event->attr.write_backward;
1199 }
1200 
1201 static inline bool has_addr_filter(struct perf_event *event)
1202 {
1203 	return event->pmu->nr_addr_filters;
1204 }
1205 
1206 /*
1207  * An inherited event uses parent's filters
1208  */
1209 static inline struct perf_addr_filters_head *
1210 perf_event_addr_filters(struct perf_event *event)
1211 {
1212 	struct perf_addr_filters_head *ifh = &event->addr_filters;
1213 
1214 	if (event->parent)
1215 		ifh = &event->parent->addr_filters;
1216 
1217 	return ifh;
1218 }
1219 
1220 extern void perf_event_addr_filters_sync(struct perf_event *event);
1221 
1222 extern int perf_output_begin(struct perf_output_handle *handle,
1223 			     struct perf_event *event, unsigned int size);
1224 extern int perf_output_begin_forward(struct perf_output_handle *handle,
1225 				    struct perf_event *event,
1226 				    unsigned int size);
1227 extern int perf_output_begin_backward(struct perf_output_handle *handle,
1228 				      struct perf_event *event,
1229 				      unsigned int size);
1230 
1231 extern void perf_output_end(struct perf_output_handle *handle);
1232 extern unsigned int perf_output_copy(struct perf_output_handle *handle,
1233 			     const void *buf, unsigned int len);
1234 extern unsigned int perf_output_skip(struct perf_output_handle *handle,
1235 				     unsigned int len);
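
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * emitting a record through the output helpers above follows the
 * begin/put/end pattern (perf_output_put() is defined near the end of this
 * header); "payload" is hypothetical:
 *
 *	struct perf_output_handle handle;
 *	struct perf_event_header header = {
 *		.type	= PERF_RECORD_SAMPLE,
 *		.size	= sizeof(header) + sizeof(payload),
 *	};
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;
 *
 *	perf_output_put(&handle, header);
 *	perf_output_put(&handle, payload);
 *	perf_output_end(&handle);
 */
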
1236 extern int perf_swevent_get_recursion_context(void);
1237 extern void perf_swevent_put_recursion_context(int rctx);
1238 extern u64 perf_swevent_set_period(struct perf_event *event);
1239 extern void perf_event_enable(struct perf_event *event);
1240 extern void perf_event_disable(struct perf_event *event);
1241 extern void perf_event_disable_local(struct perf_event *event);
1242 extern void perf_event_disable_inatomic(struct perf_event *event);
1243 extern void perf_event_task_tick(void);
1244 extern int perf_event_account_interrupt(struct perf_event *event);
1245 #else /* !CONFIG_PERF_EVENTS: */
1246 static inline void *
1247 perf_aux_output_begin(struct perf_output_handle *handle,
1248 		      struct perf_event *event)				{ return NULL; }
1249 static inline void
1250 perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
1251 									{ }
1252 static inline int
1253 perf_aux_output_skip(struct perf_output_handle *handle,
1254 		     unsigned long size)				{ return -EINVAL; }
1255 static inline void *
1256 perf_get_aux(struct perf_output_handle *handle)				{ return NULL; }
1257 static inline void
1258 perf_event_task_migrate(struct task_struct *task)			{ }
1259 static inline void
1260 perf_event_task_sched_in(struct task_struct *prev,
1261 			 struct task_struct *task)			{ }
1262 static inline void
1263 perf_event_task_sched_out(struct task_struct *prev,
1264 			  struct task_struct *next)			{ }
1265 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
1266 static inline void perf_event_exit_task(struct task_struct *child)	{ }
1267 static inline void perf_event_free_task(struct task_struct *task)	{ }
1268 static inline void perf_event_delayed_put(struct task_struct *task)	{ }
1269 static inline struct file *perf_event_get(unsigned int fd)	{ return ERR_PTR(-EINVAL); }
1270 static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
1271 {
1272 	return ERR_PTR(-EINVAL);
1273 }
1274 static inline int perf_event_read_local(struct perf_event *event, u64 *value,
1275 					u64 *enabled, u64 *running)
1276 {
1277 	return -EINVAL;
1278 }
1279 static inline void perf_event_print_debug(void)				{ }
1280 static inline int perf_event_task_disable(void)				{ return -EINVAL; }
1281 static inline int perf_event_task_enable(void)				{ return -EINVAL; }
1282 static inline int perf_event_refresh(struct perf_event *event, int refresh)
1283 {
1284 	return -EINVAL;
1285 }
1286 
1287 static inline void
1288 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
1289 static inline void
1290 perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
1291 static inline void
1292 perf_bp_event(struct perf_event *event, void *data)			{ }
1293 
1294 static inline int perf_register_guest_info_callbacks
1295 (struct perf_guest_info_callbacks *callbacks)				{ return 0; }
1296 static inline int perf_unregister_guest_info_callbacks
1297 (struct perf_guest_info_callbacks *callbacks)				{ return 0; }
1298 
1299 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
1300 static inline void perf_event_exec(void)				{ }
1301 static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
1302 static inline void perf_event_namespaces(struct task_struct *tsk)	{ }
1303 static inline void perf_event_fork(struct task_struct *tsk)		{ }
1304 static inline void perf_event_init(void)				{ }
1305 static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
1306 static inline void perf_swevent_put_recursion_context(int rctx)		{ }
1307 static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
1308 static inline void perf_event_enable(struct perf_event *event)		{ }
1309 static inline void perf_event_disable(struct perf_event *event)		{ }
1310 static inline int __perf_event_disable(void *info)			{ return -1; }
1311 static inline void perf_event_task_tick(void)				{ }
1312 static inline int perf_event_release_kernel(struct perf_event *event)	{ return 0; }
1313 #endif
1314 
1315 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
1316 extern void perf_restore_debug_store(void);
1317 #else
1318 static inline void perf_restore_debug_store(void)			{ }
1319 #endif
1320 
1321 static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
1322 {
1323 	return frag->pad < sizeof(u64);
1324 }
1325 
1326 #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
1327 
1328 struct perf_pmu_events_attr {
1329 	struct device_attribute attr;
1330 	u64 id;
1331 	const char *event_str;
1332 };
1333 
1334 struct perf_pmu_events_ht_attr {
1335 	struct device_attribute			attr;
1336 	u64					id;
1337 	const char				*event_str_ht;
1338 	const char				*event_str_noht;
1339 };
1340 
1341 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
1342 			      char *page);
1343 
1344 #define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
1345 static struct perf_pmu_events_attr _var = {				\
1346 	.attr = __ATTR(_name, 0444, _show, NULL),			\
1347 	.id   =  _id,							\
1348 };
1349 
1350 #define PMU_EVENT_ATTR_STRING(_name, _var, _str)			    \
1351 static struct perf_pmu_events_attr _var = {				    \
1352 	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
1353 	.id		= 0,						    \
1354 	.event_str	= _str,						    \
1355 };
1356 
1357 #define PMU_FORMAT_ATTR(_name, _format)					\
1358 static ssize_t								\
1359 _name##_show(struct device *dev,					\
1360 			       struct device_attribute *attr,		\
1361 			       char *page)				\
1362 {									\
1363 	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
1364 	return sprintf(page, _format "\n");				\
1365 }									\
1366 									\
1367 static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
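
/*
 * Illustrative example (an assumption, not part of the original header): a
 * PMU driver typically uses these helpers to expose its config field layout
 * and named events in sysfs:
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *	PMU_EVENT_ATTR_STRING(cycles, evattr_cycles, "event=0x3c");
 *
 * and then lists &format_attr_event.attr and &evattr_cycles.attr.attr in
 * the attribute groups pointed to by pmu::attr_groups.
 */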
1368 
1369 /* Performance counter hotplug functions */
1370 #ifdef CONFIG_PERF_EVENTS
1371 int perf_event_init_cpu(unsigned int cpu);
1372 int perf_event_exit_cpu(unsigned int cpu);
1373 #else
1374 #define perf_event_init_cpu	NULL
1375 #define perf_event_exit_cpu	NULL
1376 #endif
1377 
1378 #endif /* _LINUX_PERF_EVENT_H */
1379