xref: /linux-6.15/include/linux/perf_event.h (revision 9ffc93f2)
1 /*
2  * Performance events:
3  *
4  *    Copyright (C) 2008-2009, Thomas Gleixner <[email protected]>
5  *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
6  *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
7  *
8  * Data type definitions, declarations, prototypes.
9  *
10  *    Started by: Thomas Gleixner and Ingo Molnar
11  *
12  * For licensing details see kernel-base/COPYING
13  */
14 #ifndef _LINUX_PERF_EVENT_H
15 #define _LINUX_PERF_EVENT_H
16 
17 #include <linux/types.h>
18 #include <linux/ioctl.h>
19 #include <asm/byteorder.h>
20 
21 /*
22  * User-space ABI bits:
23  */
24 
25 /*
26  * attr.type
27  */
28 enum perf_type_id {
29 	PERF_TYPE_HARDWARE			= 0,
30 	PERF_TYPE_SOFTWARE			= 1,
31 	PERF_TYPE_TRACEPOINT			= 2,
32 	PERF_TYPE_HW_CACHE			= 3,
33 	PERF_TYPE_RAW				= 4,
34 	PERF_TYPE_BREAKPOINT			= 5,
35 
36 	PERF_TYPE_MAX,				/* non-ABI */
37 };
38 
39 /*
40  * Generalized performance event event_id types, used by the
41  * attr.config parameter of the sys_perf_event_open()
42  * syscall:
43  */
44 enum perf_hw_id {
45 	/*
46 	 * Common hardware events, generalized by the kernel:
47 	 */
48 	PERF_COUNT_HW_CPU_CYCLES		= 0,
49 	PERF_COUNT_HW_INSTRUCTIONS		= 1,
50 	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
51 	PERF_COUNT_HW_CACHE_MISSES		= 3,
52 	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
53 	PERF_COUNT_HW_BRANCH_MISSES		= 5,
54 	PERF_COUNT_HW_BUS_CYCLES		= 6,
55 	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
56 	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
57 	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,
58 
59 	PERF_COUNT_HW_MAX,			/* non-ABI */
60 };
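
/*
 * Illustrative user-space sketch (not part of this header): counting
 * CPU cycles for the calling thread with one of the generalized events
 * above. perf_event_open() typically has no libc wrapper, hence the
 * raw syscall(); error handling is elided.
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <string.h>
 *
 *	static int open_cycles(void)
 *	{
 *		struct perf_event_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.type     = PERF_TYPE_HARDWARE;
 *		attr.size     = sizeof(attr);
 *		attr.config   = PERF_COUNT_HW_CPU_CYCLES;
 *		attr.disabled = 1;
 *
 *		return syscall(__NR_perf_event_open, &attr,
 *			       0, -1, -1, 0);
 *	}
 *
 * The arguments after &attr are pid (0 = this task), cpu (-1 = any),
 * group_fd (-1 = new group) and flags.
 */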
61 
62 /*
63  * Generalized hardware cache events:
64  *
65  *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
66  *       { read, write, prefetch } x
67  *       { accesses, misses }
68  */
69 enum perf_hw_cache_id {
70 	PERF_COUNT_HW_CACHE_L1D			= 0,
71 	PERF_COUNT_HW_CACHE_L1I			= 1,
72 	PERF_COUNT_HW_CACHE_LL			= 2,
73 	PERF_COUNT_HW_CACHE_DTLB		= 3,
74 	PERF_COUNT_HW_CACHE_ITLB		= 4,
75 	PERF_COUNT_HW_CACHE_BPU			= 5,
76 	PERF_COUNT_HW_CACHE_NODE		= 6,
77 
78 	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
79 };
80 
81 enum perf_hw_cache_op_id {
82 	PERF_COUNT_HW_CACHE_OP_READ		= 0,
83 	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
84 	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,
85 
86 	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
87 };
88 
89 enum perf_hw_cache_op_result_id {
90 	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
91 	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,
92 
93 	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
94 };
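
/*
 * A PERF_TYPE_HW_CACHE event is selected by packing the three enums
 * above into attr.config:
 *
 *	config = (perf_hw_cache_id) |
 *		 (perf_hw_cache_op_id << 8) |
 *		 (perf_hw_cache_op_result_id << 16);
 *
 * e.g. L1 data-cache read misses (illustrative):
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */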
95 
96 /*
97  * Special "software" events provided by the kernel, even if the hardware
98  * does not support performance events. These events count various
99  * software conditions in the kernel (and allow profiling them as
100  * well):
101  */
102 enum perf_sw_ids {
103 	PERF_COUNT_SW_CPU_CLOCK			= 0,
104 	PERF_COUNT_SW_TASK_CLOCK		= 1,
105 	PERF_COUNT_SW_PAGE_FAULTS		= 2,
106 	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
107 	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
108 	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
109 	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
110 	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
111 	PERF_COUNT_SW_EMULATION_FAULTS		= 8,
112 
113 	PERF_COUNT_SW_MAX,			/* non-ABI */
114 };
115 
116 /*
117  * Bits that can be set in attr.sample_type to request information
118  * in the overflow packets.
119  */
120 enum perf_event_sample_format {
121 	PERF_SAMPLE_IP				= 1U << 0,
122 	PERF_SAMPLE_TID				= 1U << 1,
123 	PERF_SAMPLE_TIME			= 1U << 2,
124 	PERF_SAMPLE_ADDR			= 1U << 3,
125 	PERF_SAMPLE_READ			= 1U << 4,
126 	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
127 	PERF_SAMPLE_ID				= 1U << 6,
128 	PERF_SAMPLE_CPU				= 1U << 7,
129 	PERF_SAMPLE_PERIOD			= 1U << 8,
130 	PERF_SAMPLE_STREAM_ID			= 1U << 9,
131 	PERF_SAMPLE_RAW				= 1U << 10,
132 	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,
133 
134 	PERF_SAMPLE_MAX = 1U << 12,		/* non-ABI */
135 };
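
/*
 * sample_type is a bitmask of the values above; an illustrative
 * sampling setup recording IP, pid/tid, timestamp and period:
 *
 *	attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 *			     PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD;
 *	attr.sample_period = 100000;
 */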
136 
137 /*
138  * values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK is set
139  *
140  * If the user does not pass priv level information via branch_sample_type,
141  * the kernel uses the event's priv level. Branch and event priv levels do
142  * not have to match. Branch priv level is checked for permissions.
143  *
144  * The branch types can be combined; however, BRANCH_ANY covers all types
145  * of branches and therefore supersedes all the other types.
146  */
147 enum perf_branch_sample_type {
148 	PERF_SAMPLE_BRANCH_USER		= 1U << 0, /* user branches */
149 	PERF_SAMPLE_BRANCH_KERNEL	= 1U << 1, /* kernel branches */
150 	PERF_SAMPLE_BRANCH_HV		= 1U << 2, /* hypervisor branches */
151 
152 	PERF_SAMPLE_BRANCH_ANY		= 1U << 3, /* any branch types */
153 	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << 4, /* any call branch */
154 	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << 5, /* any return branch */
155 	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << 6, /* indirect calls */
156 
157 	PERF_SAMPLE_BRANCH_MAX		= 1U << 7, /* non-ABI */
158 };
159 
160 #define PERF_SAMPLE_BRANCH_PLM_ALL \
161 	(PERF_SAMPLE_BRANCH_USER|\
162 	 PERF_SAMPLE_BRANCH_KERNEL|\
163 	 PERF_SAMPLE_BRANCH_HV)
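
/*
 * Illustrative branch-stack request: all user-level calls. This only
 * takes effect when PERF_SAMPLE_BRANCH_STACK is also set in
 * attr.sample_type:
 *
 *	attr.sample_type	|= PERF_SAMPLE_BRANCH_STACK;
 *	attr.branch_sample_type  = PERF_SAMPLE_BRANCH_USER |
 *				   PERF_SAMPLE_BRANCH_ANY_CALL;
 */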
164 
165 /*
166  * The format of the data returned by read() on a perf event fd,
167  * as specified by attr.read_format:
168  *
169  * struct read_format {
170  *	{ u64		value;
171  *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
172  *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
173  *	  { u64		id;           } && PERF_FORMAT_ID
174  *	} && !PERF_FORMAT_GROUP
175  *
176  *	{ u64		nr;
177  *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
178  *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
179  *	  { u64		value;
180  *	    { u64	id;           } && PERF_FORMAT_ID
181  *	  }		cntr[nr];
182  *	} && PERF_FORMAT_GROUP
183  * };
184  */
185 enum perf_event_read_format {
186 	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
187 	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
188 	PERF_FORMAT_ID				= 1U << 2,
189 	PERF_FORMAT_GROUP			= 1U << 3,
190 
191 	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI */
192 };
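
/*
 * Illustrative user-space read of a single (non-group) event opened
 * with both TOTAL_TIME formats, matching the !PERF_FORMAT_GROUP layout
 * above; the classic idiom for scaling counts of multiplexed events:
 *
 *	struct {
 *		__u64 value;
 *		__u64 time_enabled;
 *		__u64 time_running;
 *	} rf;
 *	__u64 scaled = 0;
 *
 *	if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running)
 *		scaled = rf.value * rf.time_enabled / rf.time_running;
 */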
193 
194 #define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
195 #define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
196 #define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */
197 
198 /*
199  * Hardware event_id to monitor via a performance monitoring event:
200  */
201 struct perf_event_attr {
202 
203 	/*
204 	 * Major type: hardware/software/tracepoint/etc.
205 	 */
206 	__u32			type;
207 
208 	/*
209 	 * Size of the attr structure, for fwd/bwd compat.
210 	 */
211 	__u32			size;
212 
213 	/*
214 	 * Type specific configuration information.
215 	 */
216 	__u64			config;
217 
218 	union {
219 		__u64		sample_period;
220 		__u64		sample_freq;
221 	};
222 
223 	__u64			sample_type;
224 	__u64			read_format;
225 
226 	__u64			disabled       :  1, /* off by default        */
227 				inherit	       :  1, /* children inherit it   */
228 				pinned	       :  1, /* must always be on PMU */
229 				exclusive      :  1, /* only group on PMU     */
230 				exclude_user   :  1, /* don't count user      */
231 				exclude_kernel :  1, /* ditto kernel          */
232 				exclude_hv     :  1, /* ditto hypervisor      */
233 				exclude_idle   :  1, /* don't count when idle */
234 				mmap           :  1, /* include mmap data     */
235 				comm	       :  1, /* include comm data     */
236 				freq           :  1, /* use freq, not period  */
237 				inherit_stat   :  1, /* per task counts       */
238 				enable_on_exec :  1, /* next exec enables     */
239 				task           :  1, /* trace fork/exit       */
240 				watermark      :  1, /* wakeup_watermark      */
241 				/*
242 				 * precise_ip:
243 				 *
244 				 *  0 - SAMPLE_IP can have arbitrary skid
245 				 *  1 - SAMPLE_IP must have constant skid
246 				 *  2 - SAMPLE_IP requested to have 0 skid
247 				 *  3 - SAMPLE_IP must have 0 skid
248 				 *
249 				 *  See also PERF_RECORD_MISC_EXACT_IP
250 				 */
251 				precise_ip     :  2, /* skid constraint       */
252 				mmap_data      :  1, /* non-exec mmap data    */
253 				sample_id_all  :  1, /* sample_type all events */
254 
255 				exclude_host   :  1, /* don't count in host   */
256 				exclude_guest  :  1, /* don't count in guest  */
257 
258 				__reserved_1   : 43;
259 
260 	union {
261 		__u32		wakeup_events;	  /* wakeup every n events */
262 		__u32		wakeup_watermark; /* bytes before wakeup   */
263 	};
264 
265 	__u32			bp_type;
266 	union {
267 		__u64		bp_addr;
268 		__u64		config1; /* extension of config */
269 	};
270 	union {
271 		__u64		bp_len;
272 		__u64		config2; /* extension of config1 */
273 	};
274 	__u64	branch_sample_type; /* enum branch_sample_type */
275 };
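
/*
 * The size field is what makes this struct forward/backward compatible:
 * user-space sets attr.size for the ABI version it was built against;
 * the kernel zero-fills the tail of smaller structs and, for larger
 * ones it doesn't know, fails with E2BIG after writing the size it
 * does support back into attr.size. A sketch of the usual setup:
 *
 *	attr.size = sizeof(attr);	here: PERF_ATTR_SIZE_VER2
 */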
276 
277 /*
278  * Ioctls that can be done on a perf event fd:
279  */
280 #define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
281 #define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
282 #define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
283 #define PERF_EVENT_IOC_RESET		_IO ('$', 3)
284 #define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
285 #define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
286 #define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)
287 
288 enum perf_event_ioc_flags {
289 	PERF_IOC_FLAG_GROUP		= 1U << 0,
290 };
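
/*
 * Typical user-space sequence around a measured region (illustrative;
 * fd is an event descriptor from perf_event_open() and run_workload()
 * is a placeholder):
 *
 *	long long count;
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET,  0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	run_workload();
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *
 * Passing PERF_IOC_FLAG_GROUP as the ioctl argument applies the
 * operation to all events in the group.
 */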
291 
292 /*
293  * Structure of the page that can be mapped via mmap
294  */
295 struct perf_event_mmap_page {
296 	__u32	version;		/* version number of this structure */
297 	__u32	compat_version;		/* lowest version this is compat with */
298 
299 	/*
300 	 * Bits needed to read the hw events in user-space.
301 	 *
302 	 *   u32 seq;
303 	 *   s64 count;
304 	 *
305 	 *   do {
306 	 *     seq = pc->lock;
307 	 *
308 	 *     barrier();
309 	 *     if (pc->index) {
310 	 *       count = pmc_read(pc->index - 1);
311 	 *       count += pc->offset;
312 	 *     } else
313 	 *       goto regular_read;
314 	 *
315 	 *     barrier();
316 	 *   } while (pc->lock != seq);
317 	 *
318 	 * NOTE: for obvious reasons this only works on self-monitoring
319 	 *       processes.
320 	 */
321 	__u32	lock;			/* seqlock for synchronization */
322 	__u32	index;			/* hardware event identifier */
323 	__s64	offset;			/* add to hardware event value */
324 	__u64	time_enabled;		/* time event active */
325 	__u64	time_running;		/* time event on cpu */
326 	__u32	time_mult, time_shift;
327 	__u64	time_offset;
328 
329 		/*
330 		 * Hole for extension of the self monitor capabilities
331 		 */
332 
333 	__u64	__reserved[121];	/* align to 1k */
334 
335 	/*
336 	 * Control data for the mmap() data buffer.
337 	 *
338 	 * User-space reading the @data_head value should issue an rmb(), on
339 	 * SMP capable platforms, after reading this value -- see
340 	 * perf_event_wakeup().
341 	 *
342 	 * When the mapping is PROT_WRITE the @data_tail value should be
343 	 * written by userspace to reflect the last read data. In this case
344 	 * the kernel will not over-write unread data.
345 	 */
346 	__u64   data_head;		/* head in the data section */
347 	__u64	data_tail;		/* user-space written tail */
348 };
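
/*
 * Illustrative user-space consumption of the mmap()ed data area. pc
 * points at this page, data at the page after it; page_size and
 * n_data_pages describe the mapping, and wrap-around of a record
 * across the buffer end is not handled here:
 *
 *	unsigned char *data = (unsigned char *)pc + page_size;
 *	__u64 data_size = n_data_pages * page_size;
 *	__u64 tail = pc->data_tail;
 *	__u64 head = pc->data_head;
 *
 *	rmb();	pairs with the kernel's write barrier, see above
 *
 *	while (tail < head) {
 *		struct perf_event_header *hdr;
 *
 *		hdr   = (void *)(data + (tail & (data_size - 1)));
 *		consume_record(hdr);	placeholder
 *		tail += hdr->size;
 *	}
 *
 *	pc->data_tail = tail;	lets the kernel reuse the space
 */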
349 
350 #define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
351 #define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
352 #define PERF_RECORD_MISC_KERNEL			(1 << 0)
353 #define PERF_RECORD_MISC_USER			(2 << 0)
354 #define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
355 #define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
356 #define PERF_RECORD_MISC_GUEST_USER		(5 << 0)
357 
358 /*
359  * Indicates that the content of PERF_SAMPLE_IP points to
360  * the actual instruction that triggered the event. See also
361  * perf_event_attr::precise_ip.
362  */
363 #define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
364 /*
365  * Reserve the last bit to indicate some extended misc field
366  */
367 #define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)
368 
369 struct perf_event_header {
370 	__u32	type;
371 	__u16	misc;
372 	__u16	size;
373 };
374 
375 enum perf_event_type {
376 
377 	/*
378 	 * If perf_event_attr.sample_id_all is set then all event types will
379 	 * carry the sample_type-selected fields describing where/when
380 	 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID),
381 	 * as described for PERF_RECORD_SAMPLE below. They are stashed just
382 	 * after the perf_event_header and the fields already present for
383 	 * the record type, i.e. at the end of the payload. That way a newer
384 	 * perf.data file will be supported by older perf tools, with these
385 	 * new optional fields being ignored.
386 	 *
387 	 * The MMAP events record the PROT_EXEC mappings so that we can
388 	 * correlate userspace IPs to code. They have the following structure:
389 	 *
390 	 * struct {
391 	 *	struct perf_event_header	header;
392 	 *
393 	 *	u32				pid, tid;
394 	 *	u64				addr;
395 	 *	u64				len;
396 	 *	u64				pgoff;
397 	 *	char				filename[];
398 	 * };
399 	 */
400 	PERF_RECORD_MMAP			= 1,
401 
402 	/*
403 	 * struct {
404 	 *	struct perf_event_header	header;
405 	 *	u64				id;
406 	 *	u64				lost;
407 	 * };
408 	 */
409 	PERF_RECORD_LOST			= 2,
410 
411 	/*
412 	 * struct {
413 	 *	struct perf_event_header	header;
414 	 *
415 	 *	u32				pid, tid;
416 	 *	char				comm[];
417 	 * };
418 	 */
419 	PERF_RECORD_COMM			= 3,
420 
421 	/*
422 	 * struct {
423 	 *	struct perf_event_header	header;
424 	 *	u32				pid, ppid;
425 	 *	u32				tid, ptid;
426 	 *	u64				time;
427 	 * };
428 	 */
429 	PERF_RECORD_EXIT			= 4,
430 
431 	/*
432 	 * struct {
433 	 *	struct perf_event_header	header;
434 	 *	u64				time;
435 	 *	u64				id;
436 	 *	u64				stream_id;
437 	 * };
438 	 */
439 	PERF_RECORD_THROTTLE			= 5,
440 	PERF_RECORD_UNTHROTTLE			= 6,
441 
442 	/*
443 	 * struct {
444 	 *	struct perf_event_header	header;
445 	 *	u32				pid, ppid;
446 	 *	u32				tid, ptid;
447 	 *	u64				time;
448 	 * };
449 	 */
450 	PERF_RECORD_FORK			= 7,
451 
452 	/*
453 	 * struct {
454 	 *	struct perf_event_header	header;
455 	 *	u32				pid, tid;
456 	 *
457 	 *	struct read_format		values;
458 	 * };
459 	 */
460 	PERF_RECORD_READ			= 8,
461 
462 	/*
463 	 * struct {
464 	 *	struct perf_event_header	header;
465 	 *
466 	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
467 	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
468 	 *	{ u64			time;     } && PERF_SAMPLE_TIME
469 	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
470 	 *	{ u64			id;	  } && PERF_SAMPLE_ID
471 	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
472 	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
473 	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
474 	 *
475 	 *	{ struct read_format	values;	  } && PERF_SAMPLE_READ
476 	 *
477 	 *	{ u64			nr,
478 	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
479 	 *
480 	 *	#
481 	 *	# The RAW record below is opaque data wrt the ABI
482 	 *	#
483 	 *	# That is, the ABI doesn't make any promises wrt to
484 	 *	# the stability of its content, it may vary depending
485 	 *	# on event, hardware, kernel version and phase of
486 	 *	# the moon.
487 	 *	#
488 	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
489 	 *	#
490 	 *
491 	 *	{ u32			size;
492 	 *	  char			data[size]; } && PERF_SAMPLE_RAW
493 	 *
494 	 *	{ u64 nr; { u64 from, to, flags } lbr[nr]; } && PERF_SAMPLE_BRANCH_STACK
495 	 * };
496 	 */
497 	PERF_RECORD_SAMPLE			= 9,
498 
499 	PERF_RECORD_MAX,			/* non-ABI */
500 };
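
/*
 * Illustrative decode of a PERF_RECORD_SAMPLE for an event opened with
 * sample_type == PERF_SAMPLE_IP | PERF_SAMPLE_TID, given a struct
 * perf_event_header *hdr from the ring buffer; field order follows the
 * bit order documented above:
 *
 *	struct sample {
 *		struct perf_event_header header;
 *		__u64 ip;
 *		__u32 pid, tid;
 *	};
 *
 *	if (hdr->type == PERF_RECORD_SAMPLE) {
 *		struct sample *s = (struct sample *)hdr;
 *
 *		account_ip(s->pid, s->ip);	placeholder
 *	}
 */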
501 
502 enum perf_callchain_context {
503 	PERF_CONTEXT_HV			= (__u64)-32,
504 	PERF_CONTEXT_KERNEL		= (__u64)-128,
505 	PERF_CONTEXT_USER		= (__u64)-512,
506 
507 	PERF_CONTEXT_GUEST		= (__u64)-2048,
508 	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
509 	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,
510 
511 	PERF_CONTEXT_MAX		= (__u64)-4095,
512 };
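
/*
 * Within a PERF_SAMPLE_CALLCHAIN ips[] array the values above are
 * markers (huge negative numbers cast to u64) separating regions of
 * the chain, not real addresses. Illustrative walk, given the nr/ips[]
 * pair from the sample:
 *
 *	__u64 context = PERF_CONTEXT_MAX;
 *
 *	for (i = 0; i < nr; i++) {
 *		if (ips[i] >= PERF_CONTEXT_MAX) {
 *			context = ips[i];	subsequent ips are in this context
 *			continue;
 *		}
 *		record_frame(context, ips[i]);	placeholder
 *	}
 */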
513 
514 #define PERF_FLAG_FD_NO_GROUP		(1U << 0)
515 #define PERF_FLAG_FD_OUTPUT		(1U << 1)
516 #define PERF_FLAG_PID_CGROUP		(1U << 2) /* pid=cgroup id, per-cpu mode only */
517 
518 #ifdef __KERNEL__
519 /*
520  * Kernel-internal data types and definitions:
521  */
522 
523 #ifdef CONFIG_PERF_EVENTS
524 # include <linux/cgroup.h>
525 # include <asm/perf_event.h>
526 # include <asm/local64.h>
527 #endif
528 
529 struct perf_guest_info_callbacks {
530 	int				(*is_in_guest)(void);
531 	int				(*is_user_mode)(void);
532 	unsigned long			(*get_guest_ip)(void);
533 };
534 
535 #ifdef CONFIG_HAVE_HW_BREAKPOINT
536 #include <asm/hw_breakpoint.h>
537 #endif
538 
539 #include <linux/list.h>
540 #include <linux/mutex.h>
541 #include <linux/rculist.h>
542 #include <linux/rcupdate.h>
543 #include <linux/spinlock.h>
544 #include <linux/hrtimer.h>
545 #include <linux/fs.h>
546 #include <linux/pid_namespace.h>
547 #include <linux/workqueue.h>
548 #include <linux/ftrace.h>
549 #include <linux/cpu.h>
550 #include <linux/irq_work.h>
551 #include <linux/static_key.h>
552 #include <linux/atomic.h>
553 #include <asm/local.h>
554 
555 #define PERF_MAX_STACK_DEPTH		255
556 
557 struct perf_callchain_entry {
558 	__u64				nr;
559 	__u64				ip[PERF_MAX_STACK_DEPTH];
560 };
561 
562 struct perf_raw_record {
563 	u32				size;
564 	void				*data;
565 };
566 
567 /*
568  * single taken branch record layout:
569  *
570  *      from: source instruction (may not always be a branch insn)
571  *        to: branch target
572  *   mispred: branch target was mispredicted
573  * predicted: branch target was predicted
574  *
575  * Support for mispred and predicted is optional. If it is
576  * not supported, mispred = predicted = 0.
577  */
578 struct perf_branch_entry {
579 	__u64	from;
580 	__u64	to;
581 	__u64	mispred:1,  /* target mispredicted */
582 		predicted:1,/* target predicted */
583 		reserved:62;
584 };
585 
586 /*
587  * branch stack layout:
588  *  nr: number of taken branches stored in entries[]
589  *
590  * Note that nr can vary from sample to sample
591  * branches (to, from) are stored from most recent
592  * to least recent, i.e., entries[0] contains the most
593  * recent branch.
594  */
595 struct perf_branch_stack {
596 	__u64				nr;
597 	struct perf_branch_entry	entries[0];
598 };
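
/*
 * Illustrative walk over a sampled branch stack, most recent branch
 * first (note_branch() is a placeholder):
 *
 *	for (i = 0; i < bs->nr; i++) {
 *		struct perf_branch_entry *br = &bs->entries[i];
 *
 *		note_branch(br->from, br->to, br->mispred);
 *	}
 */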
599 
600 struct task_struct;
601 
602 /*
603  * extra PMU register associated with an event
604  */
605 struct hw_perf_event_extra {
606 	u64		config;	/* register value */
607 	unsigned int	reg;	/* register address or index */
608 	int		alloc;	/* extra register already allocated */
609 	int		idx;	/* index in shared_regs->regs[] */
610 };
611 
612 /**
613  * struct hw_perf_event - performance event hardware details:
614  */
615 struct hw_perf_event {
616 #ifdef CONFIG_PERF_EVENTS
617 	union {
618 		struct { /* hardware */
619 			u64		config;
620 			u64		last_tag;
621 			unsigned long	config_base;
622 			unsigned long	event_base;
623 			int		idx;
624 			int		last_cpu;
625 
626 			struct hw_perf_event_extra extra_reg;
627 			struct hw_perf_event_extra branch_reg;
628 		};
629 		struct { /* software */
630 			struct hrtimer	hrtimer;
631 		};
632 #ifdef CONFIG_HAVE_HW_BREAKPOINT
633 		struct { /* breakpoint */
634 			struct arch_hw_breakpoint	info;
635 			struct list_head		bp_list;
636 			/*
637 			 * Crufty hack to avoid the chicken and egg
638 			 * problem hw_breakpoint has with context
639 			 * creation and event initialization.
640 			 */
641 			struct task_struct		*bp_target;
642 		};
643 #endif
644 	};
645 	int				state;
646 	local64_t			prev_count;
647 	u64				sample_period;
648 	u64				last_period;
649 	local64_t			period_left;
650 	u64                             interrupts_seq;
651 	u64				interrupts;
652 
653 	u64				freq_time_stamp;
654 	u64				freq_count_stamp;
655 #endif
656 };
657 
658 /*
659  * hw_perf_event::state flags
660  */
661 #define PERF_HES_STOPPED	0x01 /* the counter is stopped */
662 #define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
663 #define PERF_HES_ARCH		0x04
664 
665 struct perf_event;
666 
667 /*
668  * Common implementation detail of pmu::{start,commit,cancel}_txn
669  */
670 #define PERF_EVENT_TXN 0x1
671 
672 /**
673  * struct pmu - generic performance monitoring unit
674  */
675 struct pmu {
676 	struct list_head		entry;
677 
678 	struct device			*dev;
679 	const struct attribute_group	**attr_groups;
680 	char				*name;
681 	int				type;
682 
683 	int * __percpu			pmu_disable_count;
684 	struct perf_cpu_context * __percpu pmu_cpu_context;
685 	int				task_ctx_nr;
686 
687 	/*
688 	 * Fully disable/enable this PMU; can be used to protect from the PMI
689 	 * as well as for lazy/batch writing of the MSRs.
690 	 */
691 	void (*pmu_enable)		(struct pmu *pmu); /* optional */
692 	void (*pmu_disable)		(struct pmu *pmu); /* optional */
693 
694 	/*
695 	 * Try and initialize the event for this PMU.
696 	 * Should return -ENOENT when the @event doesn't match this PMU.
697 	 */
698 	int (*event_init)		(struct perf_event *event);
699 
700 #define PERF_EF_START	0x01		/* start the counter when adding    */
701 #define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
702 #define PERF_EF_UPDATE	0x04		/* update the counter when stopping */
703 
704 	/*
705 	 * Adds/Removes a counter to/from the PMU; can be done inside
706 	 * a transaction, see the ->*_txn() methods.
707 	 */
708 	int  (*add)			(struct perf_event *event, int flags);
709 	void (*del)			(struct perf_event *event, int flags);
710 
711 	/*
712 	 * Starts/Stops a counter present on the PMU. The PMI handler
713 	 * should stop the counter when perf_event_overflow() returns
714 	 * !0. ->start() will be used to continue.
715 	 */
716 	void (*start)			(struct perf_event *event, int flags);
717 	void (*stop)			(struct perf_event *event, int flags);
718 
719 	/*
720 	 * Updates the counter value of the event.
721 	 */
722 	void (*read)			(struct perf_event *event);
723 
724 	/*
725 	 * Group event scheduling is treated as a transaction: add the
726 	 * group's events as a whole and perform one schedulability test.
727 	 * If the test fails, roll back the whole group.
728 	 *
729 	 * Start the transaction, after this ->add() doesn't need to
730 	 * do schedulability tests.
731 	 */
732 	void (*start_txn)		(struct pmu *pmu); /* optional */
733 	/*
734 	 * If ->start_txn() disabled the ->add() schedulability test
735 	 * then ->commit_txn() is required to perform one. On success
736 	 * the transaction is closed. On error the transaction is kept
737 	 * open until ->cancel_txn() is called.
738 	 */
739 	int  (*commit_txn)		(struct pmu *pmu); /* optional */
740 	/*
741 	 * Will cancel the transaction, assumes ->del() is called
742 	 * for each successful ->add() during the transaction.
743 	 */
744 	void (*cancel_txn)		(struct pmu *pmu); /* optional */
745 
746 	/*
747 	 * Returns the value for perf_event_mmap_page::index for this event;
748 	 * if no implementation is provided it defaults to: event->hw.idx + 1.
749 	 */
750 	int (*event_idx)		(struct perf_event *event); /* optional */
751 
752 	/*
753 	 * flush branch stack on context-switches (needed in cpu-wide mode)
754 	 */
755 	void (*flush_branch_stack)	(void);
756 };
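
/*
 * A sketch of the transaction sequence as driven by the core group
 * scheduler (pseudo-code, not the exact call sites):
 *
 *	pmu->start_txn(pmu);
 *
 *	for each event in the group:
 *		if (pmu->add(event, PERF_EF_START) != 0)
 *			goto fail;
 *
 *	if (pmu->commit_txn(pmu) == 0)
 *		return 0;	the whole group is now scheduled
 *   fail:
 *	pmu->cancel_txn(pmu);	->del() assumed for each successful ->add()
 */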
757 
758 /**
759  * enum perf_event_active_state - the states of an event
760  */
761 enum perf_event_active_state {
762 	PERF_EVENT_STATE_ERROR		= -2,
763 	PERF_EVENT_STATE_OFF		= -1,
764 	PERF_EVENT_STATE_INACTIVE	=  0,
765 	PERF_EVENT_STATE_ACTIVE		=  1,
766 };
767 
768 struct file;
769 struct perf_sample_data;
770 
771 typedef void (*perf_overflow_handler_t)(struct perf_event *,
772 					struct perf_sample_data *,
773 					struct pt_regs *regs);
774 
775 enum perf_group_flag {
776 	PERF_GROUP_SOFTWARE		= 0x1,
777 };
778 
779 #define SWEVENT_HLIST_BITS		8
780 #define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)
781 
782 struct swevent_hlist {
783 	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
784 	struct rcu_head			rcu_head;
785 };
786 
787 #define PERF_ATTACH_CONTEXT	0x01
788 #define PERF_ATTACH_GROUP	0x02
789 #define PERF_ATTACH_TASK	0x04
790 
791 #ifdef CONFIG_CGROUP_PERF
792 /*
793  * perf_cgroup_info keeps track of time_enabled for a cgroup.
794  * This is a per-cpu dynamically allocated data structure.
795  */
796 struct perf_cgroup_info {
797 	u64				time;
798 	u64				timestamp;
799 };
800 
801 struct perf_cgroup {
802 	struct cgroup_subsys_state	css;
803 	struct perf_cgroup_info		*info;	/* timing info, one per cpu */
804 };
805 #endif
806 
807 struct ring_buffer;
808 
809 /**
810  * struct perf_event - performance event kernel representation:
811  */
812 struct perf_event {
813 #ifdef CONFIG_PERF_EVENTS
814 	struct list_head		group_entry;
815 	struct list_head		event_entry;
816 	struct list_head		sibling_list;
817 	struct hlist_node		hlist_entry;
818 	int				nr_siblings;
819 	int				group_flags;
820 	struct perf_event		*group_leader;
821 	struct pmu			*pmu;
822 
823 	enum perf_event_active_state	state;
824 	unsigned int			attach_state;
825 	local64_t			count;
826 	atomic64_t			child_count;
827 
828 	/*
829 	 * These are the total time in nanoseconds that the event
830 	 * has been enabled (i.e. eligible to run, and the task has
831 	 * been scheduled in, if this is a per-task event)
832 	 * and running (scheduled onto the CPU), respectively.
833 	 *
834 	 * They are computed from tstamp_enabled, tstamp_running and
835 	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
836 	 */
837 	u64				total_time_enabled;
838 	u64				total_time_running;
839 
840 	/*
841 	 * These are timestamps used for computing total_time_enabled
842 	 * and total_time_running when the event is in INACTIVE or
843 	 * ACTIVE state, measured in nanoseconds from an arbitrary point
844 	 * in time.
845 	 * tstamp_enabled: the notional time when the event was enabled
846 	 * tstamp_running: the notional time when the event was scheduled on
847 	 * tstamp_stopped: in INACTIVE state, the notional time when the
848 	 *	event was scheduled off.
849 	 */
850 	u64				tstamp_enabled;
851 	u64				tstamp_running;
852 	u64				tstamp_stopped;
853 
854 	/*
855 	 * timestamp shadows the actual context timing but it can
856 	 * be safely used in NMI interrupt context. It reflects the
857 	 * context time as it was when the event was last scheduled in.
858 	 *
859 	 * ctx_time already accounts for ctx->timestamp. Therefore to
860 	 * compute ctx_time for a sample, simply add perf_clock().
861 	 */
862 	u64				shadow_ctx_time;
863 
864 	struct perf_event_attr		attr;
865 	u16				header_size;
866 	u16				id_header_size;
867 	u16				read_size;
868 	struct hw_perf_event		hw;
869 
870 	struct perf_event_context	*ctx;
871 	struct file			*filp;
872 
873 	/*
874 	 * These accumulate total time (in nanoseconds) that children
875 	 * events have been enabled and running, respectively.
876 	 */
877 	atomic64_t			child_total_time_enabled;
878 	atomic64_t			child_total_time_running;
879 
880 	/*
881 	 * Protect attach/detach and child_list:
882 	 */
883 	struct mutex			child_mutex;
884 	struct list_head		child_list;
885 	struct perf_event		*parent;
886 
887 	int				oncpu;
888 	int				cpu;
889 
890 	struct list_head		owner_entry;
891 	struct task_struct		*owner;
892 
893 	/* mmap bits */
894 	struct mutex			mmap_mutex;
895 	atomic_t			mmap_count;
896 	int				mmap_locked;
897 	struct user_struct		*mmap_user;
898 	struct ring_buffer		*rb;
899 	struct list_head		rb_entry;
900 
901 	/* poll related */
902 	wait_queue_head_t		waitq;
903 	struct fasync_struct		*fasync;
904 
905 	/* delayed work for NMIs and such */
906 	int				pending_wakeup;
907 	int				pending_kill;
908 	int				pending_disable;
909 	struct irq_work			pending;
910 
911 	atomic_t			event_limit;
912 
913 	void (*destroy)(struct perf_event *);
914 	struct rcu_head			rcu_head;
915 
916 	struct pid_namespace		*ns;
917 	u64				id;
918 
919 	perf_overflow_handler_t		overflow_handler;
920 	void				*overflow_handler_context;
921 
922 #ifdef CONFIG_EVENT_TRACING
923 	struct ftrace_event_call	*tp_event;
924 	struct event_filter		*filter;
925 #ifdef CONFIG_FUNCTION_TRACER
926 	struct ftrace_ops               ftrace_ops;
927 #endif
928 #endif
929 
930 #ifdef CONFIG_CGROUP_PERF
931 	struct perf_cgroup		*cgrp; /* cgroup this event is attached to */
932 	int				cgrp_defer_enabled;
933 #endif
934 
935 #endif /* CONFIG_PERF_EVENTS */
936 };
937 
938 enum perf_event_context_type {
939 	task_context,
940 	cpu_context,
941 };
942 
943 /**
944  * struct perf_event_context - event context structure
945  *
946  * Used as a container for task events and CPU events as well:
947  */
948 struct perf_event_context {
949 	struct pmu			*pmu;
950 	enum perf_event_context_type	type;
951 	/*
952 	 * Protect the states of the events in the list,
953 	 * nr_active, and the list:
954 	 */
955 	raw_spinlock_t			lock;
956 	/*
957 	 * Protect the list of events.  Locking either mutex or lock
958 	 * is sufficient to ensure the list doesn't change; to change
959 	 * the list you need to lock both the mutex and the spinlock.
960 	 */
961 	struct mutex			mutex;
962 
963 	struct list_head		pinned_groups;
964 	struct list_head		flexible_groups;
965 	struct list_head		event_list;
966 	int				nr_events;
967 	int				nr_active;
968 	int				is_active;
969 	int				nr_stat;
970 	int				nr_freq;
971 	int				rotate_disable;
972 	atomic_t			refcount;
973 	struct task_struct		*task;
974 
975 	/*
976 	 * Context clock, runs when context enabled.
977 	 */
978 	u64				time;
979 	u64				timestamp;
980 
981 	/*
982 	 * These fields let us detect when two contexts have both
983 	 * been cloned (inherited) from a common ancestor.
984 	 */
985 	struct perf_event_context	*parent_ctx;
986 	u64				parent_gen;
987 	u64				generation;
988 	int				pin_count;
989 	int				nr_cgroups;	 /* cgroup events */
990 	int				nr_branch_stack; /* branch_stack events */
991 	struct rcu_head			rcu_head;
992 };
993 
994 /*
995  * Number of contexts where an event can trigger:
996  *	task, softirq, hardirq, nmi.
997  */
998 #define PERF_NR_CONTEXTS	4
999 
1000 /**
1001  * struct perf_cpu_context - per-CPU event context structure
1002  */
1003 struct perf_cpu_context {
1004 	struct perf_event_context	ctx;
1005 	struct perf_event_context	*task_ctx;
1006 	int				active_oncpu;
1007 	int				exclusive;
1008 	struct list_head		rotation_list;
1009 	int				jiffies_interval;
1010 	struct pmu			*active_pmu;
1011 	struct perf_cgroup		*cgrp;
1012 };
1013 
1014 struct perf_output_handle {
1015 	struct perf_event		*event;
1016 	struct ring_buffer		*rb;
1017 	unsigned long			wakeup;
1018 	unsigned long			size;
1019 	void				*addr;
1020 	int				page;
1021 };
1022 
1023 #ifdef CONFIG_PERF_EVENTS
1024 
1025 extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
1026 extern void perf_pmu_unregister(struct pmu *pmu);
1027 
1028 extern int perf_num_counters(void);
1029 extern const char *perf_pmu_name(void);
1030 extern void __perf_event_task_sched_in(struct task_struct *prev,
1031 				       struct task_struct *task);
1032 extern void __perf_event_task_sched_out(struct task_struct *prev,
1033 					struct task_struct *next);
1034 extern int perf_event_init_task(struct task_struct *child);
1035 extern void perf_event_exit_task(struct task_struct *child);
1036 extern void perf_event_free_task(struct task_struct *task);
1037 extern void perf_event_delayed_put(struct task_struct *task);
1038 extern void perf_event_print_debug(void);
1039 extern void perf_pmu_disable(struct pmu *pmu);
1040 extern void perf_pmu_enable(struct pmu *pmu);
1041 extern int perf_event_task_disable(void);
1042 extern int perf_event_task_enable(void);
1043 extern int perf_event_refresh(struct perf_event *event, int refresh);
1044 extern void perf_event_update_userpage(struct perf_event *event);
1045 extern int perf_event_release_kernel(struct perf_event *event);
1046 extern struct perf_event *
1047 perf_event_create_kernel_counter(struct perf_event_attr *attr,
1048 				int cpu,
1049 				struct task_struct *task,
1050 				perf_overflow_handler_t callback,
1051 				void *context);
1052 extern u64 perf_event_read_value(struct perf_event *event,
1053 				 u64 *enabled, u64 *running);
1054 
1055 
1056 struct perf_sample_data {
1057 	u64				type;
1058 
1059 	u64				ip;
1060 	struct {
1061 		u32	pid;
1062 		u32	tid;
1063 	}				tid_entry;
1064 	u64				time;
1065 	u64				addr;
1066 	u64				id;
1067 	u64				stream_id;
1068 	struct {
1069 		u32	cpu;
1070 		u32	reserved;
1071 	}				cpu_entry;
1072 	u64				period;
1073 	struct perf_callchain_entry	*callchain;
1074 	struct perf_raw_record		*raw;
1075 	struct perf_branch_stack	*br_stack;
1076 };
1077 
1078 static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
1079 {
1080 	data->addr = addr;
1081 	data->raw  = NULL;
1082 	data->br_stack = NULL;
1083 }
1084 
1085 extern void perf_output_sample(struct perf_output_handle *handle,
1086 			       struct perf_event_header *header,
1087 			       struct perf_sample_data *data,
1088 			       struct perf_event *event);
1089 extern void perf_prepare_sample(struct perf_event_header *header,
1090 				struct perf_sample_data *data,
1091 				struct perf_event *event,
1092 				struct pt_regs *regs);
1093 
1094 extern int perf_event_overflow(struct perf_event *event,
1095 				 struct perf_sample_data *data,
1096 				 struct pt_regs *regs);
1097 
1098 static inline bool is_sampling_event(struct perf_event *event)
1099 {
1100 	return event->attr.sample_period != 0;
1101 }
1102 
1103 /*
1104  * Return 1 for a software event, 0 for a hardware event
1105  */
1106 static inline int is_software_event(struct perf_event *event)
1107 {
1108 	return event->pmu->task_ctx_nr == perf_sw_context;
1109 }
1110 
1111 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
1112 
1113 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
1114 
1115 #ifndef perf_arch_fetch_caller_regs
1116 static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
1117 #endif
1118 
1119 /*
1120  * Take a snapshot of the regs. Skip ip and frame pointer to
1121  * the nth caller. We only need a few of the regs:
1122  * - ip for PERF_SAMPLE_IP
1123  * - cs for user_mode() tests
1124  * - bp for callchains
1125  * - eflags, for future purposes, just in case
1126  */
1127 static inline void perf_fetch_caller_regs(struct pt_regs *regs)
1128 {
1129 	memset(regs, 0, sizeof(*regs));
1130 
1131 	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
1132 }
1133 
1134 static __always_inline void
1135 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
1136 {
1137 	struct pt_regs hot_regs;
1138 
1139 	if (static_key_false(&perf_swevent_enabled[event_id])) {
1140 		if (!regs) {
1141 			perf_fetch_caller_regs(&hot_regs);
1142 			regs = &hot_regs;
1143 		}
1144 		__perf_sw_event(event_id, nr, regs, addr);
1145 	}
1146 }
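
/*
 * Illustrative call site, of the kind used by the fault handlers:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */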
1147 
1148 extern struct static_key_deferred perf_sched_events;
1149 
1150 static inline void perf_event_task_sched_in(struct task_struct *prev,
1151 					    struct task_struct *task)
1152 {
1153 	if (static_key_false(&perf_sched_events.key))
1154 		__perf_event_task_sched_in(prev, task);
1155 }
1156 
1157 static inline void perf_event_task_sched_out(struct task_struct *prev,
1158 					     struct task_struct *next)
1159 {
1160 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
1161 
1162 	if (static_key_false(&perf_sched_events.key))
1163 		__perf_event_task_sched_out(prev, next);
1164 }
1165 
1166 extern void perf_event_mmap(struct vm_area_struct *vma);
1167 extern struct perf_guest_info_callbacks *perf_guest_cbs;
1168 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1169 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1170 
1171 extern void perf_event_comm(struct task_struct *tsk);
1172 extern void perf_event_fork(struct task_struct *tsk);
1173 
1174 /* Callchains */
1175 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
1176 
1177 extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
1178 extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
1179 
1180 static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
1181 {
1182 	if (entry->nr < PERF_MAX_STACK_DEPTH)
1183 		entry->ip[entry->nr++] = ip;
1184 }
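
/*
 * Illustrative use from an architecture callchain walker (regs->ip
 * being the x86 field name):
 *
 *	perf_callchain_store(entry, regs->ip);
 */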
1185 
1186 extern int sysctl_perf_event_paranoid;
1187 extern int sysctl_perf_event_mlock;
1188 extern int sysctl_perf_event_sample_rate;
1189 
1190 extern int perf_proc_update_handler(struct ctl_table *table, int write,
1191 		void __user *buffer, size_t *lenp,
1192 		loff_t *ppos);
1193 
1194 static inline bool perf_paranoid_tracepoint_raw(void)
1195 {
1196 	return sysctl_perf_event_paranoid > -1;
1197 }
1198 
1199 static inline bool perf_paranoid_cpu(void)
1200 {
1201 	return sysctl_perf_event_paranoid > 0;
1202 }
1203 
1204 static inline bool perf_paranoid_kernel(void)
1205 {
1206 	return sysctl_perf_event_paranoid > 1;
1207 }
1208 
1209 extern void perf_event_init(void);
1210 extern void perf_tp_event(u64 addr, u64 count, void *record,
1211 			  int entry_size, struct pt_regs *regs,
1212 			  struct hlist_head *head, int rctx);
1213 extern void perf_bp_event(struct perf_event *event, void *data);
1214 
1215 #ifndef perf_misc_flags
1216 # define perf_misc_flags(regs) \
1217 		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
1218 # define perf_instruction_pointer(regs)	instruction_pointer(regs)
1219 #endif
1220 
1221 static inline bool has_branch_stack(struct perf_event *event)
1222 {
1223 	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
1224 }
1225 
1226 extern int perf_output_begin(struct perf_output_handle *handle,
1227 			     struct perf_event *event, unsigned int size);
1228 extern void perf_output_end(struct perf_output_handle *handle);
1229 extern void perf_output_copy(struct perf_output_handle *handle,
1230 			     const void *buf, unsigned int len);
1231 extern int perf_swevent_get_recursion_context(void);
1232 extern void perf_swevent_put_recursion_context(int rctx);
1233 extern void perf_event_enable(struct perf_event *event);
1234 extern void perf_event_disable(struct perf_event *event);
1235 extern void perf_event_task_tick(void);
1236 #else
1237 static inline void
1238 perf_event_task_sched_in(struct task_struct *prev,
1239 			 struct task_struct *task)			{ }
1240 static inline void
1241 perf_event_task_sched_out(struct task_struct *prev,
1242 			  struct task_struct *next)			{ }
1243 static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
1244 static inline void perf_event_exit_task(struct task_struct *child)	{ }
1245 static inline void perf_event_free_task(struct task_struct *task)	{ }
1246 static inline void perf_event_delayed_put(struct task_struct *task)	{ }
1247 static inline void perf_event_print_debug(void)				{ }
1248 static inline int perf_event_task_disable(void)				{ return -EINVAL; }
1249 static inline int perf_event_task_enable(void)				{ return -EINVAL; }
1250 static inline int perf_event_refresh(struct perf_event *event, int refresh)
1251 {
1252 	return -EINVAL;
1253 }
1254 
1255 static inline void
1256 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
1257 static inline void
1258 perf_bp_event(struct perf_event *event, void *data)			{ }
1259 
1260 static inline int perf_register_guest_info_callbacks
1261 (struct perf_guest_info_callbacks *callbacks)				{ return 0; }
1262 static inline int perf_unregister_guest_info_callbacks
1263 (struct perf_guest_info_callbacks *callbacks)				{ return 0; }
1264 
1265 static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
1266 static inline void perf_event_comm(struct task_struct *tsk)		{ }
1267 static inline void perf_event_fork(struct task_struct *tsk)		{ }
1268 static inline void perf_event_init(void)				{ }
1269 static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
1270 static inline void perf_swevent_put_recursion_context(int rctx)		{ }
1271 static inline void perf_event_enable(struct perf_event *event)		{ }
1272 static inline void perf_event_disable(struct perf_event *event)		{ }
1273 static inline void perf_event_task_tick(void)				{ }
1274 #endif
1275 
1276 #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
1277 
1278 /*
1279  * This has to have a higher priority than migration_notifier in sched.c.
1280  */
1281 #define perf_cpu_notifier(fn)						\
1282 do {									\
1283 	static struct notifier_block fn##_nb __cpuinitdata =		\
1284 		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
1285 	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
1286 		(void *)(unsigned long)smp_processor_id());		\
1287 	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
1288 		(void *)(unsigned long)smp_processor_id());		\
1289 	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
1290 		(void *)(unsigned long)smp_processor_id());		\
1291 	register_cpu_notifier(&fn##_nb);				\
1292 } while (0)
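
/*
 * Illustrative use from PMU init code (my_pmu_notifier is a
 * placeholder):
 *
 *	static int __cpuinit my_pmu_notifier(struct notifier_block *nb,
 *					     unsigned long action, void *hcpu)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	perf_cpu_notifier(my_pmu_notifier);
 */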
1293 
1294 #endif /* __KERNEL__ */
1295 #endif /* _LINUX_PERF_EVENT_H */
1296