/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <[email protected]>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include <asm/local.h>
#include "trace.h"

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must keep it up to date manually.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "# compressed entry header\n");
	ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
	ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}
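
/*
 * For reference, a sketch of how the compressed header printed above maps
 * onto struct ring_buffer_event. The authoritative definition lives in
 * <linux/ring_buffer.h>; this is only an illustration of the 5-bit
 * type_len / 27-bit time_delta split in the first 32-bit word:
 *
 *	struct ring_buffer_event {
 *		u32	type_len:5, time_delta:27;
 *		u32	array[];
 *	};
 *
 * A type_len of 1..RINGBUF_TYPE_DATA_TYPE_LEN_MAX encodes the data length
 * directly (length = type_len * RB_ALIGNMENT); larger events store their
 * length in array[0] and set type_len to 0.
 */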

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on.  A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that
 * page again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */
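
/*
 * In outline, the swap pictured above takes a few pointer updates. This is
 * a simplified sketch, not the verbatim kernel code (the real work happens
 * in the reader path via rb_head_page_replace() further down, where the
 * decisive step is a cmpxchg so a concurrent writer can be detected):
 *
 *	new_reader->list.next = rb_list_head(head->list.next);
 *	new_reader->list.prev = head->list.prev;
 *	// cmpxchg: only succeeds if "head" is still the head page
 *	head->list.prev->next = &new_reader->list;
 *	rb_list_head(head->list.next)->prev = &new_reader->list;
 *
 * After this, the old head page belongs exclusively to the reader, and the
 * page that followed it becomes the new head.
 */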

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 *  This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

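/*
 * A minimal sketch of how a write path consults these bits (the real check
 * sits in ring_buffer_lock_reserve(), outside this excerpt). Because
 * DISABLED is a higher bit than ON, a single equality test covers both
 * "off" and "permanently disabled":
 *
 *	if (unlikely(ring_buffer_flags != RB_BUFFERS_ON))
 *		return NULL;	// recording is off or permanently disabled
 */
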
static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
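
/*
 * Worked example of the length encoding (illustrative only): with
 * RB_ALIGNMENT == 4, a 12-byte payload fits the "small" encoding, so
 * type_len = 12 / 4 = 3 and the payload starts at array[0]. A payload
 * larger than RB_MAX_SMALL_DATA (4 * 28 = 112 bytes here) is stored with
 * type_len = 0, its length in array[0], and the payload at array[1].
 */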

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 *   or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself, with the exception
 * of a TIME_EXTEND, where it returns the size of the data
 * load of the data event that follows it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
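
/*
 * A usage sketch for the two exported helpers above: a consumer that has
 * obtained an event (e.g. from ring_buffer_consume(), which is outside
 * this excerpt) can retrieve its payload without knowing the header
 * encoding:
 *
 *	struct ring_buffer_event *event = ...;
 *	size_t len  = ring_buffer_event_length(event); // payload size
 *	void  *body = ring_buffer_event_data(event);   // payload start
 *
 * Both helpers transparently skip a leading time extend, so the caller
 * always sees the data event.
 */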

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are packed into each number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This
 * allows the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)
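
/*
 * Illustrative decoding of the packed write field (not kernel code). The
 * low 20 bits are the write index into the page; the bits above them count
 * in-flight updaters, bumped via local_add_return(RB_WRITE_INTCNT, ...) as
 * rb_tail_page_update() does further down:
 *
 *	unsigned long w       = local_read(&bpage->write);
 *	unsigned long index   = w & RB_WRITE_MASK;	// write position
 *	unsigned long updates = w >> 20;		// nested updaters
 */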

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}
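
/*
 * Worked example: with TS_SHIFT == 27 the in-event delta can hold at most
 * 2^27 - 1 (about 134 million) clock units, roughly 134ms with a
 * nanosecond clock. If two events on a CPU are further apart than that,
 * test_time_stamp() returns 1 and the writer must emit a TIME_EXTEND
 * event carrying the upper bits before the data event. Sketch of the
 * writer-side check (the real logic lives in the reserve path, outside
 * this excerpt):
 *
 *	u64 delta = ts - cpu_buffer->write_stamp;
 *	if (test_time_stamp(delta))
 *		// delta does not fit in 27 bits: add a time extend event
 */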

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)sizeof(field.time_stamp),
			       (unsigned int)is_signed_type(u64));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit),
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       1,
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE,
			       (unsigned int)is_signed_type(char));

	return ret;
}
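
/*
 * For orientation, an assumed example of what this produces in the tracing
 * header_page file on a 64-bit build with 4K pages. Sizes, offsets, and
 * char signedness are arch dependent, so treat this as illustrative, not
 * a guaranteed format:
 *
 *	field: u64 timestamp;	offset:0;	size:8;	signed:0;
 *	field: local_t commit;	offset:8;	size:8;	signed:1;
 *	field: int overwrite;	offset:8;	size:1;	signed:1;
 *	field: char data;	offset:16;	size:4080;	signed:1;
 */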

/*
 * If head_page == tail_page && head == tail, then the buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	struct ring_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	unsigned int			nr_pages;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	local_t				entries_bytes;
	local_t				commit_overrun;
	local_t				overrun;
	local_t				entries;
	local_t				committing;
	local_t				commits;
	unsigned long			read;
	unsigned long			read_bytes;
	u64				write_stamp;
	u64				read_stamp;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	int				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_completion;
};

struct ring_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, and they
 * only need to worry about interrupts. Reads, however, can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 *  What the above shows is that the reader just swapped out
 *  the reader page with a page in the buffer, but before it
 *  could make the new header point back to the new page added
 *  it was preempted by a writer. The writer moved forward onto
 *  the new page added by the reader and is about to move forward
 *  again.
 *
 *  You can see, it is legitimate for the previous pointer of
 *  the head (or any page) not to point back to itself. But only
 *  temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL
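
/*
 * A minimal sketch of the pointer tagging itself (illustrative; the real
 * helpers are rb_list_head() and rb_set_list_to_head() below). Because
 * buffer pages are cache-line aligned, the two low bits of a list pointer
 * are always zero and can carry the page state:
 *
 *	unsigned long val  = (unsigned long)list->next;
 *	unsigned long flag = val & RB_FLAG_MASK;	// NORMAL/HEAD/UPDATE
 *	struct list_head *next =
 *		(struct list_head *)(val & ~RB_FLAG_MASK); // real pointer
 */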

/*
 * rb_list_head - strip the flag bits from a list pointer
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static int rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(cpu_buffer, &page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}

/*
 * rb_tail_page_update - move the tail page forward
 *
 * Returns 1 if moved tail page, 0 if someone else did.
 */
static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page *tail_page,
			       struct buffer_page *next_page)
{
	struct buffer_page *old_tail;
	unsigned long old_entries;
	unsigned long old_write;
	int ret = 0;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == cpu_buffer->tail_page) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * It only can increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		old_tail = cmpxchg(&cpu_buffer->tail_page,
				   tail_page, next_page);

		if (old_tail == tail_page)
			ret = 1;
	}

	return ret;
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	/* Reset the head page if it exists */
	if (cpu_buffer->head_page)
		rb_set_head_page(cpu_buffer);

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}

static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
{
	int i;
	struct buffer_page *bpage, *tmp;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		/*
		 * __GFP_NORETRY flag makes sure that the allocation fails
		 * gracefully without invoking oom-killer and the system is
		 * not destabilized.
		 */
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				    GFP_KERNEL | __GFP_NORETRY,
				    cpu_to_node(cpu));
		if (!bpage)
			goto free_pages;

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);
	}

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}

	return -ENOMEM;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}
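
/*
 * Why the list_del() above works: pages were collected on a normal
 * list_head, and dropping that head leaves the buffer pages linked to each
 * other in a headless circle. Illustrative before/after (not code):
 *
 *	before:  pages(head) <-> p1 <-> p2 <-> p3 <-> pages(head)
 *	after:   p1 <-> p2 <-> p3 <-> p1 <-> ...
 *
 * cpu_buffer->pages then points at an arbitrary buffer page (p1) rather
 * than at a dedicated list head.
 */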

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_completion);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
11127a8e76a3SSteven Rostedt 		}
11133adc54faSSteven Rostedt 		bpage = list_entry(head, struct buffer_page, list);
11143adc54faSSteven Rostedt 		free_buffer_page(bpage);
11153adc54faSSteven Rostedt 	}
11163adc54faSSteven Rostedt 
11177a8e76a3SSteven Rostedt 	kfree(cpu_buffer);
11187a8e76a3SSteven Rostedt }
11197a8e76a3SSteven Rostedt 
112059222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
112109c9e84dSFrederic Weisbecker static int rb_cpu_notify(struct notifier_block *self,
1122554f786eSSteven Rostedt 			 unsigned long action, void *hcpu);
1123554f786eSSteven Rostedt #endif
1124554f786eSSteven Rostedt 
11257a8e76a3SSteven Rostedt /**
11267a8e76a3SSteven Rostedt  * ring_buffer_alloc - allocate a new ring_buffer
112768814b58SRobert Richter  * @size: the size in bytes per cpu that is needed.
11287a8e76a3SSteven Rostedt  * @flags: attributes to set for the ring buffer.
11297a8e76a3SSteven Rostedt  *
11307a8e76a3SSteven Rostedt  * Currently the only flag that is available is the RB_FL_OVERWRITE
11317a8e76a3SSteven Rostedt  * flag. This flag means that the buffer will overwrite old data
11327a8e76a3SSteven Rostedt  * when the buffer wraps. If this flag is not set, the buffer will
11337a8e76a3SSteven Rostedt  * drop data when the tail hits the head.
11347a8e76a3SSteven Rostedt  */
11351f8a6a10SPeter Zijlstra struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
11361f8a6a10SPeter Zijlstra 					struct lock_class_key *key)
11377a8e76a3SSteven Rostedt {
11387a8e76a3SSteven Rostedt 	struct ring_buffer *buffer;
11397a8e76a3SSteven Rostedt 	int bsize;
1140438ced17SVaibhav Nagarnaik 	int cpu, nr_pages;
11417a8e76a3SSteven Rostedt 
11427a8e76a3SSteven Rostedt 	/* keep it in its own cache line */
11437a8e76a3SSteven Rostedt 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
11447a8e76a3SSteven Rostedt 			 GFP_KERNEL);
11457a8e76a3SSteven Rostedt 	if (!buffer)
11467a8e76a3SSteven Rostedt 		return NULL;
11477a8e76a3SSteven Rostedt 
11489e01c1b7SRusty Russell 	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
11499e01c1b7SRusty Russell 		goto fail_free_buffer;
11509e01c1b7SRusty Russell 
1151438ced17SVaibhav Nagarnaik 	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
11527a8e76a3SSteven Rostedt 	buffer->flags = flags;
115337886f6aSSteven Rostedt 	buffer->clock = trace_clock_local;
11541f8a6a10SPeter Zijlstra 	buffer->reader_lock_key = key;
11557a8e76a3SSteven Rostedt 
11567a8e76a3SSteven Rostedt 	/* need at least two pages */
1157438ced17SVaibhav Nagarnaik 	if (nr_pages < 2)
1158438ced17SVaibhav Nagarnaik 		nr_pages = 2;
11597a8e76a3SSteven Rostedt 
11603bf832ceSFrederic Weisbecker 	/*
11613bf832ceSFrederic Weisbecker 	 * In the case of a non-hotplug cpu, if the ring buffer is
11623bf832ceSFrederic Weisbecker 	 * allocated in an early initcall, it is not notified of
11633bf832ceSFrederic Weisbecker 	 * secondary cpus, so we must allocate for all possible cpus.
11643bf832ceSFrederic Weisbecker 	 */
11653bf832ceSFrederic Weisbecker #ifdef CONFIG_HOTPLUG_CPU
1166554f786eSSteven Rostedt 	get_online_cpus();
1167554f786eSSteven Rostedt 	cpumask_copy(buffer->cpumask, cpu_online_mask);
11683bf832ceSFrederic Weisbecker #else
11693bf832ceSFrederic Weisbecker 	cpumask_copy(buffer->cpumask, cpu_possible_mask);
11703bf832ceSFrederic Weisbecker #endif
11717a8e76a3SSteven Rostedt 	buffer->cpus = nr_cpu_ids;
11727a8e76a3SSteven Rostedt 
11737a8e76a3SSteven Rostedt 	bsize = sizeof(void *) * nr_cpu_ids;
11747a8e76a3SSteven Rostedt 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
11757a8e76a3SSteven Rostedt 				  GFP_KERNEL);
11767a8e76a3SSteven Rostedt 	if (!buffer->buffers)
11779e01c1b7SRusty Russell 		goto fail_free_cpumask;
11787a8e76a3SSteven Rostedt 
11797a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
11807a8e76a3SSteven Rostedt 		buffer->buffers[cpu] =
1181438ced17SVaibhav Nagarnaik 			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
11827a8e76a3SSteven Rostedt 		if (!buffer->buffers[cpu])
11837a8e76a3SSteven Rostedt 			goto fail_free_buffers;
11847a8e76a3SSteven Rostedt 	}
11857a8e76a3SSteven Rostedt 
118659222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
1187554f786eSSteven Rostedt 	buffer->cpu_notify.notifier_call = rb_cpu_notify;
1188554f786eSSteven Rostedt 	buffer->cpu_notify.priority = 0;
1189554f786eSSteven Rostedt 	register_cpu_notifier(&buffer->cpu_notify);
1190554f786eSSteven Rostedt #endif
1191554f786eSSteven Rostedt 
1192554f786eSSteven Rostedt 	put_online_cpus();
11937a8e76a3SSteven Rostedt 	mutex_init(&buffer->mutex);
11947a8e76a3SSteven Rostedt 
11957a8e76a3SSteven Rostedt 	return buffer;
11967a8e76a3SSteven Rostedt 
11977a8e76a3SSteven Rostedt  fail_free_buffers:
11987a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
11997a8e76a3SSteven Rostedt 		if (buffer->buffers[cpu])
12007a8e76a3SSteven Rostedt 			rb_free_cpu_buffer(buffer->buffers[cpu]);
12017a8e76a3SSteven Rostedt 	}
12027a8e76a3SSteven Rostedt 	kfree(buffer->buffers);
12037a8e76a3SSteven Rostedt 
12049e01c1b7SRusty Russell  fail_free_cpumask:
12059e01c1b7SRusty Russell 	free_cpumask_var(buffer->cpumask);
1206554f786eSSteven Rostedt 	put_online_cpus();
12079e01c1b7SRusty Russell 
12087a8e76a3SSteven Rostedt  fail_free_buffer:
12097a8e76a3SSteven Rostedt 	kfree(buffer);
12107a8e76a3SSteven Rostedt 	return NULL;
12117a8e76a3SSteven Rostedt }
12121f8a6a10SPeter Zijlstra EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
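/*
 * A minimal usage sketch, assuming the ring_buffer_alloc() wrapper
 * macro from <linux/ring_buffer.h> that supplies the lock_class_key;
 * the 1MB size is an arbitrary example value.
 */
#if 0
	struct ring_buffer *rb;

	rb = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
	if (!rb)
		return -ENOMEM;
	/* ... write to and read from rb ... */
	ring_buffer_free(rb);
#endif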
12137a8e76a3SSteven Rostedt 
12147a8e76a3SSteven Rostedt /**
12157a8e76a3SSteven Rostedt  * ring_buffer_free - free a ring buffer.
12167a8e76a3SSteven Rostedt  * @buffer: the buffer to free.
12177a8e76a3SSteven Rostedt  */
12187a8e76a3SSteven Rostedt void
12197a8e76a3SSteven Rostedt ring_buffer_free(struct ring_buffer *buffer)
12207a8e76a3SSteven Rostedt {
12217a8e76a3SSteven Rostedt 	int cpu;
12227a8e76a3SSteven Rostedt 
1223554f786eSSteven Rostedt 	get_online_cpus();
1224554f786eSSteven Rostedt 
122559222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
1226554f786eSSteven Rostedt 	unregister_cpu_notifier(&buffer->cpu_notify);
1227554f786eSSteven Rostedt #endif
1228554f786eSSteven Rostedt 
12297a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu)
12307a8e76a3SSteven Rostedt 		rb_free_cpu_buffer(buffer->buffers[cpu]);
12317a8e76a3SSteven Rostedt 
1232554f786eSSteven Rostedt 	put_online_cpus();
1233554f786eSSteven Rostedt 
1234bd3f0221SEric Dumazet 	kfree(buffer->buffers);
12359e01c1b7SRusty Russell 	free_cpumask_var(buffer->cpumask);
12369e01c1b7SRusty Russell 
12377a8e76a3SSteven Rostedt 	kfree(buffer);
12387a8e76a3SSteven Rostedt }
1239c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_free);
12407a8e76a3SSteven Rostedt 
124137886f6aSSteven Rostedt void ring_buffer_set_clock(struct ring_buffer *buffer,
124237886f6aSSteven Rostedt 			   u64 (*clock)(void))
124337886f6aSSteven Rostedt {
124437886f6aSSteven Rostedt 	buffer->clock = clock;
124537886f6aSSteven Rostedt }
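/*
 * A minimal sketch of overriding the default trace_clock_local,
 * assuming the trace_clock_global() helper from trace_clock.c:
 *
 *	ring_buffer_set_clock(buffer, trace_clock_global);
 */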
124637886f6aSSteven Rostedt 
12477a8e76a3SSteven Rostedt static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
12487a8e76a3SSteven Rostedt 
124983f40318SVaibhav Nagarnaik static inline unsigned long rb_page_entries(struct buffer_page *bpage)
12507a8e76a3SSteven Rostedt {
125183f40318SVaibhav Nagarnaik 	return local_read(&bpage->entries) & RB_WRITE_MASK;
125283f40318SVaibhav Nagarnaik }
125383f40318SVaibhav Nagarnaik 
125483f40318SVaibhav Nagarnaik static inline unsigned long rb_page_write(struct buffer_page *bpage)
125583f40318SVaibhav Nagarnaik {
125683f40318SVaibhav Nagarnaik 	return local_read(&bpage->write) & RB_WRITE_MASK;
125783f40318SVaibhav Nagarnaik }
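/*
 * A hedged sketch of why the masking above is needed: the entries and
 * write fields carry their counts only in the low RB_WRITE_MASK bits.
 * The bits above the mask are bumped internally when a page cycles, so
 * a stale compare-and-exchange on the field fails rather than
 * corrupting the count:
 *
 *	val   = local_read(&bpage->write);
 *	count = val & RB_WRITE_MASK;	-- byte offset actually written
 *	tag   = val & ~RB_WRITE_MASK;	-- changes as the page cycles
 */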
125883f40318SVaibhav Nagarnaik 
12595040b4b7SVaibhav Nagarnaik static int
126083f40318SVaibhav Nagarnaik rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
126183f40318SVaibhav Nagarnaik {
126283f40318SVaibhav Nagarnaik 	struct list_head *tail_page, *to_remove, *next_page;
126383f40318SVaibhav Nagarnaik 	struct buffer_page *to_remove_page, *tmp_iter_page;
126483f40318SVaibhav Nagarnaik 	struct buffer_page *last_page, *first_page;
126583f40318SVaibhav Nagarnaik 	unsigned int nr_removed;
126683f40318SVaibhav Nagarnaik 	unsigned long head_bit;
126783f40318SVaibhav Nagarnaik 	int page_entries;
126883f40318SVaibhav Nagarnaik 
126983f40318SVaibhav Nagarnaik 	head_bit = 0;
12707a8e76a3SSteven Rostedt 
12715389f6faSThomas Gleixner 	raw_spin_lock_irq(&cpu_buffer->reader_lock);
127283f40318SVaibhav Nagarnaik 	atomic_inc(&cpu_buffer->record_disabled);
127383f40318SVaibhav Nagarnaik 	/*
127483f40318SVaibhav Nagarnaik 	 * We don't race with the readers since we have acquired the reader
127583f40318SVaibhav Nagarnaik 	 * lock. We also don't race with writers after disabling recording.
127683f40318SVaibhav Nagarnaik 	 * This makes it easy to figure out the first and the last page to be
127783f40318SVaibhav Nagarnaik 	 * removed from the list. We unlink all the pages in between including
127883f40318SVaibhav Nagarnaik 	 * the first and last pages. This is done in a busy loop so that we
127983f40318SVaibhav Nagarnaik 	 * lose as few trace entries as possible.
128083f40318SVaibhav Nagarnaik 	 * The pages are freed after we restart recording and unlock readers.
128183f40318SVaibhav Nagarnaik 	 */
128283f40318SVaibhav Nagarnaik 	tail_page = &cpu_buffer->tail_page->list;
128377ae365eSSteven Rostedt 
128483f40318SVaibhav Nagarnaik 	/*
128583f40318SVaibhav Nagarnaik 	 * tail page might be on reader page, we remove the next page
128683f40318SVaibhav Nagarnaik 	 * from the ring buffer
128783f40318SVaibhav Nagarnaik 	 */
128883f40318SVaibhav Nagarnaik 	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
128983f40318SVaibhav Nagarnaik 		tail_page = rb_list_head(tail_page->next);
129083f40318SVaibhav Nagarnaik 	to_remove = tail_page;
129183f40318SVaibhav Nagarnaik 
129283f40318SVaibhav Nagarnaik 	/* start of pages to remove */
129383f40318SVaibhav Nagarnaik 	first_page = list_entry(rb_list_head(to_remove->next),
129483f40318SVaibhav Nagarnaik 				struct buffer_page, list);
129583f40318SVaibhav Nagarnaik 
129683f40318SVaibhav Nagarnaik 	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
129783f40318SVaibhav Nagarnaik 		to_remove = rb_list_head(to_remove)->next;
129883f40318SVaibhav Nagarnaik 		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
12997a8e76a3SSteven Rostedt 	}
13007a8e76a3SSteven Rostedt 
130183f40318SVaibhav Nagarnaik 	next_page = rb_list_head(to_remove)->next;
13027a8e76a3SSteven Rostedt 
130383f40318SVaibhav Nagarnaik 	/*
130483f40318SVaibhav Nagarnaik 	 * Now we remove all pages between tail_page and next_page.
130583f40318SVaibhav Nagarnaik 	 * Make sure that we have head_bit value preserved for the
130683f40318SVaibhav Nagarnaik 	 * next page
130783f40318SVaibhav Nagarnaik 	 */
130883f40318SVaibhav Nagarnaik 	tail_page->next = (struct list_head *)((unsigned long)next_page |
130983f40318SVaibhav Nagarnaik 						head_bit);
131083f40318SVaibhav Nagarnaik 	next_page = rb_list_head(next_page);
131183f40318SVaibhav Nagarnaik 	next_page->prev = tail_page;
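	/*
	 * A minimal sketch of the pointer tagging used above: state
	 * flags live in the low bits of the ->next address, and
	 * rb_list_head() masks them off before the pointer is used:
	 *
	 *	tagged = (struct list_head *)
	 *		 ((unsigned long)page | RB_PAGE_HEAD);
	 *	plain  = rb_list_head(tagged);
	 *
	 * This is why every raw ->next read in this file goes through
	 * rb_list_head() before being dereferenced.
	 */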
131283f40318SVaibhav Nagarnaik 
131383f40318SVaibhav Nagarnaik 	/* make sure pages points to a valid page in the ring buffer */
131483f40318SVaibhav Nagarnaik 	cpu_buffer->pages = next_page;
131583f40318SVaibhav Nagarnaik 
131683f40318SVaibhav Nagarnaik 	/* update head page */
131783f40318SVaibhav Nagarnaik 	if (head_bit)
131883f40318SVaibhav Nagarnaik 		cpu_buffer->head_page = list_entry(next_page,
131983f40318SVaibhav Nagarnaik 						struct buffer_page, list);
132083f40318SVaibhav Nagarnaik 
132183f40318SVaibhav Nagarnaik 	/*
132283f40318SVaibhav Nagarnaik 	 * change read pointer to make sure any read iterators reset
132383f40318SVaibhav Nagarnaik 	 * themselves
132483f40318SVaibhav Nagarnaik 	 */
132583f40318SVaibhav Nagarnaik 	cpu_buffer->read = 0;
132683f40318SVaibhav Nagarnaik 
132783f40318SVaibhav Nagarnaik 	/* pages are removed, resume tracing and then free the pages */
132883f40318SVaibhav Nagarnaik 	atomic_dec(&cpu_buffer->record_disabled);
13295389f6faSThomas Gleixner 	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
133083f40318SVaibhav Nagarnaik 
133183f40318SVaibhav Nagarnaik 	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
133283f40318SVaibhav Nagarnaik 
133383f40318SVaibhav Nagarnaik 	/* last buffer page to remove */
133483f40318SVaibhav Nagarnaik 	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
133583f40318SVaibhav Nagarnaik 				list);
133683f40318SVaibhav Nagarnaik 	tmp_iter_page = first_page;
133783f40318SVaibhav Nagarnaik 
133883f40318SVaibhav Nagarnaik 	do {
133983f40318SVaibhav Nagarnaik 		to_remove_page = tmp_iter_page;
134083f40318SVaibhav Nagarnaik 		rb_inc_page(cpu_buffer, &tmp_iter_page);
134183f40318SVaibhav Nagarnaik 
134283f40318SVaibhav Nagarnaik 		/* update the counters */
134383f40318SVaibhav Nagarnaik 		page_entries = rb_page_entries(to_remove_page);
134483f40318SVaibhav Nagarnaik 		if (page_entries) {
134583f40318SVaibhav Nagarnaik 			/*
134683f40318SVaibhav Nagarnaik 			 * If something was added to this page, it was full
134783f40318SVaibhav Nagarnaik 			 * since it is not the tail page. So we deduct one
134883f40318SVaibhav Nagarnaik 			 * full page of bytes from the ring buffer here.
134983f40318SVaibhav Nagarnaik 			 * No need to update overruns, since this page is
135083f40318SVaibhav Nagarnaik 			 * deleted from ring buffer and its entries are
135183f40318SVaibhav Nagarnaik 			 * already accounted for.
135283f40318SVaibhav Nagarnaik 			 */
135383f40318SVaibhav Nagarnaik 			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
135483f40318SVaibhav Nagarnaik 		}
135583f40318SVaibhav Nagarnaik 
135683f40318SVaibhav Nagarnaik 		/*
135783f40318SVaibhav Nagarnaik 		 * We have already removed references to this list item, just
135883f40318SVaibhav Nagarnaik 		 * free up the buffer_page and its page
135983f40318SVaibhav Nagarnaik 		 */
136083f40318SVaibhav Nagarnaik 		free_buffer_page(to_remove_page);
136183f40318SVaibhav Nagarnaik 		nr_removed--;
136283f40318SVaibhav Nagarnaik 
136383f40318SVaibhav Nagarnaik 	} while (to_remove_page != last_page);
136483f40318SVaibhav Nagarnaik 
136583f40318SVaibhav Nagarnaik 	RB_WARN_ON(cpu_buffer, nr_removed);
13665040b4b7SVaibhav Nagarnaik 
13675040b4b7SVaibhav Nagarnaik 	return nr_removed == 0;
13687a8e76a3SSteven Rostedt }
13697a8e76a3SSteven Rostedt 
13705040b4b7SVaibhav Nagarnaik static int
13715040b4b7SVaibhav Nagarnaik rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
13727a8e76a3SSteven Rostedt {
13735040b4b7SVaibhav Nagarnaik 	struct list_head *pages = &cpu_buffer->new_pages;
13745040b4b7SVaibhav Nagarnaik 	int retries, success;
13757a8e76a3SSteven Rostedt 
13765389f6faSThomas Gleixner 	raw_spin_lock_irq(&cpu_buffer->reader_lock);
13775040b4b7SVaibhav Nagarnaik 	/*
13785040b4b7SVaibhav Nagarnaik 	 * We are holding the reader lock, so the reader page won't be swapped
13795040b4b7SVaibhav Nagarnaik 	 * in the ring buffer. Now we are racing with the writer trying to
13805040b4b7SVaibhav Nagarnaik 	 * move head page and the tail page.
13815040b4b7SVaibhav Nagarnaik 	 * We are going to adapt the reader page update process where:
13825040b4b7SVaibhav Nagarnaik 	 * 1. We first splice the start and end of list of new pages between
13835040b4b7SVaibhav Nagarnaik 	 *    the head page and its previous page.
13845040b4b7SVaibhav Nagarnaik 	 * 2. We cmpxchg the prev_page->next to point from head page to the
13855040b4b7SVaibhav Nagarnaik 	 *    start of new pages list.
13865040b4b7SVaibhav Nagarnaik 	 * 3. Finally, we update the head->prev to the end of new list.
13875040b4b7SVaibhav Nagarnaik 	 *
13885040b4b7SVaibhav Nagarnaik 	 * We will try this process 10 times, to make sure that we don't keep
13895040b4b7SVaibhav Nagarnaik 	 * spinning.
13905040b4b7SVaibhav Nagarnaik 	 */
13915040b4b7SVaibhav Nagarnaik 	retries = 10;
13925040b4b7SVaibhav Nagarnaik 	success = 0;
13935040b4b7SVaibhav Nagarnaik 	while (retries--) {
13945040b4b7SVaibhav Nagarnaik 		struct list_head *head_page, *prev_page, *r;
13955040b4b7SVaibhav Nagarnaik 		struct list_head *last_page, *first_page;
13965040b4b7SVaibhav Nagarnaik 		struct list_head *head_page_with_bit;
139777ae365eSSteven Rostedt 
13985040b4b7SVaibhav Nagarnaik 		head_page = &rb_set_head_page(cpu_buffer)->list;
13995040b4b7SVaibhav Nagarnaik 		prev_page = head_page->prev;
14005040b4b7SVaibhav Nagarnaik 
14015040b4b7SVaibhav Nagarnaik 		first_page = pages->next;
14025040b4b7SVaibhav Nagarnaik 		last_page  = pages->prev;
14035040b4b7SVaibhav Nagarnaik 
14045040b4b7SVaibhav Nagarnaik 		head_page_with_bit = (struct list_head *)
14055040b4b7SVaibhav Nagarnaik 				     ((unsigned long)head_page | RB_PAGE_HEAD);
14065040b4b7SVaibhav Nagarnaik 
14075040b4b7SVaibhav Nagarnaik 		last_page->next = head_page_with_bit;
14085040b4b7SVaibhav Nagarnaik 		first_page->prev = prev_page;
14095040b4b7SVaibhav Nagarnaik 
14105040b4b7SVaibhav Nagarnaik 		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
14115040b4b7SVaibhav Nagarnaik 
14125040b4b7SVaibhav Nagarnaik 		if (r == head_page_with_bit) {
14135040b4b7SVaibhav Nagarnaik 			/*
14145040b4b7SVaibhav Nagarnaik 			 * yay, we replaced the page pointer with our new list;
14155040b4b7SVaibhav Nagarnaik 			 * now we just have to update the head page's prev
14165040b4b7SVaibhav Nagarnaik 			 * pointer to point to the end of the list
14175040b4b7SVaibhav Nagarnaik 			 */
14185040b4b7SVaibhav Nagarnaik 			head_page->prev = last_page;
14195040b4b7SVaibhav Nagarnaik 			success = 1;
14205040b4b7SVaibhav Nagarnaik 			break;
14217a8e76a3SSteven Rostedt 		}
14225040b4b7SVaibhav Nagarnaik 	}
14237a8e76a3SSteven Rostedt 
14245040b4b7SVaibhav Nagarnaik 	if (success)
14255040b4b7SVaibhav Nagarnaik 		INIT_LIST_HEAD(pages);
14265040b4b7SVaibhav Nagarnaik 	/*
14275040b4b7SVaibhav Nagarnaik 	 * If we weren't successful in adding the new pages, warn and stop
14285040b4b7SVaibhav Nagarnaik 	 * tracing
14295040b4b7SVaibhav Nagarnaik 	 */
14305040b4b7SVaibhav Nagarnaik 	RB_WARN_ON(cpu_buffer, !success);
14315389f6faSThomas Gleixner 	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
14325040b4b7SVaibhav Nagarnaik 
14335040b4b7SVaibhav Nagarnaik 	/* free pages if they weren't inserted */
14345040b4b7SVaibhav Nagarnaik 	if (!success) {
14355040b4b7SVaibhav Nagarnaik 		struct buffer_page *bpage, *tmp;
14365040b4b7SVaibhav Nagarnaik 		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
14375040b4b7SVaibhav Nagarnaik 					 list) {
14385040b4b7SVaibhav Nagarnaik 			list_del_init(&bpage->list);
14395040b4b7SVaibhav Nagarnaik 			free_buffer_page(bpage);
14405040b4b7SVaibhav Nagarnaik 		}
14415040b4b7SVaibhav Nagarnaik 	}
14425040b4b7SVaibhav Nagarnaik 	return success;
14437a8e76a3SSteven Rostedt }
14447a8e76a3SSteven Rostedt 
144583f40318SVaibhav Nagarnaik static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1446438ced17SVaibhav Nagarnaik {
14475040b4b7SVaibhav Nagarnaik 	int success;
144883f40318SVaibhav Nagarnaik 
14495040b4b7SVaibhav Nagarnaik 	if (cpu_buffer->nr_pages_to_update > 0)
14505040b4b7SVaibhav Nagarnaik 		success = rb_insert_pages(cpu_buffer);
14515040b4b7SVaibhav Nagarnaik 	else
14525040b4b7SVaibhav Nagarnaik 		success = rb_remove_pages(cpu_buffer,
14535040b4b7SVaibhav Nagarnaik 					-cpu_buffer->nr_pages_to_update);
14545040b4b7SVaibhav Nagarnaik 
14555040b4b7SVaibhav Nagarnaik 	if (success)
1456438ced17SVaibhav Nagarnaik 		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
145783f40318SVaibhav Nagarnaik }
145883f40318SVaibhav Nagarnaik 
145983f40318SVaibhav Nagarnaik static void update_pages_handler(struct work_struct *work)
146083f40318SVaibhav Nagarnaik {
146183f40318SVaibhav Nagarnaik 	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
146283f40318SVaibhav Nagarnaik 			struct ring_buffer_per_cpu, update_pages_work);
146383f40318SVaibhav Nagarnaik 	rb_update_pages(cpu_buffer);
146483f40318SVaibhav Nagarnaik 	complete(&cpu_buffer->update_completion);
1465438ced17SVaibhav Nagarnaik }
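/*
 * A sketch of how the handler above is driven (matching the calls in
 * ring_buffer_resize() below), so the page-list surgery always runs
 * on the CPU that owns the per-cpu buffer:
 *
 *	schedule_work_on(cpu, &cpu_buffer->update_pages_work);
 *	wait_for_completion(&cpu_buffer->update_completion);
 */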
1466438ced17SVaibhav Nagarnaik 
14677a8e76a3SSteven Rostedt /**
14687a8e76a3SSteven Rostedt  * ring_buffer_resize - resize the ring buffer
14697a8e76a3SSteven Rostedt  * @buffer: the buffer to resize.
14707a8e76a3SSteven Rostedt  * @size: the new size.
 * @cpu_id: the cpu buffer to resize, or RING_BUFFER_ALL_CPUS for all
14717a8e76a3SSteven Rostedt  *
14727a8e76a3SSteven Rostedt  * Minimum size is 2 * BUF_PAGE_SIZE.
14737a8e76a3SSteven Rostedt  *
147483f40318SVaibhav Nagarnaik  * Returns the new size on success and < 0 on failure.
14757a8e76a3SSteven Rostedt  */
1476438ced17SVaibhav Nagarnaik int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1477438ced17SVaibhav Nagarnaik 			int cpu_id)
14787a8e76a3SSteven Rostedt {
14797a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
1480438ced17SVaibhav Nagarnaik 	unsigned nr_pages;
148183f40318SVaibhav Nagarnaik 	int cpu, err = 0;
14827a8e76a3SSteven Rostedt 
1483ee51a1deSIngo Molnar 	/*
1484ee51a1deSIngo Molnar 	 * Always succeed at resizing a non-existent buffer:
1485ee51a1deSIngo Molnar 	 */
1486ee51a1deSIngo Molnar 	if (!buffer)
1487ee51a1deSIngo Molnar 		return size;
1488ee51a1deSIngo Molnar 
14897a8e76a3SSteven Rostedt 	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
14907a8e76a3SSteven Rostedt 	size *= BUF_PAGE_SIZE;
14917a8e76a3SSteven Rostedt 
14927a8e76a3SSteven Rostedt 	/* we need a minimum of two pages */
14937a8e76a3SSteven Rostedt 	if (size < BUF_PAGE_SIZE * 2)
14947a8e76a3SSteven Rostedt 		size = BUF_PAGE_SIZE * 2;
14957a8e76a3SSteven Rostedt 
14967a8e76a3SSteven Rostedt 	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
14977a8e76a3SSteven Rostedt 
149883f40318SVaibhav Nagarnaik 	/*
149983f40318SVaibhav Nagarnaik 	 * Don't succeed if resizing is disabled, as a reader might be
150083f40318SVaibhav Nagarnaik 	 * manipulating the ring buffer and is expecting a sane state while
150183f40318SVaibhav Nagarnaik 	 * this is true.
150283f40318SVaibhav Nagarnaik 	 */
150383f40318SVaibhav Nagarnaik 	if (atomic_read(&buffer->resize_disabled))
150483f40318SVaibhav Nagarnaik 		return -EBUSY;
150583f40318SVaibhav Nagarnaik 
150683f40318SVaibhav Nagarnaik 	/* prevent another thread from changing buffer sizes */
150783f40318SVaibhav Nagarnaik 	mutex_lock(&buffer->mutex);
150883f40318SVaibhav Nagarnaik 
1509438ced17SVaibhav Nagarnaik 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
1510438ced17SVaibhav Nagarnaik 		/* calculate the pages to update */
15117a8e76a3SSteven Rostedt 		for_each_buffer_cpu(buffer, cpu) {
15127a8e76a3SSteven Rostedt 			cpu_buffer = buffer->buffers[cpu];
1513438ced17SVaibhav Nagarnaik 
1514438ced17SVaibhav Nagarnaik 			cpu_buffer->nr_pages_to_update = nr_pages -
1515438ced17SVaibhav Nagarnaik 							cpu_buffer->nr_pages;
1516438ced17SVaibhav Nagarnaik 			/*
1517438ced17SVaibhav Nagarnaik 			 * nothing more to do when removing pages, or if no update is needed
1518438ced17SVaibhav Nagarnaik 			 */
1519438ced17SVaibhav Nagarnaik 			if (cpu_buffer->nr_pages_to_update <= 0)
1520438ced17SVaibhav Nagarnaik 				continue;
1521438ced17SVaibhav Nagarnaik 			/*
1522438ced17SVaibhav Nagarnaik 			 * to add pages, make sure all new pages can be
1523438ced17SVaibhav Nagarnaik 			 * allocated without receiving ENOMEM
1524438ced17SVaibhav Nagarnaik 			 */
1525438ced17SVaibhav Nagarnaik 			INIT_LIST_HEAD(&cpu_buffer->new_pages);
1526438ced17SVaibhav Nagarnaik 			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
152783f40318SVaibhav Nagarnaik 						&cpu_buffer->new_pages, cpu)) {
1528438ced17SVaibhav Nagarnaik 				/* not enough memory for new pages */
152983f40318SVaibhav Nagarnaik 				err = -ENOMEM;
153083f40318SVaibhav Nagarnaik 				goto out_err;
153183f40318SVaibhav Nagarnaik 			}
153283f40318SVaibhav Nagarnaik 		}
153383f40318SVaibhav Nagarnaik 
153483f40318SVaibhav Nagarnaik 		get_online_cpus();
153583f40318SVaibhav Nagarnaik 		/*
153683f40318SVaibhav Nagarnaik 		 * Fire off all the required work handlers;
153783f40318SVaibhav Nagarnaik 		 * look out for offline CPUs.
153883f40318SVaibhav Nagarnaik 		 */
153983f40318SVaibhav Nagarnaik 		for_each_buffer_cpu(buffer, cpu) {
154083f40318SVaibhav Nagarnaik 			cpu_buffer = buffer->buffers[cpu];
154183f40318SVaibhav Nagarnaik 			if (!cpu_buffer->nr_pages_to_update ||
154283f40318SVaibhav Nagarnaik 			    !cpu_online(cpu))
154383f40318SVaibhav Nagarnaik 				continue;
154483f40318SVaibhav Nagarnaik 
154583f40318SVaibhav Nagarnaik 			schedule_work_on(cpu, &cpu_buffer->update_pages_work);
154683f40318SVaibhav Nagarnaik 		}
154783f40318SVaibhav Nagarnaik 		/*
154883f40318SVaibhav Nagarnaik 		 * This loop is for the CPUs that are not online.
154983f40318SVaibhav Nagarnaik 		 * We can't schedule anything on them, but it's not necessary
155083f40318SVaibhav Nagarnaik 		 * since we can change their buffer sizes without any race.
155183f40318SVaibhav Nagarnaik 		 */
155283f40318SVaibhav Nagarnaik 		for_each_buffer_cpu(buffer, cpu) {
155383f40318SVaibhav Nagarnaik 			cpu_buffer = buffer->buffers[cpu];
155483f40318SVaibhav Nagarnaik 			if (!cpu_buffer->nr_pages_to_update ||
155583f40318SVaibhav Nagarnaik 			    cpu_online(cpu))
155683f40318SVaibhav Nagarnaik 				continue;
155783f40318SVaibhav Nagarnaik 
155883f40318SVaibhav Nagarnaik 			rb_update_pages(cpu_buffer);
15597a8e76a3SSteven Rostedt 		}
1560438ced17SVaibhav Nagarnaik 
1561438ced17SVaibhav Nagarnaik 		/* wait for all the updates to complete */
1562438ced17SVaibhav Nagarnaik 		for_each_buffer_cpu(buffer, cpu) {
1563438ced17SVaibhav Nagarnaik 			cpu_buffer = buffer->buffers[cpu];
156483f40318SVaibhav Nagarnaik 			if (!cpu_buffer->nr_pages_to_update ||
156583f40318SVaibhav Nagarnaik 			    !cpu_online(cpu))
156683f40318SVaibhav Nagarnaik 				continue;
156783f40318SVaibhav Nagarnaik 
156883f40318SVaibhav Nagarnaik 			wait_for_completion(&cpu_buffer->update_completion);
156983f40318SVaibhav Nagarnaik 			/* reset this value */
157083f40318SVaibhav Nagarnaik 			cpu_buffer->nr_pages_to_update = 0;
1571438ced17SVaibhav Nagarnaik 		}
157283f40318SVaibhav Nagarnaik 
157383f40318SVaibhav Nagarnaik 		put_online_cpus();
1574438ced17SVaibhav Nagarnaik 	} else {
1575438ced17SVaibhav Nagarnaik 		cpu_buffer = buffer->buffers[cpu_id];
157683f40318SVaibhav Nagarnaik 
1577438ced17SVaibhav Nagarnaik 		if (nr_pages == cpu_buffer->nr_pages)
15787a8e76a3SSteven Rostedt 			goto out;
1579438ced17SVaibhav Nagarnaik 
1580438ced17SVaibhav Nagarnaik 		cpu_buffer->nr_pages_to_update = nr_pages -
1581438ced17SVaibhav Nagarnaik 						cpu_buffer->nr_pages;
1582438ced17SVaibhav Nagarnaik 
1583438ced17SVaibhav Nagarnaik 		INIT_LIST_HEAD(&cpu_buffer->new_pages);
1584438ced17SVaibhav Nagarnaik 		if (cpu_buffer->nr_pages_to_update > 0 &&
1585438ced17SVaibhav Nagarnaik 			__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
158683f40318SVaibhav Nagarnaik 					    &cpu_buffer->new_pages, cpu_id)) {
158783f40318SVaibhav Nagarnaik 			err = -ENOMEM;
158883f40318SVaibhav Nagarnaik 			goto out_err;
158983f40318SVaibhav Nagarnaik 		}
1590438ced17SVaibhav Nagarnaik 
159183f40318SVaibhav Nagarnaik 		get_online_cpus();
159283f40318SVaibhav Nagarnaik 
159383f40318SVaibhav Nagarnaik 		if (cpu_online(cpu_id)) {
159483f40318SVaibhav Nagarnaik 			schedule_work_on(cpu_id,
159583f40318SVaibhav Nagarnaik 					 &cpu_buffer->update_pages_work);
159683f40318SVaibhav Nagarnaik 			wait_for_completion(&cpu_buffer->update_completion);
159783f40318SVaibhav Nagarnaik 		} else
159883f40318SVaibhav Nagarnaik 			rb_update_pages(cpu_buffer);
159983f40318SVaibhav Nagarnaik 
160083f40318SVaibhav Nagarnaik 		put_online_cpus();
160183f40318SVaibhav Nagarnaik 		/* reset this value */
160283f40318SVaibhav Nagarnaik 		cpu_buffer->nr_pages_to_update = 0;
16037a8e76a3SSteven Rostedt 	}
16047a8e76a3SSteven Rostedt 
16057a8e76a3SSteven Rostedt  out:
1606659f451fSSteven Rostedt 	/*
1607659f451fSSteven Rostedt 	 * The ring buffer resize can happen with the ring buffer
1608659f451fSSteven Rostedt 	 * enabled, so that the update disturbs the tracing as little
1609659f451fSSteven Rostedt 	 * as possible. But if the buffer is disabled, we do not need
1610659f451fSSteven Rostedt 	 * to worry about that, and we can take the time to verify
1611659f451fSSteven Rostedt 	 * that the buffer is not corrupt.
1612659f451fSSteven Rostedt 	 */
1613659f451fSSteven Rostedt 	if (atomic_read(&buffer->record_disabled)) {
1614659f451fSSteven Rostedt 		atomic_inc(&buffer->record_disabled);
1615659f451fSSteven Rostedt 		/*
1616659f451fSSteven Rostedt 		 * Even though the buffer was disabled, we must make sure
1617659f451fSSteven Rostedt 		 * that it is truly disabled before calling rb_check_pages.
1618659f451fSSteven Rostedt 		 * There could have been a race between checking
1619659f451fSSteven Rostedt 		 * record_disabled and incrementing it.
1620659f451fSSteven Rostedt 		 */
1621659f451fSSteven Rostedt 		synchronize_sched();
1622659f451fSSteven Rostedt 		for_each_buffer_cpu(buffer, cpu) {
1623659f451fSSteven Rostedt 			cpu_buffer = buffer->buffers[cpu];
1624659f451fSSteven Rostedt 			rb_check_pages(cpu_buffer);
1625659f451fSSteven Rostedt 		}
1626659f451fSSteven Rostedt 		atomic_dec(&buffer->record_disabled);
1627659f451fSSteven Rostedt 	}
1628659f451fSSteven Rostedt 
16297a8e76a3SSteven Rostedt 	mutex_unlock(&buffer->mutex);
16307a8e76a3SSteven Rostedt 	return size;
16317a8e76a3SSteven Rostedt 
163283f40318SVaibhav Nagarnaik  out_err:
1633438ced17SVaibhav Nagarnaik 	for_each_buffer_cpu(buffer, cpu) {
1634438ced17SVaibhav Nagarnaik 		struct buffer_page *bpage, *tmp;
163583f40318SVaibhav Nagarnaik 
1636438ced17SVaibhav Nagarnaik 		cpu_buffer = buffer->buffers[cpu];
1637438ced17SVaibhav Nagarnaik 		cpu_buffer->nr_pages_to_update = 0;
163883f40318SVaibhav Nagarnaik 
1639438ced17SVaibhav Nagarnaik 		if (list_empty(&cpu_buffer->new_pages))
1640438ced17SVaibhav Nagarnaik 			continue;
164183f40318SVaibhav Nagarnaik 
1642438ced17SVaibhav Nagarnaik 		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1643438ced17SVaibhav Nagarnaik 					list) {
1644044fa782SSteven Rostedt 			list_del_init(&bpage->list);
1645044fa782SSteven Rostedt 			free_buffer_page(bpage);
16467a8e76a3SSteven Rostedt 		}
1647438ced17SVaibhav Nagarnaik 	}
1648641d2f63SVegard Nossum 	mutex_unlock(&buffer->mutex);
164983f40318SVaibhav Nagarnaik 	return err;
16507a8e76a3SSteven Rostedt }
1651c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_resize);
16527a8e76a3SSteven Rostedt 
1653750912faSDavid Sharp void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1654750912faSDavid Sharp {
1655750912faSDavid Sharp 	mutex_lock(&buffer->mutex);
1656750912faSDavid Sharp 	if (val)
1657750912faSDavid Sharp 		buffer->flags |= RB_FL_OVERWRITE;
1658750912faSDavid Sharp 	else
1659750912faSDavid Sharp 		buffer->flags &= ~RB_FL_OVERWRITE;
1660750912faSDavid Sharp 	mutex_unlock(&buffer->mutex);
1661750912faSDavid Sharp }
1662750912faSDavid Sharp EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
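/*
 * A minimal usage sketch: switching a live buffer out of
 * flight-recorder mode, so the producer drops new events instead of
 * overwriting old ones once the buffer is full:
 *
 *	ring_buffer_change_overwrite(buffer, 0);
 */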
1663750912faSDavid Sharp 
16648789a9e7SSteven Rostedt static inline void *
1665044fa782SSteven Rostedt __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
16668789a9e7SSteven Rostedt {
1667044fa782SSteven Rostedt 	return bpage->data + index;
16688789a9e7SSteven Rostedt }
16698789a9e7SSteven Rostedt 
1670044fa782SSteven Rostedt static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
16717a8e76a3SSteven Rostedt {
1672044fa782SSteven Rostedt 	return bpage->page->data + index;
16737a8e76a3SSteven Rostedt }
16747a8e76a3SSteven Rostedt 
16757a8e76a3SSteven Rostedt static inline struct ring_buffer_event *
1676d769041fSSteven Rostedt rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
16777a8e76a3SSteven Rostedt {
16786f807acdSSteven Rostedt 	return __rb_page_index(cpu_buffer->reader_page,
16796f807acdSSteven Rostedt 			       cpu_buffer->reader_page->read);
16806f807acdSSteven Rostedt }
16816f807acdSSteven Rostedt 
16826f807acdSSteven Rostedt static inline struct ring_buffer_event *
16837a8e76a3SSteven Rostedt rb_iter_head_event(struct ring_buffer_iter *iter)
16847a8e76a3SSteven Rostedt {
16856f807acdSSteven Rostedt 	return __rb_page_index(iter->head_page, iter->head);
16867a8e76a3SSteven Rostedt }
16877a8e76a3SSteven Rostedt 
1688bf41a158SSteven Rostedt static inline unsigned rb_page_commit(struct buffer_page *bpage)
1689bf41a158SSteven Rostedt {
1690abc9b56dSSteven Rostedt 	return local_read(&bpage->page->commit);
1691bf41a158SSteven Rostedt }
1692bf41a158SSteven Rostedt 
169325985edcSLucas De Marchi /* Size is determined by what has been committed */
1694bf41a158SSteven Rostedt static inline unsigned rb_page_size(struct buffer_page *bpage)
1695bf41a158SSteven Rostedt {
1696bf41a158SSteven Rostedt 	return rb_page_commit(bpage);
1697bf41a158SSteven Rostedt }
1698bf41a158SSteven Rostedt 
1699bf41a158SSteven Rostedt static inline unsigned
1700bf41a158SSteven Rostedt rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1701bf41a158SSteven Rostedt {
1702bf41a158SSteven Rostedt 	return rb_page_commit(cpu_buffer->commit_page);
1703bf41a158SSteven Rostedt }
1704bf41a158SSteven Rostedt 
1705bf41a158SSteven Rostedt static inline unsigned
1706bf41a158SSteven Rostedt rb_event_index(struct ring_buffer_event *event)
17077a8e76a3SSteven Rostedt {
1708bf41a158SSteven Rostedt 	unsigned long addr = (unsigned long)event;
1709bf41a158SSteven Rostedt 
171022f470f8SSteven Rostedt 	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
17117a8e76a3SSteven Rostedt }
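/*
 * A worked example of the arithmetic above, assuming 4K pages and a
 * 16-byte buffer_data_page header (time_stamp + commit on 64-bit):
 * an event at page offset 0x130 has
 *
 *	(addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE = 0x130 - 0x10 = 0x120
 *
 * i.e. its byte offset within the page's data[] array.
 */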
17127a8e76a3SSteven Rostedt 
17130f0c85fcSSteven Rostedt static inline int
1714fa743953SSteven Rostedt rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1715bf41a158SSteven Rostedt 		   struct ring_buffer_event *event)
17167a8e76a3SSteven Rostedt {
1717bf41a158SSteven Rostedt 	unsigned long addr = (unsigned long)event;
1718bf41a158SSteven Rostedt 	unsigned long index;
1719bf41a158SSteven Rostedt 
1720bf41a158SSteven Rostedt 	index = rb_event_index(event);
1721bf41a158SSteven Rostedt 	addr &= PAGE_MASK;
1722bf41a158SSteven Rostedt 
1723bf41a158SSteven Rostedt 	return cpu_buffer->commit_page->page == (void *)addr &&
1724bf41a158SSteven Rostedt 		rb_commit_index(cpu_buffer) == index;
1725bf41a158SSteven Rostedt }
1726bf41a158SSteven Rostedt 
172734a148bfSAndrew Morton static void
1728bf41a158SSteven Rostedt rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1729bf41a158SSteven Rostedt {
173077ae365eSSteven Rostedt 	unsigned long max_count;
173177ae365eSSteven Rostedt 
1732bf41a158SSteven Rostedt 	/*
1733bf41a158SSteven Rostedt 	 * We only race with interrupts and NMIs on this CPU.
1734bf41a158SSteven Rostedt 	 * If we own the commit event, then we can commit
1735bf41a158SSteven Rostedt 	 * all others that interrupted us, since the interruptions
1736bf41a158SSteven Rostedt 	 * are in stack format (they finish before they come
1737bf41a158SSteven Rostedt 	 * back to us). This allows us to do a simple loop to
1738bf41a158SSteven Rostedt 	 * assign the commit to the tail.
1739bf41a158SSteven Rostedt 	 */
1740a8ccf1d6SSteven Rostedt  again:
1741438ced17SVaibhav Nagarnaik 	max_count = cpu_buffer->nr_pages * 100;
174277ae365eSSteven Rostedt 
1743bf41a158SSteven Rostedt 	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
174477ae365eSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
174577ae365eSSteven Rostedt 			return;
174677ae365eSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer,
174777ae365eSSteven Rostedt 			       rb_is_reader_page(cpu_buffer->tail_page)))
174877ae365eSSteven Rostedt 			return;
174977ae365eSSteven Rostedt 		local_set(&cpu_buffer->commit_page->page->commit,
175077ae365eSSteven Rostedt 			  rb_page_write(cpu_buffer->commit_page));
1751bf41a158SSteven Rostedt 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1752abc9b56dSSteven Rostedt 		cpu_buffer->write_stamp =
1753abc9b56dSSteven Rostedt 			cpu_buffer->commit_page->page->time_stamp;
1754bf41a158SSteven Rostedt 		/* add barrier to keep gcc from optimizing too much */
1755bf41a158SSteven Rostedt 		barrier();
1756bf41a158SSteven Rostedt 	}
1757bf41a158SSteven Rostedt 	while (rb_commit_index(cpu_buffer) !=
1758bf41a158SSteven Rostedt 	       rb_page_write(cpu_buffer->commit_page)) {
175977ae365eSSteven Rostedt 
176077ae365eSSteven Rostedt 		local_set(&cpu_buffer->commit_page->page->commit,
176177ae365eSSteven Rostedt 			  rb_page_write(cpu_buffer->commit_page));
176277ae365eSSteven Rostedt 		RB_WARN_ON(cpu_buffer,
176377ae365eSSteven Rostedt 			   local_read(&cpu_buffer->commit_page->page->commit) &
176477ae365eSSteven Rostedt 			   ~RB_WRITE_MASK);
1765bf41a158SSteven Rostedt 		barrier();
1766bf41a158SSteven Rostedt 	}
1767a8ccf1d6SSteven Rostedt 
1768a8ccf1d6SSteven Rostedt 	/* again, keep gcc from optimizing */
1769a8ccf1d6SSteven Rostedt 	barrier();
1770a8ccf1d6SSteven Rostedt 
1771a8ccf1d6SSteven Rostedt 	/*
1772a8ccf1d6SSteven Rostedt 	 * If an interrupt came in just after the first while loop
1773a8ccf1d6SSteven Rostedt 	 * and pushed the tail page forward, we will be left with
1774a8ccf1d6SSteven Rostedt 	 * a dangling commit that will never go forward.
1775a8ccf1d6SSteven Rostedt 	 */
1776a8ccf1d6SSteven Rostedt 	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1777a8ccf1d6SSteven Rostedt 		goto again;
17787a8e76a3SSteven Rostedt }
17797a8e76a3SSteven Rostedt 
1780d769041fSSteven Rostedt static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
17817a8e76a3SSteven Rostedt {
1782abc9b56dSSteven Rostedt 	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
17836f807acdSSteven Rostedt 	cpu_buffer->reader_page->read = 0;
1784d769041fSSteven Rostedt }
1785d769041fSSteven Rostedt 
178634a148bfSAndrew Morton static void rb_inc_iter(struct ring_buffer_iter *iter)
1787d769041fSSteven Rostedt {
1788d769041fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1789d769041fSSteven Rostedt 
1790d769041fSSteven Rostedt 	/*
1791d769041fSSteven Rostedt 	 * The iterator could be on the reader page (it starts there).
1792d769041fSSteven Rostedt 	 * But the head could have moved, since the reader was
1793d769041fSSteven Rostedt 	 * found. Check for this case and assign the iterator
1794d769041fSSteven Rostedt 	 * to the head page instead of next.
1795d769041fSSteven Rostedt 	 */
1796d769041fSSteven Rostedt 	if (iter->head_page == cpu_buffer->reader_page)
179777ae365eSSteven Rostedt 		iter->head_page = rb_set_head_page(cpu_buffer);
1798d769041fSSteven Rostedt 	else
1799d769041fSSteven Rostedt 		rb_inc_page(cpu_buffer, &iter->head_page);
1800d769041fSSteven Rostedt 
1801abc9b56dSSteven Rostedt 	iter->read_stamp = iter->head_page->page->time_stamp;
18027a8e76a3SSteven Rostedt 	iter->head = 0;
18037a8e76a3SSteven Rostedt }
18047a8e76a3SSteven Rostedt 
180569d1b839SSteven Rostedt /* Slow path, do not inline */
180669d1b839SSteven Rostedt static noinline struct ring_buffer_event *
180769d1b839SSteven Rostedt rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
180869d1b839SSteven Rostedt {
180969d1b839SSteven Rostedt 	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
181069d1b839SSteven Rostedt 
181169d1b839SSteven Rostedt 	/* Not the first event on the page? */
181269d1b839SSteven Rostedt 	if (rb_event_index(event)) {
181369d1b839SSteven Rostedt 		event->time_delta = delta & TS_MASK;
181469d1b839SSteven Rostedt 		event->array[0] = delta >> TS_SHIFT;
181569d1b839SSteven Rostedt 	} else {
181669d1b839SSteven Rostedt 		/* nope, just zero it */
181769d1b839SSteven Rostedt 		event->time_delta = 0;
181869d1b839SSteven Rostedt 		event->array[0] = 0;
181969d1b839SSteven Rostedt 	}
182069d1b839SSteven Rostedt 
182169d1b839SSteven Rostedt 	return skip_time_extend(event);
182269d1b839SSteven Rostedt }
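/*
 * A worked example of the split above, assuming TS_SHIFT == 27 to
 * match the 27-bit time_delta field: for delta == 0x12345678,
 *
 *	event->time_delta = delta & TS_MASK   = 0x2345678;
 *	event->array[0]   = delta >> TS_SHIFT = 0x2;
 *
 * and the reader rebuilds the delta as
 * (array[0] << TS_SHIFT) + time_delta.
 */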
182369d1b839SSteven Rostedt 
18247a8e76a3SSteven Rostedt /**
18257a8e76a3SSteven Rostedt  * rb_update_event - update event type and data
18267a8e76a3SSteven Rostedt  * @cpu_buffer: the per cpu buffer that owns the event
18277a8e76a3SSteven Rostedt  * @event: the event to update
18287a8e76a3SSteven Rostedt  * @length: the size of the event field in the ring buffer
18297a8e76a3SSteven Rostedt  *
18307a8e76a3SSteven Rostedt  * Update the type and data fields of the event. The length
18317a8e76a3SSteven Rostedt  * is the actual size that is written to the ring buffer,
18327a8e76a3SSteven Rostedt  * and with this, we can determine what to place into the
18337a8e76a3SSteven Rostedt  * data field.
18347a8e76a3SSteven Rostedt  */
183534a148bfSAndrew Morton static void
183669d1b839SSteven Rostedt rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
183769d1b839SSteven Rostedt 		struct ring_buffer_event *event, unsigned length,
183869d1b839SSteven Rostedt 		int add_timestamp, u64 delta)
18397a8e76a3SSteven Rostedt {
184069d1b839SSteven Rostedt 	/* Only a commit updates the timestamp */
184169d1b839SSteven Rostedt 	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
184269d1b839SSteven Rostedt 		delta = 0;
18437a8e76a3SSteven Rostedt 
184469d1b839SSteven Rostedt 	/*
184569d1b839SSteven Rostedt 	 * If we need to add a timestamp, then we
184669d1b839SSteven Rostedt 	 * add it to the start of the reserved space.
184769d1b839SSteven Rostedt 	 */
184869d1b839SSteven Rostedt 	if (unlikely(add_timestamp)) {
184969d1b839SSteven Rostedt 		event = rb_add_time_stamp(event, delta);
185069d1b839SSteven Rostedt 		length -= RB_LEN_TIME_EXTEND;
185169d1b839SSteven Rostedt 		delta = 0;
18527a8e76a3SSteven Rostedt 	}
185369d1b839SSteven Rostedt 
185469d1b839SSteven Rostedt 	event->time_delta = delta;
185569d1b839SSteven Rostedt 	length -= RB_EVNT_HDR_SIZE;
185669d1b839SSteven Rostedt 	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
185769d1b839SSteven Rostedt 		event->type_len = 0;
185869d1b839SSteven Rostedt 		event->array[0] = length;
185969d1b839SSteven Rostedt 	} else
186069d1b839SSteven Rostedt 		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
18617a8e76a3SSteven Rostedt }
18627a8e76a3SSteven Rostedt 
186377ae365eSSteven Rostedt /*
186477ae365eSSteven Rostedt  * rb_handle_head_page - writer hit the head page
186577ae365eSSteven Rostedt  *
186677ae365eSSteven Rostedt  * Returns: +1 to retry page
186777ae365eSSteven Rostedt  *           0 to continue
186877ae365eSSteven Rostedt  *          -1 on error
186977ae365eSSteven Rostedt  */
187077ae365eSSteven Rostedt static int
187177ae365eSSteven Rostedt rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
187277ae365eSSteven Rostedt 		    struct buffer_page *tail_page,
187377ae365eSSteven Rostedt 		    struct buffer_page *next_page)
187477ae365eSSteven Rostedt {
187577ae365eSSteven Rostedt 	struct buffer_page *new_head;
187677ae365eSSteven Rostedt 	int entries;
187777ae365eSSteven Rostedt 	int type;
187877ae365eSSteven Rostedt 	int ret;
187977ae365eSSteven Rostedt 
188077ae365eSSteven Rostedt 	entries = rb_page_entries(next_page);
188177ae365eSSteven Rostedt 
188277ae365eSSteven Rostedt 	/*
188377ae365eSSteven Rostedt 	 * The hard part is here. We need to move the head
188477ae365eSSteven Rostedt 	 * forward, and protect against both readers on
188577ae365eSSteven Rostedt 	 * other CPUs and writers coming in via interrupts.
188677ae365eSSteven Rostedt 	 */
188777ae365eSSteven Rostedt 	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
188877ae365eSSteven Rostedt 				       RB_PAGE_HEAD);
188977ae365eSSteven Rostedt 
189077ae365eSSteven Rostedt 	/*
189177ae365eSSteven Rostedt 	 * type can be one of four:
189277ae365eSSteven Rostedt 	 *  NORMAL - an interrupt already moved it for us
189377ae365eSSteven Rostedt 	 *  HEAD   - we are the first to get here.
189477ae365eSSteven Rostedt 	 *  UPDATE - we are the interrupt interrupting
189577ae365eSSteven Rostedt 	 *           a current move.
189677ae365eSSteven Rostedt 	 *  MOVED  - a reader on another CPU moved the next
189777ae365eSSteven Rostedt 	 *           pointer to its reader page. Give up
189877ae365eSSteven Rostedt 	 *           and try again.
189977ae365eSSteven Rostedt 	 */
190077ae365eSSteven Rostedt 
190177ae365eSSteven Rostedt 	switch (type) {
190277ae365eSSteven Rostedt 	case RB_PAGE_HEAD:
190377ae365eSSteven Rostedt 		/*
190477ae365eSSteven Rostedt 		 * We changed the head to UPDATE, thus
190577ae365eSSteven Rostedt 		 * it is our responsibility to update
190677ae365eSSteven Rostedt 		 * the counters.
190777ae365eSSteven Rostedt 		 */
190877ae365eSSteven Rostedt 		local_add(entries, &cpu_buffer->overrun);
1909c64e148aSVaibhav Nagarnaik 		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
191077ae365eSSteven Rostedt 
191177ae365eSSteven Rostedt 		/*
191277ae365eSSteven Rostedt 		 * The entries will be zeroed out when we move the
191377ae365eSSteven Rostedt 		 * tail page.
191477ae365eSSteven Rostedt 		 */
191577ae365eSSteven Rostedt 
191677ae365eSSteven Rostedt 		/* still more to do */
191777ae365eSSteven Rostedt 		break;
191877ae365eSSteven Rostedt 
191977ae365eSSteven Rostedt 	case RB_PAGE_UPDATE:
192077ae365eSSteven Rostedt 		/*
192177ae365eSSteven Rostedt 		 * This is an interrupt that interrupted the
192277ae365eSSteven Rostedt 		 * previous update. Still more to do.
192377ae365eSSteven Rostedt 		 */
192477ae365eSSteven Rostedt 		break;
192577ae365eSSteven Rostedt 	case RB_PAGE_NORMAL:
192677ae365eSSteven Rostedt 		/*
192777ae365eSSteven Rostedt 		 * An interrupt came in before the update
192877ae365eSSteven Rostedt 		 * and processed this for us.
192977ae365eSSteven Rostedt 		 * Nothing left to do.
193077ae365eSSteven Rostedt 		 */
193177ae365eSSteven Rostedt 		return 1;
193277ae365eSSteven Rostedt 	case RB_PAGE_MOVED:
193377ae365eSSteven Rostedt 		/*
193477ae365eSSteven Rostedt 		 * The reader is on another CPU and just did
193577ae365eSSteven Rostedt 		 * a swap with our next_page.
193677ae365eSSteven Rostedt 		 * Try again.
193777ae365eSSteven Rostedt 		 */
193877ae365eSSteven Rostedt 		return 1;
193977ae365eSSteven Rostedt 	default:
194077ae365eSSteven Rostedt 		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
194177ae365eSSteven Rostedt 		return -1;
194277ae365eSSteven Rostedt 	}
194377ae365eSSteven Rostedt 
194477ae365eSSteven Rostedt 	/*
194577ae365eSSteven Rostedt 	 * Now that we are here, the old head pointer is
194677ae365eSSteven Rostedt 	 * set to UPDATE. This will keep the reader from
194777ae365eSSteven Rostedt 	 * swapping the head page with the reader page.
194877ae365eSSteven Rostedt 	 * The reader (on another CPU) will spin till
194977ae365eSSteven Rostedt 	 * we are finished.
195077ae365eSSteven Rostedt 	 *
195177ae365eSSteven Rostedt 	 * We just need to protect against interrupts
195277ae365eSSteven Rostedt 	 * doing the job. We will set the next pointer
195377ae365eSSteven Rostedt 	 * to HEAD. After that, we set the old pointer
195477ae365eSSteven Rostedt 	 * to NORMAL, but only if it was HEAD before.
195577ae365eSSteven Rostedt 	 * otherwise we are an interrupt, and only
195677ae365eSSteven Rostedt 	 * Otherwise we are an interrupt, and only
195777ae365eSSteven Rostedt 	 * want the outermost commit to reset it.
195877ae365eSSteven Rostedt 	new_head = next_page;
195977ae365eSSteven Rostedt 	rb_inc_page(cpu_buffer, &new_head);
196077ae365eSSteven Rostedt 
196177ae365eSSteven Rostedt 	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
196277ae365eSSteven Rostedt 				    RB_PAGE_NORMAL);
196377ae365eSSteven Rostedt 
196477ae365eSSteven Rostedt 	/*
196577ae365eSSteven Rostedt 	 * Valid returns are:
196677ae365eSSteven Rostedt 	 *  HEAD   - an interrupt came in and already set it.
196777ae365eSSteven Rostedt 	 *  NORMAL - One of two things:
196877ae365eSSteven Rostedt 	 *            1) We really set it.
196977ae365eSSteven Rostedt 	 *            2) A bunch of interrupts came in and moved
197077ae365eSSteven Rostedt 	 *               the page forward again.
197177ae365eSSteven Rostedt 	 */
197277ae365eSSteven Rostedt 	switch (ret) {
197377ae365eSSteven Rostedt 	case RB_PAGE_HEAD:
197477ae365eSSteven Rostedt 	case RB_PAGE_NORMAL:
197577ae365eSSteven Rostedt 		/* OK */
197677ae365eSSteven Rostedt 		break;
197777ae365eSSteven Rostedt 	default:
197877ae365eSSteven Rostedt 		RB_WARN_ON(cpu_buffer, 1);
197977ae365eSSteven Rostedt 		return -1;
198077ae365eSSteven Rostedt 	}
198177ae365eSSteven Rostedt 
198277ae365eSSteven Rostedt 	/*
198377ae365eSSteven Rostedt 	 * It is possible that an interrupt came in,
198477ae365eSSteven Rostedt 	 * set the head up, then more interrupts came in
198577ae365eSSteven Rostedt 	 * and moved it again. When we get back here,
198677ae365eSSteven Rostedt 	 * the page would have been set to NORMAL but we
198777ae365eSSteven Rostedt 	 * just set it back to HEAD.
198877ae365eSSteven Rostedt 	 *
198977ae365eSSteven Rostedt 	 * How do you detect this? Well, if that happened
199077ae365eSSteven Rostedt 	 * the tail page would have moved.
199177ae365eSSteven Rostedt 	 */
199277ae365eSSteven Rostedt 	if (ret == RB_PAGE_NORMAL) {
199377ae365eSSteven Rostedt 		/*
199477ae365eSSteven Rostedt 		 * If the tail had moved past next, then we need
199577ae365eSSteven Rostedt 		 * to reset the pointer.
199677ae365eSSteven Rostedt 		 */
199777ae365eSSteven Rostedt 		if (cpu_buffer->tail_page != tail_page &&
199877ae365eSSteven Rostedt 		    cpu_buffer->tail_page != next_page)
199977ae365eSSteven Rostedt 			rb_head_page_set_normal(cpu_buffer, new_head,
200077ae365eSSteven Rostedt 						next_page,
200177ae365eSSteven Rostedt 						RB_PAGE_HEAD);
200277ae365eSSteven Rostedt 	}
200377ae365eSSteven Rostedt 
200477ae365eSSteven Rostedt 	/*
200577ae365eSSteven Rostedt 	 * If this was the outermost commit (the one that
200677ae365eSSteven Rostedt 	 * changed the original pointer from HEAD to UPDATE),
200777ae365eSSteven Rostedt 	 * then it is up to us to reset it to NORMAL.
200877ae365eSSteven Rostedt 	 */
200977ae365eSSteven Rostedt 	if (type == RB_PAGE_HEAD) {
201077ae365eSSteven Rostedt 		ret = rb_head_page_set_normal(cpu_buffer, next_page,
201177ae365eSSteven Rostedt 					      tail_page,
201277ae365eSSteven Rostedt 					      RB_PAGE_UPDATE);
201377ae365eSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer,
201477ae365eSSteven Rostedt 			       ret != RB_PAGE_UPDATE))
201577ae365eSSteven Rostedt 			return -1;
201677ae365eSSteven Rostedt 	}
201777ae365eSSteven Rostedt 
201877ae365eSSteven Rostedt 	return 0;
201977ae365eSSteven Rostedt }
202077ae365eSSteven Rostedt 
202134a148bfSAndrew Morton static unsigned rb_calculate_event_length(unsigned length)
20227a8e76a3SSteven Rostedt {
20237a8e76a3SSteven Rostedt 	struct ring_buffer_event event; /* Used only for sizeof array */
20247a8e76a3SSteven Rostedt 
20257a8e76a3SSteven Rostedt 	/* zero length can cause confusion */
20267a8e76a3SSteven Rostedt 	if (!length)
20277a8e76a3SSteven Rostedt 		length = 1;
20287a8e76a3SSteven Rostedt 
20292271048dSSteven Rostedt 	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
20307a8e76a3SSteven Rostedt 		length += sizeof(event.array[0]);
20317a8e76a3SSteven Rostedt 
20327a8e76a3SSteven Rostedt 	length += RB_EVNT_HDR_SIZE;
20332271048dSSteven Rostedt 	length = ALIGN(length, RB_ARCH_ALIGNMENT);
20347a8e76a3SSteven Rostedt 
20357a8e76a3SSteven Rostedt 	return length;
20367a8e76a3SSteven Rostedt }
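/*
 * A worked example, assuming a 4-byte event header and 4-byte
 * RB_ARCH_ALIGNMENT with no forced 8-byte alignment: a 3-byte
 * payload becomes 3 + RB_EVNT_HDR_SIZE = 7, which ALIGN() rounds
 * up to 8 bytes reserved on the page.
 */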
20377a8e76a3SSteven Rostedt 
2038c7b09308SSteven Rostedt static inline void
2039c7b09308SSteven Rostedt rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2040c7b09308SSteven Rostedt 	      struct buffer_page *tail_page,
2041c7b09308SSteven Rostedt 	      unsigned long tail, unsigned long length)
2042c7b09308SSteven Rostedt {
2043c7b09308SSteven Rostedt 	struct ring_buffer_event *event;
2044c7b09308SSteven Rostedt 
2045c7b09308SSteven Rostedt 	/*
2046c7b09308SSteven Rostedt 	 * Only the event that crossed the page boundary
2047c7b09308SSteven Rostedt 	 * must fill the old tail_page with padding.
2048c7b09308SSteven Rostedt 	 */
2049c7b09308SSteven Rostedt 	if (tail >= BUF_PAGE_SIZE) {
2050b3230c8bSSteven Rostedt 		/*
2051b3230c8bSSteven Rostedt 		 * If the page was filled, then we still need
2052b3230c8bSSteven Rostedt 		 * to update the real_end. Reset it to zero
2053b3230c8bSSteven Rostedt 		 * and the reader will ignore it.
2054b3230c8bSSteven Rostedt 		 */
2055b3230c8bSSteven Rostedt 		if (tail == BUF_PAGE_SIZE)
2056b3230c8bSSteven Rostedt 			tail_page->real_end = 0;
2057b3230c8bSSteven Rostedt 
2058c7b09308SSteven Rostedt 		local_sub(length, &tail_page->write);
2059c7b09308SSteven Rostedt 		return;
2060c7b09308SSteven Rostedt 	}
2061c7b09308SSteven Rostedt 
2062c7b09308SSteven Rostedt 	event = __rb_page_index(tail_page, tail);
2063b0b7065bSLinus Torvalds 	kmemcheck_annotate_bitfield(event, bitfield);
2064c7b09308SSteven Rostedt 
2065c64e148aSVaibhav Nagarnaik 	/* account for padding bytes */
2066c64e148aSVaibhav Nagarnaik 	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2067c64e148aSVaibhav Nagarnaik 
2068c7b09308SSteven Rostedt 	/*
2069ff0ff84aSSteven Rostedt 	 * Save the original length to the meta data.
2070ff0ff84aSSteven Rostedt 	 * This will be used by the reader to maintain its lost
2071ff0ff84aSSteven Rostedt 	 * event counter.
2072ff0ff84aSSteven Rostedt 	 */
2073ff0ff84aSSteven Rostedt 	tail_page->real_end = tail;
2074ff0ff84aSSteven Rostedt 
2075ff0ff84aSSteven Rostedt 	/*
2076c7b09308SSteven Rostedt 	 * If this event is bigger than the minimum size, then
2077c7b09308SSteven Rostedt 	 * we need to be careful that we don't subtract the
2078c7b09308SSteven Rostedt 	 * write counter enough to allow another writer to slip
2079c7b09308SSteven Rostedt 	 * in on this page.
2080c7b09308SSteven Rostedt 	 * We put in a discarded commit instead, to make sure
2081c7b09308SSteven Rostedt 	 * that this space is not used again.
2082c7b09308SSteven Rostedt 	 *
2083c7b09308SSteven Rostedt 	 * If we are less than the minimum size, we don't need to
2084c7b09308SSteven Rostedt 	 * worry about it.
2085c7b09308SSteven Rostedt 	 */
2086c7b09308SSteven Rostedt 	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2087c7b09308SSteven Rostedt 		/* No room for any events */
2088c7b09308SSteven Rostedt 
2089c7b09308SSteven Rostedt 		/* Mark the rest of the page with padding */
2090c7b09308SSteven Rostedt 		rb_event_set_padding(event);
2091c7b09308SSteven Rostedt 
2092c7b09308SSteven Rostedt 		/* Set the write back to the previous setting */
2093c7b09308SSteven Rostedt 		local_sub(length, &tail_page->write);
2094c7b09308SSteven Rostedt 		return;
2095c7b09308SSteven Rostedt 	}
2096c7b09308SSteven Rostedt 
2097c7b09308SSteven Rostedt 	/* Put in a discarded event */
2098c7b09308SSteven Rostedt 	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2099c7b09308SSteven Rostedt 	event->type_len = RINGBUF_TYPE_PADDING;
2100c7b09308SSteven Rostedt 	/* time delta must be non zero */
2101c7b09308SSteven Rostedt 	event->time_delta = 1;
2102c7b09308SSteven Rostedt 
2103c7b09308SSteven Rostedt 	/* Set write to end of buffer */
2104c7b09308SSteven Rostedt 	length = (tail + length) - BUF_PAGE_SIZE;
2105c7b09308SSteven Rostedt 	local_sub(length, &tail_page->write);
2106c7b09308SSteven Rostedt }
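
/*
 * Summary of the cases rb_reset_tail() distinguishes (an illustrative
 * sketch with hypothetical numbers, assuming BUF_PAGE_SIZE == 4080 and
 * RB_EVNT_MIN_SIZE == 8):
 *
 *	tail >= 4080:	this event did not cross the page boundary;
 *			just back out our write and let the event that
 *			crossed do the padding.
 *	tail > 4072:	fewer than RB_EVNT_MIN_SIZE bytes remain; mark
 *			the remainder as padding and back out the write.
 *	otherwise:	emit a discarded padding event covering bytes
 *			tail..4079 and leave write at the end of the page.
 */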
21076634ff26SSteven Rostedt 
2108747e94aeSSteven Rostedt /*
2109747e94aeSSteven Rostedt  * This is the slow path, force gcc not to inline it.
2110747e94aeSSteven Rostedt  */
2111747e94aeSSteven Rostedt static noinline struct ring_buffer_event *
21126634ff26SSteven Rostedt rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
21136634ff26SSteven Rostedt 	     unsigned long length, unsigned long tail,
2114e8bc43e8SSteven Rostedt 	     struct buffer_page *tail_page, u64 ts)
21157a8e76a3SSteven Rostedt {
21165a50e33cSSteven Rostedt 	struct buffer_page *commit_page = cpu_buffer->commit_page;
21177a8e76a3SSteven Rostedt 	struct ring_buffer *buffer = cpu_buffer->buffer;
211877ae365eSSteven Rostedt 	struct buffer_page *next_page;
211977ae365eSSteven Rostedt 	int ret;
2120aa20ae84SSteven Rostedt 
2121aa20ae84SSteven Rostedt 	next_page = tail_page;
21227a8e76a3SSteven Rostedt 
21237a8e76a3SSteven Rostedt 	rb_inc_page(cpu_buffer, &next_page);
21247a8e76a3SSteven Rostedt 
2125bf41a158SSteven Rostedt 	/*
2126bf41a158SSteven Rostedt 	 * If for some reason, we had an interrupt storm that made
2127bf41a158SSteven Rostedt 	 * it all the way around the buffer, bail, and warn
2128bf41a158SSteven Rostedt 	 * about it.
2129bf41a158SSteven Rostedt 	 */
213098db8df7SSteven Rostedt 	if (unlikely(next_page == commit_page)) {
213177ae365eSSteven Rostedt 		local_inc(&cpu_buffer->commit_overrun);
213245141d46SSteven Rostedt 		goto out_reset;
2133bf41a158SSteven Rostedt 	}
2134d769041fSSteven Rostedt 
2135bf41a158SSteven Rostedt 	/*
213677ae365eSSteven Rostedt 	 * This is where the fun begins!
213777ae365eSSteven Rostedt 	 *
213877ae365eSSteven Rostedt 	 * We are fighting against races between a reader that
213977ae365eSSteven Rostedt 	 * could be on another CPU trying to swap its reader
214077ae365eSSteven Rostedt 	 * page with the buffer head.
214177ae365eSSteven Rostedt 	 *
214277ae365eSSteven Rostedt 	 * We are also fighting against interrupts coming in and
214377ae365eSSteven Rostedt 	 * moving the head or tail on us as well.
214477ae365eSSteven Rostedt 	 *
214577ae365eSSteven Rostedt 	 * If the next page is the head page then we have filled
214677ae365eSSteven Rostedt 	 * the buffer, unless the commit page is still on the
214777ae365eSSteven Rostedt 	 * reader page.
2148bf41a158SSteven Rostedt 	 */
214977ae365eSSteven Rostedt 	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2150bf41a158SSteven Rostedt 
215177ae365eSSteven Rostedt 		/*
215277ae365eSSteven Rostedt 		 * If the commit is not on the reader page, then
215377ae365eSSteven Rostedt 		 * move the header page.
215477ae365eSSteven Rostedt 		 * move the head page.
215577ae365eSSteven Rostedt 		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
215677ae365eSSteven Rostedt 			/*
215777ae365eSSteven Rostedt 			 * If we are not in overwrite mode,
215877ae365eSSteven Rostedt 			 * this is easy, just stop here.
215977ae365eSSteven Rostedt 			 */
216077ae365eSSteven Rostedt 			if (!(buffer->flags & RB_FL_OVERWRITE))
216177ae365eSSteven Rostedt 				goto out_reset;
216277ae365eSSteven Rostedt 
216377ae365eSSteven Rostedt 			ret = rb_handle_head_page(cpu_buffer,
216477ae365eSSteven Rostedt 						  tail_page,
216577ae365eSSteven Rostedt 						  next_page);
216677ae365eSSteven Rostedt 			if (ret < 0)
216777ae365eSSteven Rostedt 				goto out_reset;
216877ae365eSSteven Rostedt 			if (ret)
216977ae365eSSteven Rostedt 				goto out_again;
217077ae365eSSteven Rostedt 		} else {
217177ae365eSSteven Rostedt 			/*
217277ae365eSSteven Rostedt 			 * We need to be careful here too. The
217377ae365eSSteven Rostedt 			 * commit page could still be on the reader
217477ae365eSSteven Rostedt 			 * page. We could have a small buffer, and
217577ae365eSSteven Rostedt 			 * have filled up the buffer with events
217677ae365eSSteven Rostedt 			 * from interrupts and such, and wrapped.
217777ae365eSSteven Rostedt 			 *
217877ae365eSSteven Rostedt 			 * Note, if the tail page is also on the
217977ae365eSSteven Rostedt 			 * reader_page, we let it move out.
218077ae365eSSteven Rostedt 			 */
218177ae365eSSteven Rostedt 			if (unlikely((cpu_buffer->commit_page !=
218277ae365eSSteven Rostedt 				      cpu_buffer->tail_page) &&
218377ae365eSSteven Rostedt 				     (cpu_buffer->commit_page ==
218477ae365eSSteven Rostedt 				      cpu_buffer->reader_page))) {
218577ae365eSSteven Rostedt 				local_inc(&cpu_buffer->commit_overrun);
218677ae365eSSteven Rostedt 				goto out_reset;
218777ae365eSSteven Rostedt 			}
218877ae365eSSteven Rostedt 		}
2189bf41a158SSteven Rostedt 	}
2190bf41a158SSteven Rostedt 
219177ae365eSSteven Rostedt 	ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
219277ae365eSSteven Rostedt 	if (ret) {
219377ae365eSSteven Rostedt 		/*
219477ae365eSSteven Rostedt 		 * Nested commits always have zero deltas, so
219577ae365eSSteven Rostedt 		 * just reread the time stamp
219677ae365eSSteven Rostedt 		 */
2197e8bc43e8SSteven Rostedt 		ts = rb_time_stamp(buffer);
2198e8bc43e8SSteven Rostedt 		next_page->page->time_stamp = ts;
219977ae365eSSteven Rostedt 	}
22007a8e76a3SSteven Rostedt 
220177ae365eSSteven Rostedt  out_again:
220277ae365eSSteven Rostedt 
220377ae365eSSteven Rostedt 	rb_reset_tail(cpu_buffer, tail_page, tail, length);
2204bf41a158SSteven Rostedt 
2205bf41a158SSteven Rostedt 	/* fail and let the caller try again */
2206bf41a158SSteven Rostedt 	return ERR_PTR(-EAGAIN);
2207bf41a158SSteven Rostedt 
220845141d46SSteven Rostedt  out_reset:
22096f3b3440SLai Jiangshan 	/* reset write */
2210c7b09308SSteven Rostedt 	rb_reset_tail(cpu_buffer, tail_page, tail, length);
22116f3b3440SLai Jiangshan 
2212bf41a158SSteven Rostedt 	return NULL;
22137a8e76a3SSteven Rostedt }
22147a8e76a3SSteven Rostedt 
22156634ff26SSteven Rostedt static struct ring_buffer_event *
22166634ff26SSteven Rostedt __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
221769d1b839SSteven Rostedt 		  unsigned long length, u64 ts,
221869d1b839SSteven Rostedt 		  u64 delta, int add_timestamp)
22196634ff26SSteven Rostedt {
22205a50e33cSSteven Rostedt 	struct buffer_page *tail_page;
22216634ff26SSteven Rostedt 	struct ring_buffer_event *event;
22226634ff26SSteven Rostedt 	unsigned long tail, write;
22236634ff26SSteven Rostedt 
222469d1b839SSteven Rostedt 	/*
222569d1b839SSteven Rostedt 	 * If the time delta since the last event is too big to
222669d1b839SSteven Rostedt 	 * hold in the time field of the event, then we append a
222769d1b839SSteven Rostedt 	 * TIME EXTEND event ahead of the data event.
222869d1b839SSteven Rostedt 	 */
222969d1b839SSteven Rostedt 	if (unlikely(add_timestamp))
223069d1b839SSteven Rostedt 		length += RB_LEN_TIME_EXTEND;
223169d1b839SSteven Rostedt 
22326634ff26SSteven Rostedt 	tail_page = cpu_buffer->tail_page;
22336634ff26SSteven Rostedt 	write = local_add_return(length, &tail_page->write);
223477ae365eSSteven Rostedt 
223577ae365eSSteven Rostedt 	/* set write to only the index of the write */
223677ae365eSSteven Rostedt 	write &= RB_WRITE_MASK;
22376634ff26SSteven Rostedt 	tail = write - length;
22386634ff26SSteven Rostedt 
22396634ff26SSteven Rostedt 	/* See if we shot past the end of this buffer page */
2240747e94aeSSteven Rostedt 	if (unlikely(write > BUF_PAGE_SIZE))
22416634ff26SSteven Rostedt 		return rb_move_tail(cpu_buffer, length, tail,
22425a50e33cSSteven Rostedt 				    tail_page, ts);
22436634ff26SSteven Rostedt 
22446634ff26SSteven Rostedt 	/* We reserved something on the buffer */
22456634ff26SSteven Rostedt 
22466634ff26SSteven Rostedt 	event = __rb_page_index(tail_page, tail);
22471744a21dSVegard Nossum 	kmemcheck_annotate_bitfield(event, bitfield);
224869d1b839SSteven Rostedt 	rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
22496634ff26SSteven Rostedt 
22506634ff26SSteven Rostedt 	local_inc(&tail_page->entries);
22516634ff26SSteven Rostedt 
22526634ff26SSteven Rostedt 	/*
2253fa743953SSteven Rostedt 	 * If this is the first commit on the page, then update
2254fa743953SSteven Rostedt 	 * its timestamp.
22556634ff26SSteven Rostedt 	 */
2256fa743953SSteven Rostedt 	if (!tail)
2257e8bc43e8SSteven Rostedt 		tail_page->page->time_stamp = ts;
22586634ff26SSteven Rostedt 
2259c64e148aSVaibhav Nagarnaik 	/* account for these added bytes */
2260c64e148aSVaibhav Nagarnaik 	local_add(length, &cpu_buffer->entries_bytes);
2261c64e148aSVaibhav Nagarnaik 
22626634ff26SSteven Rostedt 	return event;
22636634ff26SSteven Rostedt }
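
/*
 * A sketch of why the lockless reservation above is safe (with
 * hypothetical lengths): local_add_return() hands each writer a
 * disjoint region of the tail page. If writer A reserves 24 bytes
 * and is interrupted by writer B reserving 32 bytes on the same page:
 *
 *	A: write = 24, tail = 24 - 24 =  0	-> bytes  0..23
 *	B: write = 56, tail = 56 - 32 = 24	-> bytes 24..55
 *
 * Only a writer whose region crosses BUF_PAGE_SIZE takes the
 * rb_move_tail() slow path.
 */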
22646634ff26SSteven Rostedt 
2265edd813bfSSteven Rostedt static inline int
2266edd813bfSSteven Rostedt rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2267edd813bfSSteven Rostedt 		  struct ring_buffer_event *event)
2268edd813bfSSteven Rostedt {
2269edd813bfSSteven Rostedt 	unsigned long new_index, old_index;
2270edd813bfSSteven Rostedt 	struct buffer_page *bpage;
2271edd813bfSSteven Rostedt 	unsigned long index;
2272edd813bfSSteven Rostedt 	unsigned long addr;
2273edd813bfSSteven Rostedt 
2274edd813bfSSteven Rostedt 	new_index = rb_event_index(event);
227569d1b839SSteven Rostedt 	old_index = new_index + rb_event_ts_length(event);
2276edd813bfSSteven Rostedt 	addr = (unsigned long)event;
2277edd813bfSSteven Rostedt 	addr &= PAGE_MASK;
2278edd813bfSSteven Rostedt 
2279edd813bfSSteven Rostedt 	bpage = cpu_buffer->tail_page;
2280edd813bfSSteven Rostedt 
2281edd813bfSSteven Rostedt 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
228277ae365eSSteven Rostedt 		unsigned long write_mask =
228377ae365eSSteven Rostedt 			local_read(&bpage->write) & ~RB_WRITE_MASK;
2284c64e148aSVaibhav Nagarnaik 		unsigned long event_length = rb_event_length(event);
2285edd813bfSSteven Rostedt 		/*
2286edd813bfSSteven Rostedt 		 * This is on the tail page. It is possible that
2287edd813bfSSteven Rostedt 		 * a write could come in and move the tail page
2288edd813bfSSteven Rostedt 		 * and write to the next page. That is fine
2289edd813bfSSteven Rostedt 		 * because we just shorten what is on this page.
2290edd813bfSSteven Rostedt 		 */
229177ae365eSSteven Rostedt 		old_index += write_mask;
229277ae365eSSteven Rostedt 		new_index += write_mask;
2293edd813bfSSteven Rostedt 		index = local_cmpxchg(&bpage->write, old_index, new_index);
2294c64e148aSVaibhav Nagarnaik 		if (index == old_index) {
2295c64e148aSVaibhav Nagarnaik 			/* update counters */
2296c64e148aSVaibhav Nagarnaik 			local_sub(event_length, &cpu_buffer->entries_bytes);
2297edd813bfSSteven Rostedt 			return 1;
2298edd813bfSSteven Rostedt 		}
2299c64e148aSVaibhav Nagarnaik 	}
2300edd813bfSSteven Rostedt 
2301edd813bfSSteven Rostedt 	/* could not discard */
2302edd813bfSSteven Rostedt 	return 0;
2303edd813bfSSteven Rostedt }
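
/*
 * The discard above is the cmpxchg roll-back idiom: it can only
 * succeed while our event is still the last thing on the tail page.
 * Illustrative trace with hypothetical indexes: an event at index 40
 * of length 16 gives old_index == 56; if bpage->write is still 56 the
 * cmpxchg moves it back to 40 and the space is reclaimed, otherwise
 * another writer slipped in behind us and the event is left in place
 * as a discarded (padding) event.
 */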
2304edd813bfSSteven Rostedt 
2305fa743953SSteven Rostedt static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2306fa743953SSteven Rostedt {
2307fa743953SSteven Rostedt 	local_inc(&cpu_buffer->committing);
2308fa743953SSteven Rostedt 	local_inc(&cpu_buffer->commits);
2309fa743953SSteven Rostedt }
2310fa743953SSteven Rostedt 
2311d9abde21SSteven Rostedt static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2312fa743953SSteven Rostedt {
2313fa743953SSteven Rostedt 	unsigned long commits;
2314fa743953SSteven Rostedt 
2315fa743953SSteven Rostedt 	if (RB_WARN_ON(cpu_buffer,
2316fa743953SSteven Rostedt 		       !local_read(&cpu_buffer->committing)))
2317fa743953SSteven Rostedt 		return;
2318fa743953SSteven Rostedt 
2319fa743953SSteven Rostedt  again:
2320fa743953SSteven Rostedt 	commits = local_read(&cpu_buffer->commits);
2321fa743953SSteven Rostedt 	/* synchronize with interrupts */
2322fa743953SSteven Rostedt 	barrier();
2323fa743953SSteven Rostedt 	if (local_read(&cpu_buffer->committing) == 1)
2324fa743953SSteven Rostedt 		rb_set_commit_to_write(cpu_buffer);
2325fa743953SSteven Rostedt 
2326fa743953SSteven Rostedt 	local_dec(&cpu_buffer->committing);
2327fa743953SSteven Rostedt 
2328fa743953SSteven Rostedt 	/* synchronize with interrupts */
2329fa743953SSteven Rostedt 	barrier();
2330fa743953SSteven Rostedt 
2331fa743953SSteven Rostedt 	/*
2332fa743953SSteven Rostedt 	 * Need to account for interrupts coming in between the
2333fa743953SSteven Rostedt 	 * updating of the commit page and the clearing of the
2334fa743953SSteven Rostedt 	 * committing counter.
2335fa743953SSteven Rostedt 	 */
2336fa743953SSteven Rostedt 	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2337fa743953SSteven Rostedt 	    !local_read(&cpu_buffer->committing)) {
2338fa743953SSteven Rostedt 		local_inc(&cpu_buffer->committing);
2339fa743953SSteven Rostedt 		goto again;
2340fa743953SSteven Rostedt 	}
2341fa743953SSteven Rostedt }
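
/*
 * Example of the nesting that the committing counter tracks (a
 * hypothetical interleaving): a writer calls rb_start_commit()
 * (committing == 1) and is then interrupted by a handler that also
 * reserves an event (committing == 2). The handler's rb_end_commit()
 * only decrements; the outermost rb_end_commit() sees committing == 1
 * and is the one that advances the commit page via
 * rb_set_commit_to_write().
 */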
2342fa743953SSteven Rostedt 
23437a8e76a3SSteven Rostedt static struct ring_buffer_event *
234462f0b3ebSSteven Rostedt rb_reserve_next_event(struct ring_buffer *buffer,
234562f0b3ebSSteven Rostedt 		      struct ring_buffer_per_cpu *cpu_buffer,
23461cd8d735SSteven Rostedt 		      unsigned long length)
23477a8e76a3SSteven Rostedt {
23487a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
234969d1b839SSteven Rostedt 	u64 ts, delta;
2350818e3dd3SSteven Rostedt 	int nr_loops = 0;
235169d1b839SSteven Rostedt 	int add_timestamp;
2352140ff891SSteven Rostedt 	u64 diff;
23537a8e76a3SSteven Rostedt 
2354fa743953SSteven Rostedt 	rb_start_commit(cpu_buffer);
2355fa743953SSteven Rostedt 
235685bac32cSSteven Rostedt #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
235762f0b3ebSSteven Rostedt 	/*
235862f0b3ebSSteven Rostedt 	 * Due to the ability to swap a cpu buffer out of a ring
235962f0b3ebSSteven Rostedt 	 * buffer, it is possible it was swapped before we committed
236062f0b3ebSSteven Rostedt 	 * (committing stops a swap). We check for that here and,
236162f0b3ebSSteven Rostedt 	 * if it happened, fail the write.
236262f0b3ebSSteven Rostedt 	 */
236362f0b3ebSSteven Rostedt 	barrier();
236462f0b3ebSSteven Rostedt 	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
236562f0b3ebSSteven Rostedt 		local_dec(&cpu_buffer->committing);
236662f0b3ebSSteven Rostedt 		local_dec(&cpu_buffer->commits);
236762f0b3ebSSteven Rostedt 		return NULL;
236862f0b3ebSSteven Rostedt 	}
236985bac32cSSteven Rostedt #endif
237062f0b3ebSSteven Rostedt 
2371be957c44SSteven Rostedt 	length = rb_calculate_event_length(length);
2372bf41a158SSteven Rostedt  again:
237369d1b839SSteven Rostedt 	add_timestamp = 0;
237469d1b839SSteven Rostedt 	delta = 0;
237569d1b839SSteven Rostedt 
2376818e3dd3SSteven Rostedt 	/*
2377818e3dd3SSteven Rostedt 	 * We allow for interrupts to reenter here and do a trace.
2378818e3dd3SSteven Rostedt 	 * If one does, it will cause this original code to loop
2379818e3dd3SSteven Rostedt 	 * back here. Even with heavy interrupts happening, this
2380818e3dd3SSteven Rostedt 	 * should only happen a few times in a row. If this happens
2381818e3dd3SSteven Rostedt 	 * 1000 times in a row, there must be either an interrupt
2382818e3dd3SSteven Rostedt 	 * storm or we have something buggy.
2383818e3dd3SSteven Rostedt 	 * Bail!
2384818e3dd3SSteven Rostedt 	 */
23853e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2386fa743953SSteven Rostedt 		goto out_fail;
2387818e3dd3SSteven Rostedt 
23886d3f1e12SJiri Olsa 	ts = rb_time_stamp(cpu_buffer->buffer);
2389168b6b1dSSteven Rostedt 	diff = ts - cpu_buffer->write_stamp;
23907a8e76a3SSteven Rostedt 
2391168b6b1dSSteven Rostedt 	/* make sure this diff is calculated here */
2392bf41a158SSteven Rostedt 	barrier();
23937a8e76a3SSteven Rostedt 
2394bf41a158SSteven Rostedt 	/* Did the write stamp get updated already? */
2395140ff891SSteven Rostedt 	if (likely(ts >= cpu_buffer->write_stamp)) {
2396168b6b1dSSteven Rostedt 		delta = diff;
2397168b6b1dSSteven Rostedt 		if (unlikely(test_time_stamp(delta))) {
239831274d72SJiri Olsa 			int local_clock_stable = 1;
239931274d72SJiri Olsa #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
240031274d72SJiri Olsa 			local_clock_stable = sched_clock_stable;
240131274d72SJiri Olsa #endif
240269d1b839SSteven Rostedt 			WARN_ONCE(delta > (1ULL << 59),
240331274d72SJiri Olsa 				  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
240469d1b839SSteven Rostedt 				  (unsigned long long)delta,
240569d1b839SSteven Rostedt 				  (unsigned long long)ts,
240631274d72SJiri Olsa 				  (unsigned long long)cpu_buffer->write_stamp,
240731274d72SJiri Olsa 				  local_clock_stable ? "" :
240831274d72SJiri Olsa 				  "If you just came from a suspend/resume,\n"
240931274d72SJiri Olsa 				  "please switch to the trace global clock:\n"
241031274d72SJiri Olsa 				  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
241169d1b839SSteven Rostedt 			add_timestamp = 1;
24127a8e76a3SSteven Rostedt 		}
2413168b6b1dSSteven Rostedt 	}
24147a8e76a3SSteven Rostedt 
241569d1b839SSteven Rostedt 	event = __rb_reserve_next(cpu_buffer, length, ts,
241669d1b839SSteven Rostedt 				  delta, add_timestamp);
2417168b6b1dSSteven Rostedt 	if (unlikely(PTR_ERR(event) == -EAGAIN))
2418bf41a158SSteven Rostedt 		goto again;
24197a8e76a3SSteven Rostedt 
2420fa743953SSteven Rostedt 	if (!event)
2421fa743953SSteven Rostedt 		goto out_fail;
2422bf41a158SSteven Rostedt 
24237a8e76a3SSteven Rostedt 	return event;
2424fa743953SSteven Rostedt 
2425fa743953SSteven Rostedt  out_fail:
2426fa743953SSteven Rostedt 	rb_end_commit(cpu_buffer);
2427fa743953SSteven Rostedt 	return NULL;
24287a8e76a3SSteven Rostedt }
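
/*
 * Worked example of the add_timestamp decision (assuming the event's
 * time_delta field holds 27 bits, so only deltas below 2^27 fit
 * inline): with write_stamp == 1000000 and ts == 1000500, delta == 500
 * is stored in the event itself. If ts - write_stamp were 2^30,
 * test_time_stamp() would fail and a TIME_EXTEND event would be
 * prepended, carrying the delta split across array[0] and time_delta.
 */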
24297a8e76a3SSteven Rostedt 
24301155de47SPaul Mundt #ifdef CONFIG_TRACING
24311155de47SPaul Mundt 
2432aa18efb2SSteven Rostedt #define TRACE_RECURSIVE_DEPTH 16
2433261842b7SSteven Rostedt 
2434d9abde21SSteven Rostedt /* Keep this code out of the fast path cache */
2435d9abde21SSteven Rostedt static noinline void trace_recursive_fail(void)
2436261842b7SSteven Rostedt {
2437261842b7SSteven Rostedt 	/* Disable all tracing before we do anything else */
2438261842b7SSteven Rostedt 	tracing_off_permanent();
2439e057a5e5SFrederic Weisbecker 
24407d7d2b80SSteven Rostedt 	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
2441e057a5e5SFrederic Weisbecker 		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
2442b1cff0adSSteven Rostedt 		    trace_recursion_buffer(),
2443e057a5e5SFrederic Weisbecker 		    hardirq_count() >> HARDIRQ_SHIFT,
2444e057a5e5SFrederic Weisbecker 		    softirq_count() >> SOFTIRQ_SHIFT,
2445e057a5e5SFrederic Weisbecker 		    in_nmi());
2446e057a5e5SFrederic Weisbecker 
2447261842b7SSteven Rostedt 	WARN_ON_ONCE(1);
2448d9abde21SSteven Rostedt }
2449d9abde21SSteven Rostedt 
2450d9abde21SSteven Rostedt static inline int trace_recursive_lock(void)
2451d9abde21SSteven Rostedt {
2452b1cff0adSSteven Rostedt 	trace_recursion_inc();
2453d9abde21SSteven Rostedt 
2454b1cff0adSSteven Rostedt 	if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
2455d9abde21SSteven Rostedt 		return 0;
2456d9abde21SSteven Rostedt 
2457d9abde21SSteven Rostedt 	trace_recursive_fail();
2458d9abde21SSteven Rostedt 
2459261842b7SSteven Rostedt 	return -1;
2460261842b7SSteven Rostedt }
2461261842b7SSteven Rostedt 
2462d9abde21SSteven Rostedt static inline void trace_recursive_unlock(void)
2463261842b7SSteven Rostedt {
2464b1cff0adSSteven Rostedt 	WARN_ON_ONCE(!trace_recursion_buffer());
2465261842b7SSteven Rostedt 
2466b1cff0adSSteven Rostedt 	trace_recursion_dec();
2467261842b7SSteven Rostedt }
2468261842b7SSteven Rostedt 
24691155de47SPaul Mundt #else
24701155de47SPaul Mundt 
24711155de47SPaul Mundt #define trace_recursive_lock()		(0)
24721155de47SPaul Mundt #define trace_recursive_unlock()	do { } while (0)
24731155de47SPaul Mundt 
24741155de47SPaul Mundt #endif
24751155de47SPaul Mundt 
24767a8e76a3SSteven Rostedt /**
24777a8e76a3SSteven Rostedt  * ring_buffer_lock_reserve - reserve a part of the buffer
24787a8e76a3SSteven Rostedt  * @buffer: the ring buffer to reserve from
24797a8e76a3SSteven Rostedt  * @length: the length of the data to reserve (excluding event header)
24807a8e76a3SSteven Rostedt  *
24817a8e76a3SSteven Rostedt  * Returns a reserved event on the ring buffer to copy directly to.
24827a8e76a3SSteven Rostedt  * The user of this interface will need to get the body to write into
24837a8e76a3SSteven Rostedt  * and can use the ring_buffer_event_data() interface.
24847a8e76a3SSteven Rostedt  *
24857a8e76a3SSteven Rostedt  * The length is the length of the data needed, not the event length
24867a8e76a3SSteven Rostedt  * which also includes the event header.
24877a8e76a3SSteven Rostedt  *
24887a8e76a3SSteven Rostedt  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
24897a8e76a3SSteven Rostedt  * If NULL is returned, then nothing has been allocated or locked.
24907a8e76a3SSteven Rostedt  */
24917a8e76a3SSteven Rostedt struct ring_buffer_event *
24920a987751SArnaldo Carvalho de Melo ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
24937a8e76a3SSteven Rostedt {
24947a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
24957a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
24965168ae50SSteven Rostedt 	int cpu;
24977a8e76a3SSteven Rostedt 
2498033601a3SSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
2499a3583244SSteven Rostedt 		return NULL;
2500a3583244SSteven Rostedt 
2501bf41a158SSteven Rostedt 	/* If we are tracing schedule, we don't want to recurse */
25025168ae50SSteven Rostedt 	preempt_disable_notrace();
2503bf41a158SSteven Rostedt 
250452fbe9cdSLai Jiangshan 	if (atomic_read(&buffer->record_disabled))
250552fbe9cdSLai Jiangshan 		goto out_nocheck;
250652fbe9cdSLai Jiangshan 
2507261842b7SSteven Rostedt 	if (trace_recursive_lock())
2508261842b7SSteven Rostedt 		goto out_nocheck;
2509261842b7SSteven Rostedt 
25107a8e76a3SSteven Rostedt 	cpu = raw_smp_processor_id();
25117a8e76a3SSteven Rostedt 
25129e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2513d769041fSSteven Rostedt 		goto out;
25147a8e76a3SSteven Rostedt 
25157a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
25167a8e76a3SSteven Rostedt 
25177a8e76a3SSteven Rostedt 	if (atomic_read(&cpu_buffer->record_disabled))
2518d769041fSSteven Rostedt 		goto out;
25197a8e76a3SSteven Rostedt 
2520be957c44SSteven Rostedt 	if (length > BUF_MAX_DATA_SIZE)
2521bf41a158SSteven Rostedt 		goto out;
25227a8e76a3SSteven Rostedt 
252362f0b3ebSSteven Rostedt 	event = rb_reserve_next_event(buffer, cpu_buffer, length);
25247a8e76a3SSteven Rostedt 	if (!event)
2525d769041fSSteven Rostedt 		goto out;
25267a8e76a3SSteven Rostedt 
25277a8e76a3SSteven Rostedt 	return event;
25287a8e76a3SSteven Rostedt 
2529d769041fSSteven Rostedt  out:
2530261842b7SSteven Rostedt 	trace_recursive_unlock();
2531261842b7SSteven Rostedt 
2532261842b7SSteven Rostedt  out_nocheck:
25335168ae50SSteven Rostedt 	preempt_enable_notrace();
25347a8e76a3SSteven Rostedt 	return NULL;
25357a8e76a3SSteven Rostedt }
2536c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
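
/*
 * A minimal usage sketch of the reserve/commit pairing (illustrative
 * only; the function name and u32 payload are hypothetical):
 *
 *	static int example_write_u32(struct ring_buffer *buffer, u32 val)
 *	{
 *		struct ring_buffer_event *event;
 *		u32 *body;
 *
 *		event = ring_buffer_lock_reserve(buffer, sizeof(val));
 *		if (!event)
 *			return -EBUSY;
 *		body = ring_buffer_event_data(event);
 *		*body = val;
 *		return ring_buffer_unlock_commit(buffer, event);
 *	}
 */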
25377a8e76a3SSteven Rostedt 
2538a1863c21SSteven Rostedt static void
2539a1863c21SSteven Rostedt rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
25407a8e76a3SSteven Rostedt 		      struct ring_buffer_event *event)
25417a8e76a3SSteven Rostedt {
254269d1b839SSteven Rostedt 	u64 delta;
254369d1b839SSteven Rostedt 
2544fa743953SSteven Rostedt 	/*
2545fa743953SSteven Rostedt 	 * The first event in the commit queue updates the
2546fa743953SSteven Rostedt 	 * time stamp.
2547fa743953SSteven Rostedt 	 */
254869d1b839SSteven Rostedt 	if (rb_event_is_commit(cpu_buffer, event)) {
254969d1b839SSteven Rostedt 		/*
255069d1b839SSteven Rostedt 		 * A commit event that is first on a page
255169d1b839SSteven Rostedt 		 * updates the write timestamp with the page stamp
255269d1b839SSteven Rostedt 		 */
255369d1b839SSteven Rostedt 		if (!rb_event_index(event))
255469d1b839SSteven Rostedt 			cpu_buffer->write_stamp =
255569d1b839SSteven Rostedt 				cpu_buffer->commit_page->page->time_stamp;
255669d1b839SSteven Rostedt 		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
255769d1b839SSteven Rostedt 			delta = event->array[0];
255869d1b839SSteven Rostedt 			delta <<= TS_SHIFT;
255969d1b839SSteven Rostedt 			delta += event->time_delta;
256069d1b839SSteven Rostedt 			cpu_buffer->write_stamp += delta;
256169d1b839SSteven Rostedt 		} else
2562bf41a158SSteven Rostedt 			cpu_buffer->write_stamp += event->time_delta;
2563a1863c21SSteven Rostedt 	}
256469d1b839SSteven Rostedt }
2565bf41a158SSteven Rostedt 
2566a1863c21SSteven Rostedt static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2567a1863c21SSteven Rostedt 		      struct ring_buffer_event *event)
2568a1863c21SSteven Rostedt {
2569a1863c21SSteven Rostedt 	local_inc(&cpu_buffer->entries);
2570a1863c21SSteven Rostedt 	rb_update_write_stamp(cpu_buffer, event);
2571fa743953SSteven Rostedt 	rb_end_commit(cpu_buffer);
25727a8e76a3SSteven Rostedt }
25737a8e76a3SSteven Rostedt 
25747a8e76a3SSteven Rostedt /**
25757a8e76a3SSteven Rostedt  * ring_buffer_unlock_commit - commit a reserved event
25767a8e76a3SSteven Rostedt  * @buffer: The buffer to commit to
25777a8e76a3SSteven Rostedt  * @event: The event pointer to commit.
25787a8e76a3SSteven Rostedt  *
25797a8e76a3SSteven Rostedt  * This commits the data to the ring buffer, and releases any locks held.
25807a8e76a3SSteven Rostedt  *
25817a8e76a3SSteven Rostedt  * Must be paired with ring_buffer_lock_reserve.
25827a8e76a3SSteven Rostedt  */
25837a8e76a3SSteven Rostedt int ring_buffer_unlock_commit(struct ring_buffer *buffer,
25840a987751SArnaldo Carvalho de Melo 			      struct ring_buffer_event *event)
25857a8e76a3SSteven Rostedt {
25867a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
25877a8e76a3SSteven Rostedt 	int cpu = raw_smp_processor_id();
25887a8e76a3SSteven Rostedt 
25897a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
25907a8e76a3SSteven Rostedt 
25917a8e76a3SSteven Rostedt 	rb_commit(cpu_buffer, event);
25927a8e76a3SSteven Rostedt 
2593261842b7SSteven Rostedt 	trace_recursive_unlock();
2594261842b7SSteven Rostedt 
25955168ae50SSteven Rostedt 	preempt_enable_notrace();
25967a8e76a3SSteven Rostedt 
25977a8e76a3SSteven Rostedt 	return 0;
25987a8e76a3SSteven Rostedt }
2599c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
26007a8e76a3SSteven Rostedt 
2601f3b9aae1SFrederic Weisbecker static inline void rb_event_discard(struct ring_buffer_event *event)
2602f3b9aae1SFrederic Weisbecker {
260369d1b839SSteven Rostedt 	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
260469d1b839SSteven Rostedt 		event = skip_time_extend(event);
260569d1b839SSteven Rostedt 
2606334d4169SLai Jiangshan 	/* array[0] holds the actual length for the discarded event */
2607334d4169SLai Jiangshan 	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2608334d4169SLai Jiangshan 	event->type_len = RINGBUF_TYPE_PADDING;
2609f3b9aae1SFrederic Weisbecker 	/* time delta must be non zero */
2610f3b9aae1SFrederic Weisbecker 	if (!event->time_delta)
2611f3b9aae1SFrederic Weisbecker 		event->time_delta = 1;
2612f3b9aae1SFrederic Weisbecker }
2613f3b9aae1SFrederic Weisbecker 
2614a1863c21SSteven Rostedt /*
2615a1863c21SSteven Rostedt  * Decrement the entry count of the page that an event is on.
2616a1863c21SSteven Rostedt  * The event does not even need to exist, only the pointer
2617a1863c21SSteven Rostedt  * to the page it is on. This may only be called before the commit
2618a1863c21SSteven Rostedt  * takes place.
2619a1863c21SSteven Rostedt  */
2620a1863c21SSteven Rostedt static inline void
2621a1863c21SSteven Rostedt rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2622a1863c21SSteven Rostedt 		   struct ring_buffer_event *event)
2623a1863c21SSteven Rostedt {
2624a1863c21SSteven Rostedt 	unsigned long addr = (unsigned long)event;
2625a1863c21SSteven Rostedt 	struct buffer_page *bpage = cpu_buffer->commit_page;
2626a1863c21SSteven Rostedt 	struct buffer_page *start;
2627a1863c21SSteven Rostedt 
2628a1863c21SSteven Rostedt 	addr &= PAGE_MASK;
2629a1863c21SSteven Rostedt 
2630a1863c21SSteven Rostedt 	/* Do the likely case first */
2631a1863c21SSteven Rostedt 	if (likely(bpage->page == (void *)addr)) {
2632a1863c21SSteven Rostedt 		local_dec(&bpage->entries);
2633a1863c21SSteven Rostedt 		return;
2634a1863c21SSteven Rostedt 	}
2635a1863c21SSteven Rostedt 
2636a1863c21SSteven Rostedt 	/*
2637a1863c21SSteven Rostedt 	 * Because the commit page may be on the reader page we
2638a1863c21SSteven Rostedt 	 * start with the next page and check the end loop there.
2639a1863c21SSteven Rostedt 	 */
2640a1863c21SSteven Rostedt 	rb_inc_page(cpu_buffer, &bpage);
2641a1863c21SSteven Rostedt 	start = bpage;
2642a1863c21SSteven Rostedt 	do {
2643a1863c21SSteven Rostedt 		if (bpage->page == (void *)addr) {
2644a1863c21SSteven Rostedt 			local_dec(&bpage->entries);
2645a1863c21SSteven Rostedt 			return;
2646a1863c21SSteven Rostedt 		}
2647a1863c21SSteven Rostedt 		rb_inc_page(cpu_buffer, &bpage);
2648a1863c21SSteven Rostedt 	} while (bpage != start);
2649a1863c21SSteven Rostedt 
2650a1863c21SSteven Rostedt 	/* commit not part of this buffer?? */
2651a1863c21SSteven Rostedt 	RB_WARN_ON(cpu_buffer, 1);
2652a1863c21SSteven Rostedt }
2653a1863c21SSteven Rostedt 
26547a8e76a3SSteven Rostedt /**
2655fa1b47ddSSteven Rostedt  * ring_buffer_commit_discard - discard an event that has not been committed
2656fa1b47ddSSteven Rostedt  * @buffer: the ring buffer
2657fa1b47ddSSteven Rostedt  * @event: non committed event to discard
2658fa1b47ddSSteven Rostedt  *
2659dc892f73SSteven Rostedt  * Sometimes an event that is in the ring buffer needs to be ignored.
2660dc892f73SSteven Rostedt  * This function lets the user discard an event in the ring buffer
2661dc892f73SSteven Rostedt  * and then that event will not be read later.
2662dc892f73SSteven Rostedt  *
2663dc892f73SSteven Rostedt  * This function only works if it is called before the item has been
2664dc892f73SSteven Rostedt  * committed. It will try to free the event from the ring buffer
2665fa1b47ddSSteven Rostedt  * if another event has not been added behind it.
2666fa1b47ddSSteven Rostedt  *
2667fa1b47ddSSteven Rostedt  * If another event has been added behind it, it will set the event
2668fa1b47ddSSteven Rostedt  * up as discarded, and perform the commit.
2669fa1b47ddSSteven Rostedt  *
2670fa1b47ddSSteven Rostedt  * If this function is called, do not call ring_buffer_unlock_commit on
2671fa1b47ddSSteven Rostedt  * the event.
2672fa1b47ddSSteven Rostedt  */
2673fa1b47ddSSteven Rostedt void ring_buffer_discard_commit(struct ring_buffer *buffer,
2674fa1b47ddSSteven Rostedt 				struct ring_buffer_event *event)
2675fa1b47ddSSteven Rostedt {
2676fa1b47ddSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
2677fa1b47ddSSteven Rostedt 	int cpu;
2678fa1b47ddSSteven Rostedt 
2679fa1b47ddSSteven Rostedt 	/* The event is discarded regardless */
2680f3b9aae1SFrederic Weisbecker 	rb_event_discard(event);
2681fa1b47ddSSteven Rostedt 
2682fa743953SSteven Rostedt 	cpu = smp_processor_id();
2683fa743953SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
2684fa743953SSteven Rostedt 
2685fa1b47ddSSteven Rostedt 	/*
2686fa1b47ddSSteven Rostedt 	 * This must only be called if the event has not been
2687fa1b47ddSSteven Rostedt 	 * committed yet. Thus we can assume that preemption
2688fa1b47ddSSteven Rostedt 	 * is still disabled.
2689fa1b47ddSSteven Rostedt 	 */
2690fa743953SSteven Rostedt 	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2691fa1b47ddSSteven Rostedt 
2692a1863c21SSteven Rostedt 	rb_decrement_entry(cpu_buffer, event);
26930f2541d2SSteven Rostedt 	if (rb_try_to_discard(cpu_buffer, event))
2694fa1b47ddSSteven Rostedt 		goto out;
2695fa1b47ddSSteven Rostedt 
2696fa1b47ddSSteven Rostedt 	/*
2697fa1b47ddSSteven Rostedt 	 * The commit is still visible by the reader, so we
2698a1863c21SSteven Rostedt 	 * must still update the timestamp.
2699fa1b47ddSSteven Rostedt 	 */
2700a1863c21SSteven Rostedt 	rb_update_write_stamp(cpu_buffer, event);
2701fa1b47ddSSteven Rostedt  out:
2702fa743953SSteven Rostedt 	rb_end_commit(cpu_buffer);
2703fa1b47ddSSteven Rostedt 
2704f3b9aae1SFrederic Weisbecker 	trace_recursive_unlock();
2705f3b9aae1SFrederic Weisbecker 
27065168ae50SSteven Rostedt 	preempt_enable_notrace();
2707fa1b47ddSSteven Rostedt 
2708fa1b47ddSSteven Rostedt }
2709fa1b47ddSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
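
/*
 * Usage sketch (illustrative; the event_unwanted() predicate is
 * hypothetical): a discard replaces the commit when the caller decides
 * the reserved event should not be kept.
 *
 *	event = ring_buffer_lock_reserve(buffer, size);
 *	if (!event)
 *		return;
 *	... fill in ring_buffer_event_data(event) ...
 *	if (event_unwanted(event))
 *		ring_buffer_discard_commit(buffer, event);
 *	else
 *		ring_buffer_unlock_commit(buffer, event);
 */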
2710fa1b47ddSSteven Rostedt 
2711fa1b47ddSSteven Rostedt /**
27127a8e76a3SSteven Rostedt  * ring_buffer_write - write data to the buffer without reserving
27137a8e76a3SSteven Rostedt  * @buffer: The ring buffer to write to.
27147a8e76a3SSteven Rostedt  * @length: The length of the data being written (excluding the event header)
27157a8e76a3SSteven Rostedt  * @data: The data to write to the buffer.
27167a8e76a3SSteven Rostedt  *
27177a8e76a3SSteven Rostedt  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
27187a8e76a3SSteven Rostedt  * one function. If you already have the data to write to the buffer, it
27197a8e76a3SSteven Rostedt  * may be easier to simply call this function.
27207a8e76a3SSteven Rostedt  *
27217a8e76a3SSteven Rostedt  * Note, like ring_buffer_lock_reserve, the length is the length of the data
27227a8e76a3SSteven Rostedt  * and not the length of the event which would hold the header.
27237a8e76a3SSteven Rostedt  */
27247a8e76a3SSteven Rostedt int ring_buffer_write(struct ring_buffer *buffer,
27257a8e76a3SSteven Rostedt 			unsigned long length,
27267a8e76a3SSteven Rostedt 			void *data)
27277a8e76a3SSteven Rostedt {
27287a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
27297a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
27307a8e76a3SSteven Rostedt 	void *body;
27317a8e76a3SSteven Rostedt 	int ret = -EBUSY;
27325168ae50SSteven Rostedt 	int cpu;
27337a8e76a3SSteven Rostedt 
2734033601a3SSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
2735a3583244SSteven Rostedt 		return -EBUSY;
2736a3583244SSteven Rostedt 
27375168ae50SSteven Rostedt 	preempt_disable_notrace();
2738bf41a158SSteven Rostedt 
273952fbe9cdSLai Jiangshan 	if (atomic_read(&buffer->record_disabled))
274052fbe9cdSLai Jiangshan 		goto out;
274152fbe9cdSLai Jiangshan 
27427a8e76a3SSteven Rostedt 	cpu = raw_smp_processor_id();
27437a8e76a3SSteven Rostedt 
27449e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2745d769041fSSteven Rostedt 		goto out;
27467a8e76a3SSteven Rostedt 
27477a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
27487a8e76a3SSteven Rostedt 
27497a8e76a3SSteven Rostedt 	if (atomic_read(&cpu_buffer->record_disabled))
27507a8e76a3SSteven Rostedt 		goto out;
27517a8e76a3SSteven Rostedt 
2752be957c44SSteven Rostedt 	if (length > BUF_MAX_DATA_SIZE)
2753be957c44SSteven Rostedt 		goto out;
2754be957c44SSteven Rostedt 
275562f0b3ebSSteven Rostedt 	event = rb_reserve_next_event(buffer, cpu_buffer, length);
27567a8e76a3SSteven Rostedt 	if (!event)
27577a8e76a3SSteven Rostedt 		goto out;
27587a8e76a3SSteven Rostedt 
27597a8e76a3SSteven Rostedt 	body = rb_event_data(event);
27607a8e76a3SSteven Rostedt 
27617a8e76a3SSteven Rostedt 	memcpy(body, data, length);
27627a8e76a3SSteven Rostedt 
27637a8e76a3SSteven Rostedt 	rb_commit(cpu_buffer, event);
27647a8e76a3SSteven Rostedt 
27657a8e76a3SSteven Rostedt 	ret = 0;
27667a8e76a3SSteven Rostedt  out:
27675168ae50SSteven Rostedt 	preempt_enable_notrace();
27687a8e76a3SSteven Rostedt 
27697a8e76a3SSteven Rostedt 	return ret;
27707a8e76a3SSteven Rostedt }
2771c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_write);
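
/*
 * Usage sketch (illustrative; struct my_entry and its values are
 * hypothetical). This folds the reserve, memcpy and commit into one
 * call; a nonzero return means the write failed:
 *
 *	struct my_entry e = { .pid = 1, .val = 42 };
 *	int ret;
 *
 *	ret = ring_buffer_write(buffer, sizeof(e), &e);
 */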
27727a8e76a3SSteven Rostedt 
277334a148bfSAndrew Morton static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2774bf41a158SSteven Rostedt {
2775bf41a158SSteven Rostedt 	struct buffer_page *reader = cpu_buffer->reader_page;
277677ae365eSSteven Rostedt 	struct buffer_page *head = rb_set_head_page(cpu_buffer);
2777bf41a158SSteven Rostedt 	struct buffer_page *commit = cpu_buffer->commit_page;
2778bf41a158SSteven Rostedt 
277977ae365eSSteven Rostedt 	/* In case of error, head will be NULL */
278077ae365eSSteven Rostedt 	if (unlikely(!head))
278177ae365eSSteven Rostedt 		return 1;
278277ae365eSSteven Rostedt 
2783bf41a158SSteven Rostedt 	return reader->read == rb_page_commit(reader) &&
2784bf41a158SSteven Rostedt 		(commit == reader ||
2785bf41a158SSteven Rostedt 		 (commit == head &&
2786bf41a158SSteven Rostedt 		  head->read == rb_page_commit(commit)));
2787bf41a158SSteven Rostedt }
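
/*
 * In words (a paraphrase of the check above): the cpu buffer is empty
 * when the reader page has been fully consumed and nothing committed
 * lies beyond it, i.e. the commit page is still the reader page, or
 * the commit page is the head page and that page has been read up to
 * its commit index.
 */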
2788bf41a158SSteven Rostedt 
27897a8e76a3SSteven Rostedt /**
27907a8e76a3SSteven Rostedt  * ring_buffer_record_disable - stop all writes into the buffer
27917a8e76a3SSteven Rostedt  * @buffer: The ring buffer to stop writes to.
27927a8e76a3SSteven Rostedt  *
27937a8e76a3SSteven Rostedt  * This prevents all writes to the buffer. Any attempt to write
27947a8e76a3SSteven Rostedt  * to the buffer after this will fail and return NULL.
27957a8e76a3SSteven Rostedt  *
27967a8e76a3SSteven Rostedt  * The caller should call synchronize_sched() after this.
27977a8e76a3SSteven Rostedt  */
27987a8e76a3SSteven Rostedt void ring_buffer_record_disable(struct ring_buffer *buffer)
27997a8e76a3SSteven Rostedt {
28007a8e76a3SSteven Rostedt 	atomic_inc(&buffer->record_disabled);
28017a8e76a3SSteven Rostedt }
2802c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
28037a8e76a3SSteven Rostedt 
28047a8e76a3SSteven Rostedt /**
28057a8e76a3SSteven Rostedt  * ring_buffer_record_enable - enable writes to the buffer
28067a8e76a3SSteven Rostedt  * @buffer: The ring buffer to enable writes
28077a8e76a3SSteven Rostedt  *
28087a8e76a3SSteven Rostedt  * Note, multiple disables will need the same number of enables
2809c41b20e7SAdam Buchbinder  * to truly enable the writing (much like preempt_disable).
28107a8e76a3SSteven Rostedt  */
28117a8e76a3SSteven Rostedt void ring_buffer_record_enable(struct ring_buffer *buffer)
28127a8e76a3SSteven Rostedt {
28137a8e76a3SSteven Rostedt 	atomic_dec(&buffer->record_disabled);
28147a8e76a3SSteven Rostedt }
2815c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
28167a8e76a3SSteven Rostedt 
28177a8e76a3SSteven Rostedt /**
2818499e5470SSteven Rostedt  * ring_buffer_record_off - stop all writes into the buffer
2819499e5470SSteven Rostedt  * @buffer: The ring buffer to stop writes to.
2820499e5470SSteven Rostedt  *
2821499e5470SSteven Rostedt  * This prevents all writes to the buffer. Any attempt to write
2822499e5470SSteven Rostedt  * to the buffer after this will fail and return NULL.
2823499e5470SSteven Rostedt  *
2824499e5470SSteven Rostedt  * This is different than ring_buffer_record_disable() as
2825499e5470SSteven Rostedt  * it works like an on/off switch, whereas the disable() version
2826499e5470SSteven Rostedt  * must be paired with an enable().
2827499e5470SSteven Rostedt  */
2828499e5470SSteven Rostedt void ring_buffer_record_off(struct ring_buffer *buffer)
2829499e5470SSteven Rostedt {
2830499e5470SSteven Rostedt 	unsigned int rd;
2831499e5470SSteven Rostedt 	unsigned int new_rd;
2832499e5470SSteven Rostedt 
2833499e5470SSteven Rostedt 	do {
2834499e5470SSteven Rostedt 		rd = atomic_read(&buffer->record_disabled);
2835499e5470SSteven Rostedt 		new_rd = rd | RB_BUFFER_OFF;
2836499e5470SSteven Rostedt 	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2837499e5470SSteven Rostedt }
2838499e5470SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_record_off);
2839499e5470SSteven Rostedt 
2840499e5470SSteven Rostedt /**
2841499e5470SSteven Rostedt  * ring_buffer_record_on - restart writes into the buffer
2842499e5470SSteven Rostedt  * @buffer: The ring buffer to start writes to.
2843499e5470SSteven Rostedt  *
2844499e5470SSteven Rostedt  * This enables all writes to the buffer that was disabled by
2845499e5470SSteven Rostedt  * ring_buffer_record_off().
2846499e5470SSteven Rostedt  *
2847499e5470SSteven Rostedt  * This is different than ring_buffer_record_enable() as
2848499e5470SSteven Rostedt  * it works like an on/off switch, whereas the enable() version
2849499e5470SSteven Rostedt  * must be paired with a disable().
2850499e5470SSteven Rostedt  */
2851499e5470SSteven Rostedt void ring_buffer_record_on(struct ring_buffer *buffer)
2852499e5470SSteven Rostedt {
2853499e5470SSteven Rostedt 	unsigned int rd;
2854499e5470SSteven Rostedt 	unsigned int new_rd;
2855499e5470SSteven Rostedt 
2856499e5470SSteven Rostedt 	do {
2857499e5470SSteven Rostedt 		rd = atomic_read(&buffer->record_disabled);
2858499e5470SSteven Rostedt 		new_rd = rd & ~RB_BUFFER_OFF;
2859499e5470SSteven Rostedt 	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2860499e5470SSteven Rostedt }
2861499e5470SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_record_on);
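
/*
 * Contrast between the two mechanisms (illustrative call sequences):
 * the counter-based calls nest, the bit-based calls do not.
 *
 *	ring_buffer_record_disable(b);
 *	ring_buffer_record_disable(b);
 *	ring_buffer_record_enable(b);	recording still off (count == 1)
 *	ring_buffer_record_enable(b);	recording back on
 *
 *	ring_buffer_record_off(b);
 *	ring_buffer_record_off(b);
 *	ring_buffer_record_on(b);	recording back on (single bit)
 */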
2862499e5470SSteven Rostedt 
2863499e5470SSteven Rostedt /**
2864499e5470SSteven Rostedt  * ring_buffer_record_is_on - return true if the ring buffer can write
2865499e5470SSteven Rostedt  * @buffer: The ring buffer to see if write is enabled
2866499e5470SSteven Rostedt  *
2867499e5470SSteven Rostedt  * Returns true if the ring buffer is in a state that it accepts writes.
2868499e5470SSteven Rostedt  */
2869499e5470SSteven Rostedt int ring_buffer_record_is_on(struct ring_buffer *buffer)
2870499e5470SSteven Rostedt {
2871499e5470SSteven Rostedt 	return !atomic_read(&buffer->record_disabled);
2872499e5470SSteven Rostedt }
2873499e5470SSteven Rostedt 
2874499e5470SSteven Rostedt /**
28757a8e76a3SSteven Rostedt  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
28767a8e76a3SSteven Rostedt  * @buffer: The ring buffer to stop writes to.
28777a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to stop
28787a8e76a3SSteven Rostedt  *
28797a8e76a3SSteven Rostedt  * This prevents all writes to the buffer. Any attempt to write
28807a8e76a3SSteven Rostedt  * to the buffer after this will fail and return NULL.
28817a8e76a3SSteven Rostedt  *
28827a8e76a3SSteven Rostedt  * The caller should call synchronize_sched() after this.
28837a8e76a3SSteven Rostedt  */
28847a8e76a3SSteven Rostedt void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
28857a8e76a3SSteven Rostedt {
28867a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
28877a8e76a3SSteven Rostedt 
28889e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
28898aabee57SSteven Rostedt 		return;
28907a8e76a3SSteven Rostedt 
28917a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
28927a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
28937a8e76a3SSteven Rostedt }
2894c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
28957a8e76a3SSteven Rostedt 
28967a8e76a3SSteven Rostedt /**
28977a8e76a3SSteven Rostedt  * ring_buffer_record_enable_cpu - enable writes to the buffer
28987a8e76a3SSteven Rostedt  * @buffer: The ring buffer to enable writes
28997a8e76a3SSteven Rostedt  * @cpu: The CPU to enable.
29007a8e76a3SSteven Rostedt  *
29017a8e76a3SSteven Rostedt  * Note, multiple disables will need the same number of enables
2902c41b20e7SAdam Buchbinder  * to truly enable the writing (much like preempt_disable).
29037a8e76a3SSteven Rostedt  */
29047a8e76a3SSteven Rostedt void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
29057a8e76a3SSteven Rostedt {
29067a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
29077a8e76a3SSteven Rostedt 
29089e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
29098aabee57SSteven Rostedt 		return;
29107a8e76a3SSteven Rostedt 
29117a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
29127a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
29137a8e76a3SSteven Rostedt }
2914c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
29157a8e76a3SSteven Rostedt 
2916f6195aa0SSteven Rostedt /*
2917f6195aa0SSteven Rostedt  * The total entries in the ring buffer is the running counter
2918f6195aa0SSteven Rostedt  * of entries entered into the ring buffer, minus the sum of
2919f6195aa0SSteven Rostedt  * the entries read from the ring buffer and the number of
2920f6195aa0SSteven Rostedt  * entries that were overwritten.
2921f6195aa0SSteven Rostedt  */
2922f6195aa0SSteven Rostedt static inline unsigned long
2923f6195aa0SSteven Rostedt rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
2924f6195aa0SSteven Rostedt {
2925f6195aa0SSteven Rostedt 	return local_read(&cpu_buffer->entries) -
2926f6195aa0SSteven Rostedt 		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
2927f6195aa0SSteven Rostedt }
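
/*
 * Worked example with hypothetical counts: after 1000 events have been
 * written, 200 read and 50 overwritten,
 *
 *	rb_num_of_entries() == 1000 - (50 + 200) == 750
 *
 * events remain to be read.
 */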
2928f6195aa0SSteven Rostedt 
29297a8e76a3SSteven Rostedt /**
2930c64e148aSVaibhav Nagarnaik  * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
2931c64e148aSVaibhav Nagarnaik  * @buffer: The ring buffer
2932c64e148aSVaibhav Nagarnaik  * @cpu: The per CPU buffer to read from.
2933c64e148aSVaibhav Nagarnaik  */
2934c64e148aSVaibhav Nagarnaik unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
2935c64e148aSVaibhav Nagarnaik {
2936c64e148aSVaibhav Nagarnaik 	unsigned long flags;
2937c64e148aSVaibhav Nagarnaik 	struct ring_buffer_per_cpu *cpu_buffer;
2938c64e148aSVaibhav Nagarnaik 	struct buffer_page *bpage;
2939c64e148aSVaibhav Nagarnaik 	unsigned long ret;
2940c64e148aSVaibhav Nagarnaik 
2941c64e148aSVaibhav Nagarnaik 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2942c64e148aSVaibhav Nagarnaik 		return 0;
2943c64e148aSVaibhav Nagarnaik 
2944c64e148aSVaibhav Nagarnaik 	cpu_buffer = buffer->buffers[cpu];
29457115e3fcSLinus Torvalds 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2946c64e148aSVaibhav Nagarnaik 	/*
2947c64e148aSVaibhav Nagarnaik 	 * if the tail is on reader_page, oldest time stamp is on the reader
2948c64e148aSVaibhav Nagarnaik 	 * page
2949c64e148aSVaibhav Nagarnaik 	 */
2950c64e148aSVaibhav Nagarnaik 	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
2951c64e148aSVaibhav Nagarnaik 		bpage = cpu_buffer->reader_page;
2952c64e148aSVaibhav Nagarnaik 	else
2953c64e148aSVaibhav Nagarnaik 		bpage = rb_set_head_page(cpu_buffer);
2954c64e148aSVaibhav Nagarnaik 	ret = bpage->page->time_stamp;
29557115e3fcSLinus Torvalds 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2956c64e148aSVaibhav Nagarnaik 
2957c64e148aSVaibhav Nagarnaik 	return ret;
2958c64e148aSVaibhav Nagarnaik }
2959c64e148aSVaibhav Nagarnaik EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
2960c64e148aSVaibhav Nagarnaik 
2961c64e148aSVaibhav Nagarnaik /**
2962c64e148aSVaibhav Nagarnaik  * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
2963c64e148aSVaibhav Nagarnaik  * @buffer: The ring buffer
2964c64e148aSVaibhav Nagarnaik  * @cpu: The per CPU buffer to read from.
2965c64e148aSVaibhav Nagarnaik  */
2966c64e148aSVaibhav Nagarnaik unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
2967c64e148aSVaibhav Nagarnaik {
2968c64e148aSVaibhav Nagarnaik 	struct ring_buffer_per_cpu *cpu_buffer;
2969c64e148aSVaibhav Nagarnaik 	unsigned long ret;
2970c64e148aSVaibhav Nagarnaik 
2971c64e148aSVaibhav Nagarnaik 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2972c64e148aSVaibhav Nagarnaik 		return 0;
2973c64e148aSVaibhav Nagarnaik 
2974c64e148aSVaibhav Nagarnaik 	cpu_buffer = buffer->buffers[cpu];
2975c64e148aSVaibhav Nagarnaik 	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
2976c64e148aSVaibhav Nagarnaik 
2977c64e148aSVaibhav Nagarnaik 	return ret;
2978c64e148aSVaibhav Nagarnaik }
2979c64e148aSVaibhav Nagarnaik EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
2980c64e148aSVaibhav Nagarnaik 
2981c64e148aSVaibhav Nagarnaik /**
29827a8e76a3SSteven Rostedt  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
29837a8e76a3SSteven Rostedt  * @buffer: The ring buffer
29847a8e76a3SSteven Rostedt  * @cpu: The per CPU buffer to get the entries from.
29857a8e76a3SSteven Rostedt  */
29867a8e76a3SSteven Rostedt unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
29877a8e76a3SSteven Rostedt {
29887a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
29897a8e76a3SSteven Rostedt 
29909e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
29918aabee57SSteven Rostedt 		return 0;
29927a8e76a3SSteven Rostedt 
29937a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
2994554f786eSSteven Rostedt 
2995f6195aa0SSteven Rostedt 	return rb_num_of_entries(cpu_buffer);
29967a8e76a3SSteven Rostedt }
2997c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
29987a8e76a3SSteven Rostedt 
29997a8e76a3SSteven Rostedt /**
30007a8e76a3SSteven Rostedt  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
30017a8e76a3SSteven Rostedt  * @buffer: The ring buffer
30027a8e76a3SSteven Rostedt  * @cpu: The per CPU buffer to get the number of overruns from
30037a8e76a3SSteven Rostedt  */
30047a8e76a3SSteven Rostedt unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
30057a8e76a3SSteven Rostedt {
30067a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
30078aabee57SSteven Rostedt 	unsigned long ret;
30087a8e76a3SSteven Rostedt 
30099e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
30108aabee57SSteven Rostedt 		return 0;
30117a8e76a3SSteven Rostedt 
30127a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
301377ae365eSSteven Rostedt 	ret = local_read(&cpu_buffer->overrun);
3014554f786eSSteven Rostedt 
3015554f786eSSteven Rostedt 	return ret;
30167a8e76a3SSteven Rostedt }
3017c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
30187a8e76a3SSteven Rostedt 
30197a8e76a3SSteven Rostedt /**
3020f0d2c681SSteven Rostedt  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
3021f0d2c681SSteven Rostedt  * @buffer: The ring buffer
3022f0d2c681SSteven Rostedt  * @cpu: The per CPU buffer to get the number of overruns from
3023f0d2c681SSteven Rostedt  */
3024f0d2c681SSteven Rostedt unsigned long
3025f0d2c681SSteven Rostedt ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3026f0d2c681SSteven Rostedt {
3027f0d2c681SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
3028f0d2c681SSteven Rostedt 	unsigned long ret;
3029f0d2c681SSteven Rostedt 
3030f0d2c681SSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3031f0d2c681SSteven Rostedt 		return 0;
3032f0d2c681SSteven Rostedt 
3033f0d2c681SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
303477ae365eSSteven Rostedt 	ret = local_read(&cpu_buffer->commit_overrun);
3035f0d2c681SSteven Rostedt 
3036f0d2c681SSteven Rostedt 	return ret;
3037f0d2c681SSteven Rostedt }
3038f0d2c681SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3039f0d2c681SSteven Rostedt 
3040f0d2c681SSteven Rostedt /**
30417a8e76a3SSteven Rostedt  * ring_buffer_entries - get the number of entries in a buffer
30427a8e76a3SSteven Rostedt  * @buffer: The ring buffer
30437a8e76a3SSteven Rostedt  *
30447a8e76a3SSteven Rostedt  * Returns the total number of entries in the ring buffer
30457a8e76a3SSteven Rostedt  * (all CPU entries)
30467a8e76a3SSteven Rostedt  */
30477a8e76a3SSteven Rostedt unsigned long ring_buffer_entries(struct ring_buffer *buffer)
30487a8e76a3SSteven Rostedt {
30497a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
30507a8e76a3SSteven Rostedt 	unsigned long entries = 0;
30517a8e76a3SSteven Rostedt 	int cpu;
30527a8e76a3SSteven Rostedt 
30537a8e76a3SSteven Rostedt 	/* if you care about this being correct, lock the buffer */
30547a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
30557a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
3056f6195aa0SSteven Rostedt 		entries += rb_num_of_entries(cpu_buffer);
30577a8e76a3SSteven Rostedt 	}
30587a8e76a3SSteven Rostedt 
30597a8e76a3SSteven Rostedt 	return entries;
30607a8e76a3SSteven Rostedt }
3061c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries);
30627a8e76a3SSteven Rostedt 
30637a8e76a3SSteven Rostedt /**
306467b394f7SJiri Olsa  * ring_buffer_overruns - get the number of overruns in the buffer
30657a8e76a3SSteven Rostedt  * @buffer: The ring buffer
30667a8e76a3SSteven Rostedt  *
30677a8e76a3SSteven Rostedt  * Returns the total number of overruns in the ring buffer
30687a8e76a3SSteven Rostedt  * (all CPU entries)
30697a8e76a3SSteven Rostedt  */
30707a8e76a3SSteven Rostedt unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
30717a8e76a3SSteven Rostedt {
30727a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
30737a8e76a3SSteven Rostedt 	unsigned long overruns = 0;
30747a8e76a3SSteven Rostedt 	int cpu;
30757a8e76a3SSteven Rostedt 
30767a8e76a3SSteven Rostedt 	/* if you care about this being correct, lock the buffer */
30777a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
30787a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
307977ae365eSSteven Rostedt 		overruns += local_read(&cpu_buffer->overrun);
30807a8e76a3SSteven Rostedt 	}
30817a8e76a3SSteven Rostedt 
30827a8e76a3SSteven Rostedt 	return overruns;
30837a8e76a3SSteven Rostedt }
3084c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overruns);
30857a8e76a3SSteven Rostedt 
3086642edba5SSteven Rostedt static void rb_iter_reset(struct ring_buffer_iter *iter)
30877a8e76a3SSteven Rostedt {
30887a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
30897a8e76a3SSteven Rostedt 
3090d769041fSSteven Rostedt 	/* Iterator usage is expected to have record disabled */
3091d769041fSSteven Rostedt 	if (list_empty(&cpu_buffer->reader_page->list)) {
309277ae365eSSteven Rostedt 		iter->head_page = rb_set_head_page(cpu_buffer);
309377ae365eSSteven Rostedt 		if (unlikely(!iter->head_page))
309477ae365eSSteven Rostedt 			return;
309577ae365eSSteven Rostedt 		iter->head = iter->head_page->read;
3096d769041fSSteven Rostedt 	} else {
3097d769041fSSteven Rostedt 		iter->head_page = cpu_buffer->reader_page;
30986f807acdSSteven Rostedt 		iter->head = cpu_buffer->reader_page->read;
3099d769041fSSteven Rostedt 	}
3100d769041fSSteven Rostedt 	if (iter->head)
3101d769041fSSteven Rostedt 		iter->read_stamp = cpu_buffer->read_stamp;
3102d769041fSSteven Rostedt 	else
3103abc9b56dSSteven Rostedt 		iter->read_stamp = iter->head_page->page->time_stamp;
3104492a74f4SSteven Rostedt 	iter->cache_reader_page = cpu_buffer->reader_page;
3105492a74f4SSteven Rostedt 	iter->cache_read = cpu_buffer->read;
3106642edba5SSteven Rostedt }
3107f83c9d0fSSteven Rostedt 
3108642edba5SSteven Rostedt /**
3109642edba5SSteven Rostedt  * ring_buffer_iter_reset - reset an iterator
3110642edba5SSteven Rostedt  * @iter: The iterator to reset
3111642edba5SSteven Rostedt  *
3112642edba5SSteven Rostedt  * Resets the iterator, so that it will start from the beginning
3113642edba5SSteven Rostedt  * again.
3114642edba5SSteven Rostedt  */
3115642edba5SSteven Rostedt void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3116642edba5SSteven Rostedt {
3117554f786eSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
3118642edba5SSteven Rostedt 	unsigned long flags;
3119642edba5SSteven Rostedt 
3120554f786eSSteven Rostedt 	if (!iter)
3121554f786eSSteven Rostedt 		return;
3122554f786eSSteven Rostedt 
3123554f786eSSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
3124554f786eSSteven Rostedt 
31255389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3126642edba5SSteven Rostedt 	rb_iter_reset(iter);
31275389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
31287a8e76a3SSteven Rostedt }
3129c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
31307a8e76a3SSteven Rostedt 
31317a8e76a3SSteven Rostedt /**
31327a8e76a3SSteven Rostedt  * ring_buffer_iter_empty - check if an iterator has no more to read
31337a8e76a3SSteven Rostedt  * @iter: The iterator to check
31347a8e76a3SSteven Rostedt  */
31357a8e76a3SSteven Rostedt int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
31367a8e76a3SSteven Rostedt {
31377a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
31387a8e76a3SSteven Rostedt 
31397a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
31407a8e76a3SSteven Rostedt 
3141bf41a158SSteven Rostedt 	return iter->head_page == cpu_buffer->commit_page &&
3142bf41a158SSteven Rostedt 		iter->head == rb_commit_index(cpu_buffer);
31437a8e76a3SSteven Rostedt }
3144c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
31457a8e76a3SSteven Rostedt 
31467a8e76a3SSteven Rostedt static void
31477a8e76a3SSteven Rostedt rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
31487a8e76a3SSteven Rostedt 		     struct ring_buffer_event *event)
31497a8e76a3SSteven Rostedt {
31507a8e76a3SSteven Rostedt 	u64 delta;
31517a8e76a3SSteven Rostedt 
3152334d4169SLai Jiangshan 	switch (event->type_len) {
31537a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
31547a8e76a3SSteven Rostedt 		return;
31557a8e76a3SSteven Rostedt 
31567a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
31577a8e76a3SSteven Rostedt 		delta = event->array[0];
31587a8e76a3SSteven Rostedt 		delta <<= TS_SHIFT;
31597a8e76a3SSteven Rostedt 		delta += event->time_delta;
31607a8e76a3SSteven Rostedt 		cpu_buffer->read_stamp += delta;
31617a8e76a3SSteven Rostedt 		return;
31627a8e76a3SSteven Rostedt 
31637a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
31647a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
31657a8e76a3SSteven Rostedt 		return;
31667a8e76a3SSteven Rostedt 
31677a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
31687a8e76a3SSteven Rostedt 		cpu_buffer->read_stamp += event->time_delta;
31697a8e76a3SSteven Rostedt 		return;
31707a8e76a3SSteven Rostedt 
31717a8e76a3SSteven Rostedt 	default:
31727a8e76a3SSteven Rostedt 		BUG();
31737a8e76a3SSteven Rostedt 	}
31747a8e76a3SSteven Rostedt 	return;
31757a8e76a3SSteven Rostedt }
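
/*
 * Worked example (editor's note, derived from the cases above and
 * assuming TS_SHIFT == 27, matching the 27-bit time_delta field): a
 * time extend event carries the upper bits of a large delta in
 * array[0] and the low bits in time_delta, so array[0] == 2 with
 * time_delta == 5 reconstructs as
 *
 *	delta = (2 << 27) + 5 = 268435461
 *
 * which is then added to read_stamp, just as a plain data event adds
 * its small time_delta directly.
 */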
31767a8e76a3SSteven Rostedt 
31777a8e76a3SSteven Rostedt static void
31787a8e76a3SSteven Rostedt rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
31797a8e76a3SSteven Rostedt 			  struct ring_buffer_event *event)
31807a8e76a3SSteven Rostedt {
31817a8e76a3SSteven Rostedt 	u64 delta;
31827a8e76a3SSteven Rostedt 
3183334d4169SLai Jiangshan 	switch (event->type_len) {
31847a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
31857a8e76a3SSteven Rostedt 		return;
31867a8e76a3SSteven Rostedt 
31877a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
31887a8e76a3SSteven Rostedt 		delta = event->array[0];
31897a8e76a3SSteven Rostedt 		delta <<= TS_SHIFT;
31907a8e76a3SSteven Rostedt 		delta += event->time_delta;
31917a8e76a3SSteven Rostedt 		iter->read_stamp += delta;
31927a8e76a3SSteven Rostedt 		return;
31937a8e76a3SSteven Rostedt 
31947a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
31957a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
31967a8e76a3SSteven Rostedt 		return;
31977a8e76a3SSteven Rostedt 
31987a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
31997a8e76a3SSteven Rostedt 		iter->read_stamp += event->time_delta;
32007a8e76a3SSteven Rostedt 		return;
32017a8e76a3SSteven Rostedt 
32027a8e76a3SSteven Rostedt 	default:
32037a8e76a3SSteven Rostedt 		BUG();
32047a8e76a3SSteven Rostedt 	}
32057a8e76a3SSteven Rostedt 	return;
32067a8e76a3SSteven Rostedt }
32077a8e76a3SSteven Rostedt 
3208d769041fSSteven Rostedt static struct buffer_page *
3209d769041fSSteven Rostedt rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
32107a8e76a3SSteven Rostedt {
3211d769041fSSteven Rostedt 	struct buffer_page *reader = NULL;
321266a8cb95SSteven Rostedt 	unsigned long overwrite;
3213d769041fSSteven Rostedt 	unsigned long flags;
3214818e3dd3SSteven Rostedt 	int nr_loops = 0;
321577ae365eSSteven Rostedt 	int ret;
3216d769041fSSteven Rostedt 
32173e03fb7fSSteven Rostedt 	local_irq_save(flags);
32180199c4e6SThomas Gleixner 	arch_spin_lock(&cpu_buffer->lock);
3219d769041fSSteven Rostedt 
3220d769041fSSteven Rostedt  again:
3221818e3dd3SSteven Rostedt 	/*
3222818e3dd3SSteven Rostedt 	 * This should normally only loop twice. But because the
3223818e3dd3SSteven Rostedt 	 * start of the reader inserts an empty page, there is a
3224818e3dd3SSteven Rostedt 	 * case where we will loop three times. There should be no
3225818e3dd3SSteven Rostedt 	 * reason to loop four times (that I know of).
3226818e3dd3SSteven Rostedt 	 */
32273e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3228818e3dd3SSteven Rostedt 		reader = NULL;
3229818e3dd3SSteven Rostedt 		goto out;
3230818e3dd3SSteven Rostedt 	}
3231818e3dd3SSteven Rostedt 
3232d769041fSSteven Rostedt 	reader = cpu_buffer->reader_page;
3233d769041fSSteven Rostedt 
3234d769041fSSteven Rostedt 	/* If there's more to read, return this page */
3235bf41a158SSteven Rostedt 	if (cpu_buffer->reader_page->read < rb_page_size(reader))
3236d769041fSSteven Rostedt 		goto out;
3237d769041fSSteven Rostedt 
3238d769041fSSteven Rostedt 	/* Never should we have an index greater than the size */
32393e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer,
32403e89c7bbSSteven Rostedt 		       cpu_buffer->reader_page->read > rb_page_size(reader)))
32413e89c7bbSSteven Rostedt 		goto out;
3242d769041fSSteven Rostedt 
3243d769041fSSteven Rostedt 	/* check if we caught up to the tail */
3244d769041fSSteven Rostedt 	reader = NULL;
3245bf41a158SSteven Rostedt 	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3246d769041fSSteven Rostedt 		goto out;
32477a8e76a3SSteven Rostedt 
32487a8e76a3SSteven Rostedt 	/*
3249d769041fSSteven Rostedt 	 * Reset the reader page to size zero.
32507a8e76a3SSteven Rostedt 	 */
325177ae365eSSteven Rostedt 	local_set(&cpu_buffer->reader_page->write, 0);
325277ae365eSSteven Rostedt 	local_set(&cpu_buffer->reader_page->entries, 0);
325377ae365eSSteven Rostedt 	local_set(&cpu_buffer->reader_page->page->commit, 0);
3254ff0ff84aSSteven Rostedt 	cpu_buffer->reader_page->real_end = 0;
3255d769041fSSteven Rostedt 
325677ae365eSSteven Rostedt  spin:
325777ae365eSSteven Rostedt 	/*
325877ae365eSSteven Rostedt 	 * Splice the empty reader page into the list around the head.
325977ae365eSSteven Rostedt 	 */
326077ae365eSSteven Rostedt 	reader = rb_set_head_page(cpu_buffer);
32610e1ff5d7SSteven Rostedt 	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3262d769041fSSteven Rostedt 	cpu_buffer->reader_page->list.prev = reader->list.prev;
3263bf41a158SSteven Rostedt 
32643adc54faSSteven Rostedt 	/*
32653adc54faSSteven Rostedt 	 * cpu_buffer->pages just needs to point to the buffer, it
32663adc54faSSteven Rostedt 	 *  has no specific buffer page to point to. Let's move it out
326725985edcSLucas De Marchi 	 *  of our way so we don't accidentally swap it.
32683adc54faSSteven Rostedt 	 */
32693adc54faSSteven Rostedt 	cpu_buffer->pages = reader->list.prev;
32703adc54faSSteven Rostedt 
327177ae365eSSteven Rostedt 	/* The reader page will be pointing to the new head */
327277ae365eSSteven Rostedt 	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3273d769041fSSteven Rostedt 
3274d769041fSSteven Rostedt 	/*
327566a8cb95SSteven Rostedt 	 * We want to make sure we read the overruns after we set up our
327666a8cb95SSteven Rostedt 	 * pointers to the next object. The writer side does a
327766a8cb95SSteven Rostedt 	 * cmpxchg to cross pages which acts as the mb on the writer
327866a8cb95SSteven Rostedt 	 * side. Note, the reader will constantly fail the swap
327966a8cb95SSteven Rostedt 	 * while the writer is updating the pointers, so this
328066a8cb95SSteven Rostedt 	 * guarantees that the overwrite recorded here is the one we
328166a8cb95SSteven Rostedt 	 * want to compare with the last_overrun.
328266a8cb95SSteven Rostedt 	 */
328366a8cb95SSteven Rostedt 	smp_mb();
328466a8cb95SSteven Rostedt 	overwrite = local_read(&(cpu_buffer->overrun));
328566a8cb95SSteven Rostedt 
328666a8cb95SSteven Rostedt 	/*
328777ae365eSSteven Rostedt 	 * Here's the tricky part.
328877ae365eSSteven Rostedt 	 *
328977ae365eSSteven Rostedt 	 * We need to move the pointer past the header page.
329077ae365eSSteven Rostedt 	 * But we can only do that if a writer is not currently
329177ae365eSSteven Rostedt 	 * moving it. The page before the header page has the
329277ae365eSSteven Rostedt 	 * flag bit '1' set if it is pointing to the page we want.
329377ae365eSSteven Rostedt 	 * But if the writer is in the process of moving it,
329477ae365eSSteven Rostedt 	 * then it will be '2', or '0' if it has already moved.
3295d769041fSSteven Rostedt 	 */
3296d769041fSSteven Rostedt 
329777ae365eSSteven Rostedt 	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
329877ae365eSSteven Rostedt 
329977ae365eSSteven Rostedt 	/*
330077ae365eSSteven Rostedt 	 * If we did not convert it, then we must try again.
330177ae365eSSteven Rostedt 	 */
330277ae365eSSteven Rostedt 	if (!ret)
330377ae365eSSteven Rostedt 		goto spin;
330477ae365eSSteven Rostedt 
330577ae365eSSteven Rostedt 	/*
330677ae365eSSteven Rostedt 	 * Yeah! We succeeded in replacing the page.
330777ae365eSSteven Rostedt 	 *
330877ae365eSSteven Rostedt 	 * Now make the new head point back to the reader page.
330977ae365eSSteven Rostedt 	 */
33105ded3dc6SDavid Sharp 	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
33117a8e76a3SSteven Rostedt 	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3312d769041fSSteven Rostedt 
3313d769041fSSteven Rostedt 	/* Finally update the reader page to the new head */
3314d769041fSSteven Rostedt 	cpu_buffer->reader_page = reader;
3315d769041fSSteven Rostedt 	rb_reset_reader_page(cpu_buffer);
3316d769041fSSteven Rostedt 
331766a8cb95SSteven Rostedt 	if (overwrite != cpu_buffer->last_overrun) {
331866a8cb95SSteven Rostedt 		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
331966a8cb95SSteven Rostedt 		cpu_buffer->last_overrun = overwrite;
332066a8cb95SSteven Rostedt 	}
332166a8cb95SSteven Rostedt 
3322d769041fSSteven Rostedt 	goto again;
3323d769041fSSteven Rostedt 
3324d769041fSSteven Rostedt  out:
33250199c4e6SThomas Gleixner 	arch_spin_unlock(&cpu_buffer->lock);
33263e03fb7fSSteven Rostedt 	local_irq_restore(flags);
3327d769041fSSteven Rostedt 
3328d769041fSSteven Rostedt 	return reader;
33297a8e76a3SSteven Rostedt }
33307a8e76a3SSteven Rostedt 
3331d769041fSSteven Rostedt static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3332d769041fSSteven Rostedt {
3333d769041fSSteven Rostedt 	struct ring_buffer_event *event;
3334d769041fSSteven Rostedt 	struct buffer_page *reader;
3335d769041fSSteven Rostedt 	unsigned length;
3336d769041fSSteven Rostedt 
3337d769041fSSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
3338d769041fSSteven Rostedt 
3339d769041fSSteven Rostedt 	/* This function should not be called when the buffer is empty */
33403e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, !reader))
33413e89c7bbSSteven Rostedt 		return;
3342d769041fSSteven Rostedt 
3343d769041fSSteven Rostedt 	event = rb_reader_event(cpu_buffer);
33447a8e76a3SSteven Rostedt 
3345a1863c21SSteven Rostedt 	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3346e4906effSSteven Rostedt 		cpu_buffer->read++;
33477a8e76a3SSteven Rostedt 
33487a8e76a3SSteven Rostedt 	rb_update_read_stamp(cpu_buffer, event);
33497a8e76a3SSteven Rostedt 
3350d769041fSSteven Rostedt 	length = rb_event_length(event);
33516f807acdSSteven Rostedt 	cpu_buffer->reader_page->read += length;
33527a8e76a3SSteven Rostedt }
33537a8e76a3SSteven Rostedt 
33547a8e76a3SSteven Rostedt static void rb_advance_iter(struct ring_buffer_iter *iter)
33557a8e76a3SSteven Rostedt {
33567a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
33577a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
33587a8e76a3SSteven Rostedt 	unsigned length;
33597a8e76a3SSteven Rostedt 
33607a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
33617a8e76a3SSteven Rostedt 
33627a8e76a3SSteven Rostedt 	/*
33637a8e76a3SSteven Rostedt 	 * Check if we are at the end of the buffer.
33647a8e76a3SSteven Rostedt 	 */
3365bf41a158SSteven Rostedt 	if (iter->head >= rb_page_size(iter->head_page)) {
3366ea05b57cSSteven Rostedt 		/* discarded commits can make the page empty */
3367ea05b57cSSteven Rostedt 		if (iter->head_page == cpu_buffer->commit_page)
33683e89c7bbSSteven Rostedt 			return;
3369d769041fSSteven Rostedt 		rb_inc_iter(iter);
33707a8e76a3SSteven Rostedt 		return;
33717a8e76a3SSteven Rostedt 	}
33727a8e76a3SSteven Rostedt 
33737a8e76a3SSteven Rostedt 	event = rb_iter_head_event(iter);
33747a8e76a3SSteven Rostedt 
33757a8e76a3SSteven Rostedt 	length = rb_event_length(event);
33767a8e76a3SSteven Rostedt 
33777a8e76a3SSteven Rostedt 	/*
33787a8e76a3SSteven Rostedt 	 * This should not be called to advance the header if we are
33797a8e76a3SSteven Rostedt 	 * at the tail of the buffer.
33807a8e76a3SSteven Rostedt 	 */
33813e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer,
3382f536aafcSSteven Rostedt 		       (iter->head_page == cpu_buffer->commit_page) &&
33833e89c7bbSSteven Rostedt 		       (iter->head + length > rb_commit_index(cpu_buffer))))
33843e89c7bbSSteven Rostedt 		return;
33857a8e76a3SSteven Rostedt 
33867a8e76a3SSteven Rostedt 	rb_update_iter_read_stamp(iter, event);
33877a8e76a3SSteven Rostedt 
33887a8e76a3SSteven Rostedt 	iter->head += length;
33897a8e76a3SSteven Rostedt 
33907a8e76a3SSteven Rostedt 	/* check for end of page padding */
3391bf41a158SSteven Rostedt 	if ((iter->head >= rb_page_size(iter->head_page)) &&
3392bf41a158SSteven Rostedt 	    (iter->head_page != cpu_buffer->commit_page))
33937a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
33947a8e76a3SSteven Rostedt }
33957a8e76a3SSteven Rostedt 
339666a8cb95SSteven Rostedt static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
339766a8cb95SSteven Rostedt {
339866a8cb95SSteven Rostedt 	return cpu_buffer->lost_events;
339966a8cb95SSteven Rostedt }
340066a8cb95SSteven Rostedt 
3401f83c9d0fSSteven Rostedt static struct ring_buffer_event *
340266a8cb95SSteven Rostedt rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
340366a8cb95SSteven Rostedt 	       unsigned long *lost_events)
34047a8e76a3SSteven Rostedt {
34057a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
3406d769041fSSteven Rostedt 	struct buffer_page *reader;
3407818e3dd3SSteven Rostedt 	int nr_loops = 0;
34087a8e76a3SSteven Rostedt 
34097a8e76a3SSteven Rostedt  again:
3410818e3dd3SSteven Rostedt 	/*
341169d1b839SSteven Rostedt 	 * We repeat when a time extend is encountered.
341269d1b839SSteven Rostedt 	 * Since the time extend is always attached to a data event,
341369d1b839SSteven Rostedt 	 * we should never loop more than once.
341469d1b839SSteven Rostedt 	 * (We never hit the following condition more than twice).
3415818e3dd3SSteven Rostedt 	 */
341669d1b839SSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3417818e3dd3SSteven Rostedt 		return NULL;
3418818e3dd3SSteven Rostedt 
3419d769041fSSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
3420d769041fSSteven Rostedt 	if (!reader)
34217a8e76a3SSteven Rostedt 		return NULL;
34227a8e76a3SSteven Rostedt 
3423d769041fSSteven Rostedt 	event = rb_reader_event(cpu_buffer);
34247a8e76a3SSteven Rostedt 
3425334d4169SLai Jiangshan 	switch (event->type_len) {
34267a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
34272d622719STom Zanussi 		if (rb_null_event(event))
3428bf41a158SSteven Rostedt 			RB_WARN_ON(cpu_buffer, 1);
34292d622719STom Zanussi 		/*
34302d622719STom Zanussi 		 * Because the writer could be discarding every
34312d622719STom Zanussi 		 * event it creates (which would probably be bad),
34322d622719STom Zanussi 		 * if we were to go back to "again" then we may never
34332d622719STom Zanussi 		 * catch up, and will trigger the warn on, or lock
34342d622719STom Zanussi 		 * the box. Return the padding, and we will release
34352d622719STom Zanussi 		 * the current locks, and try again.
34362d622719STom Zanussi 		 */
34372d622719STom Zanussi 		return event;
34387a8e76a3SSteven Rostedt 
34397a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
34407a8e76a3SSteven Rostedt 		/* Internal data, OK to advance */
3441d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
34427a8e76a3SSteven Rostedt 		goto again;
34437a8e76a3SSteven Rostedt 
34447a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
34457a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
3446d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
34477a8e76a3SSteven Rostedt 		goto again;
34487a8e76a3SSteven Rostedt 
34497a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
34507a8e76a3SSteven Rostedt 		if (ts) {
34517a8e76a3SSteven Rostedt 			*ts = cpu_buffer->read_stamp + event->time_delta;
3452d8eeb2d3SRobert Richter 			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
345337886f6aSSteven Rostedt 							 cpu_buffer->cpu, ts);
34547a8e76a3SSteven Rostedt 		}
345566a8cb95SSteven Rostedt 		if (lost_events)
345666a8cb95SSteven Rostedt 			*lost_events = rb_lost_events(cpu_buffer);
34577a8e76a3SSteven Rostedt 		return event;
34587a8e76a3SSteven Rostedt 
34597a8e76a3SSteven Rostedt 	default:
34607a8e76a3SSteven Rostedt 		BUG();
34617a8e76a3SSteven Rostedt 	}
34627a8e76a3SSteven Rostedt 
34637a8e76a3SSteven Rostedt 	return NULL;
34647a8e76a3SSteven Rostedt }
3465c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_peek);
34667a8e76a3SSteven Rostedt 
3467f83c9d0fSSteven Rostedt static struct ring_buffer_event *
3468f83c9d0fSSteven Rostedt rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
34697a8e76a3SSteven Rostedt {
34707a8e76a3SSteven Rostedt 	struct ring_buffer *buffer;
34717a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
34727a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
3473818e3dd3SSteven Rostedt 	int nr_loops = 0;
34747a8e76a3SSteven Rostedt 
34757a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
34767a8e76a3SSteven Rostedt 	buffer = cpu_buffer->buffer;
34777a8e76a3SSteven Rostedt 
3478492a74f4SSteven Rostedt 	/*
3479492a74f4SSteven Rostedt 	 * Check if someone performed a consuming read to
3480492a74f4SSteven Rostedt 	 * the buffer. A consuming read invalidates the iterator
3481492a74f4SSteven Rostedt 	 * and we need to reset the iterator in this case.
3482492a74f4SSteven Rostedt 	 */
3483492a74f4SSteven Rostedt 	if (unlikely(iter->cache_read != cpu_buffer->read ||
3484492a74f4SSteven Rostedt 		     iter->cache_reader_page != cpu_buffer->reader_page))
3485492a74f4SSteven Rostedt 		rb_iter_reset(iter);
3486492a74f4SSteven Rostedt 
34877a8e76a3SSteven Rostedt  again:
34883c05d748SSteven Rostedt 	if (ring_buffer_iter_empty(iter))
34893c05d748SSteven Rostedt 		return NULL;
34903c05d748SSteven Rostedt 
3491818e3dd3SSteven Rostedt 	/*
349269d1b839SSteven Rostedt 	 * We repeat when a time extend is encountered.
349369d1b839SSteven Rostedt 	 * Since the time extend is always attached to a data event,
349469d1b839SSteven Rostedt 	 * we should never loop more than once.
349569d1b839SSteven Rostedt 	 * (We never hit the following condition more than twice).
3496818e3dd3SSteven Rostedt 	 */
349769d1b839SSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3498818e3dd3SSteven Rostedt 		return NULL;
3499818e3dd3SSteven Rostedt 
35007a8e76a3SSteven Rostedt 	if (rb_per_cpu_empty(cpu_buffer))
35017a8e76a3SSteven Rostedt 		return NULL;
35027a8e76a3SSteven Rostedt 
35033c05d748SSteven Rostedt 	if (iter->head >= local_read(&iter->head_page->page->commit)) {
35043c05d748SSteven Rostedt 		rb_inc_iter(iter);
35053c05d748SSteven Rostedt 		goto again;
35063c05d748SSteven Rostedt 	}
35073c05d748SSteven Rostedt 
35087a8e76a3SSteven Rostedt 	event = rb_iter_head_event(iter);
35097a8e76a3SSteven Rostedt 
3510334d4169SLai Jiangshan 	switch (event->type_len) {
35117a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
35122d622719STom Zanussi 		if (rb_null_event(event)) {
3513d769041fSSteven Rostedt 			rb_inc_iter(iter);
35147a8e76a3SSteven Rostedt 			goto again;
35152d622719STom Zanussi 		}
35162d622719STom Zanussi 		rb_advance_iter(iter);
35172d622719STom Zanussi 		return event;
35187a8e76a3SSteven Rostedt 
35197a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
35207a8e76a3SSteven Rostedt 		/* Internal data, OK to advance */
35217a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
35227a8e76a3SSteven Rostedt 		goto again;
35237a8e76a3SSteven Rostedt 
35247a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
35257a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
35267a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
35277a8e76a3SSteven Rostedt 		goto again;
35287a8e76a3SSteven Rostedt 
35297a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
35307a8e76a3SSteven Rostedt 		if (ts) {
35317a8e76a3SSteven Rostedt 			*ts = iter->read_stamp + event->time_delta;
353237886f6aSSteven Rostedt 			ring_buffer_normalize_time_stamp(buffer,
353337886f6aSSteven Rostedt 							 cpu_buffer->cpu, ts);
35347a8e76a3SSteven Rostedt 		}
35357a8e76a3SSteven Rostedt 		return event;
35367a8e76a3SSteven Rostedt 
35377a8e76a3SSteven Rostedt 	default:
35387a8e76a3SSteven Rostedt 		BUG();
35397a8e76a3SSteven Rostedt 	}
35407a8e76a3SSteven Rostedt 
35417a8e76a3SSteven Rostedt 	return NULL;
35427a8e76a3SSteven Rostedt }
3543c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
35447a8e76a3SSteven Rostedt 
35458d707e8eSSteven Rostedt static inline int rb_ok_to_lock(void)
35468d707e8eSSteven Rostedt {
35478d707e8eSSteven Rostedt 	/*
35488d707e8eSSteven Rostedt 	 * If an NMI die dumps out the content of the ring buffer,
35498d707e8eSSteven Rostedt 	 * do not grab locks. We also permanently disable the ring
35508d707e8eSSteven Rostedt 	 * buffer. A one time deal is all you get from reading
35518d707e8eSSteven Rostedt 	 * the ring buffer from an NMI.
35528d707e8eSSteven Rostedt 	 */
3553464e85ebSSteven Rostedt 	if (likely(!in_nmi()))
35548d707e8eSSteven Rostedt 		return 1;
35558d707e8eSSteven Rostedt 
35568d707e8eSSteven Rostedt 	tracing_off_permanent();
35578d707e8eSSteven Rostedt 	return 0;
35588d707e8eSSteven Rostedt }
35598d707e8eSSteven Rostedt 
35607a8e76a3SSteven Rostedt /**
3561f83c9d0fSSteven Rostedt  * ring_buffer_peek - peek at the next event to be read
3562f83c9d0fSSteven Rostedt  * @buffer: The ring buffer to read
3563f83c9d0fSSteven Rostedt  * @cpu: The cpu to peek at
3564f83c9d0fSSteven Rostedt  * @ts: The timestamp counter of this event.
356566a8cb95SSteven Rostedt  * @lost_events: a variable to store if events were lost (may be NULL)
3566f83c9d0fSSteven Rostedt  *
3567f83c9d0fSSteven Rostedt  * This will return the event that will be read next, but does
3568f83c9d0fSSteven Rostedt  * not consume the data.
3569f83c9d0fSSteven Rostedt  */
3570f83c9d0fSSteven Rostedt struct ring_buffer_event *
357166a8cb95SSteven Rostedt ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
357266a8cb95SSteven Rostedt 		 unsigned long *lost_events)
3573f83c9d0fSSteven Rostedt {
3574f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
35758aabee57SSteven Rostedt 	struct ring_buffer_event *event;
3576f83c9d0fSSteven Rostedt 	unsigned long flags;
35778d707e8eSSteven Rostedt 	int dolock;
3578f83c9d0fSSteven Rostedt 
3579554f786eSSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
35808aabee57SSteven Rostedt 		return NULL;
3581554f786eSSteven Rostedt 
35828d707e8eSSteven Rostedt 	dolock = rb_ok_to_lock();
35832d622719STom Zanussi  again:
35848d707e8eSSteven Rostedt 	local_irq_save(flags);
35858d707e8eSSteven Rostedt 	if (dolock)
35865389f6faSThomas Gleixner 		raw_spin_lock(&cpu_buffer->reader_lock);
358766a8cb95SSteven Rostedt 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3588469535a5SRobert Richter 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3589469535a5SRobert Richter 		rb_advance_reader(cpu_buffer);
35908d707e8eSSteven Rostedt 	if (dolock)
35915389f6faSThomas Gleixner 		raw_spin_unlock(&cpu_buffer->reader_lock);
35928d707e8eSSteven Rostedt 	local_irq_restore(flags);
3593f83c9d0fSSteven Rostedt 
35941b959e18SSteven Rostedt 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
35952d622719STom Zanussi 		goto again;
35962d622719STom Zanussi 
3597f83c9d0fSSteven Rostedt 	return event;
3598f83c9d0fSSteven Rostedt }
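
/*
 * Editor's sketch (illustrative only): peeking lets a reader decide
 * whether to consume an event based on its timestamp, e.g. when
 * merge-sorting events from several CPUs.  "should_take" is a
 * hypothetical predicate.
 *
 *	u64 next_ts;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &next_ts, NULL);
 *	if (event && should_take(next_ts))
 *		event = ring_buffer_consume(buffer, cpu, &next_ts, NULL);
 */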
3599f83c9d0fSSteven Rostedt 
3600f83c9d0fSSteven Rostedt /**
3601f83c9d0fSSteven Rostedt  * ring_buffer_iter_peek - peek at the next event to be read
3602f83c9d0fSSteven Rostedt  * @iter: The ring buffer iterator
3603f83c9d0fSSteven Rostedt  * @ts: The timestamp counter of this event.
3604f83c9d0fSSteven Rostedt  *
3605f83c9d0fSSteven Rostedt  * This will return the event that will be read next, but does
3606f83c9d0fSSteven Rostedt  * not increment the iterator.
3607f83c9d0fSSteven Rostedt  */
3608f83c9d0fSSteven Rostedt struct ring_buffer_event *
3609f83c9d0fSSteven Rostedt ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3610f83c9d0fSSteven Rostedt {
3611f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3612f83c9d0fSSteven Rostedt 	struct ring_buffer_event *event;
3613f83c9d0fSSteven Rostedt 	unsigned long flags;
3614f83c9d0fSSteven Rostedt 
36152d622719STom Zanussi  again:
36165389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3617f83c9d0fSSteven Rostedt 	event = rb_iter_peek(iter, ts);
36185389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3619f83c9d0fSSteven Rostedt 
36201b959e18SSteven Rostedt 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
36212d622719STom Zanussi 		goto again;
36222d622719STom Zanussi 
3623f83c9d0fSSteven Rostedt 	return event;
3624f83c9d0fSSteven Rostedt }
3625f83c9d0fSSteven Rostedt 
3626f83c9d0fSSteven Rostedt /**
36277a8e76a3SSteven Rostedt  * ring_buffer_consume - return an event and consume it
36287a8e76a3SSteven Rostedt  * @buffer: The ring buffer to get the next event from
362966a8cb95SSteven Rostedt  * @cpu: the cpu to read the buffer from
363066a8cb95SSteven Rostedt  * @ts: a variable to store the timestamp (may be NULL)
363166a8cb95SSteven Rostedt  * @lost_events: a variable to store if events were lost (may be NULL)
36327a8e76a3SSteven Rostedt  *
36337a8e76a3SSteven Rostedt  * Returns the next event in the ring buffer, and that event is consumed.
36347a8e76a3SSteven Rostedt  * Meaning, that sequential reads will keep returning a different event,
36357a8e76a3SSteven Rostedt  * and eventually empty the ring buffer if the producer is slower.
36367a8e76a3SSteven Rostedt  */
36377a8e76a3SSteven Rostedt struct ring_buffer_event *
363866a8cb95SSteven Rostedt ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
363966a8cb95SSteven Rostedt 		    unsigned long *lost_events)
36407a8e76a3SSteven Rostedt {
3641554f786eSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
3642554f786eSSteven Rostedt 	struct ring_buffer_event *event = NULL;
3643f83c9d0fSSteven Rostedt 	unsigned long flags;
36448d707e8eSSteven Rostedt 	int dolock;
36458d707e8eSSteven Rostedt 
36468d707e8eSSteven Rostedt 	dolock = rb_ok_to_lock();
36477a8e76a3SSteven Rostedt 
36482d622719STom Zanussi  again:
3649554f786eSSteven Rostedt 	/* might be called in atomic */
3650554f786eSSteven Rostedt 	preempt_disable();
36517a8e76a3SSteven Rostedt 
3652554f786eSSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3653554f786eSSteven Rostedt 		goto out;
3654554f786eSSteven Rostedt 
3655554f786eSSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
36568d707e8eSSteven Rostedt 	local_irq_save(flags);
36578d707e8eSSteven Rostedt 	if (dolock)
36585389f6faSThomas Gleixner 		raw_spin_lock(&cpu_buffer->reader_lock);
36597a8e76a3SSteven Rostedt 
366066a8cb95SSteven Rostedt 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
366166a8cb95SSteven Rostedt 	if (event) {
366266a8cb95SSteven Rostedt 		cpu_buffer->lost_events = 0;
3663d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
366466a8cb95SSteven Rostedt 	}
36657a8e76a3SSteven Rostedt 
36668d707e8eSSteven Rostedt 	if (dolock)
36675389f6faSThomas Gleixner 		raw_spin_unlock(&cpu_buffer->reader_lock);
36688d707e8eSSteven Rostedt 	local_irq_restore(flags);
3669f83c9d0fSSteven Rostedt 
3670554f786eSSteven Rostedt  out:
3671554f786eSSteven Rostedt 	preempt_enable();
3672554f786eSSteven Rostedt 
36731b959e18SSteven Rostedt 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
36742d622719STom Zanussi 		goto again;
36752d622719STom Zanussi 
36767a8e76a3SSteven Rostedt 	return event;
36777a8e76a3SSteven Rostedt }
3678c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_consume);
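
/*
 * Editor's sketch (illustrative only): draining one CPU's buffer
 * with consuming reads.  "process_event" is a hypothetical callback;
 * ring_buffer_event_data() (defined earlier in this file) yields the
 * event payload.
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process_event(ring_buffer_event_data(event), ts, lost);
 */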
36797a8e76a3SSteven Rostedt 
36807a8e76a3SSteven Rostedt /**
368172c9ddfdSDavid Miller  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
36827a8e76a3SSteven Rostedt  * @buffer: The ring buffer to read from
36837a8e76a3SSteven Rostedt  * @cpu: The cpu buffer to iterate over
36847a8e76a3SSteven Rostedt  *
368572c9ddfdSDavid Miller  * This performs the initial preparations necessary to iterate
368672c9ddfdSDavid Miller  * through the buffer.  Memory is allocated, buffer recording
368772c9ddfdSDavid Miller  * is disabled, and the iterator pointer is returned to the caller.
36887a8e76a3SSteven Rostedt  *
368972c9ddfdSDavid Miller  * Disabling buffer recording prevents the reading from being
369072c9ddfdSDavid Miller  * corrupted. This is not a consuming read, so a producer is not
369172c9ddfdSDavid Miller  * expected.
369272c9ddfdSDavid Miller  *
369372c9ddfdSDavid Miller  * After a sequence of ring_buffer_read_prepare calls, the user is
369472c9ddfdSDavid Miller  * expected to make at least one call to ring_buffer_prepare_sync.
369572c9ddfdSDavid Miller  * Afterwards, ring_buffer_read_start is invoked to get things going
369672c9ddfdSDavid Miller  * for real.
369772c9ddfdSDavid Miller  *
369872c9ddfdSDavid Miller  * This overall must be paired with ring_buffer_read_finish.
36997a8e76a3SSteven Rostedt  */
37007a8e76a3SSteven Rostedt struct ring_buffer_iter *
370172c9ddfdSDavid Miller ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
37027a8e76a3SSteven Rostedt {
37037a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
37048aabee57SSteven Rostedt 	struct ring_buffer_iter *iter;
37057a8e76a3SSteven Rostedt 
37069e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
37078aabee57SSteven Rostedt 		return NULL;
37087a8e76a3SSteven Rostedt 
37097a8e76a3SSteven Rostedt 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
37107a8e76a3SSteven Rostedt 	if (!iter)
37118aabee57SSteven Rostedt 		return NULL;
37127a8e76a3SSteven Rostedt 
37137a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
37147a8e76a3SSteven Rostedt 
37157a8e76a3SSteven Rostedt 	iter->cpu_buffer = cpu_buffer;
37167a8e76a3SSteven Rostedt 
371783f40318SVaibhav Nagarnaik 	atomic_inc(&buffer->resize_disabled);
37187a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
371972c9ddfdSDavid Miller 
372072c9ddfdSDavid Miller 	return iter;
372172c9ddfdSDavid Miller }
372272c9ddfdSDavid Miller EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
372372c9ddfdSDavid Miller 
372472c9ddfdSDavid Miller /**
372572c9ddfdSDavid Miller  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
372672c9ddfdSDavid Miller  *
372772c9ddfdSDavid Miller  * All previously invoked ring_buffer_read_prepare calls to prepare
372872c9ddfdSDavid Miller  * iterators will be synchronized.  Afterwards, ring_buffer_read_start
372972c9ddfdSDavid Miller  * calls on those iterators are allowed.
373072c9ddfdSDavid Miller  */
373172c9ddfdSDavid Miller void
373272c9ddfdSDavid Miller ring_buffer_read_prepare_sync(void)
373372c9ddfdSDavid Miller {
37347a8e76a3SSteven Rostedt 	synchronize_sched();
373572c9ddfdSDavid Miller }
373672c9ddfdSDavid Miller EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
373772c9ddfdSDavid Miller 
373872c9ddfdSDavid Miller /**
373972c9ddfdSDavid Miller  * ring_buffer_read_start - start a non consuming read of the buffer
374072c9ddfdSDavid Miller  * @iter: The iterator returned by ring_buffer_read_prepare
374172c9ddfdSDavid Miller  *
374272c9ddfdSDavid Miller  * This finalizes the startup of an iteration through the buffer.
374372c9ddfdSDavid Miller  * The iterator comes from a call to ring_buffer_read_prepare and
374472c9ddfdSDavid Miller  * an intervening ring_buffer_read_prepare_sync must have been
374572c9ddfdSDavid Miller  * performed.
374672c9ddfdSDavid Miller  *
374772c9ddfdSDavid Miller  * Must be paired with ring_buffer_read_finish.
374872c9ddfdSDavid Miller  */
374972c9ddfdSDavid Miller void
375072c9ddfdSDavid Miller ring_buffer_read_start(struct ring_buffer_iter *iter)
375172c9ddfdSDavid Miller {
375272c9ddfdSDavid Miller 	struct ring_buffer_per_cpu *cpu_buffer;
375372c9ddfdSDavid Miller 	unsigned long flags;
375472c9ddfdSDavid Miller 
375572c9ddfdSDavid Miller 	if (!iter)
375672c9ddfdSDavid Miller 		return;
375772c9ddfdSDavid Miller 
375872c9ddfdSDavid Miller 	cpu_buffer = iter->cpu_buffer;
37597a8e76a3SSteven Rostedt 
37605389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
37610199c4e6SThomas Gleixner 	arch_spin_lock(&cpu_buffer->lock);
3762642edba5SSteven Rostedt 	rb_iter_reset(iter);
37630199c4e6SThomas Gleixner 	arch_spin_unlock(&cpu_buffer->lock);
37645389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
37657a8e76a3SSteven Rostedt }
3766c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_start);
37677a8e76a3SSteven Rostedt 
37687a8e76a3SSteven Rostedt /**
37697a8e76a3SSteven Rostedt  * ring_buffer_read_finish - finish reading the iterator of the buffer
37707a8e76a3SSteven Rostedt  * @iter: The iterator retrieved by ring_buffer_read_prepare
37717a8e76a3SSteven Rostedt  *
37727a8e76a3SSteven Rostedt  * This re-enables the recording to the buffer, and frees the
37737a8e76a3SSteven Rostedt  * iterator.
37747a8e76a3SSteven Rostedt  */
37757a8e76a3SSteven Rostedt void
37767a8e76a3SSteven Rostedt ring_buffer_read_finish(struct ring_buffer_iter *iter)
37777a8e76a3SSteven Rostedt {
37787a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
37797a8e76a3SSteven Rostedt 
3780659f451fSSteven Rostedt 	/*
3781659f451fSSteven Rostedt 	 * Ring buffer is disabled from recording, here's a good place
3782659f451fSSteven Rostedt 	 * to check the integrity of the ring buffer.
3783659f451fSSteven Rostedt 	 */
3784659f451fSSteven Rostedt 	rb_check_pages(cpu_buffer);
3785659f451fSSteven Rostedt 
37867a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
378783f40318SVaibhav Nagarnaik 	atomic_dec(&cpu_buffer->buffer->resize_disabled);
37887a8e76a3SSteven Rostedt 	kfree(iter);
37897a8e76a3SSteven Rostedt }
3790c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
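
/*
 * Editor's sketch (illustrative only) of the full non-consuming read
 * protocol spelled out in the kernel-doc above: prepare, synchronize,
 * start, walk the events with ring_buffer_read() (defined below),
 * then finish to re-enable recording.  "inspect_event" is a
 * hypothetical helper; error handling is abbreviated.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu);
 *	if (!iter)
 *		return;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		inspect_event(event, ts);
 *	ring_buffer_read_finish(iter);
 */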
37917a8e76a3SSteven Rostedt 
37927a8e76a3SSteven Rostedt /**
37937a8e76a3SSteven Rostedt  * ring_buffer_read - read the next item in the ring buffer by the iterator
37947a8e76a3SSteven Rostedt  * @iter: The ring buffer iterator
37957a8e76a3SSteven Rostedt  * @ts: The time stamp of the event read.
37967a8e76a3SSteven Rostedt  *
37977a8e76a3SSteven Rostedt  * This reads the next event in the ring buffer and increments the iterator.
37987a8e76a3SSteven Rostedt  */
37997a8e76a3SSteven Rostedt struct ring_buffer_event *
38007a8e76a3SSteven Rostedt ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
38017a8e76a3SSteven Rostedt {
38027a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
3803f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3804f83c9d0fSSteven Rostedt 	unsigned long flags;
38057a8e76a3SSteven Rostedt 
38065389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
38077e9391cfSSteven Rostedt  again:
3808f83c9d0fSSteven Rostedt 	event = rb_iter_peek(iter, ts);
38097a8e76a3SSteven Rostedt 	if (!event)
3810f83c9d0fSSteven Rostedt 		goto out;
38117a8e76a3SSteven Rostedt 
38127e9391cfSSteven Rostedt 	if (event->type_len == RINGBUF_TYPE_PADDING)
38137e9391cfSSteven Rostedt 		goto again;
38147e9391cfSSteven Rostedt 
38157a8e76a3SSteven Rostedt 	rb_advance_iter(iter);
3816f83c9d0fSSteven Rostedt  out:
38175389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
38187a8e76a3SSteven Rostedt 
38197a8e76a3SSteven Rostedt 	return event;
38207a8e76a3SSteven Rostedt }
3821c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read);
38227a8e76a3SSteven Rostedt 
38237a8e76a3SSteven Rostedt /**
38247a8e76a3SSteven Rostedt  * ring_buffer_size - return the size of a per CPU buffer (in bytes)
38257a8e76a3SSteven Rostedt  * @buffer: The ring buffer.
 * @cpu: The CPU to get the size of the buffer for
38267a8e76a3SSteven Rostedt  */
3827438ced17SVaibhav Nagarnaik unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
38287a8e76a3SSteven Rostedt {
3829438ced17SVaibhav Nagarnaik 	/*
3830438ced17SVaibhav Nagarnaik 	 * Earlier, this method returned
3831438ced17SVaibhav Nagarnaik 	 *	BUF_PAGE_SIZE * buffer->nr_pages
3832438ced17SVaibhav Nagarnaik 	 * Since the nr_pages field is now removed, we have converted this to
3833438ced17SVaibhav Nagarnaik 	 * return the per cpu buffer value.
3834438ced17SVaibhav Nagarnaik 	 */
3835438ced17SVaibhav Nagarnaik 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3836438ced17SVaibhav Nagarnaik 		return 0;
3837438ced17SVaibhav Nagarnaik 
3838438ced17SVaibhav Nagarnaik 	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
38397a8e76a3SSteven Rostedt }
3840c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_size);
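
/*
 * Editor's sketch (illustrative only): since the size is reported
 * per CPU, the old whole-buffer figure is the sum over the CPUs;
 * CPUs outside the buffer's cpumask simply contribute 0.
 *
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		total += ring_buffer_size(buffer, cpu);
 */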
38417a8e76a3SSteven Rostedt 
38427a8e76a3SSteven Rostedt static void
38437a8e76a3SSteven Rostedt rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
38447a8e76a3SSteven Rostedt {
384577ae365eSSteven Rostedt 	rb_head_page_deactivate(cpu_buffer);
384677ae365eSSteven Rostedt 
38477a8e76a3SSteven Rostedt 	cpu_buffer->head_page
38483adc54faSSteven Rostedt 		= list_entry(cpu_buffer->pages, struct buffer_page, list);
3849bf41a158SSteven Rostedt 	local_set(&cpu_buffer->head_page->write, 0);
3850778c55d4SSteven Rostedt 	local_set(&cpu_buffer->head_page->entries, 0);
3851abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->head_page->page->commit, 0);
38527a8e76a3SSteven Rostedt 
38536f807acdSSteven Rostedt 	cpu_buffer->head_page->read = 0;
3854bf41a158SSteven Rostedt 
3855bf41a158SSteven Rostedt 	cpu_buffer->tail_page = cpu_buffer->head_page;
3856bf41a158SSteven Rostedt 	cpu_buffer->commit_page = cpu_buffer->head_page;
3857bf41a158SSteven Rostedt 
3858bf41a158SSteven Rostedt 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
38595040b4b7SVaibhav Nagarnaik 	INIT_LIST_HEAD(&cpu_buffer->new_pages);
3860bf41a158SSteven Rostedt 	local_set(&cpu_buffer->reader_page->write, 0);
3861778c55d4SSteven Rostedt 	local_set(&cpu_buffer->reader_page->entries, 0);
3862abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->reader_page->page->commit, 0);
38636f807acdSSteven Rostedt 	cpu_buffer->reader_page->read = 0;
3864d769041fSSteven Rostedt 
386577ae365eSSteven Rostedt 	local_set(&cpu_buffer->commit_overrun, 0);
3866c64e148aSVaibhav Nagarnaik 	local_set(&cpu_buffer->entries_bytes, 0);
386777ae365eSSteven Rostedt 	local_set(&cpu_buffer->overrun, 0);
3868e4906effSSteven Rostedt 	local_set(&cpu_buffer->entries, 0);
3869fa743953SSteven Rostedt 	local_set(&cpu_buffer->committing, 0);
3870fa743953SSteven Rostedt 	local_set(&cpu_buffer->commits, 0);
387177ae365eSSteven Rostedt 	cpu_buffer->read = 0;
3872c64e148aSVaibhav Nagarnaik 	cpu_buffer->read_bytes = 0;
387369507c06SSteven Rostedt 
387469507c06SSteven Rostedt 	cpu_buffer->write_stamp = 0;
387569507c06SSteven Rostedt 	cpu_buffer->read_stamp = 0;
387677ae365eSSteven Rostedt 
387766a8cb95SSteven Rostedt 	cpu_buffer->lost_events = 0;
387866a8cb95SSteven Rostedt 	cpu_buffer->last_overrun = 0;
387966a8cb95SSteven Rostedt 
388077ae365eSSteven Rostedt 	rb_head_page_activate(cpu_buffer);
38817a8e76a3SSteven Rostedt }
38827a8e76a3SSteven Rostedt 
38837a8e76a3SSteven Rostedt /**
38847a8e76a3SSteven Rostedt  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
38857a8e76a3SSteven Rostedt  * @buffer: The ring buffer to reset a per cpu buffer of
38867a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to be reset
38877a8e76a3SSteven Rostedt  */
38887a8e76a3SSteven Rostedt void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
38897a8e76a3SSteven Rostedt {
38907a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
38917a8e76a3SSteven Rostedt 	unsigned long flags;
38927a8e76a3SSteven Rostedt 
38939e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
38948aabee57SSteven Rostedt 		return;
38957a8e76a3SSteven Rostedt 
389683f40318SVaibhav Nagarnaik 	atomic_inc(&buffer->resize_disabled);
389741ede23eSSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
389841ede23eSSteven Rostedt 
389983f40318SVaibhav Nagarnaik 	/* Make sure all commits have finished */
390083f40318SVaibhav Nagarnaik 	synchronize_sched();
390183f40318SVaibhav Nagarnaik 
39025389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3903f83c9d0fSSteven Rostedt 
390441b6a95dSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
390541b6a95dSSteven Rostedt 		goto out;
390641b6a95dSSteven Rostedt 
39070199c4e6SThomas Gleixner 	arch_spin_lock(&cpu_buffer->lock);
39087a8e76a3SSteven Rostedt 
39097a8e76a3SSteven Rostedt 	rb_reset_cpu(cpu_buffer);
39107a8e76a3SSteven Rostedt 
39110199c4e6SThomas Gleixner 	arch_spin_unlock(&cpu_buffer->lock);
3912f83c9d0fSSteven Rostedt 
391341b6a95dSSteven Rostedt  out:
39145389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
391541ede23eSSteven Rostedt 
391641ede23eSSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
391783f40318SVaibhav Nagarnaik 	atomic_dec(&buffer->resize_disabled);
39187a8e76a3SSteven Rostedt }
3919c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
39207a8e76a3SSteven Rostedt 
39217a8e76a3SSteven Rostedt /**
39227a8e76a3SSteven Rostedt  * ring_buffer_reset - reset a ring buffer
39237a8e76a3SSteven Rostedt  * @buffer: The ring buffer to reset all cpu buffers
39247a8e76a3SSteven Rostedt  */
39257a8e76a3SSteven Rostedt void ring_buffer_reset(struct ring_buffer *buffer)
39267a8e76a3SSteven Rostedt {
39277a8e76a3SSteven Rostedt 	int cpu;
39287a8e76a3SSteven Rostedt 
39297a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu)
3930d769041fSSteven Rostedt 		ring_buffer_reset_cpu(buffer, cpu);
39317a8e76a3SSteven Rostedt }
3932c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset);
39337a8e76a3SSteven Rostedt 
39347a8e76a3SSteven Rostedt /**
39357a8e76a3SSteven Rostedt  * ring_buffer_empty - is the ring buffer empty?
39367a8e76a3SSteven Rostedt  * @buffer: The ring buffer to test
39377a8e76a3SSteven Rostedt  */
39387a8e76a3SSteven Rostedt int ring_buffer_empty(struct ring_buffer *buffer)
39397a8e76a3SSteven Rostedt {
39407a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
3941d4788207SSteven Rostedt 	unsigned long flags;
39428d707e8eSSteven Rostedt 	int dolock;
39437a8e76a3SSteven Rostedt 	int cpu;
3944d4788207SSteven Rostedt 	int ret;
39457a8e76a3SSteven Rostedt 
39468d707e8eSSteven Rostedt 	dolock = rb_ok_to_lock();
39477a8e76a3SSteven Rostedt 
39487a8e76a3SSteven Rostedt 	/* yes this is racy, but if you don't like the race, lock the buffer */
39497a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
39507a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
39518d707e8eSSteven Rostedt 		local_irq_save(flags);
39528d707e8eSSteven Rostedt 		if (dolock)
39535389f6faSThomas Gleixner 			raw_spin_lock(&cpu_buffer->reader_lock);
3954d4788207SSteven Rostedt 		ret = rb_per_cpu_empty(cpu_buffer);
39558d707e8eSSteven Rostedt 		if (dolock)
39565389f6faSThomas Gleixner 			raw_spin_unlock(&cpu_buffer->reader_lock);
39578d707e8eSSteven Rostedt 		local_irq_restore(flags);
39588d707e8eSSteven Rostedt 
3959d4788207SSteven Rostedt 		if (!ret)
39607a8e76a3SSteven Rostedt 			return 0;
39617a8e76a3SSteven Rostedt 	}
3962554f786eSSteven Rostedt 
39637a8e76a3SSteven Rostedt 	return 1;
39647a8e76a3SSteven Rostedt }
3965c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty);
39667a8e76a3SSteven Rostedt 
39677a8e76a3SSteven Rostedt /**
39687a8e76a3SSteven Rostedt  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
39697a8e76a3SSteven Rostedt  * @buffer: The ring buffer
39707a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to test
39717a8e76a3SSteven Rostedt  */
39727a8e76a3SSteven Rostedt int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
39737a8e76a3SSteven Rostedt {
39747a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
3975d4788207SSteven Rostedt 	unsigned long flags;
39768d707e8eSSteven Rostedt 	int dolock;
39778aabee57SSteven Rostedt 	int ret;
39787a8e76a3SSteven Rostedt 
39799e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
39808aabee57SSteven Rostedt 		return 1;
39817a8e76a3SSteven Rostedt 
39828d707e8eSSteven Rostedt 	dolock = rb_ok_to_lock();
3983554f786eSSteven Rostedt 
39847a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
39858d707e8eSSteven Rostedt 	local_irq_save(flags);
39868d707e8eSSteven Rostedt 	if (dolock)
39875389f6faSThomas Gleixner 		raw_spin_lock(&cpu_buffer->reader_lock);
3988554f786eSSteven Rostedt 	ret = rb_per_cpu_empty(cpu_buffer);
39898d707e8eSSteven Rostedt 	if (dolock)
39905389f6faSThomas Gleixner 		raw_spin_unlock(&cpu_buffer->reader_lock);
39918d707e8eSSteven Rostedt 	local_irq_restore(flags);
3992554f786eSSteven Rostedt 
3993554f786eSSteven Rostedt 	return ret;
39947a8e76a3SSteven Rostedt }
3995c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
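
/*
 * Editor's sketch (illustrative only): a reader can use this as a
 * cheap "anything to do?" check before taking the heavier read path.
 * "drain_cpu" is a hypothetical helper.
 *
 *	if (!ring_buffer_empty_cpu(buffer, cpu))
 *		drain_cpu(buffer, cpu);
 */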
39967a8e76a3SSteven Rostedt 
399785bac32cSSteven Rostedt #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
39987a8e76a3SSteven Rostedt /**
39997a8e76a3SSteven Rostedt  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
40007a8e76a3SSteven Rostedt  * @buffer_a: One buffer to swap with
40017a8e76a3SSteven Rostedt  * @buffer_b: The other buffer to swap with
40027a8e76a3SSteven Rostedt  *
40037a8e76a3SSteven Rostedt  * This function is useful for tracers that want to take a "snapshot"
40047a8e76a3SSteven Rostedt  * of a CPU buffer and have another backup buffer lying around.
40057a8e76a3SSteven Rostedt  * It is expected that the tracer handles the cpu buffer not being
40067a8e76a3SSteven Rostedt  * used at the moment.
40077a8e76a3SSteven Rostedt  */
40087a8e76a3SSteven Rostedt int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
40097a8e76a3SSteven Rostedt 			 struct ring_buffer *buffer_b, int cpu)
40107a8e76a3SSteven Rostedt {
40117a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer_a;
40127a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer_b;
4013554f786eSSteven Rostedt 	int ret = -EINVAL;
4014554f786eSSteven Rostedt 
40159e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
40169e01c1b7SRusty Russell 	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
4017554f786eSSteven Rostedt 		goto out;
40187a8e76a3SSteven Rostedt 
4019438ced17SVaibhav Nagarnaik 	cpu_buffer_a = buffer_a->buffers[cpu];
4020438ced17SVaibhav Nagarnaik 	cpu_buffer_b = buffer_b->buffers[cpu];
4021438ced17SVaibhav Nagarnaik 
40227a8e76a3SSteven Rostedt 	/* At least make sure the two buffers are somewhat the same */
4023438ced17SVaibhav Nagarnaik 	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4024554f786eSSteven Rostedt 		goto out;
4025554f786eSSteven Rostedt 
4026554f786eSSteven Rostedt 	ret = -EAGAIN;
40277a8e76a3SSteven Rostedt 
402897b17efeSSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
4029554f786eSSteven Rostedt 		goto out;
403097b17efeSSteven Rostedt 
403197b17efeSSteven Rostedt 	if (atomic_read(&buffer_a->record_disabled))
4032554f786eSSteven Rostedt 		goto out;
403397b17efeSSteven Rostedt 
403497b17efeSSteven Rostedt 	if (atomic_read(&buffer_b->record_disabled))
4035554f786eSSteven Rostedt 		goto out;
403697b17efeSSteven Rostedt 
403797b17efeSSteven Rostedt 	if (atomic_read(&cpu_buffer_a->record_disabled))
4038554f786eSSteven Rostedt 		goto out;
403997b17efeSSteven Rostedt 
404097b17efeSSteven Rostedt 	if (atomic_read(&cpu_buffer_b->record_disabled))
4041554f786eSSteven Rostedt 		goto out;
404297b17efeSSteven Rostedt 
40437a8e76a3SSteven Rostedt 	/*
40447a8e76a3SSteven Rostedt 	 * We can't do a synchronize_sched here because this
40457a8e76a3SSteven Rostedt 	 * function can be called in atomic context.
40467a8e76a3SSteven Rostedt 	 * Normally this will be called from the same CPU as cpu.
40477a8e76a3SSteven Rostedt 	 * If not it's up to the caller to protect this.
40487a8e76a3SSteven Rostedt 	 */
40497a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer_a->record_disabled);
40507a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer_b->record_disabled);
40517a8e76a3SSteven Rostedt 
405298277991SSteven Rostedt 	ret = -EBUSY;
405398277991SSteven Rostedt 	if (local_read(&cpu_buffer_a->committing))
405498277991SSteven Rostedt 		goto out_dec;
405598277991SSteven Rostedt 	if (local_read(&cpu_buffer_b->committing))
405698277991SSteven Rostedt 		goto out_dec;
405798277991SSteven Rostedt 
40587a8e76a3SSteven Rostedt 	buffer_a->buffers[cpu] = cpu_buffer_b;
40597a8e76a3SSteven Rostedt 	buffer_b->buffers[cpu] = cpu_buffer_a;
40607a8e76a3SSteven Rostedt 
40617a8e76a3SSteven Rostedt 	cpu_buffer_b->buffer = buffer_a;
40627a8e76a3SSteven Rostedt 	cpu_buffer_a->buffer = buffer_b;
40637a8e76a3SSteven Rostedt 
406498277991SSteven Rostedt 	ret = 0;
406598277991SSteven Rostedt 
406698277991SSteven Rostedt out_dec:
40677a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer_a->record_disabled);
40687a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer_b->record_disabled);
4069554f786eSSteven Rostedt out:
4070554f786eSSteven Rostedt 	return ret;
40717a8e76a3SSteven Rostedt }
4072c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
407385bac32cSSteven Rostedt #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
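
/*
 * Editor's sketch (illustrative only) of the snapshot pattern the
 * kernel-doc above describes: "live" keeps recording while "snap"
 * holds the frozen data.  Both are hypothetical buffers with the
 * same per-cpu page count.
 *
 *	if (ring_buffer_swap_cpu(live, snap, cpu) == 0) {
 *		while ((event = ring_buffer_consume(snap, cpu, &ts, NULL)))
 *			dump_event(event);
 *	}
 *
 * A negative return (-EINVAL, -EAGAIN or -EBUSY above) means the
 * swap did not happen and "snap" holds no snapshot.
 */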
40747a8e76a3SSteven Rostedt 
40758789a9e7SSteven Rostedt /**
40768789a9e7SSteven Rostedt  * ring_buffer_alloc_read_page - allocate a page to read from buffer
40778789a9e7SSteven Rostedt  * @buffer: the buffer to allocate for.
40788789a9e7SSteven Rostedt  *
40798789a9e7SSteven Rostedt  * This function is used in conjunction with ring_buffer_read_page.
40808789a9e7SSteven Rostedt  * When reading a full page from the ring buffer, these functions
40818789a9e7SSteven Rostedt  * can be used to speed up the process. The calling function should
40828789a9e7SSteven Rostedt  * allocate a few pages first with this function. Then when it
40838789a9e7SSteven Rostedt  * needs to get pages from the ring buffer, it passes the result
40848789a9e7SSteven Rostedt  * of this function into ring_buffer_read_page, which will swap
40858789a9e7SSteven Rostedt  * the page that was allocated, with the read page of the buffer.
40868789a9e7SSteven Rostedt  *
40878789a9e7SSteven Rostedt  * Returns:
40888789a9e7SSteven Rostedt  *  The page allocated, or NULL on error.
40898789a9e7SSteven Rostedt  */
40907ea59064SVaibhav Nagarnaik void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
40918789a9e7SSteven Rostedt {
4092044fa782SSteven Rostedt 	struct buffer_data_page *bpage;
40937ea59064SVaibhav Nagarnaik 	struct page *page;
40948789a9e7SSteven Rostedt 
4095d7ec4bfeSVaibhav Nagarnaik 	page = alloc_pages_node(cpu_to_node(cpu),
4096d7ec4bfeSVaibhav Nagarnaik 				GFP_KERNEL | __GFP_NORETRY, 0);
40977ea59064SVaibhav Nagarnaik 	if (!page)
40988789a9e7SSteven Rostedt 		return NULL;
40998789a9e7SSteven Rostedt 
41007ea59064SVaibhav Nagarnaik 	bpage = page_address(page);
41018789a9e7SSteven Rostedt 
4102ef7a4a16SSteven Rostedt 	rb_init_page(bpage);
4103ef7a4a16SSteven Rostedt 
4104044fa782SSteven Rostedt 	return bpage;
41058789a9e7SSteven Rostedt }
4106d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
41078789a9e7SSteven Rostedt 
41088789a9e7SSteven Rostedt /**
41098789a9e7SSteven Rostedt  * ring_buffer_free_read_page - free an allocated read page
41108789a9e7SSteven Rostedt  * @buffer: the buffer the page was allocated for
41118789a9e7SSteven Rostedt  * @data: the page to free
41128789a9e7SSteven Rostedt  *
41138789a9e7SSteven Rostedt  * Free a page allocated from ring_buffer_alloc_read_page.
41148789a9e7SSteven Rostedt  */
41158789a9e7SSteven Rostedt void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
41168789a9e7SSteven Rostedt {
41178789a9e7SSteven Rostedt 	free_page((unsigned long)data);
41188789a9e7SSteven Rostedt }
4119d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
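
/*
 * Editor's sketch (illustrative only): the alloc/read/free life
 * cycle for whole-page reads, complementing the inline example in
 * the kernel-doc below.  "consume_page" is a hypothetical helper.
 *
 *	void *page = ring_buffer_alloc_read_page(buffer, cpu);
 *	int ret;
 *
 *	if (!page)
 *		return -ENOMEM;
 *	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		consume_page(page, ret);
 *	ring_buffer_free_read_page(buffer, page);
 */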
41208789a9e7SSteven Rostedt 
41218789a9e7SSteven Rostedt /**
41228789a9e7SSteven Rostedt  * ring_buffer_read_page - extract a page from the ring buffer
41238789a9e7SSteven Rostedt  * @buffer: buffer to extract from
41248789a9e7SSteven Rostedt  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4125ef7a4a16SSteven Rostedt  * @len: amount to extract
41268789a9e7SSteven Rostedt  * @cpu: the cpu of the buffer to extract
41278789a9e7SSteven Rostedt  * @full: should the extraction only happen when the page is full.
41288789a9e7SSteven Rostedt  *
41298789a9e7SSteven Rostedt  * This function will pull out a page from the ring buffer and consume it.
41308789a9e7SSteven Rostedt  * @data_page must be the address of the variable that was returned
41318789a9e7SSteven Rostedt  * from ring_buffer_alloc_read_page. This is because the page might be used
41328789a9e7SSteven Rostedt  * to swap with a page in the ring buffer.
41338789a9e7SSteven Rostedt  *
41348789a9e7SSteven Rostedt  * for example:
4135b85fa01eSLai Jiangshan  *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
41368789a9e7SSteven Rostedt  *	if (!rpage)
41378789a9e7SSteven Rostedt  *		return error;
4138ef7a4a16SSteven Rostedt  *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4139667d2412SLai Jiangshan  *	if (ret >= 0)
4140667d2412SLai Jiangshan  *		process_page(rpage, ret);
41418789a9e7SSteven Rostedt  *
41428789a9e7SSteven Rostedt  * When @full is set, the function will not return data unless
41438789a9e7SSteven Rostedt  * the writer is off the reader page.
41448789a9e7SSteven Rostedt  *
41458789a9e7SSteven Rostedt  * Note: it is up to the calling functions to handle sleeps and wakeups.
41468789a9e7SSteven Rostedt  *  The ring buffer can be used anywhere in the kernel and can not
41478789a9e7SSteven Rostedt  *  blindly call wake_up. The layer that uses the ring buffer must be
41488789a9e7SSteven Rostedt  *  responsible for that.
41498789a9e7SSteven Rostedt  *
41508789a9e7SSteven Rostedt  * Returns:
4151667d2412SLai Jiangshan  *  >=0 if data has been transferred, returns the offset of consumed data.
4152667d2412SLai Jiangshan  *  <0 if no data has been transferred.
41538789a9e7SSteven Rostedt  */
41548789a9e7SSteven Rostedt int ring_buffer_read_page(struct ring_buffer *buffer,
4155ef7a4a16SSteven Rostedt 			  void **data_page, size_t len, int cpu, int full)
41568789a9e7SSteven Rostedt {
41578789a9e7SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
41588789a9e7SSteven Rostedt 	struct ring_buffer_event *event;
4159044fa782SSteven Rostedt 	struct buffer_data_page *bpage;
4160ef7a4a16SSteven Rostedt 	struct buffer_page *reader;
4161ff0ff84aSSteven Rostedt 	unsigned long missed_events;
41628789a9e7SSteven Rostedt 	unsigned long flags;
4163ef7a4a16SSteven Rostedt 	unsigned int commit;
4164667d2412SLai Jiangshan 	unsigned int read;
41654f3640f8SSteven Rostedt 	u64 save_timestamp;
4166667d2412SLai Jiangshan 	int ret = -1;
41678789a9e7SSteven Rostedt 
4168554f786eSSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4169554f786eSSteven Rostedt 		goto out;
4170554f786eSSteven Rostedt 
4171474d32b6SSteven Rostedt 	/*
4172474d32b6SSteven Rostedt 	 * If len is not big enough to hold the page header, then
4173474d32b6SSteven Rostedt 	 * we can not copy anything.
4174474d32b6SSteven Rostedt 	 */
4175474d32b6SSteven Rostedt 	if (len <= BUF_PAGE_HDR_SIZE)
4176554f786eSSteven Rostedt 		goto out;
4177474d32b6SSteven Rostedt 
4178474d32b6SSteven Rostedt 	len -= BUF_PAGE_HDR_SIZE;
4179474d32b6SSteven Rostedt 
41808789a9e7SSteven Rostedt 	if (!data_page)
4181554f786eSSteven Rostedt 		goto out;
41828789a9e7SSteven Rostedt 
4183044fa782SSteven Rostedt 	bpage = *data_page;
4184044fa782SSteven Rostedt 	if (!bpage)
4185554f786eSSteven Rostedt 		goto out;
41868789a9e7SSteven Rostedt 
41875389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
41888789a9e7SSteven Rostedt 
4189ef7a4a16SSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
4190ef7a4a16SSteven Rostedt 	if (!reader)
4191554f786eSSteven Rostedt 		goto out_unlock;
41928789a9e7SSteven Rostedt 
4193ef7a4a16SSteven Rostedt 	event = rb_reader_event(cpu_buffer);
4194667d2412SLai Jiangshan 
4195ef7a4a16SSteven Rostedt 	read = reader->read;
4196ef7a4a16SSteven Rostedt 	commit = rb_page_commit(reader);
4197ef7a4a16SSteven Rostedt 
419866a8cb95SSteven Rostedt 	/* Check if any events were dropped */
4199ff0ff84aSSteven Rostedt 	missed_events = cpu_buffer->lost_events;
420066a8cb95SSteven Rostedt 
42018789a9e7SSteven Rostedt 	/*
4202474d32b6SSteven Rostedt 	 * If this page has been partially read, or if len is not big
4203474d32b6SSteven Rostedt 	 * enough to read the rest of the page, or if a writer is still
4204474d32b6SSteven Rostedt 	 * on the page, then we must copy the data from the page to the
4205474d32b6SSteven Rostedt 	 * buffer. Otherwise, we can simply swap the page with the one
4206474d32b6SSteven Rostedt 	 * passed in.
42078789a9e7SSteven Rostedt 	 */
4208474d32b6SSteven Rostedt 	if (read || (len < (commit - read)) ||
4209ef7a4a16SSteven Rostedt 	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
4210667d2412SLai Jiangshan 		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4211474d32b6SSteven Rostedt 		unsigned int rpos = read;
4212474d32b6SSteven Rostedt 		unsigned int pos = 0;
4213ef7a4a16SSteven Rostedt 		unsigned int size;
42148789a9e7SSteven Rostedt 
42158789a9e7SSteven Rostedt 		if (full)
4216554f786eSSteven Rostedt 			goto out_unlock;
42178789a9e7SSteven Rostedt 
4218ef7a4a16SSteven Rostedt 		if (len > (commit - read))
4219ef7a4a16SSteven Rostedt 			len = (commit - read);
4220ef7a4a16SSteven Rostedt 
422169d1b839SSteven Rostedt 		/* Always keep the time extend and data together */
422269d1b839SSteven Rostedt 		size = rb_event_ts_length(event);
4223ef7a4a16SSteven Rostedt 
4224ef7a4a16SSteven Rostedt 		if (len < size)
4225554f786eSSteven Rostedt 			goto out_unlock;
4226ef7a4a16SSteven Rostedt 
42274f3640f8SSteven Rostedt 		/* save the current timestamp, since the user will need it */
42284f3640f8SSteven Rostedt 		save_timestamp = cpu_buffer->read_stamp;
42294f3640f8SSteven Rostedt 
4230ef7a4a16SSteven Rostedt 		/* Need to copy one event at a time */
4231ef7a4a16SSteven Rostedt 		do {
4232e1e35927SDavid Sharp 			/*
4233e1e35927SDavid Sharp 			 * We need the size of one event: rb_advance_reader()
4234e1e35927SDavid Sharp 			 * advances by only one event, whereas rb_event_ts_length()
4235e1e35927SDavid Sharp 			 * may include the size of one or two events.  We have
4236e1e35927SDavid Sharp 			 * already ensured there is enough space if this is a
4237e1e35927SDavid Sharp 			 * time extend. */
4238e1e35927SDavid Sharp 			size = rb_event_length(event);
4239474d32b6SSteven Rostedt 			memcpy(bpage->data + pos, rpage->data + rpos, size);
4240ef7a4a16SSteven Rostedt 
4241ef7a4a16SSteven Rostedt 			len -= size;
4242ef7a4a16SSteven Rostedt 
4243ef7a4a16SSteven Rostedt 			rb_advance_reader(cpu_buffer);
4244474d32b6SSteven Rostedt 			rpos = reader->read;
4245474d32b6SSteven Rostedt 			pos += size;
4246ef7a4a16SSteven Rostedt 
424718fab912SHuang Ying 			if (rpos >= commit)
424818fab912SHuang Ying 				break;
424918fab912SHuang Ying 
4250ef7a4a16SSteven Rostedt 			event = rb_reader_event(cpu_buffer);
425169d1b839SSteven Rostedt 			/* Always keep the time extend and data together */
425269d1b839SSteven Rostedt 			size = rb_event_ts_length(event);
4253e1e35927SDavid Sharp 		} while (len >= size);
4254667d2412SLai Jiangshan 
4255667d2412SLai Jiangshan 		/* update bpage */
4256ef7a4a16SSteven Rostedt 		local_set(&bpage->commit, pos);
42574f3640f8SSteven Rostedt 		bpage->time_stamp = save_timestamp;
4258ef7a4a16SSteven Rostedt 
4259474d32b6SSteven Rostedt 		/* we copied everything to the beginning */
4260474d32b6SSteven Rostedt 		read = 0;
42618789a9e7SSteven Rostedt 	} else {
4262afbab76aSSteven Rostedt 		/* update the entry counter */
426377ae365eSSteven Rostedt 		cpu_buffer->read += rb_page_entries(reader);
4264c64e148aSVaibhav Nagarnaik 		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4265afbab76aSSteven Rostedt 
42668789a9e7SSteven Rostedt 		/* swap the pages */
4267044fa782SSteven Rostedt 		rb_init_page(bpage);
4268ef7a4a16SSteven Rostedt 		bpage = reader->page;
4269ef7a4a16SSteven Rostedt 		reader->page = *data_page;
4270ef7a4a16SSteven Rostedt 		local_set(&reader->write, 0);
4271778c55d4SSteven Rostedt 		local_set(&reader->entries, 0);
4272ef7a4a16SSteven Rostedt 		reader->read = 0;
4273044fa782SSteven Rostedt 		*data_page = bpage;
4274ff0ff84aSSteven Rostedt 
4275ff0ff84aSSteven Rostedt 		/*
4276ff0ff84aSSteven Rostedt 		 * Use the real_end for the data size. This gives us
4277ff0ff84aSSteven Rostedt 		 * a chance to store the count of lost events on the
4278ff0ff84aSSteven Rostedt 		 * page.
4279ff0ff84aSSteven Rostedt 		 */
4280ff0ff84aSSteven Rostedt 		if (reader->real_end)
4281ff0ff84aSSteven Rostedt 			local_set(&bpage->commit, reader->real_end);
4282ef7a4a16SSteven Rostedt 	}
4283ef7a4a16SSteven Rostedt 	ret = read;
4284ef7a4a16SSteven Rostedt 
428566a8cb95SSteven Rostedt 	cpu_buffer->lost_events = 0;
42862711ca23SSteven Rostedt 
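	/* re-read the size; bpage may be the page just swapped in */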
42872711ca23SSteven Rostedt 	commit = local_read(&bpage->commit);
428866a8cb95SSteven Rostedt 	/*
428966a8cb95SSteven Rostedt 	 * Set a flag in the commit field if we lost events
429066a8cb95SSteven Rostedt 	 */
4291ff0ff84aSSteven Rostedt 	if (missed_events) {
4292ff0ff84aSSteven Rostedt 		/* If there is room at the end of the page to save the
4293ff0ff84aSSteven Rostedt 		 * missed-events count, then record it there.
4294ff0ff84aSSteven Rostedt 		 */
4295ff0ff84aSSteven Rostedt 		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4296ff0ff84aSSteven Rostedt 			memcpy(&bpage->data[commit], &missed_events,
4297ff0ff84aSSteven Rostedt 			       sizeof(missed_events));
4298ff0ff84aSSteven Rostedt 			local_add(RB_MISSED_STORED, &bpage->commit);
42992711ca23SSteven Rostedt 			commit += sizeof(missed_events);
4300ff0ff84aSSteven Rostedt 		}
430166a8cb95SSteven Rostedt 		local_add(RB_MISSED_EVENTS, &bpage->commit);
4302ff0ff84aSSteven Rostedt 	}
430366a8cb95SSteven Rostedt 
43042711ca23SSteven Rostedt 	/*
43052711ca23SSteven Rostedt 	 * This page may be handed off to user land. Zero out the unused tail.
43062711ca23SSteven Rostedt 	 */
43072711ca23SSteven Rostedt 	if (commit < BUF_PAGE_SIZE)
43082711ca23SSteven Rostedt 		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
43092711ca23SSteven Rostedt 
4310554f786eSSteven Rostedt  out_unlock:
43115389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
43128789a9e7SSteven Rostedt 
4313554f786eSSteven Rostedt  out:
43148789a9e7SSteven Rostedt 	return ret;
43158789a9e7SSteven Rostedt }
4316d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_read_page);
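
/*
 * A minimal consumer sketch (illustrative only; rb_drain_cpu() and
 * process_page() are hypothetical, and the calls follow the conventions
 * shown in the kernel-doc example above):
 *
 *	void rb_drain_cpu(struct ring_buffer *buffer, int cpu)
 *	{
 *		void *rpage = ring_buffer_alloc_read_page(buffer);
 *		int ret;
 *
 *		if (!rpage)
 *			return;
 *		while ((ret = ring_buffer_read_page(buffer, &rpage,
 *						    PAGE_SIZE, cpu, 0)) >= 0)
 *			process_page(rpage, ret);
 *		ring_buffer_free_read_page(buffer, rpage);
 *	}
 *
 * On a swap, RB_MISSED_EVENTS/RB_MISSED_STORED may be set in the returned
 * page's commit field above the data-size bits, so a consumer inspecting
 * the commit field must mask those flags off first.
 */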
43178789a9e7SSteven Rostedt 
431859222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
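/*
 * CPU hotplug callback: the notifier_block is the cpu_notify member of
 * struct ring_buffer (see the container_of() below), so this runs once
 * per buffer for each hotplug event. A per-cpu buffer is allocated when
 * a CPU comes up, and deliberately kept when a CPU goes down so no
 * trace data is lost.
 */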
431909c9e84dSFrederic Weisbecker static int rb_cpu_notify(struct notifier_block *self,
4320554f786eSSteven Rostedt 			 unsigned long action, void *hcpu)
4321554f786eSSteven Rostedt {
4322554f786eSSteven Rostedt 	struct ring_buffer *buffer =
4323554f786eSSteven Rostedt 		container_of(self, struct ring_buffer, cpu_notify);
4324554f786eSSteven Rostedt 	long cpu = (long)hcpu;
4325438ced17SVaibhav Nagarnaik 	int cpu_i, nr_pages_same;
4326438ced17SVaibhav Nagarnaik 	unsigned int nr_pages;
4327554f786eSSteven Rostedt 
4328554f786eSSteven Rostedt 	switch (action) {
4329554f786eSSteven Rostedt 	case CPU_UP_PREPARE:
4330554f786eSSteven Rostedt 	case CPU_UP_PREPARE_FROZEN:
43313f237a79SRusty Russell 		if (cpumask_test_cpu(cpu, buffer->cpumask))
4332554f786eSSteven Rostedt 			return NOTIFY_OK;
4333554f786eSSteven Rostedt 
4334438ced17SVaibhav Nagarnaik 		nr_pages = 0;
4335438ced17SVaibhav Nagarnaik 		nr_pages_same = 1;
4336438ced17SVaibhav Nagarnaik 		/* check if all cpu buffer sizes are the same */
4337438ced17SVaibhav Nagarnaik 		for_each_buffer_cpu(buffer, cpu_i) {
4338438ced17SVaibhav Nagarnaik 			/* fill in the size from the first enabled cpu */
4339438ced17SVaibhav Nagarnaik 			if (nr_pages == 0)
4340438ced17SVaibhav Nagarnaik 				nr_pages = buffer->buffers[cpu_i]->nr_pages;
4341438ced17SVaibhav Nagarnaik 			if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4342438ced17SVaibhav Nagarnaik 				nr_pages_same = 0;
4343438ced17SVaibhav Nagarnaik 				break;
4344438ced17SVaibhav Nagarnaik 			}
4345438ced17SVaibhav Nagarnaik 		}
4346438ced17SVaibhav Nagarnaik 		/* allocate the minimum pages; the user can expand it later */
4347438ced17SVaibhav Nagarnaik 		if (!nr_pages_same)
4348438ced17SVaibhav Nagarnaik 			nr_pages = 2;
4349554f786eSSteven Rostedt 		buffer->buffers[cpu] =
4350438ced17SVaibhav Nagarnaik 			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4351554f786eSSteven Rostedt 		if (!buffer->buffers[cpu]) {
4352554f786eSSteven Rostedt 			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4353554f786eSSteven Rostedt 			     cpu);
4354554f786eSSteven Rostedt 			return NOTIFY_OK;
4355554f786eSSteven Rostedt 		}
4356554f786eSSteven Rostedt 		smp_wmb();
43573f237a79SRusty Russell 		cpumask_set_cpu(cpu, buffer->cpumask);
4358554f786eSSteven Rostedt 		break;
4359554f786eSSteven Rostedt 	case CPU_DOWN_PREPARE:
4360554f786eSSteven Rostedt 	case CPU_DOWN_PREPARE_FROZEN:
4361554f786eSSteven Rostedt 		/*
4362554f786eSSteven Rostedt 		 * Do nothing.
4363554f786eSSteven Rostedt 		 *  If we were to free the buffer, then the user would
4364554f786eSSteven Rostedt 		 *  lose any trace that was in the buffer.
4365554f786eSSteven Rostedt 		 */
4366554f786eSSteven Rostedt 		break;
4367554f786eSSteven Rostedt 	default:
4368554f786eSSteven Rostedt 		break;
4369554f786eSSteven Rostedt 	}
4370554f786eSSteven Rostedt 	return NOTIFY_OK;
4371554f786eSSteven Rostedt }
4372554f786eSSteven Rostedt #endif
4373