/* xref: /linux-6.15/kernel/trace/ring_buffer.c (revision 567cd4da) */
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include <asm/local.h>
#include "trace.h"

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "# compressed entry header\n");
	ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
	ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}
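
/*
 * Illustrative sketch (not part of this file): how the compressed
 * entry header printed above packs into one 32-bit word. This mirrors
 * the type_len:5 / time_delta:27 bitfields of struct ring_buffer_event;
 * the helper name and the raw-word decoding (which ignores that C
 * bitfield order is implementation-defined) are assumptions made for
 * demonstration only.
 */
static inline void rb_example_decode_header(u32 raw,
					    u32 *type_len, u32 *time_delta)
{
	*type_len   = raw & 0x1f;	/* low 5 bits: type or length */
	*time_delta = raw >> 5;		/* remaining 27 bits: time delta */
}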

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */
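
/*
 * Illustrative sketch (an assumption, not the kernel's implementation):
 * the swap described above, reduced to its bare list operations, with
 * all of the locking, flag-bit, and cmpxchg details omitted. The
 * function name is hypothetical.
 */
static inline void rb_example_swap_reader(struct list_head *reader,
					  struct list_head *head)
{
	/* The old reader page takes the head page's place in the ring... */
	list_replace(head, reader);
	/* ...and the old head page becomes the reader page, off the ring. */
	INIT_LIST_HEAD(head);
}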

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 *  This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)
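
/*
 * Illustrative sketch (an assumption, not an API in this file): how
 * the two global bits above combine. Recording is only possible when
 * the ON bit is set and the DISABLED bit is clear, i.e. when the
 * flags word is exactly RB_BUFFERS_ON.
 */
static inline bool rb_example_buffers_recordable(void)
{
	return ring_buffer_flags == RB_BUFFERS_ON;
}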

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
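
/*
 * Illustrative sketch (an assumption, for demonstration only): a time
 * extend occupies RB_LEN_TIME_EXTEND (8) bytes and is immediately
 * followed by the data event it extends, which is exactly what
 * skip_time_extend() steps over.
 */
static inline struct ring_buffer_event *
rb_example_data_event(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		return skip_time_extend(event);	/* the data event follows */
	return event;				/* already a data event */
}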

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}
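
/*
 * Worked example (illustrative, assuming a 4-byte RB_ALIGNMENT and a
 * 4-byte RB_EVNT_HDR_SIZE): an event with type_len == 3 carries
 * 3 * 4 = 12 bytes of payload, 16 bytes in total. An event with
 * type_len == 0 stores its payload length in array[0] instead, so the
 * effective header grows to 8 bytes and the payload starts at array[1].
 */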

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 *   or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
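
/*
 * Illustrative usage sketch (an assumption, not part of this file): a
 * consumer that has already obtained an event can size and read its
 * payload with the two exported helpers above. The destination buffer
 * and its length are hypothetical.
 */
static void rb_example_consume(struct ring_buffer_event *event,
			       void *dst, unsigned dst_len)
{
	unsigned len = ring_buffer_event_length(event);

	if (len > dst_len)
		len = dst_len;
	memcpy(dst, ring_buffer_event_data(event), len);
}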

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)
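
/*
 * Illustrative sketch (an assumption): how the two halves of the
 * packed word above are pulled apart. The low 20 bits are the write
 * index on the page; everything above RB_WRITE_MASK counts nested
 * updaters.
 */
static inline unsigned long rb_example_write_index(unsigned long write)
{
	return write & RB_WRITE_MASK;		/* position on the page */
}

static inline unsigned long rb_example_updaters(unsigned long write)
{
	return write / RB_WRITE_INTCNT;		/* nested updater count */
}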

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
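
/*
 * Worked example (illustrative, assuming 4096-byte pages and a 16-byte
 * buffer_data_page header on 64-bit): BUF_PAGE_SIZE is 4096 - 16 =
 * 4080 bytes of event space, and BUF_MAX_DATA_SIZE caps a single
 * payload at 4080 - 8 = 4072 bytes.
 */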

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)sizeof(field.time_stamp),
			       (unsigned int)is_signed_type(u64));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit),
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       1,
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE,
			       (unsigned int)is_signed_type(char));

	return ret;
}

/*
 * If head_page == tail_page && head == tail, then the buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	struct ring_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	unsigned int			nr_pages;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	unsigned long			read;
	unsigned long			read_bytes;
	u64				write_stamp;
	u64				read_stamp;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	int				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;
};

struct ring_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})
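
/*
 * Illustrative usage note (an assumption about no particular caller):
 * RB_WARN_ON() both warns and disables further recording, and it
 * evaluates to the condition, so callers bail out on its value, e.g.:
 *
 *	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
 *		return NULL;
 */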

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, so writers
 * only need to worry about interrupts. Reads, however, can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 *  What the above shows is that the reader just swapped out
 *  the reader page with a page in the buffer, but before it
 *  could make the new header point back to the new page added
 *  it was preempted by a writer. The writer moved forward onto
 *  the new page added by the reader and is about to move forward
 *  again.
 *
 *  You can see, it is legitimate for the previous pointer of
 *  the head (or any page) not to point back to itself. But only
 *  temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL
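
/*
 * Illustrative sketch (an assumption): because buffer pages are
 * cache-line aligned, the two low bits of a list pointer are free to
 * carry the flags above. Tagging is just a bit operation; the helper
 * name is hypothetical.
 */
static inline struct list_head *rb_example_tag(struct list_head *list,
					       unsigned long flag)
{
	return (struct list_head *)((unsigned long)list | flag);
}

/* rb_list_head() below performs the matching untag. */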

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static int rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(cpu_buffer, &page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}

/*
 * rb_tail_page_update - move the tail page forward
 *
 * Returns 1 if moved tail page, 0 if someone else did.
 */
static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page *tail_page,
			       struct buffer_page *next_page)
{
	struct buffer_page *old_tail;
	unsigned long old_entries;
	unsigned long old_write;
	int ret = 0;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == cpu_buffer->tail_page) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did not
		 * come in and change it; in that case, we do not
		 * want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * It can only increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		old_tail = cmpxchg(&cpu_buffer->tail_page,
				   tail_page, next_page);

		if (old_tail == tail_page)
			ret = 1;
	}

	return ret;
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}

/**
 * check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	/* Reset the head page if it exists */
	if (cpu_buffer->head_page)
		rb_set_head_page(cpu_buffer);

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}

static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
{
	int i;
	struct buffer_page *bpage, *tmp;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		/*
		 * The __GFP_NORETRY flag makes sure that the allocation
		 * fails gracefully without invoking the OOM killer, so
		 * the system is not destabilized.
		 */
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				    GFP_KERNEL | __GFP_NORETRY,
				    cpu_to_node(cpu));
		if (!bpage)
			goto free_pages;

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);
	}

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}

	return -ENOMEM;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}
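
/*
 * Illustrative sketch (an assumption, not part of this file): walking
 * the headless circular list set up above. Any page's list entry
 * reaches every other page; the walk ends when it returns to where it
 * started.
 */
static void rb_example_for_each_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *p = cpu_buffer->pages;

	do {
		/* struct buffer_page *bpage = list_entry(p, ...); */
		p = rb_list_head(p->next);	/* strip any flag bits */
	} while (p != cpu_buffer->pages);
}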

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
1112044fa782SSteven Rostedt 			list_del_init(&bpage->list);
1113044fa782SSteven Rostedt 			free_buffer_page(bpage);
11147a8e76a3SSteven Rostedt 		}
11153adc54faSSteven Rostedt 		bpage = list_entry(head, struct buffer_page, list);
11163adc54faSSteven Rostedt 		free_buffer_page(bpage);
11173adc54faSSteven Rostedt 	}
11183adc54faSSteven Rostedt 
11197a8e76a3SSteven Rostedt 	kfree(cpu_buffer);
11207a8e76a3SSteven Rostedt }
11217a8e76a3SSteven Rostedt 
112259222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
112309c9e84dSFrederic Weisbecker static int rb_cpu_notify(struct notifier_block *self,
1124554f786eSSteven Rostedt 			 unsigned long action, void *hcpu);
1125554f786eSSteven Rostedt #endif
1126554f786eSSteven Rostedt 
11277a8e76a3SSteven Rostedt /**
11287a8e76a3SSteven Rostedt  * ring_buffer_alloc - allocate a new ring_buffer
112968814b58SRobert Richter  * @size: the size in bytes per cpu that is needed.
11307a8e76a3SSteven Rostedt  * @flags: attributes to set for the ring buffer.
11317a8e76a3SSteven Rostedt  *
11327a8e76a3SSteven Rostedt  * Currently the only flag that is available is the RB_FL_OVERWRITE
11337a8e76a3SSteven Rostedt  * flag. This flag means that the buffer will overwrite old data
11347a8e76a3SSteven Rostedt  * when the buffer wraps. If this flag is not set, the buffer will
11357a8e76a3SSteven Rostedt  * drop data when the tail hits the head.
11367a8e76a3SSteven Rostedt  */
11371f8a6a10SPeter Zijlstra struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
11381f8a6a10SPeter Zijlstra 					struct lock_class_key *key)
11397a8e76a3SSteven Rostedt {
11407a8e76a3SSteven Rostedt 	struct ring_buffer *buffer;
11417a8e76a3SSteven Rostedt 	int bsize;
1142438ced17SVaibhav Nagarnaik 	int cpu, nr_pages;
11437a8e76a3SSteven Rostedt 
11447a8e76a3SSteven Rostedt 	/* keep it in its own cache line */
11457a8e76a3SSteven Rostedt 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
11467a8e76a3SSteven Rostedt 			 GFP_KERNEL);
11477a8e76a3SSteven Rostedt 	if (!buffer)
11487a8e76a3SSteven Rostedt 		return NULL;
11497a8e76a3SSteven Rostedt 
11509e01c1b7SRusty Russell 	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
11519e01c1b7SRusty Russell 		goto fail_free_buffer;
11529e01c1b7SRusty Russell 
1153438ced17SVaibhav Nagarnaik 	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
11547a8e76a3SSteven Rostedt 	buffer->flags = flags;
115537886f6aSSteven Rostedt 	buffer->clock = trace_clock_local;
11561f8a6a10SPeter Zijlstra 	buffer->reader_lock_key = key;
11577a8e76a3SSteven Rostedt 
11587a8e76a3SSteven Rostedt 	/* need at least two pages */
1159438ced17SVaibhav Nagarnaik 	if (nr_pages < 2)
1160438ced17SVaibhav Nagarnaik 		nr_pages = 2;
11617a8e76a3SSteven Rostedt 
11623bf832ceSFrederic Weisbecker 	/*
11633bf832ceSFrederic Weisbecker 	 * In the non-hotplug CPU case, if the ring buffer is allocated
11643bf832ceSFrederic Weisbecker 	 * in an early initcall, it will not be notified of secondary CPUs.
11653bf832ceSFrederic Weisbecker 	 * In that case, we need to allocate for all possible CPUs.
11663bf832ceSFrederic Weisbecker 	 */
11673bf832ceSFrederic Weisbecker #ifdef CONFIG_HOTPLUG_CPU
1168554f786eSSteven Rostedt 	get_online_cpus();
1169554f786eSSteven Rostedt 	cpumask_copy(buffer->cpumask, cpu_online_mask);
11703bf832ceSFrederic Weisbecker #else
11713bf832ceSFrederic Weisbecker 	cpumask_copy(buffer->cpumask, cpu_possible_mask);
11723bf832ceSFrederic Weisbecker #endif
11737a8e76a3SSteven Rostedt 	buffer->cpus = nr_cpu_ids;
11747a8e76a3SSteven Rostedt 
11757a8e76a3SSteven Rostedt 	bsize = sizeof(void *) * nr_cpu_ids;
11767a8e76a3SSteven Rostedt 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
11777a8e76a3SSteven Rostedt 				  GFP_KERNEL);
11787a8e76a3SSteven Rostedt 	if (!buffer->buffers)
11799e01c1b7SRusty Russell 		goto fail_free_cpumask;
11807a8e76a3SSteven Rostedt 
11817a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
11827a8e76a3SSteven Rostedt 		buffer->buffers[cpu] =
1183438ced17SVaibhav Nagarnaik 			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
11847a8e76a3SSteven Rostedt 		if (!buffer->buffers[cpu])
11857a8e76a3SSteven Rostedt 			goto fail_free_buffers;
11867a8e76a3SSteven Rostedt 	}
11877a8e76a3SSteven Rostedt 
118859222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
1189554f786eSSteven Rostedt 	buffer->cpu_notify.notifier_call = rb_cpu_notify;
1190554f786eSSteven Rostedt 	buffer->cpu_notify.priority = 0;
1191554f786eSSteven Rostedt 	register_cpu_notifier(&buffer->cpu_notify);
1192554f786eSSteven Rostedt #endif
1193554f786eSSteven Rostedt 
1194554f786eSSteven Rostedt 	put_online_cpus();
11957a8e76a3SSteven Rostedt 	mutex_init(&buffer->mutex);
11967a8e76a3SSteven Rostedt 
11977a8e76a3SSteven Rostedt 	return buffer;
11987a8e76a3SSteven Rostedt 
11997a8e76a3SSteven Rostedt  fail_free_buffers:
12007a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
12017a8e76a3SSteven Rostedt 		if (buffer->buffers[cpu])
12027a8e76a3SSteven Rostedt 			rb_free_cpu_buffer(buffer->buffers[cpu]);
12037a8e76a3SSteven Rostedt 	}
12047a8e76a3SSteven Rostedt 	kfree(buffer->buffers);
12057a8e76a3SSteven Rostedt 
12069e01c1b7SRusty Russell  fail_free_cpumask:
12079e01c1b7SRusty Russell 	free_cpumask_var(buffer->cpumask);
1208554f786eSSteven Rostedt 	put_online_cpus();
12099e01c1b7SRusty Russell 
12107a8e76a3SSteven Rostedt  fail_free_buffer:
12117a8e76a3SSteven Rostedt 	kfree(buffer);
12127a8e76a3SSteven Rostedt 	return NULL;
12137a8e76a3SSteven Rostedt }
12141f8a6a10SPeter Zijlstra EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
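
/*
 * Usage sketch (illustrative, not part of the original file).
 * Callers normally go through the ring_buffer_alloc() wrapper macro,
 * which supplies a static lock_class_key for lockdep:
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 *
 * The size is bytes per CPU; it is rounded up to whole buffer pages,
 * with a minimum of two pages per CPU.
 */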
12157a8e76a3SSteven Rostedt 
12167a8e76a3SSteven Rostedt /**
12177a8e76a3SSteven Rostedt  * ring_buffer_free - free a ring buffer.
12187a8e76a3SSteven Rostedt  * @buffer: the buffer to free.
12197a8e76a3SSteven Rostedt  */
12207a8e76a3SSteven Rostedt void
12217a8e76a3SSteven Rostedt ring_buffer_free(struct ring_buffer *buffer)
12227a8e76a3SSteven Rostedt {
12237a8e76a3SSteven Rostedt 	int cpu;
12247a8e76a3SSteven Rostedt 
1225554f786eSSteven Rostedt 	get_online_cpus();
1226554f786eSSteven Rostedt 
122759222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
1228554f786eSSteven Rostedt 	unregister_cpu_notifier(&buffer->cpu_notify);
1229554f786eSSteven Rostedt #endif
1230554f786eSSteven Rostedt 
12317a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu)
12327a8e76a3SSteven Rostedt 		rb_free_cpu_buffer(buffer->buffers[cpu]);
12337a8e76a3SSteven Rostedt 
1234554f786eSSteven Rostedt 	put_online_cpus();
1235554f786eSSteven Rostedt 
1236bd3f0221SEric Dumazet 	kfree(buffer->buffers);
12379e01c1b7SRusty Russell 	free_cpumask_var(buffer->cpumask);
12389e01c1b7SRusty Russell 
12397a8e76a3SSteven Rostedt 	kfree(buffer);
12407a8e76a3SSteven Rostedt }
1241c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_free);
12427a8e76a3SSteven Rostedt 
124337886f6aSSteven Rostedt void ring_buffer_set_clock(struct ring_buffer *buffer,
124437886f6aSSteven Rostedt 			   u64 (*clock)(void))
124537886f6aSSteven Rostedt {
124637886f6aSSteven Rostedt 	buffer->clock = clock;
124737886f6aSSteven Rostedt }
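
/*
 * Illustrative sketch (not part of the original file): any 64-bit
 * nanosecond counter can replace the default trace_clock_local, e.g.
 * (my_monotonic_clock is a hypothetical name):
 *
 *	static u64 my_monotonic_clock(void)
 *	{
 *		return sched_clock();
 *	}
 *
 *	ring_buffer_set_clock(buffer, my_monotonic_clock);
 *
 * Note that switching clocks on a live buffer mixes time bases
 * across the events already recorded.
 */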
124837886f6aSSteven Rostedt 
12497a8e76a3SSteven Rostedt static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
12507a8e76a3SSteven Rostedt 
125183f40318SVaibhav Nagarnaik static inline unsigned long rb_page_entries(struct buffer_page *bpage)
12527a8e76a3SSteven Rostedt {
125383f40318SVaibhav Nagarnaik 	return local_read(&bpage->entries) & RB_WRITE_MASK;
125483f40318SVaibhav Nagarnaik }
125583f40318SVaibhav Nagarnaik 
125683f40318SVaibhav Nagarnaik static inline unsigned long rb_page_write(struct buffer_page *bpage)
125783f40318SVaibhav Nagarnaik {
125883f40318SVaibhav Nagarnaik 	return local_read(&bpage->write) & RB_WRITE_MASK;
125983f40318SVaibhav Nagarnaik }
126083f40318SVaibhav Nagarnaik 
12615040b4b7SVaibhav Nagarnaik static int
126283f40318SVaibhav Nagarnaik rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
126383f40318SVaibhav Nagarnaik {
126483f40318SVaibhav Nagarnaik 	struct list_head *tail_page, *to_remove, *next_page;
126583f40318SVaibhav Nagarnaik 	struct buffer_page *to_remove_page, *tmp_iter_page;
126683f40318SVaibhav Nagarnaik 	struct buffer_page *last_page, *first_page;
126783f40318SVaibhav Nagarnaik 	unsigned int nr_removed;
126883f40318SVaibhav Nagarnaik 	unsigned long head_bit;
126983f40318SVaibhav Nagarnaik 	int page_entries;
127083f40318SVaibhav Nagarnaik 
127183f40318SVaibhav Nagarnaik 	head_bit = 0;
12727a8e76a3SSteven Rostedt 
12735389f6faSThomas Gleixner 	raw_spin_lock_irq(&cpu_buffer->reader_lock);
127483f40318SVaibhav Nagarnaik 	atomic_inc(&cpu_buffer->record_disabled);
127583f40318SVaibhav Nagarnaik 	/*
127683f40318SVaibhav Nagarnaik 	 * We don't race with the readers since we have acquired the reader
127783f40318SVaibhav Nagarnaik 	 * lock. We also don't race with writers after disabling recording.
127883f40318SVaibhav Nagarnaik 	 * This makes it easy to figure out the first and the last page to be
127983f40318SVaibhav Nagarnaik 	 * removed from the list. We unlink all the pages in between including
128083f40318SVaibhav Nagarnaik 	 * the first and last pages. This is done in a busy loop so that we
128183f40318SVaibhav Nagarnaik 	 * lose the least number of traces.
128283f40318SVaibhav Nagarnaik 	 * The pages are freed after we restart recording and unlock readers.
128383f40318SVaibhav Nagarnaik 	 */
128483f40318SVaibhav Nagarnaik 	tail_page = &cpu_buffer->tail_page->list;
128577ae365eSSteven Rostedt 
128683f40318SVaibhav Nagarnaik 	/*
128783f40318SVaibhav Nagarnaik 	 * tail page might be on reader page, we remove the next page
128883f40318SVaibhav Nagarnaik 	 * from the ring buffer
128983f40318SVaibhav Nagarnaik 	 */
129083f40318SVaibhav Nagarnaik 	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
129183f40318SVaibhav Nagarnaik 		tail_page = rb_list_head(tail_page->next);
129283f40318SVaibhav Nagarnaik 	to_remove = tail_page;
129383f40318SVaibhav Nagarnaik 
129483f40318SVaibhav Nagarnaik 	/* start of pages to remove */
129583f40318SVaibhav Nagarnaik 	first_page = list_entry(rb_list_head(to_remove->next),
129683f40318SVaibhav Nagarnaik 				struct buffer_page, list);
129783f40318SVaibhav Nagarnaik 
129883f40318SVaibhav Nagarnaik 	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
129983f40318SVaibhav Nagarnaik 		to_remove = rb_list_head(to_remove)->next;
130083f40318SVaibhav Nagarnaik 		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
13017a8e76a3SSteven Rostedt 	}
13027a8e76a3SSteven Rostedt 
130383f40318SVaibhav Nagarnaik 	next_page = rb_list_head(to_remove)->next;
13047a8e76a3SSteven Rostedt 
130583f40318SVaibhav Nagarnaik 	/*
130683f40318SVaibhav Nagarnaik 	 * Now we remove all pages between tail_page and next_page.
130783f40318SVaibhav Nagarnaik 	 * Make sure that we have head_bit value preserved for the
130883f40318SVaibhav Nagarnaik 	 * next page
130983f40318SVaibhav Nagarnaik 	 */
131083f40318SVaibhav Nagarnaik 	tail_page->next = (struct list_head *)((unsigned long)next_page |
131183f40318SVaibhav Nagarnaik 						head_bit);
131283f40318SVaibhav Nagarnaik 	next_page = rb_list_head(next_page);
131383f40318SVaibhav Nagarnaik 	next_page->prev = tail_page;
131483f40318SVaibhav Nagarnaik 
131583f40318SVaibhav Nagarnaik 	/* make sure pages points to a valid page in the ring buffer */
131683f40318SVaibhav Nagarnaik 	cpu_buffer->pages = next_page;
131783f40318SVaibhav Nagarnaik 
131883f40318SVaibhav Nagarnaik 	/* update head page */
131983f40318SVaibhav Nagarnaik 	if (head_bit)
132083f40318SVaibhav Nagarnaik 		cpu_buffer->head_page = list_entry(next_page,
132183f40318SVaibhav Nagarnaik 						struct buffer_page, list);
132283f40318SVaibhav Nagarnaik 
132383f40318SVaibhav Nagarnaik 	/*
132483f40318SVaibhav Nagarnaik 	 * change read pointer to make sure any read iterators reset
132583f40318SVaibhav Nagarnaik 	 * themselves
132683f40318SVaibhav Nagarnaik 	 */
132783f40318SVaibhav Nagarnaik 	cpu_buffer->read = 0;
132883f40318SVaibhav Nagarnaik 
132983f40318SVaibhav Nagarnaik 	/* pages are removed, resume tracing and then free the pages */
133083f40318SVaibhav Nagarnaik 	atomic_dec(&cpu_buffer->record_disabled);
13315389f6faSThomas Gleixner 	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
133283f40318SVaibhav Nagarnaik 
133383f40318SVaibhav Nagarnaik 	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
133483f40318SVaibhav Nagarnaik 
133583f40318SVaibhav Nagarnaik 	/* last buffer page to remove */
133683f40318SVaibhav Nagarnaik 	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
133783f40318SVaibhav Nagarnaik 				list);
133883f40318SVaibhav Nagarnaik 	tmp_iter_page = first_page;
133983f40318SVaibhav Nagarnaik 
134083f40318SVaibhav Nagarnaik 	do {
134183f40318SVaibhav Nagarnaik 		to_remove_page = tmp_iter_page;
134283f40318SVaibhav Nagarnaik 		rb_inc_page(cpu_buffer, &tmp_iter_page);
134383f40318SVaibhav Nagarnaik 
134483f40318SVaibhav Nagarnaik 		/* update the counters */
134583f40318SVaibhav Nagarnaik 		page_entries = rb_page_entries(to_remove_page);
134683f40318SVaibhav Nagarnaik 		if (page_entries) {
134783f40318SVaibhav Nagarnaik 			/*
134883f40318SVaibhav Nagarnaik 			 * If something was added to this page, it was full
134983f40318SVaibhav Nagarnaik 			 * since it is not the tail page. So we deduct the
135083f40318SVaibhav Nagarnaik 			 * bytes consumed in the ring buffer here.
135148fdc72fSVaibhav Nagarnaik 			 * Increment overrun to account for the lost events.
135283f40318SVaibhav Nagarnaik 			 */
135348fdc72fSVaibhav Nagarnaik 			local_add(page_entries, &cpu_buffer->overrun);
135483f40318SVaibhav Nagarnaik 			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
135583f40318SVaibhav Nagarnaik 		}
135683f40318SVaibhav Nagarnaik 
135783f40318SVaibhav Nagarnaik 		/*
135883f40318SVaibhav Nagarnaik 		 * We have already removed references to this list item, just
135983f40318SVaibhav Nagarnaik 		 * free up the buffer_page and its page
136083f40318SVaibhav Nagarnaik 		 */
136183f40318SVaibhav Nagarnaik 		free_buffer_page(to_remove_page);
136283f40318SVaibhav Nagarnaik 		nr_removed--;
136383f40318SVaibhav Nagarnaik 
136483f40318SVaibhav Nagarnaik 	} while (to_remove_page != last_page);
136583f40318SVaibhav Nagarnaik 
136683f40318SVaibhav Nagarnaik 	RB_WARN_ON(cpu_buffer, nr_removed);
13675040b4b7SVaibhav Nagarnaik 
13685040b4b7SVaibhav Nagarnaik 	return nr_removed == 0;
13697a8e76a3SSteven Rostedt }
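
/*
 * Illustrative sketch (not part of the original file): the list
 * pointers in this buffer double as flag words. The low bits of
 * ->next can carry state such as RB_PAGE_HEAD, which is why the
 * code above strips pointers with rb_list_head() before following
 * them and ORs the saved head_bit back in when relinking:
 *
 *	struct list_head *next = rb_list_head(tail_page->next);
 *	unsigned long head_bit =
 *		(unsigned long)tail_page->next & RB_PAGE_HEAD;
 *
 *	tail_page->next =
 *		(struct list_head *)((unsigned long)next | head_bit);
 */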
13707a8e76a3SSteven Rostedt 
13715040b4b7SVaibhav Nagarnaik static int
13725040b4b7SVaibhav Nagarnaik rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
13737a8e76a3SSteven Rostedt {
13745040b4b7SVaibhav Nagarnaik 	struct list_head *pages = &cpu_buffer->new_pages;
13755040b4b7SVaibhav Nagarnaik 	int retries, success;
13767a8e76a3SSteven Rostedt 
13775389f6faSThomas Gleixner 	raw_spin_lock_irq(&cpu_buffer->reader_lock);
13785040b4b7SVaibhav Nagarnaik 	/*
13795040b4b7SVaibhav Nagarnaik 	 * We are holding the reader lock, so the reader page won't be swapped
13805040b4b7SVaibhav Nagarnaik 	 * in the ring buffer. Now we are racing with the writer trying to
13815040b4b7SVaibhav Nagarnaik 	 * move the head page and the tail page.
13825040b4b7SVaibhav Nagarnaik 	 * We are going to adapt the reader page update process where:
13835040b4b7SVaibhav Nagarnaik 	 * 1. We first splice the start and end of list of new pages between
13845040b4b7SVaibhav Nagarnaik 	 *    the head page and its previous page.
13855040b4b7SVaibhav Nagarnaik 	 * 2. We cmpxchg the prev_page->next to point from head page to the
13865040b4b7SVaibhav Nagarnaik 	 *    start of new pages list.
13875040b4b7SVaibhav Nagarnaik 	 * 3. Finally, we update the head->prev to the end of new list.
13885040b4b7SVaibhav Nagarnaik 	 *
13895040b4b7SVaibhav Nagarnaik 	 * We will try this process 10 times, to make sure that we don't keep
13905040b4b7SVaibhav Nagarnaik 	 * spinning.
13915040b4b7SVaibhav Nagarnaik 	 */
13925040b4b7SVaibhav Nagarnaik 	retries = 10;
13935040b4b7SVaibhav Nagarnaik 	success = 0;
13945040b4b7SVaibhav Nagarnaik 	while (retries--) {
13955040b4b7SVaibhav Nagarnaik 		struct list_head *head_page, *prev_page, *r;
13965040b4b7SVaibhav Nagarnaik 		struct list_head *last_page, *first_page;
13975040b4b7SVaibhav Nagarnaik 		struct list_head *head_page_with_bit;
139877ae365eSSteven Rostedt 
13995040b4b7SVaibhav Nagarnaik 		head_page = &rb_set_head_page(cpu_buffer)->list;
140054f7be5bSSteven Rostedt 		if (!head_page)
140154f7be5bSSteven Rostedt 			break;
14025040b4b7SVaibhav Nagarnaik 		prev_page = head_page->prev;
14035040b4b7SVaibhav Nagarnaik 
14045040b4b7SVaibhav Nagarnaik 		first_page = pages->next;
14055040b4b7SVaibhav Nagarnaik 		last_page  = pages->prev;
14065040b4b7SVaibhav Nagarnaik 
14075040b4b7SVaibhav Nagarnaik 		head_page_with_bit = (struct list_head *)
14085040b4b7SVaibhav Nagarnaik 				     ((unsigned long)head_page | RB_PAGE_HEAD);
14095040b4b7SVaibhav Nagarnaik 
14105040b4b7SVaibhav Nagarnaik 		last_page->next = head_page_with_bit;
14115040b4b7SVaibhav Nagarnaik 		first_page->prev = prev_page;
14125040b4b7SVaibhav Nagarnaik 
14135040b4b7SVaibhav Nagarnaik 		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
14145040b4b7SVaibhav Nagarnaik 
14155040b4b7SVaibhav Nagarnaik 		if (r == head_page_with_bit) {
14165040b4b7SVaibhav Nagarnaik 			/*
14175040b4b7SVaibhav Nagarnaik 			 * yay, we replaced the page pointer with our new list;
14185040b4b7SVaibhav Nagarnaik 			 * now we just have to update the head page's prev
14195040b4b7SVaibhav Nagarnaik 			 * pointer to point to the end of the new list
14205040b4b7SVaibhav Nagarnaik 			 */
14215040b4b7SVaibhav Nagarnaik 			head_page->prev = last_page;
14225040b4b7SVaibhav Nagarnaik 			success = 1;
14235040b4b7SVaibhav Nagarnaik 			break;
14247a8e76a3SSteven Rostedt 		}
14255040b4b7SVaibhav Nagarnaik 	}
14267a8e76a3SSteven Rostedt 
14275040b4b7SVaibhav Nagarnaik 	if (success)
14285040b4b7SVaibhav Nagarnaik 		INIT_LIST_HEAD(pages);
14295040b4b7SVaibhav Nagarnaik 	/*
14305040b4b7SVaibhav Nagarnaik 	 * If we weren't successful in adding the new pages, warn and stop
14315040b4b7SVaibhav Nagarnaik 	 * tracing
14325040b4b7SVaibhav Nagarnaik 	 */
14335040b4b7SVaibhav Nagarnaik 	RB_WARN_ON(cpu_buffer, !success);
14345389f6faSThomas Gleixner 	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
14355040b4b7SVaibhav Nagarnaik 
14365040b4b7SVaibhav Nagarnaik 	/* free pages if they weren't inserted */
14375040b4b7SVaibhav Nagarnaik 	if (!success) {
14385040b4b7SVaibhav Nagarnaik 		struct buffer_page *bpage, *tmp;
14395040b4b7SVaibhav Nagarnaik 		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
14405040b4b7SVaibhav Nagarnaik 					 list) {
14415040b4b7SVaibhav Nagarnaik 			list_del_init(&bpage->list);
14425040b4b7SVaibhav Nagarnaik 			free_buffer_page(bpage);
14435040b4b7SVaibhav Nagarnaik 		}
14445040b4b7SVaibhav Nagarnaik 	}
14455040b4b7SVaibhav Nagarnaik 	return success;
14467a8e76a3SSteven Rostedt }
14477a8e76a3SSteven Rostedt 
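
/*
 * Illustrative sketch (not part of the original file): the splice
 * above is an instance of the usual lock-free publish idiom.
 * Prepare everything privately, then make it visible with a single
 * cmpxchg, retrying if a racing update got there first:
 *
 *	do {
 *		old = READ_ONCE(*slot);
 *		... link the new nodes against old ...
 *	} while (cmpxchg(slot, old, new) != old);
 *
 * rb_insert_pages() bounds the retries at 10 instead of looping
 * forever, and warns and frees the new pages if it never wins.
 */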
144883f40318SVaibhav Nagarnaik static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1449438ced17SVaibhav Nagarnaik {
14505040b4b7SVaibhav Nagarnaik 	int success;
145183f40318SVaibhav Nagarnaik 
14525040b4b7SVaibhav Nagarnaik 	if (cpu_buffer->nr_pages_to_update > 0)
14535040b4b7SVaibhav Nagarnaik 		success = rb_insert_pages(cpu_buffer);
14545040b4b7SVaibhav Nagarnaik 	else
14555040b4b7SVaibhav Nagarnaik 		success = rb_remove_pages(cpu_buffer,
14565040b4b7SVaibhav Nagarnaik 					-cpu_buffer->nr_pages_to_update);
14575040b4b7SVaibhav Nagarnaik 
14585040b4b7SVaibhav Nagarnaik 	if (success)
1459438ced17SVaibhav Nagarnaik 		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
146083f40318SVaibhav Nagarnaik }
146183f40318SVaibhav Nagarnaik 
146283f40318SVaibhav Nagarnaik static void update_pages_handler(struct work_struct *work)
146383f40318SVaibhav Nagarnaik {
146483f40318SVaibhav Nagarnaik 	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
146583f40318SVaibhav Nagarnaik 			struct ring_buffer_per_cpu, update_pages_work);
146683f40318SVaibhav Nagarnaik 	rb_update_pages(cpu_buffer);
146705fdd70dSVaibhav Nagarnaik 	complete(&cpu_buffer->update_done);
1468438ced17SVaibhav Nagarnaik }
1469438ced17SVaibhav Nagarnaik 
14707a8e76a3SSteven Rostedt /**
14717a8e76a3SSteven Rostedt  * ring_buffer_resize - resize the ring buffer
14727a8e76a3SSteven Rostedt  * @buffer: the buffer to resize.
14737a8e76a3SSteven Rostedt  * @size: the new size.
14747a8e76a3SSteven Rostedt  *
14757a8e76a3SSteven Rostedt  * Minimum size is 2 * BUF_PAGE_SIZE.
14767a8e76a3SSteven Rostedt  *
147783f40318SVaibhav Nagarnaik  * Returns 0 on success and < 0 on failure.
14787a8e76a3SSteven Rostedt  */
1479438ced17SVaibhav Nagarnaik int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1480438ced17SVaibhav Nagarnaik 			int cpu_id)
14817a8e76a3SSteven Rostedt {
14827a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
1483438ced17SVaibhav Nagarnaik 	unsigned nr_pages;
148483f40318SVaibhav Nagarnaik 	int cpu, err = 0;
14857a8e76a3SSteven Rostedt 
1486ee51a1deSIngo Molnar 	/*
1487ee51a1deSIngo Molnar 	 * Always succeed at resizing a non-existent buffer:
1488ee51a1deSIngo Molnar 	 */
1489ee51a1deSIngo Molnar 	if (!buffer)
1490ee51a1deSIngo Molnar 		return size;
1491ee51a1deSIngo Molnar 
14926a31e1f1SSteven Rostedt 	/* Make sure the requested buffer exists */
14936a31e1f1SSteven Rostedt 	if (cpu_id != RING_BUFFER_ALL_CPUS &&
14946a31e1f1SSteven Rostedt 	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
14956a31e1f1SSteven Rostedt 		return size;
14966a31e1f1SSteven Rostedt 
14977a8e76a3SSteven Rostedt 	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
14987a8e76a3SSteven Rostedt 	size *= BUF_PAGE_SIZE;
14997a8e76a3SSteven Rostedt 
15007a8e76a3SSteven Rostedt 	/* we need a minimum of two pages */
15017a8e76a3SSteven Rostedt 	if (size < BUF_PAGE_SIZE * 2)
15027a8e76a3SSteven Rostedt 		size = BUF_PAGE_SIZE * 2;
15037a8e76a3SSteven Rostedt 
15047a8e76a3SSteven Rostedt 	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
15057a8e76a3SSteven Rostedt 
150683f40318SVaibhav Nagarnaik 	/*
150783f40318SVaibhav Nagarnaik 	 * Don't succeed if resizing is disabled, as a reader might be
150883f40318SVaibhav Nagarnaik 	 * manipulating the ring buffer and is expecting a sane state while
150983f40318SVaibhav Nagarnaik 	 * this is true.
151083f40318SVaibhav Nagarnaik 	 */
151183f40318SVaibhav Nagarnaik 	if (atomic_read(&buffer->resize_disabled))
151283f40318SVaibhav Nagarnaik 		return -EBUSY;
151383f40318SVaibhav Nagarnaik 
151483f40318SVaibhav Nagarnaik 	/* prevent another thread from changing buffer sizes */
151583f40318SVaibhav Nagarnaik 	mutex_lock(&buffer->mutex);
151683f40318SVaibhav Nagarnaik 
1517438ced17SVaibhav Nagarnaik 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
1518438ced17SVaibhav Nagarnaik 		/* calculate the pages to update */
15197a8e76a3SSteven Rostedt 		for_each_buffer_cpu(buffer, cpu) {
15207a8e76a3SSteven Rostedt 			cpu_buffer = buffer->buffers[cpu];
1521438ced17SVaibhav Nagarnaik 
1522438ced17SVaibhav Nagarnaik 			cpu_buffer->nr_pages_to_update = nr_pages -
1523438ced17SVaibhav Nagarnaik 							cpu_buffer->nr_pages;
1524438ced17SVaibhav Nagarnaik 			/*
1525438ced17SVaibhav Nagarnaik 			 * nothing more to do when removing pages or when there is no update
1526438ced17SVaibhav Nagarnaik 			 */
1527438ced17SVaibhav Nagarnaik 			if (cpu_buffer->nr_pages_to_update <= 0)
1528438ced17SVaibhav Nagarnaik 				continue;
1529438ced17SVaibhav Nagarnaik 			/*
1530438ced17SVaibhav Nagarnaik 			 * to add pages, make sure all new pages can be
1531438ced17SVaibhav Nagarnaik 			 * allocated without receiving ENOMEM
1532438ced17SVaibhav Nagarnaik 			 */
1533438ced17SVaibhav Nagarnaik 			INIT_LIST_HEAD(&cpu_buffer->new_pages);
1534438ced17SVaibhav Nagarnaik 			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
153583f40318SVaibhav Nagarnaik 						&cpu_buffer->new_pages, cpu)) {
1536438ced17SVaibhav Nagarnaik 				/* not enough memory for new pages */
153783f40318SVaibhav Nagarnaik 				err = -ENOMEM;
153883f40318SVaibhav Nagarnaik 				goto out_err;
153983f40318SVaibhav Nagarnaik 			}
154083f40318SVaibhav Nagarnaik 		}
154183f40318SVaibhav Nagarnaik 
154283f40318SVaibhav Nagarnaik 		get_online_cpus();
154383f40318SVaibhav Nagarnaik 		/*
154483f40318SVaibhav Nagarnaik 		 * Fire off all the required work handlers.
154505fdd70dSVaibhav Nagarnaik 		 * We can't schedule work on offline CPUs, but that's not necessary,
154683f40318SVaibhav Nagarnaik 		 * since we can change their buffer sizes without any race.
154783f40318SVaibhav Nagarnaik 		 */
154883f40318SVaibhav Nagarnaik 		for_each_buffer_cpu(buffer, cpu) {
154983f40318SVaibhav Nagarnaik 			cpu_buffer = buffer->buffers[cpu];
155005fdd70dSVaibhav Nagarnaik 			if (!cpu_buffer->nr_pages_to_update)
155183f40318SVaibhav Nagarnaik 				continue;
155283f40318SVaibhav Nagarnaik 
155305fdd70dSVaibhav Nagarnaik 			if (cpu_online(cpu))
155405fdd70dSVaibhav Nagarnaik 				schedule_work_on(cpu,
155505fdd70dSVaibhav Nagarnaik 						&cpu_buffer->update_pages_work);
155605fdd70dSVaibhav Nagarnaik 			else
155783f40318SVaibhav Nagarnaik 				rb_update_pages(cpu_buffer);
15587a8e76a3SSteven Rostedt 		}
1559438ced17SVaibhav Nagarnaik 
1560438ced17SVaibhav Nagarnaik 		/* wait for all the updates to complete */
1561438ced17SVaibhav Nagarnaik 		for_each_buffer_cpu(buffer, cpu) {
1562438ced17SVaibhav Nagarnaik 			cpu_buffer = buffer->buffers[cpu];
156305fdd70dSVaibhav Nagarnaik 			if (!cpu_buffer->nr_pages_to_update)
156483f40318SVaibhav Nagarnaik 				continue;
156583f40318SVaibhav Nagarnaik 
156605fdd70dSVaibhav Nagarnaik 			if (cpu_online(cpu))
156705fdd70dSVaibhav Nagarnaik 				wait_for_completion(&cpu_buffer->update_done);
156883f40318SVaibhav Nagarnaik 			cpu_buffer->nr_pages_to_update = 0;
1569438ced17SVaibhav Nagarnaik 		}
157083f40318SVaibhav Nagarnaik 
157183f40318SVaibhav Nagarnaik 		put_online_cpus();
1572438ced17SVaibhav Nagarnaik 	} else {
15738e49f418SVaibhav Nagarnaik 		/* Make sure this CPU has been initialized */
15748e49f418SVaibhav Nagarnaik 		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
15758e49f418SVaibhav Nagarnaik 			goto out;
15768e49f418SVaibhav Nagarnaik 
1577438ced17SVaibhav Nagarnaik 		cpu_buffer = buffer->buffers[cpu_id];
157883f40318SVaibhav Nagarnaik 
1579438ced17SVaibhav Nagarnaik 		if (nr_pages == cpu_buffer->nr_pages)
15807a8e76a3SSteven Rostedt 			goto out;
1581438ced17SVaibhav Nagarnaik 
1582438ced17SVaibhav Nagarnaik 		cpu_buffer->nr_pages_to_update = nr_pages -
1583438ced17SVaibhav Nagarnaik 						cpu_buffer->nr_pages;
1584438ced17SVaibhav Nagarnaik 
1585438ced17SVaibhav Nagarnaik 		INIT_LIST_HEAD(&cpu_buffer->new_pages);
1586438ced17SVaibhav Nagarnaik 		if (cpu_buffer->nr_pages_to_update > 0 &&
1587438ced17SVaibhav Nagarnaik 			__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
158883f40318SVaibhav Nagarnaik 					    &cpu_buffer->new_pages, cpu_id)) {
158983f40318SVaibhav Nagarnaik 			err = -ENOMEM;
159083f40318SVaibhav Nagarnaik 			goto out_err;
159183f40318SVaibhav Nagarnaik 		}
1592438ced17SVaibhav Nagarnaik 
159383f40318SVaibhav Nagarnaik 		get_online_cpus();
159483f40318SVaibhav Nagarnaik 
159583f40318SVaibhav Nagarnaik 		if (cpu_online(cpu_id)) {
159683f40318SVaibhav Nagarnaik 			schedule_work_on(cpu_id,
159783f40318SVaibhav Nagarnaik 					 &cpu_buffer->update_pages_work);
159805fdd70dSVaibhav Nagarnaik 			wait_for_completion(&cpu_buffer->update_done);
159983f40318SVaibhav Nagarnaik 		} else
160083f40318SVaibhav Nagarnaik 			rb_update_pages(cpu_buffer);
160183f40318SVaibhav Nagarnaik 
160283f40318SVaibhav Nagarnaik 		cpu_buffer->nr_pages_to_update = 0;
160305fdd70dSVaibhav Nagarnaik 		put_online_cpus();
16047a8e76a3SSteven Rostedt 	}
16057a8e76a3SSteven Rostedt 
16067a8e76a3SSteven Rostedt  out:
1607659f451fSSteven Rostedt 	/*
1608659f451fSSteven Rostedt 	 * The ring buffer resize can happen with the ring buffer
1609659f451fSSteven Rostedt 	 * enabled, so that the update disturbs the tracing as little
1610659f451fSSteven Rostedt 	 * as possible. But if the buffer is disabled, we do not need
1611659f451fSSteven Rostedt 	 * to worry about that, and we can take the time to verify
1612659f451fSSteven Rostedt 	 * that the buffer is not corrupt.
1613659f451fSSteven Rostedt 	 */
1614659f451fSSteven Rostedt 	if (atomic_read(&buffer->record_disabled)) {
1615659f451fSSteven Rostedt 		atomic_inc(&buffer->record_disabled);
1616659f451fSSteven Rostedt 		/*
1617659f451fSSteven Rostedt 		 * Even though the buffer was disabled, we must make sure
1618659f451fSSteven Rostedt 		 * that it is truly disabled before calling rb_check_pages.
1619659f451fSSteven Rostedt 		 * There could have been a race between checking
1620659f451fSSteven Rostedt 		 * record_disable and incrementing it.
1621659f451fSSteven Rostedt 		 */
1622659f451fSSteven Rostedt 		synchronize_sched();
1623659f451fSSteven Rostedt 		for_each_buffer_cpu(buffer, cpu) {
1624659f451fSSteven Rostedt 			cpu_buffer = buffer->buffers[cpu];
1625659f451fSSteven Rostedt 			rb_check_pages(cpu_buffer);
1626659f451fSSteven Rostedt 		}
1627659f451fSSteven Rostedt 		atomic_dec(&buffer->record_disabled);
1628659f451fSSteven Rostedt 	}
1629659f451fSSteven Rostedt 
16307a8e76a3SSteven Rostedt 	mutex_unlock(&buffer->mutex);
16317a8e76a3SSteven Rostedt 	return size;
16327a8e76a3SSteven Rostedt 
163383f40318SVaibhav Nagarnaik  out_err:
1634438ced17SVaibhav Nagarnaik 	for_each_buffer_cpu(buffer, cpu) {
1635438ced17SVaibhav Nagarnaik 		struct buffer_page *bpage, *tmp;
163683f40318SVaibhav Nagarnaik 
1637438ced17SVaibhav Nagarnaik 		cpu_buffer = buffer->buffers[cpu];
1638438ced17SVaibhav Nagarnaik 		cpu_buffer->nr_pages_to_update = 0;
163983f40318SVaibhav Nagarnaik 
1640438ced17SVaibhav Nagarnaik 		if (list_empty(&cpu_buffer->new_pages))
1641438ced17SVaibhav Nagarnaik 			continue;
164283f40318SVaibhav Nagarnaik 
1643438ced17SVaibhav Nagarnaik 		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1644438ced17SVaibhav Nagarnaik 					list) {
1645044fa782SSteven Rostedt 			list_del_init(&bpage->list);
1646044fa782SSteven Rostedt 			free_buffer_page(bpage);
16477a8e76a3SSteven Rostedt 		}
1648438ced17SVaibhav Nagarnaik 	}
1649641d2f63SVegard Nossum 	mutex_unlock(&buffer->mutex);
165083f40318SVaibhav Nagarnaik 	return err;
16517a8e76a3SSteven Rostedt }
1652c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_resize);
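
/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *	int ret;
 *
 *	ret = ring_buffer_resize(buffer, 1 << 20, RING_BUFFER_ALL_CPUS);
 *	if (ret < 0)
 *		pr_warn("ring buffer resize failed: %d\n", ret);
 *
 * On success the return value is the page-rounded size actually
 * applied, which may be larger than what was asked for.
 */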
16537a8e76a3SSteven Rostedt 
1654750912faSDavid Sharp void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1655750912faSDavid Sharp {
1656750912faSDavid Sharp 	mutex_lock(&buffer->mutex);
1657750912faSDavid Sharp 	if (val)
1658750912faSDavid Sharp 		buffer->flags |= RB_FL_OVERWRITE;
1659750912faSDavid Sharp 	else
1660750912faSDavid Sharp 		buffer->flags &= ~RB_FL_OVERWRITE;
1661750912faSDavid Sharp 	mutex_unlock(&buffer->mutex);
1662750912faSDavid Sharp }
1663750912faSDavid Sharp EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
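
/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *	ring_buffer_change_overwrite(buffer, 1);	flight-recorder mode
 *	ring_buffer_change_overwrite(buffer, 0);	drop new data when full
 *
 * With overwrite set, the oldest events are overwritten when the
 * buffer wraps; without it, new writes are dropped once the tail
 * catches the head.
 */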
1664750912faSDavid Sharp 
16658789a9e7SSteven Rostedt static inline void *
1666044fa782SSteven Rostedt __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
16678789a9e7SSteven Rostedt {
1668044fa782SSteven Rostedt 	return bpage->data + index;
16698789a9e7SSteven Rostedt }
16708789a9e7SSteven Rostedt 
1671044fa782SSteven Rostedt static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
16727a8e76a3SSteven Rostedt {
1673044fa782SSteven Rostedt 	return bpage->page->data + index;
16747a8e76a3SSteven Rostedt }
16757a8e76a3SSteven Rostedt 
16767a8e76a3SSteven Rostedt static inline struct ring_buffer_event *
1677d769041fSSteven Rostedt rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
16787a8e76a3SSteven Rostedt {
16796f807acdSSteven Rostedt 	return __rb_page_index(cpu_buffer->reader_page,
16806f807acdSSteven Rostedt 			       cpu_buffer->reader_page->read);
16816f807acdSSteven Rostedt }
16826f807acdSSteven Rostedt 
16836f807acdSSteven Rostedt static inline struct ring_buffer_event *
16847a8e76a3SSteven Rostedt rb_iter_head_event(struct ring_buffer_iter *iter)
16857a8e76a3SSteven Rostedt {
16866f807acdSSteven Rostedt 	return __rb_page_index(iter->head_page, iter->head);
16877a8e76a3SSteven Rostedt }
16887a8e76a3SSteven Rostedt 
1689bf41a158SSteven Rostedt static inline unsigned rb_page_commit(struct buffer_page *bpage)
1690bf41a158SSteven Rostedt {
1691abc9b56dSSteven Rostedt 	return local_read(&bpage->page->commit);
1692bf41a158SSteven Rostedt }
1693bf41a158SSteven Rostedt 
169425985edcSLucas De Marchi /* Size is determined by what has been committed */
1695bf41a158SSteven Rostedt static inline unsigned rb_page_size(struct buffer_page *bpage)
1696bf41a158SSteven Rostedt {
1697bf41a158SSteven Rostedt 	return rb_page_commit(bpage);
1698bf41a158SSteven Rostedt }
1699bf41a158SSteven Rostedt 
1700bf41a158SSteven Rostedt static inline unsigned
1701bf41a158SSteven Rostedt rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1702bf41a158SSteven Rostedt {
1703bf41a158SSteven Rostedt 	return rb_page_commit(cpu_buffer->commit_page);
1704bf41a158SSteven Rostedt }
1705bf41a158SSteven Rostedt 
1706bf41a158SSteven Rostedt static inline unsigned
1707bf41a158SSteven Rostedt rb_event_index(struct ring_buffer_event *event)
17087a8e76a3SSteven Rostedt {
1709bf41a158SSteven Rostedt 	unsigned long addr = (unsigned long)event;
1710bf41a158SSteven Rostedt 
171122f470f8SSteven Rostedt 	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
17127a8e76a3SSteven Rostedt }
17137a8e76a3SSteven Rostedt 
17140f0c85fcSSteven Rostedt static inline int
1715fa743953SSteven Rostedt rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1716bf41a158SSteven Rostedt 		   struct ring_buffer_event *event)
17177a8e76a3SSteven Rostedt {
1718bf41a158SSteven Rostedt 	unsigned long addr = (unsigned long)event;
1719bf41a158SSteven Rostedt 	unsigned long index;
1720bf41a158SSteven Rostedt 
1721bf41a158SSteven Rostedt 	index = rb_event_index(event);
1722bf41a158SSteven Rostedt 	addr &= PAGE_MASK;
1723bf41a158SSteven Rostedt 
1724bf41a158SSteven Rostedt 	return cpu_buffer->commit_page->page == (void *)addr &&
1725bf41a158SSteven Rostedt 		rb_commit_index(cpu_buffer) == index;
1726bf41a158SSteven Rostedt }
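
/*
 * Worked example (illustrative, not part of the original file):
 * an event pointer encodes both its page and its offset within it.
 * For an event at kernel address addr:
 *
 *	addr & PAGE_MASK                          the buffer_data_page
 *	(addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE   the index into ->data
 *
 * rb_event_is_commit() uses exactly this: the event is the commit
 * if it sits on the commit page and its index equals the page's
 * commit offset.
 */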
1727bf41a158SSteven Rostedt 
172834a148bfSAndrew Morton static void
1729bf41a158SSteven Rostedt rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1730bf41a158SSteven Rostedt {
173177ae365eSSteven Rostedt 	unsigned long max_count;
173277ae365eSSteven Rostedt 
1733bf41a158SSteven Rostedt 	/*
1734bf41a158SSteven Rostedt 	 * We only race with interrupts and NMIs on this CPU.
1735bf41a158SSteven Rostedt 	 * If we own the commit event, then we can commit
1736bf41a158SSteven Rostedt 	 * all others that interrupted us, since the interruptions
1737bf41a158SSteven Rostedt 	 * are in stack format (they finish before they come
1738bf41a158SSteven Rostedt 	 * back to us). This allows us to do a simple loop to
1739bf41a158SSteven Rostedt 	 * assign the commit to the tail.
1740bf41a158SSteven Rostedt 	 */
1741a8ccf1d6SSteven Rostedt  again:
1742438ced17SVaibhav Nagarnaik 	max_count = cpu_buffer->nr_pages * 100;
174377ae365eSSteven Rostedt 
1744bf41a158SSteven Rostedt 	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
174577ae365eSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
174677ae365eSSteven Rostedt 			return;
174777ae365eSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer,
174877ae365eSSteven Rostedt 			       rb_is_reader_page(cpu_buffer->tail_page)))
174977ae365eSSteven Rostedt 			return;
175077ae365eSSteven Rostedt 		local_set(&cpu_buffer->commit_page->page->commit,
175177ae365eSSteven Rostedt 			  rb_page_write(cpu_buffer->commit_page));
1752bf41a158SSteven Rostedt 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1753abc9b56dSSteven Rostedt 		cpu_buffer->write_stamp =
1754abc9b56dSSteven Rostedt 			cpu_buffer->commit_page->page->time_stamp;
1755bf41a158SSteven Rostedt 		/* add barrier to keep gcc from optimizing too much */
1756bf41a158SSteven Rostedt 		barrier();
1757bf41a158SSteven Rostedt 	}
1758bf41a158SSteven Rostedt 	while (rb_commit_index(cpu_buffer) !=
1759bf41a158SSteven Rostedt 	       rb_page_write(cpu_buffer->commit_page)) {
176077ae365eSSteven Rostedt 
176177ae365eSSteven Rostedt 		local_set(&cpu_buffer->commit_page->page->commit,
176277ae365eSSteven Rostedt 			  rb_page_write(cpu_buffer->commit_page));
176377ae365eSSteven Rostedt 		RB_WARN_ON(cpu_buffer,
176477ae365eSSteven Rostedt 			   local_read(&cpu_buffer->commit_page->page->commit) &
176577ae365eSSteven Rostedt 			   ~RB_WRITE_MASK);
1766bf41a158SSteven Rostedt 		barrier();
1767bf41a158SSteven Rostedt 	}
1768a8ccf1d6SSteven Rostedt 
1769a8ccf1d6SSteven Rostedt 	/* again, keep gcc from optimizing */
1770a8ccf1d6SSteven Rostedt 	barrier();
1771a8ccf1d6SSteven Rostedt 
1772a8ccf1d6SSteven Rostedt 	/*
1773a8ccf1d6SSteven Rostedt 	 * If an interrupt came in just after the first while loop
1774a8ccf1d6SSteven Rostedt 	 * and pushed the tail page forward, we will be left with
1775a8ccf1d6SSteven Rostedt 	 * a dangling commit that will never go forward.
1776a8ccf1d6SSteven Rostedt 	 */
1777a8ccf1d6SSteven Rostedt 	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1778a8ccf1d6SSteven Rostedt 		goto again;
17797a8e76a3SSteven Rostedt }
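
/*
 * Illustrative sketch (not part of the original file): commits on a
 * CPU nest like a stack, which is what the loops above rely on:
 *
 *	reserve A                outermost, owns the commit
 *	    irq: reserve B
 *	    irq: commit B        cannot advance past A
 *	commit A                 advances commit_page over A and B
 *
 * An interrupting writer always finishes before control returns to
 * the interrupted one, so the outermost commit can safely walk
 * commit_page forward to tail_page.
 */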
17807a8e76a3SSteven Rostedt 
1781d769041fSSteven Rostedt static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
17827a8e76a3SSteven Rostedt {
1783abc9b56dSSteven Rostedt 	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
17846f807acdSSteven Rostedt 	cpu_buffer->reader_page->read = 0;
1785d769041fSSteven Rostedt }
1786d769041fSSteven Rostedt 
178734a148bfSAndrew Morton static void rb_inc_iter(struct ring_buffer_iter *iter)
1788d769041fSSteven Rostedt {
1789d769041fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1790d769041fSSteven Rostedt 
1791d769041fSSteven Rostedt 	/*
1792d769041fSSteven Rostedt 	 * The iterator could be on the reader page (it starts there).
1793d769041fSSteven Rostedt 	 * But the head could have moved, since the reader was
1794d769041fSSteven Rostedt 	 * found. Check for this case and assign the iterator
1795d769041fSSteven Rostedt 	 * to the head page instead of next.
1796d769041fSSteven Rostedt 	 */
1797d769041fSSteven Rostedt 	if (iter->head_page == cpu_buffer->reader_page)
179877ae365eSSteven Rostedt 		iter->head_page = rb_set_head_page(cpu_buffer);
1799d769041fSSteven Rostedt 	else
1800d769041fSSteven Rostedt 		rb_inc_page(cpu_buffer, &iter->head_page);
1801d769041fSSteven Rostedt 
1802abc9b56dSSteven Rostedt 	iter->read_stamp = iter->head_page->page->time_stamp;
18037a8e76a3SSteven Rostedt 	iter->head = 0;
18047a8e76a3SSteven Rostedt }
18057a8e76a3SSteven Rostedt 
180669d1b839SSteven Rostedt /* Slow path, do not inline */
180769d1b839SSteven Rostedt static noinline struct ring_buffer_event *
180869d1b839SSteven Rostedt rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
180969d1b839SSteven Rostedt {
181069d1b839SSteven Rostedt 	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
181169d1b839SSteven Rostedt 
181269d1b839SSteven Rostedt 	/* Not the first event on the page? */
181369d1b839SSteven Rostedt 	if (rb_event_index(event)) {
181469d1b839SSteven Rostedt 		event->time_delta = delta & TS_MASK;
181569d1b839SSteven Rostedt 		event->array[0] = delta >> TS_SHIFT;
181669d1b839SSteven Rostedt 	} else {
181769d1b839SSteven Rostedt 		/* nope, just zero it */
181869d1b839SSteven Rostedt 		event->time_delta = 0;
181969d1b839SSteven Rostedt 		event->array[0] = 0;
182069d1b839SSteven Rostedt 	}
182169d1b839SSteven Rostedt 
182269d1b839SSteven Rostedt 	return skip_time_extend(event);
182369d1b839SSteven Rostedt }
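
/*
 * Worked example (illustrative, not part of the original file):
 * with TS_SHIFT == 27, a delta of 0x12345678 that cannot fit in the
 * 27-bit time_delta field of a normal event is stored as:
 *
 *	event->time_delta = 0x12345678 & TS_MASK;	low 27 bits
 *	event->array[0]   = 0x12345678 >> TS_SHIFT;	upper bits
 *
 * and the reader reassembles it as
 * ((u64)array[0] << TS_SHIFT) | time_delta.
 */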
182469d1b839SSteven Rostedt 
18257a8e76a3SSteven Rostedt /**
182601e3e710SDavid Sharp  * rb_update_event - update event type and data
18277a8e76a3SSteven Rostedt  * @cpu_buffer: the per cpu buffer the event belongs to
18287a8e76a3SSteven Rostedt  * @event: the event to update
18297a8e76a3SSteven Rostedt  * @length: the size of the event field in the ring buffer
18307a8e76a3SSteven Rostedt  *
18317a8e76a3SSteven Rostedt  * Update the type and data fields of the event. The length
18327a8e76a3SSteven Rostedt  * is the actual size that is written to the ring buffer,
18337a8e76a3SSteven Rostedt  * and with this, we can determine what to place into the
18347a8e76a3SSteven Rostedt  * data field.
18357a8e76a3SSteven Rostedt  */
183634a148bfSAndrew Morton static void
183769d1b839SSteven Rostedt rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
183869d1b839SSteven Rostedt 		struct ring_buffer_event *event, unsigned length,
183969d1b839SSteven Rostedt 		int add_timestamp, u64 delta)
18407a8e76a3SSteven Rostedt {
184169d1b839SSteven Rostedt 	/* Only a commit updates the timestamp */
184269d1b839SSteven Rostedt 	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
184369d1b839SSteven Rostedt 		delta = 0;
18447a8e76a3SSteven Rostedt 
184569d1b839SSteven Rostedt 	/*
184669d1b839SSteven Rostedt 	 * If we need to add a timestamp, then we
184769d1b839SSteven Rostedt 	 * add it to the start of the resevered space.
184869d1b839SSteven Rostedt 	 * add it to the start of the reserved space.
184969d1b839SSteven Rostedt 	if (unlikely(add_timestamp)) {
185069d1b839SSteven Rostedt 		event = rb_add_time_stamp(event, delta);
185169d1b839SSteven Rostedt 		length -= RB_LEN_TIME_EXTEND;
185269d1b839SSteven Rostedt 		delta = 0;
18537a8e76a3SSteven Rostedt 	}
185469d1b839SSteven Rostedt 
185569d1b839SSteven Rostedt 	event->time_delta = delta;
185669d1b839SSteven Rostedt 	length -= RB_EVNT_HDR_SIZE;
185769d1b839SSteven Rostedt 	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
185869d1b839SSteven Rostedt 		event->type_len = 0;
185969d1b839SSteven Rostedt 		event->array[0] = length;
186069d1b839SSteven Rostedt 	} else
186169d1b839SSteven Rostedt 		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
18627a8e76a3SSteven Rostedt }
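
/*
 * Worked example (illustrative, not part of the original file):
 * small payloads are folded into the 5-bit type_len field in
 * RB_ALIGNMENT (4 byte) units. A 12-byte payload encodes as
 * type_len = DIV_ROUND_UP(12, 4) = 3, and the reader recovers the
 * length as type_len * RB_ALIGNMENT. Payloads too large for that
 * encoding set type_len = 0 and carry the exact byte length in
 * array[0] instead.
 */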
18637a8e76a3SSteven Rostedt 
186477ae365eSSteven Rostedt /*
186577ae365eSSteven Rostedt  * rb_handle_head_page - writer hit the head page
186677ae365eSSteven Rostedt  *
186777ae365eSSteven Rostedt  * Returns: +1 to retry page
186877ae365eSSteven Rostedt  *           0 to continue
186977ae365eSSteven Rostedt  *          -1 on error
187077ae365eSSteven Rostedt  */
187177ae365eSSteven Rostedt static int
187277ae365eSSteven Rostedt rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
187377ae365eSSteven Rostedt 		    struct buffer_page *tail_page,
187477ae365eSSteven Rostedt 		    struct buffer_page *next_page)
187577ae365eSSteven Rostedt {
187677ae365eSSteven Rostedt 	struct buffer_page *new_head;
187777ae365eSSteven Rostedt 	int entries;
187877ae365eSSteven Rostedt 	int type;
187977ae365eSSteven Rostedt 	int ret;
188077ae365eSSteven Rostedt 
188177ae365eSSteven Rostedt 	entries = rb_page_entries(next_page);
188277ae365eSSteven Rostedt 
188377ae365eSSteven Rostedt 	/*
188477ae365eSSteven Rostedt 	 * The hard part is here. We need to move the head
188577ae365eSSteven Rostedt 	 * forward, and protect against both readers on
188677ae365eSSteven Rostedt 	 * other CPUs and writers coming in via interrupts.
188777ae365eSSteven Rostedt 	 */
188877ae365eSSteven Rostedt 	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
188977ae365eSSteven Rostedt 				       RB_PAGE_HEAD);
189077ae365eSSteven Rostedt 
189177ae365eSSteven Rostedt 	/*
189277ae365eSSteven Rostedt 	 * type can be one of four:
189377ae365eSSteven Rostedt 	 *  NORMAL - an interrupt already moved it for us
189477ae365eSSteven Rostedt 	 *  HEAD   - we are the first to get here.
189577ae365eSSteven Rostedt 	 *  UPDATE - we are the interrupt interrupting
189677ae365eSSteven Rostedt 	 *           a current move.
189777ae365eSSteven Rostedt 	 *  MOVED  - a reader on another CPU moved the next
189877ae365eSSteven Rostedt 	 *           pointer to its reader page. Give up
189977ae365eSSteven Rostedt 	 *           and try again.
190077ae365eSSteven Rostedt 	 */
190177ae365eSSteven Rostedt 
190277ae365eSSteven Rostedt 	switch (type) {
190377ae365eSSteven Rostedt 	case RB_PAGE_HEAD:
190477ae365eSSteven Rostedt 		/*
190577ae365eSSteven Rostedt 		 * We changed the head to UPDATE, thus
190677ae365eSSteven Rostedt 		 * it is our responsibility to update
190777ae365eSSteven Rostedt 		 * the counters.
190877ae365eSSteven Rostedt 		 */
190977ae365eSSteven Rostedt 		local_add(entries, &cpu_buffer->overrun);
1910c64e148aSVaibhav Nagarnaik 		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
191177ae365eSSteven Rostedt 
191277ae365eSSteven Rostedt 		/*
191377ae365eSSteven Rostedt 		 * The entries will be zeroed out when we move the
191477ae365eSSteven Rostedt 		 * tail page.
191577ae365eSSteven Rostedt 		 */
191677ae365eSSteven Rostedt 
191777ae365eSSteven Rostedt 		/* still more to do */
191877ae365eSSteven Rostedt 		break;
191977ae365eSSteven Rostedt 
192077ae365eSSteven Rostedt 	case RB_PAGE_UPDATE:
192177ae365eSSteven Rostedt 		/*
192277ae365eSSteven Rostedt 		 * This is an interrupt that interrupted the
192377ae365eSSteven Rostedt 		 * previous update. Still more to do.
192477ae365eSSteven Rostedt 		 */
192577ae365eSSteven Rostedt 		break;
192677ae365eSSteven Rostedt 	case RB_PAGE_NORMAL:
192777ae365eSSteven Rostedt 		/*
192877ae365eSSteven Rostedt 		 * An interrupt came in before the update
192977ae365eSSteven Rostedt 		 * and processed this for us.
193077ae365eSSteven Rostedt 		 * Nothing left to do.
193177ae365eSSteven Rostedt 		 */
193277ae365eSSteven Rostedt 		return 1;
193377ae365eSSteven Rostedt 	case RB_PAGE_MOVED:
193477ae365eSSteven Rostedt 		/*
193577ae365eSSteven Rostedt 		 * The reader is on another CPU and just did
193677ae365eSSteven Rostedt 		 * a swap with our next_page.
193777ae365eSSteven Rostedt 		 * Try again.
193877ae365eSSteven Rostedt 		 */
193977ae365eSSteven Rostedt 		return 1;
194077ae365eSSteven Rostedt 	default:
194177ae365eSSteven Rostedt 		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
194277ae365eSSteven Rostedt 		return -1;
194377ae365eSSteven Rostedt 	}
194477ae365eSSteven Rostedt 
194577ae365eSSteven Rostedt 	/*
194677ae365eSSteven Rostedt 	 * Now that we are here, the old head pointer is
194777ae365eSSteven Rostedt 	 * set to UPDATE. This will keep the reader from
194877ae365eSSteven Rostedt 	 * swapping the head page with the reader page.
194977ae365eSSteven Rostedt 	 * The reader (on another CPU) will spin till
195077ae365eSSteven Rostedt 	 * we are finished.
195177ae365eSSteven Rostedt 	 *
195277ae365eSSteven Rostedt 	 * We just need to protect against interrupts
195377ae365eSSteven Rostedt 	 * doing the job. We will set the next pointer
195477ae365eSSteven Rostedt 	 * to HEAD. After that, we set the old pointer
195577ae365eSSteven Rostedt 	 * to NORMAL, but only if it was HEAD before.
195677ae365eSSteven Rostedt 	 * Otherwise we are an interrupt, and only
195777ae365eSSteven Rostedt 	 * want the outermost commit to reset it.
195877ae365eSSteven Rostedt 	 */
195977ae365eSSteven Rostedt 	new_head = next_page;
196077ae365eSSteven Rostedt 	rb_inc_page(cpu_buffer, &new_head);
196177ae365eSSteven Rostedt 
196277ae365eSSteven Rostedt 	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
196377ae365eSSteven Rostedt 				    RB_PAGE_NORMAL);
196477ae365eSSteven Rostedt 
196577ae365eSSteven Rostedt 	/*
196677ae365eSSteven Rostedt 	 * Valid returns are:
196777ae365eSSteven Rostedt 	 *  HEAD   - an interrupt came in and already set it.
196877ae365eSSteven Rostedt 	 *  NORMAL - One of two things:
196977ae365eSSteven Rostedt 	 *            1) We really set it.
197077ae365eSSteven Rostedt 	 *            2) A bunch of interrupts came in and moved
197177ae365eSSteven Rostedt 	 *               the page forward again.
197277ae365eSSteven Rostedt 	 */
197377ae365eSSteven Rostedt 	switch (ret) {
197477ae365eSSteven Rostedt 	case RB_PAGE_HEAD:
197577ae365eSSteven Rostedt 	case RB_PAGE_NORMAL:
197677ae365eSSteven Rostedt 		/* OK */
197777ae365eSSteven Rostedt 		break;
197877ae365eSSteven Rostedt 	default:
197977ae365eSSteven Rostedt 		RB_WARN_ON(cpu_buffer, 1);
198077ae365eSSteven Rostedt 		return -1;
198177ae365eSSteven Rostedt 	}
198277ae365eSSteven Rostedt 
198377ae365eSSteven Rostedt 	/*
198477ae365eSSteven Rostedt 	 * It is possible that an interrupt came in,
198577ae365eSSteven Rostedt 	 * set the head up, then more interrupts came in
198677ae365eSSteven Rostedt 	 * and moved it again. When we get back here,
198777ae365eSSteven Rostedt 	 * the page would have been set to NORMAL but we
198877ae365eSSteven Rostedt 	 * just set it back to HEAD.
198977ae365eSSteven Rostedt 	 *
199077ae365eSSteven Rostedt 	 * How do you detect this? Well, if that happened
199177ae365eSSteven Rostedt 	 * the tail page would have moved.
199277ae365eSSteven Rostedt 	 */
199377ae365eSSteven Rostedt 	if (ret == RB_PAGE_NORMAL) {
199477ae365eSSteven Rostedt 		/*
199577ae365eSSteven Rostedt 		 * If the tail had moved past next, then we need
199677ae365eSSteven Rostedt 		 * to reset the pointer.
199777ae365eSSteven Rostedt 		 */
199877ae365eSSteven Rostedt 		if (cpu_buffer->tail_page != tail_page &&
199977ae365eSSteven Rostedt 		    cpu_buffer->tail_page != next_page)
200077ae365eSSteven Rostedt 			rb_head_page_set_normal(cpu_buffer, new_head,
200177ae365eSSteven Rostedt 						next_page,
200277ae365eSSteven Rostedt 						RB_PAGE_HEAD);
200377ae365eSSteven Rostedt 	}
200477ae365eSSteven Rostedt 
200577ae365eSSteven Rostedt 	/*
200677ae365eSSteven Rostedt 	 * If this was the outer most commit (the one that
200777ae365eSSteven Rostedt 	 * changed the original pointer from HEAD to UPDATE),
200877ae365eSSteven Rostedt 	 * then it is up to us to reset it to NORMAL.
200977ae365eSSteven Rostedt 	 */
201077ae365eSSteven Rostedt 	if (type == RB_PAGE_HEAD) {
201177ae365eSSteven Rostedt 		ret = rb_head_page_set_normal(cpu_buffer, next_page,
201277ae365eSSteven Rostedt 					      tail_page,
201377ae365eSSteven Rostedt 					      RB_PAGE_UPDATE);
201477ae365eSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer,
201577ae365eSSteven Rostedt 			       ret != RB_PAGE_UPDATE))
201677ae365eSSteven Rostedt 			return -1;
201777ae365eSSteven Rostedt 	}
201877ae365eSSteven Rostedt 
201977ae365eSSteven Rostedt 	return 0;
202077ae365eSSteven Rostedt }
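
/*
 * Summary sketch (illustrative, not part of the original file) of
 * the flag transitions driven above:
 *
 *	HEAD --(writer claims)--> UPDATE --(head advanced)--> the next
 *	page becomes HEAD, and the old pointer is restored to NORMAL
 *	by the outermost commit.
 *
 * RB_PAGE_MOVED means a reader swapped the page out from under us,
 * and RB_PAGE_NORMAL means an interrupt already did the work; both
 * cause the caller to retry the page without touching the counters.
 */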
202177ae365eSSteven Rostedt 
202234a148bfSAndrew Morton static unsigned rb_calculate_event_length(unsigned length)
20237a8e76a3SSteven Rostedt {
20247a8e76a3SSteven Rostedt 	struct ring_buffer_event event; /* Used only for sizeof array */
20257a8e76a3SSteven Rostedt 
20267a8e76a3SSteven Rostedt 	/* zero length can cause confusions */
20277a8e76a3SSteven Rostedt 	if (!length)
20287a8e76a3SSteven Rostedt 		length = 1;
20297a8e76a3SSteven Rostedt 
20302271048dSSteven Rostedt 	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
20317a8e76a3SSteven Rostedt 		length += sizeof(event.array[0]);
20327a8e76a3SSteven Rostedt 
20337a8e76a3SSteven Rostedt 	length += RB_EVNT_HDR_SIZE;
20342271048dSSteven Rostedt 	length = ALIGN(length, RB_ARCH_ALIGNMENT);
20357a8e76a3SSteven Rostedt 
20367a8e76a3SSteven Rostedt 	return length;
20377a8e76a3SSteven Rostedt }
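
/*
 * Worked example (illustrative, not part of the original file),
 * assuming no forced 8-byte alignment: a request for 10 bytes of
 * payload fits the small-data encoding, so
 *
 *	length = 10 + RB_EVNT_HDR_SIZE (4) = 14
 *	length = ALIGN(14, RB_ARCH_ALIGNMENT (4)) = 16
 *
 * bytes of buffer space are reserved. A zero-length request is
 * bumped to 1 first, since zero lengths can cause confusion.
 */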
20387a8e76a3SSteven Rostedt 
2039c7b09308SSteven Rostedt static inline void
2040c7b09308SSteven Rostedt rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2041c7b09308SSteven Rostedt 	      struct buffer_page *tail_page,
2042c7b09308SSteven Rostedt 	      unsigned long tail, unsigned long length)
2043c7b09308SSteven Rostedt {
2044c7b09308SSteven Rostedt 	struct ring_buffer_event *event;
2045c7b09308SSteven Rostedt 
2046c7b09308SSteven Rostedt 	/*
2047c7b09308SSteven Rostedt 	 * Only the event that crossed the page boundary
2048c7b09308SSteven Rostedt 	 * must fill the old tail_page with padding.
2049c7b09308SSteven Rostedt 	 */
2050c7b09308SSteven Rostedt 	if (tail >= BUF_PAGE_SIZE) {
2051b3230c8bSSteven Rostedt 		/*
2052b3230c8bSSteven Rostedt 		 * If the page was filled, then we still need
2053b3230c8bSSteven Rostedt 		 * to update the real_end. Reset it to zero
2054b3230c8bSSteven Rostedt 		 * and the reader will ignore it.
2055b3230c8bSSteven Rostedt 		 */
2056b3230c8bSSteven Rostedt 		if (tail == BUF_PAGE_SIZE)
2057b3230c8bSSteven Rostedt 			tail_page->real_end = 0;
2058b3230c8bSSteven Rostedt 
2059c7b09308SSteven Rostedt 		local_sub(length, &tail_page->write);
2060c7b09308SSteven Rostedt 		return;
2061c7b09308SSteven Rostedt 	}
2062c7b09308SSteven Rostedt 
2063c7b09308SSteven Rostedt 	event = __rb_page_index(tail_page, tail);
2064b0b7065bSLinus Torvalds 	kmemcheck_annotate_bitfield(event, bitfield);
2065c7b09308SSteven Rostedt 
2066c64e148aSVaibhav Nagarnaik 	/* account for padding bytes */
2067c64e148aSVaibhav Nagarnaik 	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2068c64e148aSVaibhav Nagarnaik 
2069c7b09308SSteven Rostedt 	/*
2070ff0ff84aSSteven Rostedt 	 * Save the original length to the meta data.
2071ff0ff84aSSteven Rostedt 	 * This will be used by the reader to update the lost
2072ff0ff84aSSteven Rostedt 	 * event counter.
2073ff0ff84aSSteven Rostedt 	 */
2074ff0ff84aSSteven Rostedt 	tail_page->real_end = tail;
2075ff0ff84aSSteven Rostedt 
2076ff0ff84aSSteven Rostedt 	/*
2077c7b09308SSteven Rostedt 	 * If this event is bigger than the minimum size, then
2078c7b09308SSteven Rostedt 	 * we need to be careful that we don't subtract the
2079c7b09308SSteven Rostedt 	 * write counter enough to allow another writer to slip
2080c7b09308SSteven Rostedt 	 * in on this page.
2081c7b09308SSteven Rostedt 	 * We put in a discarded commit instead, to make sure
2082c7b09308SSteven Rostedt 	 * that this space is not used again.
2083c7b09308SSteven Rostedt 	 *
2084c7b09308SSteven Rostedt 	 * If we are less than the minimum size, we don't need to
2085c7b09308SSteven Rostedt 	 * worry about it.
2086c7b09308SSteven Rostedt 	 */
2087c7b09308SSteven Rostedt 	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2088c7b09308SSteven Rostedt 		/* No room for any events */
2089c7b09308SSteven Rostedt 
2090c7b09308SSteven Rostedt 		/* Mark the rest of the page with padding */
2091c7b09308SSteven Rostedt 		rb_event_set_padding(event);
2092c7b09308SSteven Rostedt 
2093c7b09308SSteven Rostedt 		/* Set the write back to the previous setting */
2094c7b09308SSteven Rostedt 		local_sub(length, &tail_page->write);
2095c7b09308SSteven Rostedt 		return;
2096c7b09308SSteven Rostedt 	}
2097c7b09308SSteven Rostedt 
2098c7b09308SSteven Rostedt 	/* Put in a discarded event */
2099c7b09308SSteven Rostedt 	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2100c7b09308SSteven Rostedt 	event->type_len = RINGBUF_TYPE_PADDING;
2101c7b09308SSteven Rostedt 	/* time delta must be non zero */
2102c7b09308SSteven Rostedt 	event->time_delta = 1;
2103c7b09308SSteven Rostedt 
2104c7b09308SSteven Rostedt 	/* Set write to end of buffer */
2105c7b09308SSteven Rostedt 	length = (tail + length) - BUF_PAGE_SIZE;
2106c7b09308SSteven Rostedt 	local_sub(length, &tail_page->write);
2107c7b09308SSteven Rostedt }
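
/*
 * Worked example, assuming a BUF_PAGE_SIZE of 4080 bytes (a 4K page
 * minus the buffer_data_page header; the numbers are illustrative):
 * a writer at tail == 4000 reserving 96 bytes pushes write to 4096,
 * past the page.  The bytes [4000, 4080) become a padding event with
 * array[0] == (4080 - 4000) - 4 == 76, and write is pulled back by
 * (4000 + 96) - 4080 == 16 so that it reads exactly 4080, the end
 * of the page.
 */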
21086634ff26SSteven Rostedt 
2109747e94aeSSteven Rostedt /*
2110747e94aeSSteven Rostedt  * This is the slow path, force gcc not to inline it.
2111747e94aeSSteven Rostedt  */
2112747e94aeSSteven Rostedt static noinline struct ring_buffer_event *
21136634ff26SSteven Rostedt rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
21146634ff26SSteven Rostedt 	     unsigned long length, unsigned long tail,
2115e8bc43e8SSteven Rostedt 	     struct buffer_page *tail_page, u64 ts)
21167a8e76a3SSteven Rostedt {
21175a50e33cSSteven Rostedt 	struct buffer_page *commit_page = cpu_buffer->commit_page;
21187a8e76a3SSteven Rostedt 	struct ring_buffer *buffer = cpu_buffer->buffer;
211977ae365eSSteven Rostedt 	struct buffer_page *next_page;
212077ae365eSSteven Rostedt 	int ret;
2121aa20ae84SSteven Rostedt 
2122aa20ae84SSteven Rostedt 	next_page = tail_page;
21237a8e76a3SSteven Rostedt 
21247a8e76a3SSteven Rostedt 	rb_inc_page(cpu_buffer, &next_page);
21257a8e76a3SSteven Rostedt 
2126bf41a158SSteven Rostedt 	/*
2127bf41a158SSteven Rostedt 	 * If for some reason, we had an interrupt storm that made
2128bf41a158SSteven Rostedt 	 * it all the way around the buffer, bail, and warn
2129bf41a158SSteven Rostedt 	 * about it.
2130bf41a158SSteven Rostedt 	 */
213198db8df7SSteven Rostedt 	if (unlikely(next_page == commit_page)) {
213277ae365eSSteven Rostedt 		local_inc(&cpu_buffer->commit_overrun);
213345141d46SSteven Rostedt 		goto out_reset;
2134bf41a158SSteven Rostedt 	}
2135d769041fSSteven Rostedt 
2136bf41a158SSteven Rostedt 	/*
213777ae365eSSteven Rostedt 	 * This is where the fun begins!
213877ae365eSSteven Rostedt 	 *
213977ae365eSSteven Rostedt 	 * We are fighting against races between a reader that
214077ae365eSSteven Rostedt 	 * could be on another CPU trying to swap its reader
214177ae365eSSteven Rostedt 	 * page with the buffer head.
214277ae365eSSteven Rostedt 	 *
214377ae365eSSteven Rostedt 	 * We are also fighting against interrupts coming in and
214477ae365eSSteven Rostedt 	 * moving the head or tail on us as well.
214577ae365eSSteven Rostedt 	 *
214677ae365eSSteven Rostedt 	 * If the next page is the head page then we have filled
214777ae365eSSteven Rostedt 	 * the buffer, unless the commit page is still on the
214877ae365eSSteven Rostedt 	 * reader page.
2149bf41a158SSteven Rostedt 	 */
215077ae365eSSteven Rostedt 	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2151bf41a158SSteven Rostedt 
215277ae365eSSteven Rostedt 		/*
215377ae365eSSteven Rostedt 		 * If the commit is not on the reader page, then
215477ae365eSSteven Rostedt 		 * move the header page.
215577ae365eSSteven Rostedt 		 */
215677ae365eSSteven Rostedt 		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
215777ae365eSSteven Rostedt 			/*
215877ae365eSSteven Rostedt 			 * If we are not in overwrite mode,
215977ae365eSSteven Rostedt 			 * this is easy, just stop here.
216077ae365eSSteven Rostedt 			 */
2161884bfe89SSlava Pestov 			if (!(buffer->flags & RB_FL_OVERWRITE)) {
2162884bfe89SSlava Pestov 				local_inc(&cpu_buffer->dropped_events);
216377ae365eSSteven Rostedt 				goto out_reset;
2164884bfe89SSlava Pestov 			}
216577ae365eSSteven Rostedt 
216677ae365eSSteven Rostedt 			ret = rb_handle_head_page(cpu_buffer,
216777ae365eSSteven Rostedt 						  tail_page,
216877ae365eSSteven Rostedt 						  next_page);
216977ae365eSSteven Rostedt 			if (ret < 0)
217077ae365eSSteven Rostedt 				goto out_reset;
217177ae365eSSteven Rostedt 			if (ret)
217277ae365eSSteven Rostedt 				goto out_again;
217377ae365eSSteven Rostedt 		} else {
217477ae365eSSteven Rostedt 			/*
217577ae365eSSteven Rostedt 			 * We need to be careful here too. The
217677ae365eSSteven Rostedt 			 * commit page could still be on the reader
217777ae365eSSteven Rostedt 			 * page. We could have a small buffer, and
217877ae365eSSteven Rostedt 			 * have filled up the buffer with events
217977ae365eSSteven Rostedt 			 * from interrupts and such, and wrapped.
218077ae365eSSteven Rostedt 			 *
218277ae365eSSteven Rostedt 			 * Note, if the tail page is also on the
218277ae365eSSteven Rostedt 			 * reader_page, we let it move out.
218377ae365eSSteven Rostedt 			 */
218477ae365eSSteven Rostedt 			if (unlikely((cpu_buffer->commit_page !=
218577ae365eSSteven Rostedt 				      cpu_buffer->tail_page) &&
218677ae365eSSteven Rostedt 				     (cpu_buffer->commit_page ==
218777ae365eSSteven Rostedt 				      cpu_buffer->reader_page))) {
218877ae365eSSteven Rostedt 				local_inc(&cpu_buffer->commit_overrun);
218977ae365eSSteven Rostedt 				goto out_reset;
219077ae365eSSteven Rostedt 			}
219177ae365eSSteven Rostedt 		}
2192bf41a158SSteven Rostedt 	}
2193bf41a158SSteven Rostedt 
219477ae365eSSteven Rostedt 	ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
219577ae365eSSteven Rostedt 	if (ret) {
219677ae365eSSteven Rostedt 		/*
219777ae365eSSteven Rostedt 		 * Nested commits always have zero deltas, so
219877ae365eSSteven Rostedt 		 * just reread the time stamp
219977ae365eSSteven Rostedt 		 */
2200e8bc43e8SSteven Rostedt 		ts = rb_time_stamp(buffer);
2201e8bc43e8SSteven Rostedt 		next_page->page->time_stamp = ts;
220277ae365eSSteven Rostedt 	}
22037a8e76a3SSteven Rostedt 
220477ae365eSSteven Rostedt  out_again:
220577ae365eSSteven Rostedt 
220677ae365eSSteven Rostedt 	rb_reset_tail(cpu_buffer, tail_page, tail, length);
2207bf41a158SSteven Rostedt 
2208bf41a158SSteven Rostedt 	/* fail and let the caller try again */
2209bf41a158SSteven Rostedt 	return ERR_PTR(-EAGAIN);
2210bf41a158SSteven Rostedt 
221145141d46SSteven Rostedt  out_reset:
22126f3b3440SLai Jiangshan 	/* reset write */
2213c7b09308SSteven Rostedt 	rb_reset_tail(cpu_buffer, tail_page, tail, length);
22146f3b3440SLai Jiangshan 
2215bf41a158SSteven Rostedt 	return NULL;
22167a8e76a3SSteven Rostedt }
22177a8e76a3SSteven Rostedt 
22186634ff26SSteven Rostedt static struct ring_buffer_event *
22196634ff26SSteven Rostedt __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
222069d1b839SSteven Rostedt 		  unsigned long length, u64 ts,
222169d1b839SSteven Rostedt 		  u64 delta, int add_timestamp)
22226634ff26SSteven Rostedt {
22235a50e33cSSteven Rostedt 	struct buffer_page *tail_page;
22246634ff26SSteven Rostedt 	struct ring_buffer_event *event;
22256634ff26SSteven Rostedt 	unsigned long tail, write;
22266634ff26SSteven Rostedt 
222769d1b839SSteven Rostedt 	/*
222869d1b839SSteven Rostedt 	 * If the time delta since the last event is too big to
222969d1b839SSteven Rostedt 	 * hold in the time field of the event, then we append a
223069d1b839SSteven Rostedt 	 * TIME EXTEND event ahead of the data event.
223169d1b839SSteven Rostedt 	 */
223269d1b839SSteven Rostedt 	if (unlikely(add_timestamp))
223369d1b839SSteven Rostedt 		length += RB_LEN_TIME_EXTEND;
223469d1b839SSteven Rostedt 
22356634ff26SSteven Rostedt 	tail_page = cpu_buffer->tail_page;
22366634ff26SSteven Rostedt 	write = local_add_return(length, &tail_page->write);
223777ae365eSSteven Rostedt 
223877ae365eSSteven Rostedt 	/* set write to only the index of the write */
223977ae365eSSteven Rostedt 	write &= RB_WRITE_MASK;
22406634ff26SSteven Rostedt 	tail = write - length;
22416634ff26SSteven Rostedt 
22426634ff26SSteven Rostedt 	/* See if we shot pass the end of this buffer page */
2243747e94aeSSteven Rostedt 	if (unlikely(write > BUF_PAGE_SIZE))
22446634ff26SSteven Rostedt 		return rb_move_tail(cpu_buffer, length, tail,
22455a50e33cSSteven Rostedt 				    tail_page, ts);
22466634ff26SSteven Rostedt 
22476634ff26SSteven Rostedt 	/* We reserved something on the buffer */
22486634ff26SSteven Rostedt 
22496634ff26SSteven Rostedt 	event = __rb_page_index(tail_page, tail);
22501744a21dSVegard Nossum 	kmemcheck_annotate_bitfield(event, bitfield);
225169d1b839SSteven Rostedt 	rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
22526634ff26SSteven Rostedt 
22536634ff26SSteven Rostedt 	local_inc(&tail_page->entries);
22546634ff26SSteven Rostedt 
22556634ff26SSteven Rostedt 	/*
2256fa743953SSteven Rostedt 	 * If this is the first commit on the page, then update
2257fa743953SSteven Rostedt 	 * its timestamp.
22586634ff26SSteven Rostedt 	 */
2259fa743953SSteven Rostedt 	if (!tail)
2260e8bc43e8SSteven Rostedt 		tail_page->page->time_stamp = ts;
22616634ff26SSteven Rostedt 
2262c64e148aSVaibhav Nagarnaik 	/* account for these added bytes */
2263c64e148aSVaibhav Nagarnaik 	local_add(length, &cpu_buffer->entries_bytes);
2264c64e148aSVaibhav Nagarnaik 
22656634ff26SSteven Rostedt 	return event;
22666634ff26SSteven Rostedt }
22676634ff26SSteven Rostedt 
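/*
 * The interrupt safety of the fast path above rests on the single
 * local_add_return(): every context that runs on this CPU gets a
 * disjoint [tail, tail + length) slot.  Illustrative interleaving:
 *
 *	task ctx: write = local_add_return(32, ...)  owns [ 0, 32)
 *	irq ctx:  write = local_add_return(16, ...)  owns [32, 48)
 *
 * The interrupt may fire before the task has copied its data; both
 * slots remain valid and are filled independently, without locks.
 */
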
2268edd813bfSSteven Rostedt static inline int
2269edd813bfSSteven Rostedt rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2270edd813bfSSteven Rostedt 		  struct ring_buffer_event *event)
2271edd813bfSSteven Rostedt {
2272edd813bfSSteven Rostedt 	unsigned long new_index, old_index;
2273edd813bfSSteven Rostedt 	struct buffer_page *bpage;
2274edd813bfSSteven Rostedt 	unsigned long index;
2275edd813bfSSteven Rostedt 	unsigned long addr;
2276edd813bfSSteven Rostedt 
2277edd813bfSSteven Rostedt 	new_index = rb_event_index(event);
227869d1b839SSteven Rostedt 	old_index = new_index + rb_event_ts_length(event);
2279edd813bfSSteven Rostedt 	addr = (unsigned long)event;
2280edd813bfSSteven Rostedt 	addr &= PAGE_MASK;
2281edd813bfSSteven Rostedt 
2282edd813bfSSteven Rostedt 	bpage = cpu_buffer->tail_page;
2283edd813bfSSteven Rostedt 
2284edd813bfSSteven Rostedt 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
228577ae365eSSteven Rostedt 		unsigned long write_mask =
228677ae365eSSteven Rostedt 			local_read(&bpage->write) & ~RB_WRITE_MASK;
2287c64e148aSVaibhav Nagarnaik 		unsigned long event_length = rb_event_length(event);
2288edd813bfSSteven Rostedt 		/*
2289edd813bfSSteven Rostedt 		 * This is on the tail page. It is possible that
2290edd813bfSSteven Rostedt 		 * a write could come in and move the tail page
2291edd813bfSSteven Rostedt 		 * and write to the next page. That is fine
2292edd813bfSSteven Rostedt 		 * because we just shorten what is on this page.
2293edd813bfSSteven Rostedt 		 */
229477ae365eSSteven Rostedt 		old_index += write_mask;
229577ae365eSSteven Rostedt 		new_index += write_mask;
2296edd813bfSSteven Rostedt 		index = local_cmpxchg(&bpage->write, old_index, new_index);
2297c64e148aSVaibhav Nagarnaik 		if (index == old_index) {
2298c64e148aSVaibhav Nagarnaik 			/* update counters */
2299c64e148aSVaibhav Nagarnaik 			local_sub(event_length, &cpu_buffer->entries_bytes);
2300edd813bfSSteven Rostedt 			return 1;
2301edd813bfSSteven Rostedt 		}
2302c64e148aSVaibhav Nagarnaik 	}
2303edd813bfSSteven Rostedt 
2304edd813bfSSteven Rostedt 	/* could not discard */
2305edd813bfSSteven Rostedt 	return 0;
2306edd813bfSSteven Rostedt }
2307edd813bfSSteven Rostedt 
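/*
 * The discard above is a single compare-and-swap on the page's write
 * index: roll it back only if nothing was appended after our event.
 * A minimal user-space sketch of the same pattern with C11 atomics
 * (illustrative only, not kernel code):
 *
 *	#include <stdatomic.h>
 *
 *	static int try_discard(atomic_ulong *write,
 *			       unsigned long event_start,
 *			       unsigned long event_end)
 *	{
 *		unsigned long expected = event_end;
 *
 *		// Succeeds only if *write still points just past our
 *		// event; otherwise another writer got in behind us.
 *		return atomic_compare_exchange_strong(write, &expected,
 *						      event_start);
 *	}
 */
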
2308fa743953SSteven Rostedt static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2309fa743953SSteven Rostedt {
2310fa743953SSteven Rostedt 	local_inc(&cpu_buffer->committing);
2311fa743953SSteven Rostedt 	local_inc(&cpu_buffer->commits);
2312fa743953SSteven Rostedt }
2313fa743953SSteven Rostedt 
2314d9abde21SSteven Rostedt static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2315fa743953SSteven Rostedt {
2316fa743953SSteven Rostedt 	unsigned long commits;
2317fa743953SSteven Rostedt 
2318fa743953SSteven Rostedt 	if (RB_WARN_ON(cpu_buffer,
2319fa743953SSteven Rostedt 		       !local_read(&cpu_buffer->committing)))
2320fa743953SSteven Rostedt 		return;
2321fa743953SSteven Rostedt 
2322fa743953SSteven Rostedt  again:
2323fa743953SSteven Rostedt 	commits = local_read(&cpu_buffer->commits);
2324fa743953SSteven Rostedt 	/* synchronize with interrupts */
2325fa743953SSteven Rostedt 	barrier();
2326fa743953SSteven Rostedt 	if (local_read(&cpu_buffer->committing) == 1)
2327fa743953SSteven Rostedt 		rb_set_commit_to_write(cpu_buffer);
2328fa743953SSteven Rostedt 
2329fa743953SSteven Rostedt 	local_dec(&cpu_buffer->committing);
2330fa743953SSteven Rostedt 
2331fa743953SSteven Rostedt 	/* synchronize with interrupts */
2332fa743953SSteven Rostedt 	barrier();
2333fa743953SSteven Rostedt 
2334fa743953SSteven Rostedt 	/*
2335fa743953SSteven Rostedt 	 * Need to account for interrupts coming in between the
2336fa743953SSteven Rostedt 	 * updating of the commit page and the clearing of the
2337fa743953SSteven Rostedt 	 * committing counter.
2338fa743953SSteven Rostedt 	 */
2339fa743953SSteven Rostedt 	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2340fa743953SSteven Rostedt 	    !local_read(&cpu_buffer->committing)) {
2341fa743953SSteven Rostedt 		local_inc(&cpu_buffer->committing);
2342fa743953SSteven Rostedt 		goto again;
2343fa743953SSteven Rostedt 	}
2344fa743953SSteven Rostedt }
2345fa743953SSteven Rostedt 
23467a8e76a3SSteven Rostedt static struct ring_buffer_event *
234762f0b3ebSSteven Rostedt rb_reserve_next_event(struct ring_buffer *buffer,
234862f0b3ebSSteven Rostedt 		      struct ring_buffer_per_cpu *cpu_buffer,
23491cd8d735SSteven Rostedt 		      unsigned long length)
23507a8e76a3SSteven Rostedt {
23517a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
235269d1b839SSteven Rostedt 	u64 ts, delta;
2353818e3dd3SSteven Rostedt 	int nr_loops = 0;
235469d1b839SSteven Rostedt 	int add_timestamp;
2355140ff891SSteven Rostedt 	u64 diff;
23567a8e76a3SSteven Rostedt 
2357fa743953SSteven Rostedt 	rb_start_commit(cpu_buffer);
2358fa743953SSteven Rostedt 
235985bac32cSSteven Rostedt #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
236062f0b3ebSSteven Rostedt 	/*
236162f0b3ebSSteven Rostedt 	 * Due to the ability to swap a cpu buffer out of a buffer,
236262f0b3ebSSteven Rostedt 	 * it is possible it was swapped before we committed.
236362f0b3ebSSteven Rostedt 	 * (committing stops a swap). We check for it here and
236462f0b3ebSSteven Rostedt 	 * if it happened, we have to fail the write.
236562f0b3ebSSteven Rostedt 	 */
236662f0b3ebSSteven Rostedt 	barrier();
236762f0b3ebSSteven Rostedt 	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
236862f0b3ebSSteven Rostedt 		local_dec(&cpu_buffer->committing);
236962f0b3ebSSteven Rostedt 		local_dec(&cpu_buffer->commits);
237062f0b3ebSSteven Rostedt 		return NULL;
237162f0b3ebSSteven Rostedt 	}
237285bac32cSSteven Rostedt #endif
237362f0b3ebSSteven Rostedt 
2374be957c44SSteven Rostedt 	length = rb_calculate_event_length(length);
2375bf41a158SSteven Rostedt  again:
237669d1b839SSteven Rostedt 	add_timestamp = 0;
237769d1b839SSteven Rostedt 	delta = 0;
237869d1b839SSteven Rostedt 
2379818e3dd3SSteven Rostedt 	/*
2380818e3dd3SSteven Rostedt 	 * We allow for interrupts to reenter here and do a trace.
2381818e3dd3SSteven Rostedt 	 * If one does, it will cause this original code to loop
2382818e3dd3SSteven Rostedt 	 * back here. Even with heavy interrupts happening, this
2383818e3dd3SSteven Rostedt 	 * should only happen a few times in a row. If this happens
2384818e3dd3SSteven Rostedt 	 * 1000 times in a row, there must be either an interrupt
2385818e3dd3SSteven Rostedt 	 * storm or something buggy.
2386818e3dd3SSteven Rostedt 	 * Bail!
2387818e3dd3SSteven Rostedt 	 */
23883e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2389fa743953SSteven Rostedt 		goto out_fail;
2390818e3dd3SSteven Rostedt 
23916d3f1e12SJiri Olsa 	ts = rb_time_stamp(cpu_buffer->buffer);
2392168b6b1dSSteven Rostedt 	diff = ts - cpu_buffer->write_stamp;
23937a8e76a3SSteven Rostedt 
2394168b6b1dSSteven Rostedt 	/* make sure this diff is calculated here */
2395bf41a158SSteven Rostedt 	barrier();
23967a8e76a3SSteven Rostedt 
2397bf41a158SSteven Rostedt 	/* Did the write stamp get updated already? */
2398140ff891SSteven Rostedt 	if (likely(ts >= cpu_buffer->write_stamp)) {
2399168b6b1dSSteven Rostedt 		delta = diff;
2400168b6b1dSSteven Rostedt 		if (unlikely(test_time_stamp(delta))) {
240131274d72SJiri Olsa 			int local_clock_stable = 1;
240231274d72SJiri Olsa #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
240331274d72SJiri Olsa 			local_clock_stable = sched_clock_stable;
240431274d72SJiri Olsa #endif
240569d1b839SSteven Rostedt 			WARN_ONCE(delta > (1ULL << 59),
240631274d72SJiri Olsa 				  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
240769d1b839SSteven Rostedt 				  (unsigned long long)delta,
240869d1b839SSteven Rostedt 				  (unsigned long long)ts,
240931274d72SJiri Olsa 				  (unsigned long long)cpu_buffer->write_stamp,
241031274d72SJiri Olsa 				  local_clock_stable ? "" :
241131274d72SJiri Olsa 				  "If you just came from a suspend/resume,\n"
241231274d72SJiri Olsa 				  "please switch to the trace global clock:\n"
241331274d72SJiri Olsa 				  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
241469d1b839SSteven Rostedt 			add_timestamp = 1;
24157a8e76a3SSteven Rostedt 		}
2416168b6b1dSSteven Rostedt 	}
24177a8e76a3SSteven Rostedt 
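	/*
	 * Worked example, assuming the 27-bit time_delta field noted
	 * in the entry header: a nanosecond clock overflows the delta
	 * at 2^27 ns, roughly 134 ms.  If the previous event on this
	 * CPU was 200 ms ago, test_time_stamp() trips, add_timestamp
	 * is set, and a TIME_EXTEND event (carrying up to 59 bits of
	 * delta) is emitted in front of the data event below.
	 */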
241869d1b839SSteven Rostedt 	event = __rb_reserve_next(cpu_buffer, length, ts,
241969d1b839SSteven Rostedt 				  delta, add_timestamp);
2420168b6b1dSSteven Rostedt 	if (unlikely(PTR_ERR(event) == -EAGAIN))
2421bf41a158SSteven Rostedt 		goto again;
24227a8e76a3SSteven Rostedt 
2423fa743953SSteven Rostedt 	if (!event)
2424fa743953SSteven Rostedt 		goto out_fail;
2425bf41a158SSteven Rostedt 
24267a8e76a3SSteven Rostedt 	return event;
2427fa743953SSteven Rostedt 
2428fa743953SSteven Rostedt  out_fail:
2429fa743953SSteven Rostedt 	rb_end_commit(cpu_buffer);
2430fa743953SSteven Rostedt 	return NULL;
24317a8e76a3SSteven Rostedt }
24327a8e76a3SSteven Rostedt 
24331155de47SPaul Mundt #ifdef CONFIG_TRACING
24341155de47SPaul Mundt 
2435*567cd4daSSteven Rostedt /*
2436*567cd4daSSteven Rostedt  * The lock and unlock are done within a preempt disable section.
2437*567cd4daSSteven Rostedt  * The current_context per_cpu variable can only be modified
2438*567cd4daSSteven Rostedt  * by the current task between lock and unlock. But it can
2439*567cd4daSSteven Rostedt  * be modified more than once via an interrupt. To pass this
2440*567cd4daSSteven Rostedt  * information from the lock to the unlock without having to
2441*567cd4daSSteven Rostedt  * access the 'in_interrupt()' functions again (which do show
2442*567cd4daSSteven Rostedt  * a bit of overhead in something as critical as function tracing),
2443*567cd4daSSteven Rostedt  * we use a bitmask trick.
2444*567cd4daSSteven Rostedt  *
2445*567cd4daSSteven Rostedt  *  bit 0 =  NMI context
2446*567cd4daSSteven Rostedt  *  bit 1 =  IRQ context
2447*567cd4daSSteven Rostedt  *  bit 2 =  SoftIRQ context
2448*567cd4daSSteven Rostedt  *  bit 3 =  normal context.
2449*567cd4daSSteven Rostedt  *
2450*567cd4daSSteven Rostedt  * This works because this is the order of contexts that can
2451*567cd4daSSteven Rostedt  * preempt other contexts. A SoftIRQ never preempts an IRQ
2452*567cd4daSSteven Rostedt  * context.
2453*567cd4daSSteven Rostedt  *
2454*567cd4daSSteven Rostedt  * When the context is determined, the corresponding bit is
2455*567cd4daSSteven Rostedt  * checked and set (if it was set, then a recursion of that context
2456*567cd4daSSteven Rostedt  * happened).
2457*567cd4daSSteven Rostedt  *
2458*567cd4daSSteven Rostedt  * On unlock, we need to clear this bit. To do so, just subtract
2459*567cd4daSSteven Rostedt  * 1 from the current_context and AND it to itself.
2460*567cd4daSSteven Rostedt  *
2461*567cd4daSSteven Rostedt  * (binary)
2462*567cd4daSSteven Rostedt  *  101 - 1 = 100
2463*567cd4daSSteven Rostedt  *  101 & 100 = 100 (clearing bit zero)
2464*567cd4daSSteven Rostedt  *
2465*567cd4daSSteven Rostedt  *  1010 - 1 = 1001
2466*567cd4daSSteven Rostedt  *  1010 & 1001 = 1000 (clearing bit 1)
2467*567cd4daSSteven Rostedt  *
2468*567cd4daSSteven Rostedt  * The least significant bit can be cleared this way, and it
2469*567cd4daSSteven Rostedt  * just so happens that it is the same bit corresponding to
2470*567cd4daSSteven Rostedt  * the current context.
2471*567cd4daSSteven Rostedt  */
2472*567cd4daSSteven Rostedt static DEFINE_PER_CPU(unsigned int, current_context);
2473261842b7SSteven Rostedt 
2474*567cd4daSSteven Rostedt static __always_inline int trace_recursive_lock(void)
2475261842b7SSteven Rostedt {
2476*567cd4daSSteven Rostedt 	unsigned int val = this_cpu_read(current_context);
2477*567cd4daSSteven Rostedt 	int bit;
2478e057a5e5SFrederic Weisbecker 
2479*567cd4daSSteven Rostedt 	if (in_interrupt()) {
2480*567cd4daSSteven Rostedt 		if (in_nmi())
2481*567cd4daSSteven Rostedt 			bit = 0;
2482*567cd4daSSteven Rostedt 		else if (in_irq())
2483*567cd4daSSteven Rostedt 			bit = 1;
2484*567cd4daSSteven Rostedt 		else
2485*567cd4daSSteven Rostedt 			bit = 2;
2486*567cd4daSSteven Rostedt 	} else
2487*567cd4daSSteven Rostedt 		bit = 3;
2488e057a5e5SFrederic Weisbecker 
2489*567cd4daSSteven Rostedt 	if (unlikely(val & (1 << bit)))
2490*567cd4daSSteven Rostedt 		return 1;
2491d9abde21SSteven Rostedt 
2492*567cd4daSSteven Rostedt 	val |= (1 << bit);
2493*567cd4daSSteven Rostedt 	this_cpu_write(current_context, val);
2494d9abde21SSteven Rostedt 
2495d9abde21SSteven Rostedt 	return 0;
2496261842b7SSteven Rostedt }
2497261842b7SSteven Rostedt 
2498*567cd4daSSteven Rostedt static __always_inline void trace_recursive_unlock(void)
2499261842b7SSteven Rostedt {
2500*567cd4daSSteven Rostedt 	unsigned int val = this_cpu_read(current_context);
2501261842b7SSteven Rostedt 
2502*567cd4daSSteven Rostedt 	val--;
2503*567cd4daSSteven Rostedt 	val &= this_cpu_read(current_context);
2504*567cd4daSSteven Rostedt 	this_cpu_write(current_context, val);
2505261842b7SSteven Rostedt }
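
/*
 * A minimal user-space sketch of the same bit trick (illustrative
 * only; in the kernel the variable is per-CPU and the bit is chosen
 * from the NMI/IRQ/SoftIRQ/normal context as above):
 *
 *	static unsigned int current_context;
 *
 *	static int recursive_lock(int bit)
 *	{
 *		if (current_context & (1 << bit))
 *			return 1;		// recursion in this context
 *		current_context |= 1 << bit;
 *		return 0;
 *	}
 *
 *	static void recursive_unlock(void)
 *	{
 *		// Clears the lowest set bit, which is always the
 *		// most recently entered context.
 *		current_context &= current_context - 1;
 *	}
 */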
2506261842b7SSteven Rostedt 
25071155de47SPaul Mundt #else
25081155de47SPaul Mundt 
25091155de47SPaul Mundt #define trace_recursive_lock()		(0)
25101155de47SPaul Mundt #define trace_recursive_unlock()	do { } while (0)
25111155de47SPaul Mundt 
25121155de47SPaul Mundt #endif
25131155de47SPaul Mundt 
25147a8e76a3SSteven Rostedt /**
25157a8e76a3SSteven Rostedt  * ring_buffer_lock_reserve - reserve a part of the buffer
25167a8e76a3SSteven Rostedt  * @buffer: the ring buffer to reserve from
25177a8e76a3SSteven Rostedt  * @length: the length of the data to reserve (excluding event header)
25187a8e76a3SSteven Rostedt  *
25197a8e76a3SSteven Rostedt  * Returns a reserved event on the ring buffer to copy directly to.
25207a8e76a3SSteven Rostedt  * The user of this interface will need to get the body to write into
25217a8e76a3SSteven Rostedt  * and can use the ring_buffer_event_data() interface.
25227a8e76a3SSteven Rostedt  *
25237a8e76a3SSteven Rostedt  * The length is the length of the data needed, not the event length
25247a8e76a3SSteven Rostedt  * which also includes the event header.
25257a8e76a3SSteven Rostedt  *
25267a8e76a3SSteven Rostedt  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
25277a8e76a3SSteven Rostedt  * If NULL is returned, then nothing has been allocated or locked.
25287a8e76a3SSteven Rostedt  */
25297a8e76a3SSteven Rostedt struct ring_buffer_event *
25300a987751SArnaldo Carvalho de Melo ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
25317a8e76a3SSteven Rostedt {
25327a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
25337a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
25345168ae50SSteven Rostedt 	int cpu;
25357a8e76a3SSteven Rostedt 
2536033601a3SSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
2537a3583244SSteven Rostedt 		return NULL;
2538a3583244SSteven Rostedt 
2539bf41a158SSteven Rostedt 	/* If we are tracing schedule, we don't want to recurse */
25405168ae50SSteven Rostedt 	preempt_disable_notrace();
2541bf41a158SSteven Rostedt 
254252fbe9cdSLai Jiangshan 	if (atomic_read(&buffer->record_disabled))
254352fbe9cdSLai Jiangshan 		goto out_nocheck;
254452fbe9cdSLai Jiangshan 
2545261842b7SSteven Rostedt 	if (trace_recursive_lock())
2546261842b7SSteven Rostedt 		goto out_nocheck;
2547261842b7SSteven Rostedt 
25487a8e76a3SSteven Rostedt 	cpu = raw_smp_processor_id();
25497a8e76a3SSteven Rostedt 
25509e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2551d769041fSSteven Rostedt 		goto out;
25527a8e76a3SSteven Rostedt 
25537a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
25547a8e76a3SSteven Rostedt 
25557a8e76a3SSteven Rostedt 	if (atomic_read(&cpu_buffer->record_disabled))
2556d769041fSSteven Rostedt 		goto out;
25577a8e76a3SSteven Rostedt 
2558be957c44SSteven Rostedt 	if (length > BUF_MAX_DATA_SIZE)
2559bf41a158SSteven Rostedt 		goto out;
25607a8e76a3SSteven Rostedt 
256162f0b3ebSSteven Rostedt 	event = rb_reserve_next_event(buffer, cpu_buffer, length);
25627a8e76a3SSteven Rostedt 	if (!event)
2563d769041fSSteven Rostedt 		goto out;
25647a8e76a3SSteven Rostedt 
25657a8e76a3SSteven Rostedt 	return event;
25667a8e76a3SSteven Rostedt 
2567d769041fSSteven Rostedt  out:
2568261842b7SSteven Rostedt 	trace_recursive_unlock();
2569261842b7SSteven Rostedt 
2570261842b7SSteven Rostedt  out_nocheck:
25715168ae50SSteven Rostedt 	preempt_enable_notrace();
25727a8e76a3SSteven Rostedt 	return NULL;
25737a8e76a3SSteven Rostedt }
2574c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
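
/*
 * Typical usage, as a minimal sketch (error handling elided; the
 * buffer is assumed to come from ring_buffer_alloc()):
 *
 *	struct ring_buffer_event *event;
 *	int *body;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
 *	if (event) {
 *		body = ring_buffer_event_data(event);
 *		*body = 42;
 *		ring_buffer_unlock_commit(buffer, event);
 *	}
 */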
25757a8e76a3SSteven Rostedt 
2576a1863c21SSteven Rostedt static void
2577a1863c21SSteven Rostedt rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
25787a8e76a3SSteven Rostedt 		      struct ring_buffer_event *event)
25797a8e76a3SSteven Rostedt {
258069d1b839SSteven Rostedt 	u64 delta;
258169d1b839SSteven Rostedt 
2582fa743953SSteven Rostedt 	/*
2583fa743953SSteven Rostedt 	 * The first event in the commit queue updates the
2584fa743953SSteven Rostedt 	 * time stamp.
2585fa743953SSteven Rostedt 	 */
258669d1b839SSteven Rostedt 	if (rb_event_is_commit(cpu_buffer, event)) {
258769d1b839SSteven Rostedt 		/*
258869d1b839SSteven Rostedt 		 * A commit event that is first on a page
258969d1b839SSteven Rostedt 		 * updates the write timestamp with the page stamp
259069d1b839SSteven Rostedt 		 */
259169d1b839SSteven Rostedt 		if (!rb_event_index(event))
259269d1b839SSteven Rostedt 			cpu_buffer->write_stamp =
259369d1b839SSteven Rostedt 				cpu_buffer->commit_page->page->time_stamp;
259469d1b839SSteven Rostedt 		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
259569d1b839SSteven Rostedt 			delta = event->array[0];
259669d1b839SSteven Rostedt 			delta <<= TS_SHIFT;
259769d1b839SSteven Rostedt 			delta += event->time_delta;
259869d1b839SSteven Rostedt 			cpu_buffer->write_stamp += delta;
259969d1b839SSteven Rostedt 		} else
2600bf41a158SSteven Rostedt 			cpu_buffer->write_stamp += event->time_delta;
2601a1863c21SSteven Rostedt 	}
260269d1b839SSteven Rostedt }
2603bf41a158SSteven Rostedt 
2604a1863c21SSteven Rostedt static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2605a1863c21SSteven Rostedt 		      struct ring_buffer_event *event)
2606a1863c21SSteven Rostedt {
2607a1863c21SSteven Rostedt 	local_inc(&cpu_buffer->entries);
2608a1863c21SSteven Rostedt 	rb_update_write_stamp(cpu_buffer, event);
2609fa743953SSteven Rostedt 	rb_end_commit(cpu_buffer);
26107a8e76a3SSteven Rostedt }
26117a8e76a3SSteven Rostedt 
26127a8e76a3SSteven Rostedt /**
26137a8e76a3SSteven Rostedt  * ring_buffer_unlock_commit - commit a reserved event
26147a8e76a3SSteven Rostedt  * @buffer: The buffer to commit to
26157a8e76a3SSteven Rostedt  * @event: The event pointer to commit.
26167a8e76a3SSteven Rostedt  *
26177a8e76a3SSteven Rostedt  * This commits the data to the ring buffer, and releases any locks held.
26187a8e76a3SSteven Rostedt  *
26197a8e76a3SSteven Rostedt  * Must be paired with ring_buffer_lock_reserve.
26207a8e76a3SSteven Rostedt  */
26217a8e76a3SSteven Rostedt int ring_buffer_unlock_commit(struct ring_buffer *buffer,
26220a987751SArnaldo Carvalho de Melo 			      struct ring_buffer_event *event)
26237a8e76a3SSteven Rostedt {
26247a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
26257a8e76a3SSteven Rostedt 	int cpu = raw_smp_processor_id();
26267a8e76a3SSteven Rostedt 
26277a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
26287a8e76a3SSteven Rostedt 
26297a8e76a3SSteven Rostedt 	rb_commit(cpu_buffer, event);
26307a8e76a3SSteven Rostedt 
2631261842b7SSteven Rostedt 	trace_recursive_unlock();
2632261842b7SSteven Rostedt 
26335168ae50SSteven Rostedt 	preempt_enable_notrace();
26347a8e76a3SSteven Rostedt 
26357a8e76a3SSteven Rostedt 	return 0;
26367a8e76a3SSteven Rostedt }
2637c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
26387a8e76a3SSteven Rostedt 
2639f3b9aae1SFrederic Weisbecker static inline void rb_event_discard(struct ring_buffer_event *event)
2640f3b9aae1SFrederic Weisbecker {
264169d1b839SSteven Rostedt 	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
264269d1b839SSteven Rostedt 		event = skip_time_extend(event);
264369d1b839SSteven Rostedt 
2644334d4169SLai Jiangshan 	/* array[0] holds the actual length for the discarded event */
2645334d4169SLai Jiangshan 	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2646334d4169SLai Jiangshan 	event->type_len = RINGBUF_TYPE_PADDING;
2647f3b9aae1SFrederic Weisbecker 	/* time delta must be non zero */
2648f3b9aae1SFrederic Weisbecker 	if (!event->time_delta)
2649f3b9aae1SFrederic Weisbecker 		event->time_delta = 1;
2650f3b9aae1SFrederic Weisbecker }
2651f3b9aae1SFrederic Weisbecker 
2652a1863c21SSteven Rostedt /*
2653a1863c21SSteven Rostedt  * Decrement the entries to the page that an event is on.
2654a1863c21SSteven Rostedt  * The event does not even need to exist, only the pointer
2655a1863c21SSteven Rostedt  * to the page it is on. This may only be called before the commit
2656a1863c21SSteven Rostedt  * takes place.
2657a1863c21SSteven Rostedt  */
2658a1863c21SSteven Rostedt static inline void
2659a1863c21SSteven Rostedt rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2660a1863c21SSteven Rostedt 		   struct ring_buffer_event *event)
2661a1863c21SSteven Rostedt {
2662a1863c21SSteven Rostedt 	unsigned long addr = (unsigned long)event;
2663a1863c21SSteven Rostedt 	struct buffer_page *bpage = cpu_buffer->commit_page;
2664a1863c21SSteven Rostedt 	struct buffer_page *start;
2665a1863c21SSteven Rostedt 
2666a1863c21SSteven Rostedt 	addr &= PAGE_MASK;
2667a1863c21SSteven Rostedt 
2668a1863c21SSteven Rostedt 	/* Do the likely case first */
2669a1863c21SSteven Rostedt 	if (likely(bpage->page == (void *)addr)) {
2670a1863c21SSteven Rostedt 		local_dec(&bpage->entries);
2671a1863c21SSteven Rostedt 		return;
2672a1863c21SSteven Rostedt 	}
2673a1863c21SSteven Rostedt 
2674a1863c21SSteven Rostedt 	/*
2675a1863c21SSteven Rostedt 	 * Because the commit page may be on the reader page we
2676a1863c21SSteven Rostedt 	 * start with the next page and check the end loop there.
2677a1863c21SSteven Rostedt 	 */
2678a1863c21SSteven Rostedt 	rb_inc_page(cpu_buffer, &bpage);
2679a1863c21SSteven Rostedt 	start = bpage;
2680a1863c21SSteven Rostedt 	do {
2681a1863c21SSteven Rostedt 		if (bpage->page == (void *)addr) {
2682a1863c21SSteven Rostedt 			local_dec(&bpage->entries);
2683a1863c21SSteven Rostedt 			return;
2684a1863c21SSteven Rostedt 		}
2685a1863c21SSteven Rostedt 		rb_inc_page(cpu_buffer, &bpage);
2686a1863c21SSteven Rostedt 	} while (bpage != start);
2687a1863c21SSteven Rostedt 
2688a1863c21SSteven Rostedt 	/* commit not part of this buffer?? */
2689a1863c21SSteven Rostedt 	RB_WARN_ON(cpu_buffer, 1);
2690a1863c21SSteven Rostedt }
2691a1863c21SSteven Rostedt 
26927a8e76a3SSteven Rostedt /**
2693fa1b47ddSSteven Rostedt  * ring_buffer_commit_discard - discard an event that has not been committed
2694fa1b47ddSSteven Rostedt  * @buffer: the ring buffer
2695fa1b47ddSSteven Rostedt  * @event: non-committed event to discard
2696fa1b47ddSSteven Rostedt  *
2697dc892f73SSteven Rostedt  * Sometimes an event that is in the ring buffer needs to be ignored.
2698dc892f73SSteven Rostedt  * This function lets the user discard an event in the ring buffer
2699dc892f73SSteven Rostedt  * and then that event will not be read later.
2700dc892f73SSteven Rostedt  *
2701dc892f73SSteven Rostedt  * This function only works if it is called before the item has been
2702dc892f73SSteven Rostedt  * committed. It will try to free the event from the ring buffer
2703fa1b47ddSSteven Rostedt  * if another event has not been added behind it.
2704fa1b47ddSSteven Rostedt  *
2705fa1b47ddSSteven Rostedt  * If another event has been added behind it, it will set the event
2706fa1b47ddSSteven Rostedt  * up as discarded, and perform the commit.
2707fa1b47ddSSteven Rostedt  *
2708fa1b47ddSSteven Rostedt  * If this function is called, do not call ring_buffer_unlock_commit on
2709fa1b47ddSSteven Rostedt  * the event.
2710fa1b47ddSSteven Rostedt  */
2711fa1b47ddSSteven Rostedt void ring_buffer_discard_commit(struct ring_buffer *buffer,
2712fa1b47ddSSteven Rostedt 				struct ring_buffer_event *event)
2713fa1b47ddSSteven Rostedt {
2714fa1b47ddSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
2715fa1b47ddSSteven Rostedt 	int cpu;
2716fa1b47ddSSteven Rostedt 
2717fa1b47ddSSteven Rostedt 	/* The event is discarded regardless */
2718f3b9aae1SFrederic Weisbecker 	rb_event_discard(event);
2719fa1b47ddSSteven Rostedt 
2720fa743953SSteven Rostedt 	cpu = smp_processor_id();
2721fa743953SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
2722fa743953SSteven Rostedt 
2723fa1b47ddSSteven Rostedt 	/*
2724fa1b47ddSSteven Rostedt 	 * This must only be called if the event has not been
2725fa1b47ddSSteven Rostedt 	 * committed yet. Thus we can assume that preemption
2726fa1b47ddSSteven Rostedt 	 * is still disabled.
2727fa1b47ddSSteven Rostedt 	 */
2728fa743953SSteven Rostedt 	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2729fa1b47ddSSteven Rostedt 
2730a1863c21SSteven Rostedt 	rb_decrement_entry(cpu_buffer, event);
27310f2541d2SSteven Rostedt 	if (rb_try_to_discard(cpu_buffer, event))
2732fa1b47ddSSteven Rostedt 		goto out;
2733fa1b47ddSSteven Rostedt 
2734fa1b47ddSSteven Rostedt 	/*
2735fa1b47ddSSteven Rostedt 	 * The commit is still visible to the reader, so we
2736a1863c21SSteven Rostedt 	 * must still update the timestamp.
2737fa1b47ddSSteven Rostedt 	 */
2738a1863c21SSteven Rostedt 	rb_update_write_stamp(cpu_buffer, event);
2739fa1b47ddSSteven Rostedt  out:
2740fa743953SSteven Rostedt 	rb_end_commit(cpu_buffer);
2741fa1b47ddSSteven Rostedt 
2742f3b9aae1SFrederic Weisbecker 	trace_recursive_unlock();
2743f3b9aae1SFrederic Weisbecker 
27445168ae50SSteven Rostedt 	preempt_enable_notrace();
2745fa1b47ddSSteven Rostedt 
2746fa1b47ddSSteven Rostedt }
2747fa1b47ddSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
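
/*
 * Minimal sketch of the reserve-then-discard pattern (fill_body()
 * is a made-up helper standing in for the caller's own logic):
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
 *	if (event) {
 *		body = ring_buffer_event_data(event);
 *		if (fill_body(body))
 *			ring_buffer_unlock_commit(buffer, event);
 *		else
 *			ring_buffer_discard_commit(buffer, event);
 *	}
 */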
2748fa1b47ddSSteven Rostedt 
2749fa1b47ddSSteven Rostedt /**
27507a8e76a3SSteven Rostedt  * ring_buffer_write - write data to the buffer without reserving
27517a8e76a3SSteven Rostedt  * @buffer: The ring buffer to write to.
27527a8e76a3SSteven Rostedt  * @length: The length of the data being written (excluding the event header)
27537a8e76a3SSteven Rostedt  * @data: The data to write to the buffer.
27547a8e76a3SSteven Rostedt  *
27557a8e76a3SSteven Rostedt  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
27567a8e76a3SSteven Rostedt  * one function. If you already have the data to write to the buffer, it
27577a8e76a3SSteven Rostedt  * may be easier to simply call this function.
27587a8e76a3SSteven Rostedt  *
27597a8e76a3SSteven Rostedt  * Note, like ring_buffer_lock_reserve, the length is the length of the data
27607a8e76a3SSteven Rostedt  * and not the length of the event which would hold the header.
27617a8e76a3SSteven Rostedt  */
27627a8e76a3SSteven Rostedt int ring_buffer_write(struct ring_buffer *buffer,
27637a8e76a3SSteven Rostedt 		      unsigned long length,
27647a8e76a3SSteven Rostedt 		      void *data)
27657a8e76a3SSteven Rostedt {
27667a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
27677a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
27687a8e76a3SSteven Rostedt 	void *body;
27697a8e76a3SSteven Rostedt 	int ret = -EBUSY;
27705168ae50SSteven Rostedt 	int cpu;
27717a8e76a3SSteven Rostedt 
2772033601a3SSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
2773a3583244SSteven Rostedt 		return -EBUSY;
2774a3583244SSteven Rostedt 
27755168ae50SSteven Rostedt 	preempt_disable_notrace();
2776bf41a158SSteven Rostedt 
277752fbe9cdSLai Jiangshan 	if (atomic_read(&buffer->record_disabled))
277852fbe9cdSLai Jiangshan 		goto out;
277952fbe9cdSLai Jiangshan 
27807a8e76a3SSteven Rostedt 	cpu = raw_smp_processor_id();
27817a8e76a3SSteven Rostedt 
27829e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2783d769041fSSteven Rostedt 		goto out;
27847a8e76a3SSteven Rostedt 
27857a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
27867a8e76a3SSteven Rostedt 
27877a8e76a3SSteven Rostedt 	if (atomic_read(&cpu_buffer->record_disabled))
27887a8e76a3SSteven Rostedt 		goto out;
27897a8e76a3SSteven Rostedt 
2790be957c44SSteven Rostedt 	if (length > BUF_MAX_DATA_SIZE)
2791be957c44SSteven Rostedt 		goto out;
2792be957c44SSteven Rostedt 
279362f0b3ebSSteven Rostedt 	event = rb_reserve_next_event(buffer, cpu_buffer, length);
27947a8e76a3SSteven Rostedt 	if (!event)
27957a8e76a3SSteven Rostedt 		goto out;
27967a8e76a3SSteven Rostedt 
27977a8e76a3SSteven Rostedt 	body = rb_event_data(event);
27987a8e76a3SSteven Rostedt 
27997a8e76a3SSteven Rostedt 	memcpy(body, data, length);
28007a8e76a3SSteven Rostedt 
28017a8e76a3SSteven Rostedt 	rb_commit(cpu_buffer, event);
28027a8e76a3SSteven Rostedt 
28037a8e76a3SSteven Rostedt 	ret = 0;
28047a8e76a3SSteven Rostedt  out:
28055168ae50SSteven Rostedt 	preempt_enable_notrace();
28067a8e76a3SSteven Rostedt 
28077a8e76a3SSteven Rostedt 	return ret;
28087a8e76a3SSteven Rostedt }
2809c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_write);
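
/*
 * Minimal sketch (struct my_record is illustrative): one call
 * replaces the reserve/copy/commit sequence above:
 *
 *	struct my_record rec = { .value = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(rec), &rec))
 *		return;		// -EBUSY: the write was refused
 */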
28107a8e76a3SSteven Rostedt 
281134a148bfSAndrew Morton static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2812bf41a158SSteven Rostedt {
2813bf41a158SSteven Rostedt 	struct buffer_page *reader = cpu_buffer->reader_page;
281477ae365eSSteven Rostedt 	struct buffer_page *head = rb_set_head_page(cpu_buffer);
2815bf41a158SSteven Rostedt 	struct buffer_page *commit = cpu_buffer->commit_page;
2816bf41a158SSteven Rostedt 
281777ae365eSSteven Rostedt 	/* In case of error, head will be NULL */
281877ae365eSSteven Rostedt 	if (unlikely(!head))
281977ae365eSSteven Rostedt 		return 1;
282077ae365eSSteven Rostedt 
2821bf41a158SSteven Rostedt 	return reader->read == rb_page_commit(reader) &&
2822bf41a158SSteven Rostedt 		(commit == reader ||
2823bf41a158SSteven Rostedt 		 (commit == head &&
2824bf41a158SSteven Rostedt 		  head->read == rb_page_commit(commit)));
2825bf41a158SSteven Rostedt }
2826bf41a158SSteven Rostedt 
28277a8e76a3SSteven Rostedt /**
28287a8e76a3SSteven Rostedt  * ring_buffer_record_disable - stop all writes into the buffer
28297a8e76a3SSteven Rostedt  * @buffer: The ring buffer to stop writes to.
28307a8e76a3SSteven Rostedt  *
28317a8e76a3SSteven Rostedt  * This prevents all writes to the buffer. Any attempt to write
28327a8e76a3SSteven Rostedt  * to the buffer after this will fail and return NULL.
28337a8e76a3SSteven Rostedt  *
28347a8e76a3SSteven Rostedt  * The caller should call synchronize_sched() after this.
28357a8e76a3SSteven Rostedt  */
28367a8e76a3SSteven Rostedt void ring_buffer_record_disable(struct ring_buffer *buffer)
28377a8e76a3SSteven Rostedt {
28387a8e76a3SSteven Rostedt 	atomic_inc(&buffer->record_disabled);
28397a8e76a3SSteven Rostedt }
2840c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
28417a8e76a3SSteven Rostedt 
28427a8e76a3SSteven Rostedt /**
28437a8e76a3SSteven Rostedt  * ring_buffer_record_enable - enable writes to the buffer
28447a8e76a3SSteven Rostedt  * @buffer: The ring buffer to enable writes
28457a8e76a3SSteven Rostedt  *
28467a8e76a3SSteven Rostedt  * Note, multiple disables will need the same number of enables
2847c41b20e7SAdam Buchbinder  * to truly enable the writing (much like preempt_disable).
28487a8e76a3SSteven Rostedt  */
28497a8e76a3SSteven Rostedt void ring_buffer_record_enable(struct ring_buffer *buffer)
28507a8e76a3SSteven Rostedt {
28517a8e76a3SSteven Rostedt 	atomic_dec(&buffer->record_disabled);
28527a8e76a3SSteven Rostedt }
2853c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
28547a8e76a3SSteven Rostedt 
28557a8e76a3SSteven Rostedt /**
2856499e5470SSteven Rostedt  * ring_buffer_record_off - stop all writes into the buffer
2857499e5470SSteven Rostedt  * @buffer: The ring buffer to stop writes to.
2858499e5470SSteven Rostedt  *
2859499e5470SSteven Rostedt  * This prevents all writes to the buffer. Any attempt to write
2860499e5470SSteven Rostedt  * to the buffer after this will fail and return NULL.
2861499e5470SSteven Rostedt  *
2862499e5470SSteven Rostedt  * This is different than ring_buffer_record_disable() as
286387abb3b1SWang Tianhong  * it works like an on/off switch, whereas the disable() version
2864499e5470SSteven Rostedt  * must be paired with an enable().
2865499e5470SSteven Rostedt  */
2866499e5470SSteven Rostedt void ring_buffer_record_off(struct ring_buffer *buffer)
2867499e5470SSteven Rostedt {
2868499e5470SSteven Rostedt 	unsigned int rd;
2869499e5470SSteven Rostedt 	unsigned int new_rd;
2870499e5470SSteven Rostedt 
2871499e5470SSteven Rostedt 	do {
2872499e5470SSteven Rostedt 		rd = atomic_read(&buffer->record_disabled);
2873499e5470SSteven Rostedt 		new_rd = rd | RB_BUFFER_OFF;
2874499e5470SSteven Rostedt 	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2875499e5470SSteven Rostedt }
2876499e5470SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_record_off);
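
/*
 * The loop above is the usual lock-free read-modify-write: re-read
 * the value and retry until the cmpxchg succeeds against the value
 * the update was based on.  The same pattern in isolation (FLAG is
 * illustrative):
 *
 *	do {
 *		old = atomic_read(&v);
 *		new = old | FLAG;
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 */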
2877499e5470SSteven Rostedt 
2878499e5470SSteven Rostedt /**
2879499e5470SSteven Rostedt  * ring_buffer_record_on - restart writes into the buffer
2880499e5470SSteven Rostedt  * @buffer: The ring buffer to start writes to.
2881499e5470SSteven Rostedt  *
2882499e5470SSteven Rostedt  * This enables all writes to the buffer that was disabled by
2883499e5470SSteven Rostedt  * ring_buffer_record_off().
2884499e5470SSteven Rostedt  *
2885499e5470SSteven Rostedt  * This is different than ring_buffer_record_enable() as
288687abb3b1SWang Tianhong  * it works like an on/off switch, whereas the enable() version
2887499e5470SSteven Rostedt  * must be paired with a disable().
2888499e5470SSteven Rostedt  */
2889499e5470SSteven Rostedt void ring_buffer_record_on(struct ring_buffer *buffer)
2890499e5470SSteven Rostedt {
2891499e5470SSteven Rostedt 	unsigned int rd;
2892499e5470SSteven Rostedt 	unsigned int new_rd;
2893499e5470SSteven Rostedt 
2894499e5470SSteven Rostedt 	do {
2895499e5470SSteven Rostedt 		rd = atomic_read(&buffer->record_disabled);
2896499e5470SSteven Rostedt 		new_rd = rd & ~RB_BUFFER_OFF;
2897499e5470SSteven Rostedt 	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2898499e5470SSteven Rostedt }
2899499e5470SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_record_on);
2900499e5470SSteven Rostedt 
2901499e5470SSteven Rostedt /**
2902499e5470SSteven Rostedt  * ring_buffer_record_is_on - return true if the ring buffer can accept writes
2903499e5470SSteven Rostedt  * @buffer: The ring buffer to see if write is enabled
2904499e5470SSteven Rostedt  *
2905499e5470SSteven Rostedt  * Returns true if the ring buffer is in a state that it accepts writes.
2906499e5470SSteven Rostedt  */
2907499e5470SSteven Rostedt int ring_buffer_record_is_on(struct ring_buffer *buffer)
2908499e5470SSteven Rostedt {
2909499e5470SSteven Rostedt 	return !atomic_read(&buffer->record_disabled);
2910499e5470SSteven Rostedt }
2911499e5470SSteven Rostedt 
2912499e5470SSteven Rostedt /**
29137a8e76a3SSteven Rostedt  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
29147a8e76a3SSteven Rostedt  * @buffer: The ring buffer to stop writes to.
29157a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to stop
29167a8e76a3SSteven Rostedt  *
29177a8e76a3SSteven Rostedt  * This prevents all writes to the buffer. Any attempt to write
29187a8e76a3SSteven Rostedt  * to the buffer after this will fail and return NULL.
29197a8e76a3SSteven Rostedt  *
29207a8e76a3SSteven Rostedt  * The caller should call synchronize_sched() after this.
29217a8e76a3SSteven Rostedt  */
29227a8e76a3SSteven Rostedt void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
29237a8e76a3SSteven Rostedt {
29247a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
29257a8e76a3SSteven Rostedt 
29269e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
29278aabee57SSteven Rostedt 		return;
29287a8e76a3SSteven Rostedt 
29297a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
29307a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
29317a8e76a3SSteven Rostedt }
2932c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
29337a8e76a3SSteven Rostedt 
29347a8e76a3SSteven Rostedt /**
29357a8e76a3SSteven Rostedt  * ring_buffer_record_enable_cpu - enable writes to the buffer
29367a8e76a3SSteven Rostedt  * @buffer: The ring buffer to enable writes
29377a8e76a3SSteven Rostedt  * @cpu: The CPU to enable.
29387a8e76a3SSteven Rostedt  *
29397a8e76a3SSteven Rostedt  * Note, multiple disables will need the same number of enables
2940c41b20e7SAdam Buchbinder  * to truly enable the writing (much like preempt_disable).
29417a8e76a3SSteven Rostedt  */
29427a8e76a3SSteven Rostedt void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
29437a8e76a3SSteven Rostedt {
29447a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
29457a8e76a3SSteven Rostedt 
29469e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
29478aabee57SSteven Rostedt 		return;
29487a8e76a3SSteven Rostedt 
29497a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
29507a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
29517a8e76a3SSteven Rostedt }
2952c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
29537a8e76a3SSteven Rostedt 
2954f6195aa0SSteven Rostedt /*
2955f6195aa0SSteven Rostedt  * The total entries in the ring buffer is the running counter
2956f6195aa0SSteven Rostedt  * of entries entered into the ring buffer, minus the sum of
2957f6195aa0SSteven Rostedt  * the entries read from the ring buffer and the number of
2958f6195aa0SSteven Rostedt  * entries that were overwritten.
2959f6195aa0SSteven Rostedt  */
2960f6195aa0SSteven Rostedt static inline unsigned long
2961f6195aa0SSteven Rostedt rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
2962f6195aa0SSteven Rostedt {
2963f6195aa0SSteven Rostedt 	return local_read(&cpu_buffer->entries) -
2964f6195aa0SSteven Rostedt 		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
2965f6195aa0SSteven Rostedt }
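
/*
 * Worked example (the numbers are illustrative): if 1000 events were
 * ever written, 300 have been read and 50 were overwritten when the
 * writer wrapped, then 1000 - (50 + 300) = 650 entries currently sit
 * unread in the buffer.
 */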
2966f6195aa0SSteven Rostedt 
29677a8e76a3SSteven Rostedt /**
2968c64e148aSVaibhav Nagarnaik  * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
2969c64e148aSVaibhav Nagarnaik  * @buffer: The ring buffer
2970c64e148aSVaibhav Nagarnaik  * @cpu: The per CPU buffer to read from.
2971c64e148aSVaibhav Nagarnaik  */
297250ecf2c3SYoshihiro YUNOMAE u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
2973c64e148aSVaibhav Nagarnaik {
2974c64e148aSVaibhav Nagarnaik 	unsigned long flags;
2975c64e148aSVaibhav Nagarnaik 	struct ring_buffer_per_cpu *cpu_buffer;
2976c64e148aSVaibhav Nagarnaik 	struct buffer_page *bpage;
2977da830e58SLinus Torvalds 	u64 ret = 0;
2978c64e148aSVaibhav Nagarnaik 
2979c64e148aSVaibhav Nagarnaik 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2980c64e148aSVaibhav Nagarnaik 		return 0;
2981c64e148aSVaibhav Nagarnaik 
2982c64e148aSVaibhav Nagarnaik 	cpu_buffer = buffer->buffers[cpu];
29837115e3fcSLinus Torvalds 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2984c64e148aSVaibhav Nagarnaik 	 * If the tail is on the reader_page, the oldest time stamp
2985c64e148aSVaibhav Nagarnaik 	 * is on the reader page.
2986c64e148aSVaibhav Nagarnaik 	 * page
2987c64e148aSVaibhav Nagarnaik 	 */
2988c64e148aSVaibhav Nagarnaik 	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
2989c64e148aSVaibhav Nagarnaik 		bpage = cpu_buffer->reader_page;
2990c64e148aSVaibhav Nagarnaik 	else
2991c64e148aSVaibhav Nagarnaik 		bpage = rb_set_head_page(cpu_buffer);
299254f7be5bSSteven Rostedt 	if (bpage)
2993c64e148aSVaibhav Nagarnaik 		ret = bpage->page->time_stamp;
29947115e3fcSLinus Torvalds 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2995c64e148aSVaibhav Nagarnaik 
2996c64e148aSVaibhav Nagarnaik 	return ret;
2997c64e148aSVaibhav Nagarnaik }
2998c64e148aSVaibhav Nagarnaik EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
2999c64e148aSVaibhav Nagarnaik 
3000c64e148aSVaibhav Nagarnaik /**
3001c64e148aSVaibhav Nagarnaik  * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3002c64e148aSVaibhav Nagarnaik  * @buffer: The ring buffer
3003c64e148aSVaibhav Nagarnaik  * @cpu: The per CPU buffer to read from.
3004c64e148aSVaibhav Nagarnaik  */
3005c64e148aSVaibhav Nagarnaik unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3006c64e148aSVaibhav Nagarnaik {
3007c64e148aSVaibhav Nagarnaik 	struct ring_buffer_per_cpu *cpu_buffer;
3008c64e148aSVaibhav Nagarnaik 	unsigned long ret;
3009c64e148aSVaibhav Nagarnaik 
3010c64e148aSVaibhav Nagarnaik 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3011c64e148aSVaibhav Nagarnaik 		return 0;
3012c64e148aSVaibhav Nagarnaik 
3013c64e148aSVaibhav Nagarnaik 	cpu_buffer = buffer->buffers[cpu];
3014c64e148aSVaibhav Nagarnaik 	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3015c64e148aSVaibhav Nagarnaik 
3016c64e148aSVaibhav Nagarnaik 	return ret;
3017c64e148aSVaibhav Nagarnaik }
3018c64e148aSVaibhav Nagarnaik EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3019c64e148aSVaibhav Nagarnaik 
3020c64e148aSVaibhav Nagarnaik /**
30217a8e76a3SSteven Rostedt  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
30227a8e76a3SSteven Rostedt  * @buffer: The ring buffer
30237a8e76a3SSteven Rostedt  * @cpu: The per CPU buffer to get the entries from.
30247a8e76a3SSteven Rostedt  */
30257a8e76a3SSteven Rostedt unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
30267a8e76a3SSteven Rostedt {
30277a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
30287a8e76a3SSteven Rostedt 
30299e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
30308aabee57SSteven Rostedt 		return 0;
30317a8e76a3SSteven Rostedt 
30327a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
3033554f786eSSteven Rostedt 
3034f6195aa0SSteven Rostedt 	return rb_num_of_entries(cpu_buffer);
30357a8e76a3SSteven Rostedt }
3036c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
30377a8e76a3SSteven Rostedt 
30387a8e76a3SSteven Rostedt /**
3039884bfe89SSlava Pestov  * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3040884bfe89SSlava Pestov  * buffer wrapping around (only if RB_FL_OVERWRITE is on).
30417a8e76a3SSteven Rostedt  * @buffer: The ring buffer
30427a8e76a3SSteven Rostedt  * @cpu: The per CPU buffer to get the number of overruns from
30437a8e76a3SSteven Rostedt  */
30447a8e76a3SSteven Rostedt unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
30457a8e76a3SSteven Rostedt {
30467a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
30478aabee57SSteven Rostedt 	unsigned long ret;
30487a8e76a3SSteven Rostedt 
30499e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
30508aabee57SSteven Rostedt 		return 0;
30517a8e76a3SSteven Rostedt 
30527a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
305377ae365eSSteven Rostedt 	ret = local_read(&cpu_buffer->overrun);
3054554f786eSSteven Rostedt 
3055554f786eSSteven Rostedt 	return ret;
30567a8e76a3SSteven Rostedt }
3057c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
30587a8e76a3SSteven Rostedt 
30597a8e76a3SSteven Rostedt /**
3060884bfe89SSlava Pestov  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3061884bfe89SSlava Pestov  * commits failing due to the buffer wrapping around while there are uncommitted
3062884bfe89SSlava Pestov  * events, such as during an interrupt storm.
3063f0d2c681SSteven Rostedt  * @buffer: The ring buffer
3064f0d2c681SSteven Rostedt  * @cpu: The per CPU buffer to get the number of overruns from
3065f0d2c681SSteven Rostedt  */
3066f0d2c681SSteven Rostedt unsigned long
3067f0d2c681SSteven Rostedt ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3068f0d2c681SSteven Rostedt {
3069f0d2c681SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
3070f0d2c681SSteven Rostedt 	unsigned long ret;
3071f0d2c681SSteven Rostedt 
3072f0d2c681SSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3073f0d2c681SSteven Rostedt 		return 0;
3074f0d2c681SSteven Rostedt 
3075f0d2c681SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
307677ae365eSSteven Rostedt 	ret = local_read(&cpu_buffer->commit_overrun);
3077f0d2c681SSteven Rostedt 
3078f0d2c681SSteven Rostedt 	return ret;
3079f0d2c681SSteven Rostedt }
3080f0d2c681SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3081f0d2c681SSteven Rostedt 
3082f0d2c681SSteven Rostedt /**
3083884bfe89SSlava Pestov  * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3084884bfe89SSlava Pestov  * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3085884bfe89SSlava Pestov  * @buffer: The ring buffer
3086884bfe89SSlava Pestov  * @cpu: The per CPU buffer to get the number of overruns from
3087884bfe89SSlava Pestov  */
3088884bfe89SSlava Pestov unsigned long
3089884bfe89SSlava Pestov ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3090884bfe89SSlava Pestov {
3091884bfe89SSlava Pestov 	struct ring_buffer_per_cpu *cpu_buffer;
3092884bfe89SSlava Pestov 	unsigned long ret;
3093884bfe89SSlava Pestov 
3094884bfe89SSlava Pestov 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3095884bfe89SSlava Pestov 		return 0;
3096884bfe89SSlava Pestov 
3097884bfe89SSlava Pestov 	cpu_buffer = buffer->buffers[cpu];
3098884bfe89SSlava Pestov 	ret = local_read(&cpu_buffer->dropped_events);
3099884bfe89SSlava Pestov 
3100884bfe89SSlava Pestov 	return ret;
3101884bfe89SSlava Pestov }
3102884bfe89SSlava Pestov EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3103884bfe89SSlava Pestov 
3104884bfe89SSlava Pestov /**
31057a8e76a3SSteven Rostedt  * ring_buffer_entries - get the number of entries in a buffer
31067a8e76a3SSteven Rostedt  * @buffer: The ring buffer
31077a8e76a3SSteven Rostedt  *
31087a8e76a3SSteven Rostedt  * Returns the total number of entries in the ring buffer
31097a8e76a3SSteven Rostedt  * (all CPU entries)
31107a8e76a3SSteven Rostedt  */
31117a8e76a3SSteven Rostedt unsigned long ring_buffer_entries(struct ring_buffer *buffer)
31127a8e76a3SSteven Rostedt {
31137a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
31147a8e76a3SSteven Rostedt 	unsigned long entries = 0;
31157a8e76a3SSteven Rostedt 	int cpu;
31167a8e76a3SSteven Rostedt 
31177a8e76a3SSteven Rostedt 	/* if you care about this being correct, lock the buffer */
31187a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
31197a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
3120f6195aa0SSteven Rostedt 		entries += rb_num_of_entries(cpu_buffer);
31217a8e76a3SSteven Rostedt 	}
31227a8e76a3SSteven Rostedt 
31237a8e76a3SSteven Rostedt 	return entries;
31247a8e76a3SSteven Rostedt }
3125c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries);
31267a8e76a3SSteven Rostedt 
31277a8e76a3SSteven Rostedt /**
312867b394f7SJiri Olsa  * ring_buffer_overruns - get the number of overruns in buffer
31297a8e76a3SSteven Rostedt  * @buffer: The ring buffer
31307a8e76a3SSteven Rostedt  *
31317a8e76a3SSteven Rostedt  * Returns the total number of overruns in the ring buffer
31327a8e76a3SSteven Rostedt  * (all CPU entries)
31337a8e76a3SSteven Rostedt  */
31347a8e76a3SSteven Rostedt unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
31357a8e76a3SSteven Rostedt {
31367a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
31377a8e76a3SSteven Rostedt 	unsigned long overruns = 0;
31387a8e76a3SSteven Rostedt 	int cpu;
31397a8e76a3SSteven Rostedt 
31407a8e76a3SSteven Rostedt 	/* if you care about this being correct, lock the buffer */
31417a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
31427a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
314377ae365eSSteven Rostedt 		overruns += local_read(&cpu_buffer->overrun);
31447a8e76a3SSteven Rostedt 	}
31457a8e76a3SSteven Rostedt 
31467a8e76a3SSteven Rostedt 	return overruns;
31477a8e76a3SSteven Rostedt }
3148c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overruns);
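/*
 * Example (editor's illustrative sketch, not part of the original file):
 * dumping the statistics exported by the accessors above. Assumes a
 * hypothetical caller holding a valid struct ring_buffer *buffer; as the
 * comments note, the counts are racy unless the buffer is locked.
 *
 *	int cpu;
 *
 *	pr_info("entries: %lu overruns: %lu\n",
 *		ring_buffer_entries(buffer), ring_buffer_overruns(buffer));
 *	for_each_online_cpu(cpu)
 *		pr_info("cpu %d: commit overruns: %lu dropped: %lu\n", cpu,
 *			ring_buffer_commit_overrun_cpu(buffer, cpu),
 *			ring_buffer_dropped_events_cpu(buffer, cpu));
 */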
31497a8e76a3SSteven Rostedt 
3150642edba5SSteven Rostedt static void rb_iter_reset(struct ring_buffer_iter *iter)
31517a8e76a3SSteven Rostedt {
31527a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
31537a8e76a3SSteven Rostedt 
3154d769041fSSteven Rostedt 	/* Iterator usage is expected to have record disabled */
3155d769041fSSteven Rostedt 	if (list_empty(&cpu_buffer->reader_page->list)) {
315677ae365eSSteven Rostedt 		iter->head_page = rb_set_head_page(cpu_buffer);
315777ae365eSSteven Rostedt 		if (unlikely(!iter->head_page))
315877ae365eSSteven Rostedt 			return;
315977ae365eSSteven Rostedt 		iter->head = iter->head_page->read;
3160d769041fSSteven Rostedt 	} else {
3161d769041fSSteven Rostedt 		iter->head_page = cpu_buffer->reader_page;
31626f807acdSSteven Rostedt 		iter->head = cpu_buffer->reader_page->read;
3163d769041fSSteven Rostedt 	}
3164d769041fSSteven Rostedt 	if (iter->head)
3165d769041fSSteven Rostedt 		iter->read_stamp = cpu_buffer->read_stamp;
3166d769041fSSteven Rostedt 	else
3167abc9b56dSSteven Rostedt 		iter->read_stamp = iter->head_page->page->time_stamp;
3168492a74f4SSteven Rostedt 	iter->cache_reader_page = cpu_buffer->reader_page;
3169492a74f4SSteven Rostedt 	iter->cache_read = cpu_buffer->read;
3170642edba5SSteven Rostedt }
3171f83c9d0fSSteven Rostedt 
3172642edba5SSteven Rostedt /**
3173642edba5SSteven Rostedt  * ring_buffer_iter_reset - reset an iterator
3174642edba5SSteven Rostedt  * @iter: The iterator to reset
3175642edba5SSteven Rostedt  *
3176642edba5SSteven Rostedt  * Resets the iterator, so that it will start from the beginning
3177642edba5SSteven Rostedt  * again.
3178642edba5SSteven Rostedt  */
3179642edba5SSteven Rostedt void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3180642edba5SSteven Rostedt {
3181554f786eSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
3182642edba5SSteven Rostedt 	unsigned long flags;
3183642edba5SSteven Rostedt 
3184554f786eSSteven Rostedt 	if (!iter)
3185554f786eSSteven Rostedt 		return;
3186554f786eSSteven Rostedt 
3187554f786eSSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
3188554f786eSSteven Rostedt 
31895389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3190642edba5SSteven Rostedt 	rb_iter_reset(iter);
31915389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
31927a8e76a3SSteven Rostedt }
3193c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
31947a8e76a3SSteven Rostedt 
31957a8e76a3SSteven Rostedt /**
31967a8e76a3SSteven Rostedt  * ring_buffer_iter_empty - check if an iterator has no more to read
31977a8e76a3SSteven Rostedt  * @iter: The iterator to check
31987a8e76a3SSteven Rostedt  */
31997a8e76a3SSteven Rostedt int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
32007a8e76a3SSteven Rostedt {
32017a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
32027a8e76a3SSteven Rostedt 
32037a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
32047a8e76a3SSteven Rostedt 
3205bf41a158SSteven Rostedt 	return iter->head_page == cpu_buffer->commit_page &&
3206bf41a158SSteven Rostedt 		iter->head == rb_commit_index(cpu_buffer);
32077a8e76a3SSteven Rostedt }
3208c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
32097a8e76a3SSteven Rostedt 
32107a8e76a3SSteven Rostedt static void
32117a8e76a3SSteven Rostedt rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
32127a8e76a3SSteven Rostedt 		     struct ring_buffer_event *event)
32137a8e76a3SSteven Rostedt {
32147a8e76a3SSteven Rostedt 	u64 delta;
32157a8e76a3SSteven Rostedt 
3216334d4169SLai Jiangshan 	switch (event->type_len) {
32177a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
32187a8e76a3SSteven Rostedt 		return;
32197a8e76a3SSteven Rostedt 
32207a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
32217a8e76a3SSteven Rostedt 		delta = event->array[0];
32227a8e76a3SSteven Rostedt 		delta <<= TS_SHIFT;
32237a8e76a3SSteven Rostedt 		delta += event->time_delta;
32247a8e76a3SSteven Rostedt 		cpu_buffer->read_stamp += delta;
32257a8e76a3SSteven Rostedt 		return;
32267a8e76a3SSteven Rostedt 
32277a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
32287a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
32297a8e76a3SSteven Rostedt 		return;
32307a8e76a3SSteven Rostedt 
32317a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
32327a8e76a3SSteven Rostedt 		cpu_buffer->read_stamp += event->time_delta;
32337a8e76a3SSteven Rostedt 		return;
32347a8e76a3SSteven Rostedt 
32357a8e76a3SSteven Rostedt 	default:
32367a8e76a3SSteven Rostedt 		BUG();
32377a8e76a3SSteven Rostedt 	}
32387a8e76a3SSteven Rostedt 	return;
32397a8e76a3SSteven Rostedt }
32407a8e76a3SSteven Rostedt 
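/*
 * Worked example (editor's illustration, not original text): a time
 * extend carries the upper bits of a delta too large for the 27-bit
 * time_delta field. With TS_SHIFT == 27, a delta of 0x12345678 is
 * stored as array[0] = 0x12345678 >> 27 = 0x2 and
 * time_delta = 0x12345678 & ((1 << 27) - 1) = 0x2345678, and the
 * switch above rebuilds it as (0x2 << 27) + 0x2345678 = 0x12345678.
 */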
32417a8e76a3SSteven Rostedt static void
32427a8e76a3SSteven Rostedt rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
32437a8e76a3SSteven Rostedt 			  struct ring_buffer_event *event)
32447a8e76a3SSteven Rostedt {
32457a8e76a3SSteven Rostedt 	u64 delta;
32467a8e76a3SSteven Rostedt 
3247334d4169SLai Jiangshan 	switch (event->type_len) {
32487a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
32497a8e76a3SSteven Rostedt 		return;
32507a8e76a3SSteven Rostedt 
32517a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
32527a8e76a3SSteven Rostedt 		delta = event->array[0];
32537a8e76a3SSteven Rostedt 		delta <<= TS_SHIFT;
32547a8e76a3SSteven Rostedt 		delta += event->time_delta;
32557a8e76a3SSteven Rostedt 		iter->read_stamp += delta;
32567a8e76a3SSteven Rostedt 		return;
32577a8e76a3SSteven Rostedt 
32587a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
32597a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
32607a8e76a3SSteven Rostedt 		return;
32617a8e76a3SSteven Rostedt 
32627a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
32637a8e76a3SSteven Rostedt 		iter->read_stamp += event->time_delta;
32647a8e76a3SSteven Rostedt 		return;
32657a8e76a3SSteven Rostedt 
32667a8e76a3SSteven Rostedt 	default:
32677a8e76a3SSteven Rostedt 		BUG();
32687a8e76a3SSteven Rostedt 	}
32697a8e76a3SSteven Rostedt 	return;
32707a8e76a3SSteven Rostedt }
32717a8e76a3SSteven Rostedt 
3272d769041fSSteven Rostedt static struct buffer_page *
3273d769041fSSteven Rostedt rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
32747a8e76a3SSteven Rostedt {
3275d769041fSSteven Rostedt 	struct buffer_page *reader = NULL;
327666a8cb95SSteven Rostedt 	unsigned long overwrite;
3277d769041fSSteven Rostedt 	unsigned long flags;
3278818e3dd3SSteven Rostedt 	int nr_loops = 0;
327977ae365eSSteven Rostedt 	int ret;
3280d769041fSSteven Rostedt 
32813e03fb7fSSteven Rostedt 	local_irq_save(flags);
32820199c4e6SThomas Gleixner 	arch_spin_lock(&cpu_buffer->lock);
3283d769041fSSteven Rostedt 
3284d769041fSSteven Rostedt  again:
3285818e3dd3SSteven Rostedt 	/*
3286818e3dd3SSteven Rostedt 	 * This should normally only loop twice. But because the
3287818e3dd3SSteven Rostedt 	 * start of the reader inserts an empty page, it causes
3288818e3dd3SSteven Rostedt 	 * a case where we will loop three times. There should be no
3289818e3dd3SSteven Rostedt 	 * reason to loop four times (that I know of).
3290818e3dd3SSteven Rostedt 	 */
32913e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3292818e3dd3SSteven Rostedt 		reader = NULL;
3293818e3dd3SSteven Rostedt 		goto out;
3294818e3dd3SSteven Rostedt 	}
3295818e3dd3SSteven Rostedt 
3296d769041fSSteven Rostedt 	reader = cpu_buffer->reader_page;
3297d769041fSSteven Rostedt 
3298d769041fSSteven Rostedt 	/* If there's more to read, return this page */
3299bf41a158SSteven Rostedt 	if (cpu_buffer->reader_page->read < rb_page_size(reader))
3300d769041fSSteven Rostedt 		goto out;
3301d769041fSSteven Rostedt 
3302d769041fSSteven Rostedt 	/* Never should we have an index greater than the size */
33033e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer,
33043e89c7bbSSteven Rostedt 		       cpu_buffer->reader_page->read > rb_page_size(reader)))
33053e89c7bbSSteven Rostedt 		goto out;
3306d769041fSSteven Rostedt 
3307d769041fSSteven Rostedt 	/* check if we caught up to the tail */
3308d769041fSSteven Rostedt 	reader = NULL;
3309bf41a158SSteven Rostedt 	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3310d769041fSSteven Rostedt 		goto out;
33117a8e76a3SSteven Rostedt 
3312a5fb8331SSteven Rostedt 	/* Don't bother swapping if the ring buffer is empty */
3313a5fb8331SSteven Rostedt 	if (rb_num_of_entries(cpu_buffer) == 0)
3314a5fb8331SSteven Rostedt 		goto out;
3315a5fb8331SSteven Rostedt 
33167a8e76a3SSteven Rostedt 	/*
3317d769041fSSteven Rostedt 	 * Reset the reader page to size zero.
33187a8e76a3SSteven Rostedt 	 */
331977ae365eSSteven Rostedt 	local_set(&cpu_buffer->reader_page->write, 0);
332077ae365eSSteven Rostedt 	local_set(&cpu_buffer->reader_page->entries, 0);
332177ae365eSSteven Rostedt 	local_set(&cpu_buffer->reader_page->page->commit, 0);
3322ff0ff84aSSteven Rostedt 	cpu_buffer->reader_page->real_end = 0;
3323d769041fSSteven Rostedt 
332477ae365eSSteven Rostedt  spin:
332577ae365eSSteven Rostedt 	/*
332677ae365eSSteven Rostedt 	 * Splice the empty reader page into the list around the head.
332777ae365eSSteven Rostedt 	 */
332877ae365eSSteven Rostedt 	reader = rb_set_head_page(cpu_buffer);
332954f7be5bSSteven Rostedt 	if (!reader)
333054f7be5bSSteven Rostedt 		goto out;
33310e1ff5d7SSteven Rostedt 	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3332d769041fSSteven Rostedt 	cpu_buffer->reader_page->list.prev = reader->list.prev;
3333bf41a158SSteven Rostedt 
33343adc54faSSteven Rostedt 	/*
33353adc54faSSteven Rostedt 	 * cpu_buffer->pages just needs to point to the buffer, it
33363adc54faSSteven Rostedt 	 *  has no specific buffer page to point to. Let's move it out
333725985edcSLucas De Marchi 	 *  of our way so we don't accidentally swap it.
33383adc54faSSteven Rostedt 	 */
33393adc54faSSteven Rostedt 	cpu_buffer->pages = reader->list.prev;
33403adc54faSSteven Rostedt 
334177ae365eSSteven Rostedt 	/* The reader page will be pointing to the new head */
334277ae365eSSteven Rostedt 	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3343d769041fSSteven Rostedt 
3344d769041fSSteven Rostedt 	/*
334566a8cb95SSteven Rostedt 	 * We want to make sure we read the overruns after we set up our
334666a8cb95SSteven Rostedt 	 * pointers to the next object. The writer side does a
334766a8cb95SSteven Rostedt 	 * cmpxchg to cross pages which acts as the mb on the writer
334866a8cb95SSteven Rostedt 	 * side. Note, the reader will constantly fail the swap
334966a8cb95SSteven Rostedt 	 * while the writer is updating the pointers, so this
335066a8cb95SSteven Rostedt 	 * guarantees that the overwrite recorded here is the one we
335166a8cb95SSteven Rostedt 	 * want to compare with the last_overrun.
335266a8cb95SSteven Rostedt 	 */
335366a8cb95SSteven Rostedt 	smp_mb();
335466a8cb95SSteven Rostedt 	overwrite = local_read(&(cpu_buffer->overrun));
335566a8cb95SSteven Rostedt 
335666a8cb95SSteven Rostedt 	/*
335777ae365eSSteven Rostedt 	 * Here's the tricky part.
335877ae365eSSteven Rostedt 	 *
335977ae365eSSteven Rostedt 	 * We need to move the pointer past the header page.
336077ae365eSSteven Rostedt 	 * But we can only do that if a writer is not currently
336177ae365eSSteven Rostedt 	 * moving it. The page before the header page has the
336277ae365eSSteven Rostedt 	 * flag bit '1' set if it is pointing to the page we want.
336377ae365eSSteven Rostedt 	 * But if the writer is in the process of moving it,
336477ae365eSSteven Rostedt 	 * then it will be '2', or '0' if it has already moved.
3365d769041fSSteven Rostedt 	 */
3366d769041fSSteven Rostedt 
336777ae365eSSteven Rostedt 	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
336877ae365eSSteven Rostedt 
336977ae365eSSteven Rostedt 	/*
337077ae365eSSteven Rostedt 	 * If we did not convert it, then we must try again.
337177ae365eSSteven Rostedt 	 */
337277ae365eSSteven Rostedt 	if (!ret)
337377ae365eSSteven Rostedt 		goto spin;
337477ae365eSSteven Rostedt 
337577ae365eSSteven Rostedt 	/*
337677ae365eSSteven Rostedt 	 * Yeah! We succeeded in replacing the page.
337777ae365eSSteven Rostedt 	 *
337877ae365eSSteven Rostedt 	 * Now make the new head point back to the reader page.
337977ae365eSSteven Rostedt 	 */
33805ded3dc6SDavid Sharp 	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
33817a8e76a3SSteven Rostedt 	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3382d769041fSSteven Rostedt 
3383d769041fSSteven Rostedt 	/* Finally update the reader page to the new head */
3384d769041fSSteven Rostedt 	cpu_buffer->reader_page = reader;
3385d769041fSSteven Rostedt 	rb_reset_reader_page(cpu_buffer);
3386d769041fSSteven Rostedt 
338766a8cb95SSteven Rostedt 	if (overwrite != cpu_buffer->last_overrun) {
338866a8cb95SSteven Rostedt 		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
338966a8cb95SSteven Rostedt 		cpu_buffer->last_overrun = overwrite;
339066a8cb95SSteven Rostedt 	}
339166a8cb95SSteven Rostedt 
3392d769041fSSteven Rostedt 	goto again;
3393d769041fSSteven Rostedt 
3394d769041fSSteven Rostedt  out:
33950199c4e6SThomas Gleixner 	arch_spin_unlock(&cpu_buffer->lock);
33963e03fb7fSSteven Rostedt 	local_irq_restore(flags);
3397d769041fSSteven Rostedt 
3398d769041fSSteven Rostedt 	return reader;
33997a8e76a3SSteven Rostedt }
34007a8e76a3SSteven Rostedt 
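/*
 * Editor's illustration of the swap performed above (not original text):
 *
 *   before:  reader page R (empty), ring: ... -> A(head) -> B -> ...
 *   splice:  R->next = A->next, R->prev = A->prev (R shadows A's slot)
 *   replace: rb_head_page_replace() atomically swaps A out for R
 *   after:   R sits in the ring, old head A becomes the new reader
 *            page, and the head advances past it
 *
 * The cmpxchg inside rb_head_page_replace() is what makes the splice
 * safe against a writer concurrently moving the head; when it loses the
 * race, the code above simply spins and retries.
 */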
3401d769041fSSteven Rostedt static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3402d769041fSSteven Rostedt {
3403d769041fSSteven Rostedt 	struct ring_buffer_event *event;
3404d769041fSSteven Rostedt 	struct buffer_page *reader;
3405d769041fSSteven Rostedt 	unsigned length;
3406d769041fSSteven Rostedt 
3407d769041fSSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
3408d769041fSSteven Rostedt 
3409d769041fSSteven Rostedt 	/* This function should not be called when buffer is empty */
34103e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, !reader))
34113e89c7bbSSteven Rostedt 		return;
3412d769041fSSteven Rostedt 
3413d769041fSSteven Rostedt 	event = rb_reader_event(cpu_buffer);
34147a8e76a3SSteven Rostedt 
3415a1863c21SSteven Rostedt 	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3416e4906effSSteven Rostedt 		cpu_buffer->read++;
34177a8e76a3SSteven Rostedt 
34187a8e76a3SSteven Rostedt 	rb_update_read_stamp(cpu_buffer, event);
34197a8e76a3SSteven Rostedt 
3420d769041fSSteven Rostedt 	length = rb_event_length(event);
34216f807acdSSteven Rostedt 	cpu_buffer->reader_page->read += length;
34227a8e76a3SSteven Rostedt }
34237a8e76a3SSteven Rostedt 
34247a8e76a3SSteven Rostedt static void rb_advance_iter(struct ring_buffer_iter *iter)
34257a8e76a3SSteven Rostedt {
34267a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
34277a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
34287a8e76a3SSteven Rostedt 	unsigned length;
34297a8e76a3SSteven Rostedt 
34307a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
34317a8e76a3SSteven Rostedt 
34327a8e76a3SSteven Rostedt 	/*
34337a8e76a3SSteven Rostedt 	 * Check if we are at the end of the buffer.
34347a8e76a3SSteven Rostedt 	 */
3435bf41a158SSteven Rostedt 	if (iter->head >= rb_page_size(iter->head_page)) {
3436ea05b57cSSteven Rostedt 		/* discarded commits can make the page empty */
3437ea05b57cSSteven Rostedt 		if (iter->head_page == cpu_buffer->commit_page)
34383e89c7bbSSteven Rostedt 			return;
3439d769041fSSteven Rostedt 		rb_inc_iter(iter);
34407a8e76a3SSteven Rostedt 		return;
34417a8e76a3SSteven Rostedt 	}
34427a8e76a3SSteven Rostedt 
34437a8e76a3SSteven Rostedt 	event = rb_iter_head_event(iter);
34447a8e76a3SSteven Rostedt 
34457a8e76a3SSteven Rostedt 	length = rb_event_length(event);
34467a8e76a3SSteven Rostedt 
34477a8e76a3SSteven Rostedt 	/*
34487a8e76a3SSteven Rostedt 	 * This should not be called to advance the header if we are
34497a8e76a3SSteven Rostedt 	 * at the tail of the buffer.
34507a8e76a3SSteven Rostedt 	 */
34513e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer,
3452f536aafcSSteven Rostedt 		       (iter->head_page == cpu_buffer->commit_page) &&
34533e89c7bbSSteven Rostedt 		       (iter->head + length > rb_commit_index(cpu_buffer))))
34543e89c7bbSSteven Rostedt 		return;
34557a8e76a3SSteven Rostedt 
34567a8e76a3SSteven Rostedt 	rb_update_iter_read_stamp(iter, event);
34577a8e76a3SSteven Rostedt 
34587a8e76a3SSteven Rostedt 	iter->head += length;
34597a8e76a3SSteven Rostedt 
34607a8e76a3SSteven Rostedt 	/* check for end of page padding */
3461bf41a158SSteven Rostedt 	if ((iter->head >= rb_page_size(iter->head_page)) &&
3462bf41a158SSteven Rostedt 	    (iter->head_page != cpu_buffer->commit_page))
3463771e0384SSteven Rostedt 		rb_inc_iter(iter);
34647a8e76a3SSteven Rostedt }
34657a8e76a3SSteven Rostedt 
346666a8cb95SSteven Rostedt static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
346766a8cb95SSteven Rostedt {
346866a8cb95SSteven Rostedt 	return cpu_buffer->lost_events;
346966a8cb95SSteven Rostedt }
347066a8cb95SSteven Rostedt 
3471f83c9d0fSSteven Rostedt static struct ring_buffer_event *
347266a8cb95SSteven Rostedt rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
347366a8cb95SSteven Rostedt 	       unsigned long *lost_events)
34747a8e76a3SSteven Rostedt {
34757a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
3476d769041fSSteven Rostedt 	struct buffer_page *reader;
3477818e3dd3SSteven Rostedt 	int nr_loops = 0;
34787a8e76a3SSteven Rostedt 
34797a8e76a3SSteven Rostedt  again:
3480818e3dd3SSteven Rostedt 	/*
348169d1b839SSteven Rostedt 	 * We repeat when a time extend is encountered.
348269d1b839SSteven Rostedt 	 * Since the time extend is always attached to a data event,
348369d1b839SSteven Rostedt 	 * we should never loop more than once.
348469d1b839SSteven Rostedt 	 * (We never hit the following condition more than twice).
3485818e3dd3SSteven Rostedt 	 */
348669d1b839SSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3487818e3dd3SSteven Rostedt 		return NULL;
3488818e3dd3SSteven Rostedt 
3489d769041fSSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
3490d769041fSSteven Rostedt 	if (!reader)
34917a8e76a3SSteven Rostedt 		return NULL;
34927a8e76a3SSteven Rostedt 
3493d769041fSSteven Rostedt 	event = rb_reader_event(cpu_buffer);
34947a8e76a3SSteven Rostedt 
3495334d4169SLai Jiangshan 	switch (event->type_len) {
34967a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
34972d622719STom Zanussi 		if (rb_null_event(event))
3498bf41a158SSteven Rostedt 			RB_WARN_ON(cpu_buffer, 1);
34992d622719STom Zanussi 		/*
35002d622719STom Zanussi 		 * Because the writer could be discarding every
35012d622719STom Zanussi 		 * event it creates (which would probably be bad)
35022d622719STom Zanussi 		 * if we were to go back to "again" then we may never
35032d622719STom Zanussi 		 * catch up, and will trigger the warn on, or lock
35042d622719STom Zanussi 		 * the box. Return the padding, and we will release
35052d622719STom Zanussi 		 * the current locks, and try again.
35062d622719STom Zanussi 		 */
35072d622719STom Zanussi 		return event;
35087a8e76a3SSteven Rostedt 
35097a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
35107a8e76a3SSteven Rostedt 		/* Internal data, OK to advance */
3511d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
35127a8e76a3SSteven Rostedt 		goto again;
35137a8e76a3SSteven Rostedt 
35147a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
35157a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
3516d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
35177a8e76a3SSteven Rostedt 		goto again;
35187a8e76a3SSteven Rostedt 
35197a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
35207a8e76a3SSteven Rostedt 		if (ts) {
35217a8e76a3SSteven Rostedt 			*ts = cpu_buffer->read_stamp + event->time_delta;
3522d8eeb2d3SRobert Richter 			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
352337886f6aSSteven Rostedt 							 cpu_buffer->cpu, ts);
35247a8e76a3SSteven Rostedt 		}
352566a8cb95SSteven Rostedt 		if (lost_events)
352666a8cb95SSteven Rostedt 			*lost_events = rb_lost_events(cpu_buffer);
35277a8e76a3SSteven Rostedt 		return event;
35287a8e76a3SSteven Rostedt 
35297a8e76a3SSteven Rostedt 	default:
35307a8e76a3SSteven Rostedt 		BUG();
35317a8e76a3SSteven Rostedt 	}
35327a8e76a3SSteven Rostedt 
35337a8e76a3SSteven Rostedt 	return NULL;
35347a8e76a3SSteven Rostedt }
3535c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_peek);
35367a8e76a3SSteven Rostedt 
3537f83c9d0fSSteven Rostedt static struct ring_buffer_event *
3538f83c9d0fSSteven Rostedt rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
35397a8e76a3SSteven Rostedt {
35407a8e76a3SSteven Rostedt 	struct ring_buffer *buffer;
35417a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
35427a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
3543818e3dd3SSteven Rostedt 	int nr_loops = 0;
35447a8e76a3SSteven Rostedt 
35457a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
35467a8e76a3SSteven Rostedt 	buffer = cpu_buffer->buffer;
35477a8e76a3SSteven Rostedt 
3548492a74f4SSteven Rostedt 	/*
3549492a74f4SSteven Rostedt 	 * Check if someone performed a consuming read to
3550492a74f4SSteven Rostedt 	 * the buffer. A consuming read invalidates the iterator
3551492a74f4SSteven Rostedt 	 * and we need to reset the iterator in this case.
3552492a74f4SSteven Rostedt 	 */
3553492a74f4SSteven Rostedt 	if (unlikely(iter->cache_read != cpu_buffer->read ||
3554492a74f4SSteven Rostedt 		     iter->cache_reader_page != cpu_buffer->reader_page))
3555492a74f4SSteven Rostedt 		rb_iter_reset(iter);
3556492a74f4SSteven Rostedt 
35577a8e76a3SSteven Rostedt  again:
35583c05d748SSteven Rostedt 	if (ring_buffer_iter_empty(iter))
35593c05d748SSteven Rostedt 		return NULL;
35603c05d748SSteven Rostedt 
3561818e3dd3SSteven Rostedt 	/*
356269d1b839SSteven Rostedt 	 * We repeat when a time extend is encountered.
356369d1b839SSteven Rostedt 	 * Since the time extend is always attached to a data event,
356469d1b839SSteven Rostedt 	 * we should never loop more than once.
356569d1b839SSteven Rostedt 	 * (We never hit the following condition more than twice).
3566818e3dd3SSteven Rostedt 	 */
356769d1b839SSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3568818e3dd3SSteven Rostedt 		return NULL;
3569818e3dd3SSteven Rostedt 
35707a8e76a3SSteven Rostedt 	if (rb_per_cpu_empty(cpu_buffer))
35717a8e76a3SSteven Rostedt 		return NULL;
35727a8e76a3SSteven Rostedt 
35733c05d748SSteven Rostedt 	if (iter->head >= local_read(&iter->head_page->page->commit)) {
35743c05d748SSteven Rostedt 		rb_inc_iter(iter);
35753c05d748SSteven Rostedt 		goto again;
35763c05d748SSteven Rostedt 	}
35773c05d748SSteven Rostedt 
35787a8e76a3SSteven Rostedt 	event = rb_iter_head_event(iter);
35797a8e76a3SSteven Rostedt 
3580334d4169SLai Jiangshan 	switch (event->type_len) {
35817a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
35822d622719STom Zanussi 		if (rb_null_event(event)) {
3583d769041fSSteven Rostedt 			rb_inc_iter(iter);
35847a8e76a3SSteven Rostedt 			goto again;
35852d622719STom Zanussi 		}
35862d622719STom Zanussi 		rb_advance_iter(iter);
35872d622719STom Zanussi 		return event;
35887a8e76a3SSteven Rostedt 
35897a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
35907a8e76a3SSteven Rostedt 		/* Internal data, OK to advance */
35917a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
35927a8e76a3SSteven Rostedt 		goto again;
35937a8e76a3SSteven Rostedt 
35947a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
35957a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
35967a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
35977a8e76a3SSteven Rostedt 		goto again;
35987a8e76a3SSteven Rostedt 
35997a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
36007a8e76a3SSteven Rostedt 		if (ts) {
36017a8e76a3SSteven Rostedt 			*ts = iter->read_stamp + event->time_delta;
360237886f6aSSteven Rostedt 			ring_buffer_normalize_time_stamp(buffer,
360337886f6aSSteven Rostedt 							 cpu_buffer->cpu, ts);
36047a8e76a3SSteven Rostedt 		}
36057a8e76a3SSteven Rostedt 		return event;
36067a8e76a3SSteven Rostedt 
36077a8e76a3SSteven Rostedt 	default:
36087a8e76a3SSteven Rostedt 		BUG();
36097a8e76a3SSteven Rostedt 	}
36107a8e76a3SSteven Rostedt 
36117a8e76a3SSteven Rostedt 	return NULL;
36127a8e76a3SSteven Rostedt }
3613c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
36147a8e76a3SSteven Rostedt 
36158d707e8eSSteven Rostedt static inline int rb_ok_to_lock(void)
36168d707e8eSSteven Rostedt {
36178d707e8eSSteven Rostedt 	/*
36188d707e8eSSteven Rostedt 	 * If an NMI die dumps out the content of the ring buffer
36198d707e8eSSteven Rostedt 	 * do not grab locks. We also permanently disable the ring
36218d707e8eSSteven Rostedt 	 * buffer. A one-time deal is all you get from reading
36218d707e8eSSteven Rostedt 	 * the ring buffer from an NMI.
36228d707e8eSSteven Rostedt 	 */
3623464e85ebSSteven Rostedt 	if (likely(!in_nmi()))
36248d707e8eSSteven Rostedt 		return 1;
36258d707e8eSSteven Rostedt 
36268d707e8eSSteven Rostedt 	tracing_off_permanent();
36278d707e8eSSteven Rostedt 	return 0;
36288d707e8eSSteven Rostedt }
36298d707e8eSSteven Rostedt 
36307a8e76a3SSteven Rostedt /**
3631f83c9d0fSSteven Rostedt  * ring_buffer_peek - peek at the next event to be read
3632f83c9d0fSSteven Rostedt  * @buffer: The ring buffer to read
3633f83c9d0fSSteven Rostedt  * @cpu: The cpu to peek at
3634f83c9d0fSSteven Rostedt  * @ts: The timestamp counter of this event.
363566a8cb95SSteven Rostedt  * @lost_events: a variable to store if events were lost (may be NULL)
3636f83c9d0fSSteven Rostedt  *
3637f83c9d0fSSteven Rostedt  * This will return the event that will be read next, but does
3638f83c9d0fSSteven Rostedt  * not consume the data.
3639f83c9d0fSSteven Rostedt  */
3640f83c9d0fSSteven Rostedt struct ring_buffer_event *
364166a8cb95SSteven Rostedt ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
364266a8cb95SSteven Rostedt 		 unsigned long *lost_events)
3643f83c9d0fSSteven Rostedt {
3644f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
36458aabee57SSteven Rostedt 	struct ring_buffer_event *event;
3646f83c9d0fSSteven Rostedt 	unsigned long flags;
36478d707e8eSSteven Rostedt 	int dolock;
3648f83c9d0fSSteven Rostedt 
3649554f786eSSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
36508aabee57SSteven Rostedt 		return NULL;
3651554f786eSSteven Rostedt 
36528d707e8eSSteven Rostedt 	dolock = rb_ok_to_lock();
36532d622719STom Zanussi  again:
36548d707e8eSSteven Rostedt 	local_irq_save(flags);
36558d707e8eSSteven Rostedt 	if (dolock)
36565389f6faSThomas Gleixner 		raw_spin_lock(&cpu_buffer->reader_lock);
365766a8cb95SSteven Rostedt 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3658469535a5SRobert Richter 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3659469535a5SRobert Richter 		rb_advance_reader(cpu_buffer);
36608d707e8eSSteven Rostedt 	if (dolock)
36615389f6faSThomas Gleixner 		raw_spin_unlock(&cpu_buffer->reader_lock);
36628d707e8eSSteven Rostedt 	local_irq_restore(flags);
3663f83c9d0fSSteven Rostedt 
36641b959e18SSteven Rostedt 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
36652d622719STom Zanussi 		goto again;
36662d622719STom Zanussi 
3667f83c9d0fSSteven Rostedt 	return event;
3668f83c9d0fSSteven Rostedt }
3669f83c9d0fSSteven Rostedt 
3670f83c9d0fSSteven Rostedt /**
3671f83c9d0fSSteven Rostedt  * ring_buffer_iter_peek - peek at the next event to be read
3672f83c9d0fSSteven Rostedt  * @iter: The ring buffer iterator
3673f83c9d0fSSteven Rostedt  * @ts: The timestamp counter of this event.
3674f83c9d0fSSteven Rostedt  *
3675f83c9d0fSSteven Rostedt  * This will return the event that will be read next, but does
3676f83c9d0fSSteven Rostedt  * not increment the iterator.
3677f83c9d0fSSteven Rostedt  */
3678f83c9d0fSSteven Rostedt struct ring_buffer_event *
3679f83c9d0fSSteven Rostedt ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3680f83c9d0fSSteven Rostedt {
3681f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3682f83c9d0fSSteven Rostedt 	struct ring_buffer_event *event;
3683f83c9d0fSSteven Rostedt 	unsigned long flags;
3684f83c9d0fSSteven Rostedt 
36852d622719STom Zanussi  again:
36865389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3687f83c9d0fSSteven Rostedt 	event = rb_iter_peek(iter, ts);
36885389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3689f83c9d0fSSteven Rostedt 
36901b959e18SSteven Rostedt 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
36912d622719STom Zanussi 		goto again;
36922d622719STom Zanussi 
3693f83c9d0fSSteven Rostedt 	return event;
3694f83c9d0fSSteven Rostedt }
3695f83c9d0fSSteven Rostedt 
3696f83c9d0fSSteven Rostedt /**
36977a8e76a3SSteven Rostedt  * ring_buffer_consume - return an event and consume it
36987a8e76a3SSteven Rostedt  * @buffer: The ring buffer to get the next event from
369966a8cb95SSteven Rostedt  * @cpu: the cpu to read the buffer from
370066a8cb95SSteven Rostedt  * @ts: a variable to store the timestamp (may be NULL)
370166a8cb95SSteven Rostedt  * @lost_events: a variable to store if events were lost (may be NULL)
37027a8e76a3SSteven Rostedt  *
37037a8e76a3SSteven Rostedt  * Returns the next event in the ring buffer, and that event is consumed.
37047a8e76a3SSteven Rostedt  * Meaning, that sequential reads will keep returning a different event,
37057a8e76a3SSteven Rostedt  * and eventually empty the ring buffer if the producer is slower.
37067a8e76a3SSteven Rostedt  */
37077a8e76a3SSteven Rostedt struct ring_buffer_event *
370866a8cb95SSteven Rostedt ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
370966a8cb95SSteven Rostedt 		    unsigned long *lost_events)
37107a8e76a3SSteven Rostedt {
3711554f786eSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
3712554f786eSSteven Rostedt 	struct ring_buffer_event *event = NULL;
3713f83c9d0fSSteven Rostedt 	unsigned long flags;
37148d707e8eSSteven Rostedt 	int dolock;
37158d707e8eSSteven Rostedt 
37168d707e8eSSteven Rostedt 	dolock = rb_ok_to_lock();
37177a8e76a3SSteven Rostedt 
37182d622719STom Zanussi  again:
3719554f786eSSteven Rostedt 	/* might be called in atomic */
3720554f786eSSteven Rostedt 	preempt_disable();
37217a8e76a3SSteven Rostedt 
3722554f786eSSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3723554f786eSSteven Rostedt 		goto out;
3724554f786eSSteven Rostedt 
3725554f786eSSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
37268d707e8eSSteven Rostedt 	local_irq_save(flags);
37278d707e8eSSteven Rostedt 	if (dolock)
37285389f6faSThomas Gleixner 		raw_spin_lock(&cpu_buffer->reader_lock);
37297a8e76a3SSteven Rostedt 
373066a8cb95SSteven Rostedt 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
373166a8cb95SSteven Rostedt 	if (event) {
373266a8cb95SSteven Rostedt 		cpu_buffer->lost_events = 0;
3733d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
373466a8cb95SSteven Rostedt 	}
37357a8e76a3SSteven Rostedt 
37368d707e8eSSteven Rostedt 	if (dolock)
37375389f6faSThomas Gleixner 		raw_spin_unlock(&cpu_buffer->reader_lock);
37388d707e8eSSteven Rostedt 	local_irq_restore(flags);
3739f83c9d0fSSteven Rostedt 
3740554f786eSSteven Rostedt  out:
3741554f786eSSteven Rostedt 	preempt_enable();
3742554f786eSSteven Rostedt 
37431b959e18SSteven Rostedt 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
37442d622719STom Zanussi 		goto again;
37452d622719STom Zanussi 
37467a8e76a3SSteven Rostedt 	return event;
37477a8e76a3SSteven Rostedt }
3748c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_consume);
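/*
 * Example (editor's illustrative sketch, not part of the original file):
 * draining one cpu's buffer with consuming reads. Assumes a valid
 * buffer; @lost reports events overwritten since the previous read, and
 * process() is a hypothetical consumer.
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *		if (lost)
 *			pr_warn("lost %lu events\n", lost);
 *		process(ring_buffer_event_data(event), ts);
 *	}
 */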
37497a8e76a3SSteven Rostedt 
37507a8e76a3SSteven Rostedt /**
375172c9ddfdSDavid Miller  * ring_buffer_read_prepare - Prepare for a non-consuming read of the buffer
37527a8e76a3SSteven Rostedt  * @buffer: The ring buffer to read from
37537a8e76a3SSteven Rostedt  * @cpu: The cpu buffer to iterate over
37547a8e76a3SSteven Rostedt  *
375572c9ddfdSDavid Miller  * This performs the initial preparations necessary to iterate
375672c9ddfdSDavid Miller  * through the buffer.  Memory is allocated, buffer recording
375772c9ddfdSDavid Miller  * is disabled, and the iterator pointer is returned to the caller.
37587a8e76a3SSteven Rostedt  *
375972c9ddfdSDavid Miller  * Disabling buffer recording prevents the reading from being
376072c9ddfdSDavid Miller  * corrupted. This is not a consuming read, so a producer is not
376172c9ddfdSDavid Miller  * expected.
376272c9ddfdSDavid Miller  *
376372c9ddfdSDavid Miller  * After a sequence of ring_buffer_read_prepare calls, the user is
376472c9ddfdSDavid Miller  * expected to make at least one call to ring_buffer_read_prepare_sync.
376572c9ddfdSDavid Miller  * Afterwards, ring_buffer_read_start is invoked to get things going
376672c9ddfdSDavid Miller  * for real.
376772c9ddfdSDavid Miller  *
376872c9ddfdSDavid Miller  * Overall, this must be paired with ring_buffer_read_finish.
37697a8e76a3SSteven Rostedt  */
37707a8e76a3SSteven Rostedt struct ring_buffer_iter *
377172c9ddfdSDavid Miller ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
37727a8e76a3SSteven Rostedt {
37737a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
37748aabee57SSteven Rostedt 	struct ring_buffer_iter *iter;
37757a8e76a3SSteven Rostedt 
37769e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
37778aabee57SSteven Rostedt 		return NULL;
37787a8e76a3SSteven Rostedt 
37797a8e76a3SSteven Rostedt 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
37807a8e76a3SSteven Rostedt 	if (!iter)
37818aabee57SSteven Rostedt 		return NULL;
37827a8e76a3SSteven Rostedt 
37837a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
37847a8e76a3SSteven Rostedt 
37857a8e76a3SSteven Rostedt 	iter->cpu_buffer = cpu_buffer;
37867a8e76a3SSteven Rostedt 
378783f40318SVaibhav Nagarnaik 	atomic_inc(&buffer->resize_disabled);
37887a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
378972c9ddfdSDavid Miller 
379072c9ddfdSDavid Miller 	return iter;
379172c9ddfdSDavid Miller }
379272c9ddfdSDavid Miller EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
379372c9ddfdSDavid Miller 
379472c9ddfdSDavid Miller /**
379572c9ddfdSDavid Miller  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
379672c9ddfdSDavid Miller  *
379772c9ddfdSDavid Miller  * All previously invoked ring_buffer_read_prepare calls to prepare
379872c9ddfdSDavid Miller  * iterators will be synchronized.  Afterwards, ring_buffer_read_start
379972c9ddfdSDavid Miller  * calls on those iterators are allowed.
380072c9ddfdSDavid Miller  */
380172c9ddfdSDavid Miller void
380272c9ddfdSDavid Miller ring_buffer_read_prepare_sync(void)
380372c9ddfdSDavid Miller {
38047a8e76a3SSteven Rostedt 	synchronize_sched();
380572c9ddfdSDavid Miller }
380672c9ddfdSDavid Miller EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
380772c9ddfdSDavid Miller 
380872c9ddfdSDavid Miller /**
380972c9ddfdSDavid Miller  * ring_buffer_read_start - start a non-consuming read of the buffer
381072c9ddfdSDavid Miller  * @iter: The iterator returned by ring_buffer_read_prepare
381172c9ddfdSDavid Miller  *
381272c9ddfdSDavid Miller  * This finalizes the startup of an iteration through the buffer.
381372c9ddfdSDavid Miller  * The iterator comes from a call to ring_buffer_read_prepare and
381472c9ddfdSDavid Miller  * an intervening ring_buffer_read_prepare_sync must have been
381572c9ddfdSDavid Miller  * performed.
381672c9ddfdSDavid Miller  *
381772c9ddfdSDavid Miller  * Must be paired with ring_buffer_read_finish.
381872c9ddfdSDavid Miller  */
381972c9ddfdSDavid Miller void
382072c9ddfdSDavid Miller ring_buffer_read_start(struct ring_buffer_iter *iter)
382172c9ddfdSDavid Miller {
382272c9ddfdSDavid Miller 	struct ring_buffer_per_cpu *cpu_buffer;
382372c9ddfdSDavid Miller 	unsigned long flags;
382472c9ddfdSDavid Miller 
382572c9ddfdSDavid Miller 	if (!iter)
382672c9ddfdSDavid Miller 		return;
382772c9ddfdSDavid Miller 
382872c9ddfdSDavid Miller 	cpu_buffer = iter->cpu_buffer;
38297a8e76a3SSteven Rostedt 
38305389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
38310199c4e6SThomas Gleixner 	arch_spin_lock(&cpu_buffer->lock);
3832642edba5SSteven Rostedt 	rb_iter_reset(iter);
38330199c4e6SThomas Gleixner 	arch_spin_unlock(&cpu_buffer->lock);
38345389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
38357a8e76a3SSteven Rostedt }
3836c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_start);
38377a8e76a3SSteven Rostedt 
38387a8e76a3SSteven Rostedt /**
38397a8e76a3SSteven Rostedt  * ring_buffer_read_finish - finish reading the iterator of the buffer
38407a8e76a3SSteven Rostedt  * @iter: The iterator retrieved by ring_buffer_read_prepare
38417a8e76a3SSteven Rostedt  *
38427a8e76a3SSteven Rostedt  * This re-enables the recording to the buffer, and frees the
38437a8e76a3SSteven Rostedt  * iterator.
38447a8e76a3SSteven Rostedt  */
38457a8e76a3SSteven Rostedt void
38467a8e76a3SSteven Rostedt ring_buffer_read_finish(struct ring_buffer_iter *iter)
38477a8e76a3SSteven Rostedt {
38487a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
38499366c1baSSteven Rostedt 	unsigned long flags;
38507a8e76a3SSteven Rostedt 
3851659f451fSSteven Rostedt 	/*
3852659f451fSSteven Rostedt 	 * Ring buffer is disabled from recording, here's a good place
3853659f451fSSteven Rostedt 	 * to check the integrity of the ring buffer.
38549366c1baSSteven Rostedt 	 * Must prevent readers from trying to read, as the check
38559366c1baSSteven Rostedt 	 * clears the HEAD page and readers require it.
3856659f451fSSteven Rostedt 	 */
38579366c1baSSteven Rostedt 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3858659f451fSSteven Rostedt 	rb_check_pages(cpu_buffer);
38599366c1baSSteven Rostedt 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3860659f451fSSteven Rostedt 
38617a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
386283f40318SVaibhav Nagarnaik 	atomic_dec(&cpu_buffer->buffer->resize_disabled);
38637a8e76a3SSteven Rostedt 	kfree(iter);
38647a8e76a3SSteven Rostedt }
3865c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
38667a8e76a3SSteven Rostedt 
38677a8e76a3SSteven Rostedt /**
38687a8e76a3SSteven Rostedt  * ring_buffer_read - read the next item in the ring buffer by the iterator
38697a8e76a3SSteven Rostedt  * @iter: The ring buffer iterator
38707a8e76a3SSteven Rostedt  * @ts: The time stamp of the event read.
38717a8e76a3SSteven Rostedt  *
38727a8e76a3SSteven Rostedt  * This reads the next event in the ring buffer and increments the iterator.
38737a8e76a3SSteven Rostedt  */
38747a8e76a3SSteven Rostedt struct ring_buffer_event *
38757a8e76a3SSteven Rostedt ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
38767a8e76a3SSteven Rostedt {
38777a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
3878f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3879f83c9d0fSSteven Rostedt 	unsigned long flags;
38807a8e76a3SSteven Rostedt 
38815389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
38827e9391cfSSteven Rostedt  again:
3883f83c9d0fSSteven Rostedt 	event = rb_iter_peek(iter, ts);
38847a8e76a3SSteven Rostedt 	if (!event)
3885f83c9d0fSSteven Rostedt 		goto out;
38867a8e76a3SSteven Rostedt 
38877e9391cfSSteven Rostedt 	if (event->type_len == RINGBUF_TYPE_PADDING)
38887e9391cfSSteven Rostedt 		goto again;
38897e9391cfSSteven Rostedt 
38907a8e76a3SSteven Rostedt 	rb_advance_iter(iter);
3891f83c9d0fSSteven Rostedt  out:
38925389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
38937a8e76a3SSteven Rostedt 
38947a8e76a3SSteven Rostedt 	return event;
38957a8e76a3SSteven Rostedt }
3896c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read);
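/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the full non-consuming read sequence described in the kernel-doc above,
 * for a single cpu. process() is a hypothetical consumer.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu);
 *	if (!iter)
 *		return;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		process(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 */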
38977a8e76a3SSteven Rostedt 
38987a8e76a3SSteven Rostedt /**
38997a8e76a3SSteven Rostedt  * ring_buffer_size - return the size of the ring buffer (in bytes)
39017a8e76a3SSteven Rostedt  * @buffer: The ring buffer.
39017a8e76a3SSteven Rostedt  * @cpu: The CPU to get the per-cpu buffer size of.
39017a8e76a3SSteven Rostedt  */
3902438ced17SVaibhav Nagarnaik unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
39037a8e76a3SSteven Rostedt {
3904438ced17SVaibhav Nagarnaik 	/*
3905438ced17SVaibhav Nagarnaik 	 * Earlier, this method returned
3906438ced17SVaibhav Nagarnaik 	 *	BUF_PAGE_SIZE * buffer->nr_pages
3907438ced17SVaibhav Nagarnaik 	 * Since the nr_pages field is now removed, we have converted this to
3908438ced17SVaibhav Nagarnaik 	 * return the per cpu buffer value.
3909438ced17SVaibhav Nagarnaik 	 */
3910438ced17SVaibhav Nagarnaik 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3911438ced17SVaibhav Nagarnaik 		return 0;
3912438ced17SVaibhav Nagarnaik 
3913438ced17SVaibhav Nagarnaik 	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
39147a8e76a3SSteven Rostedt }
3915c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_size);
39167a8e76a3SSteven Rostedt 
39177a8e76a3SSteven Rostedt static void
39187a8e76a3SSteven Rostedt rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
39197a8e76a3SSteven Rostedt {
392077ae365eSSteven Rostedt 	rb_head_page_deactivate(cpu_buffer);
392177ae365eSSteven Rostedt 
39227a8e76a3SSteven Rostedt 	cpu_buffer->head_page
39233adc54faSSteven Rostedt 		= list_entry(cpu_buffer->pages, struct buffer_page, list);
3924bf41a158SSteven Rostedt 	local_set(&cpu_buffer->head_page->write, 0);
3925778c55d4SSteven Rostedt 	local_set(&cpu_buffer->head_page->entries, 0);
3926abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->head_page->page->commit, 0);
39277a8e76a3SSteven Rostedt 
39286f807acdSSteven Rostedt 	cpu_buffer->head_page->read = 0;
3929bf41a158SSteven Rostedt 
3930bf41a158SSteven Rostedt 	cpu_buffer->tail_page = cpu_buffer->head_page;
3931bf41a158SSteven Rostedt 	cpu_buffer->commit_page = cpu_buffer->head_page;
3932bf41a158SSteven Rostedt 
3933bf41a158SSteven Rostedt 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
39345040b4b7SVaibhav Nagarnaik 	INIT_LIST_HEAD(&cpu_buffer->new_pages);
3935bf41a158SSteven Rostedt 	local_set(&cpu_buffer->reader_page->write, 0);
3936778c55d4SSteven Rostedt 	local_set(&cpu_buffer->reader_page->entries, 0);
3937abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->reader_page->page->commit, 0);
39386f807acdSSteven Rostedt 	cpu_buffer->reader_page->read = 0;
3939d769041fSSteven Rostedt 
3940c64e148aSVaibhav Nagarnaik 	local_set(&cpu_buffer->entries_bytes, 0);
394177ae365eSSteven Rostedt 	local_set(&cpu_buffer->overrun, 0);
3942884bfe89SSlava Pestov 	local_set(&cpu_buffer->commit_overrun, 0);
3943884bfe89SSlava Pestov 	local_set(&cpu_buffer->dropped_events, 0);
3944e4906effSSteven Rostedt 	local_set(&cpu_buffer->entries, 0);
3945fa743953SSteven Rostedt 	local_set(&cpu_buffer->committing, 0);
3946fa743953SSteven Rostedt 	local_set(&cpu_buffer->commits, 0);
394777ae365eSSteven Rostedt 	cpu_buffer->read = 0;
3948c64e148aSVaibhav Nagarnaik 	cpu_buffer->read_bytes = 0;
394969507c06SSteven Rostedt 
395069507c06SSteven Rostedt 	cpu_buffer->write_stamp = 0;
395169507c06SSteven Rostedt 	cpu_buffer->read_stamp = 0;
395277ae365eSSteven Rostedt 
395366a8cb95SSteven Rostedt 	cpu_buffer->lost_events = 0;
395466a8cb95SSteven Rostedt 	cpu_buffer->last_overrun = 0;
395566a8cb95SSteven Rostedt 
395677ae365eSSteven Rostedt 	rb_head_page_activate(cpu_buffer);
39577a8e76a3SSteven Rostedt }
39587a8e76a3SSteven Rostedt 
39597a8e76a3SSteven Rostedt /**
39607a8e76a3SSteven Rostedt  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
39617a8e76a3SSteven Rostedt  * @buffer: The ring buffer to reset a per cpu buffer of
39627a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to be reset
39637a8e76a3SSteven Rostedt  */
39647a8e76a3SSteven Rostedt void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
39657a8e76a3SSteven Rostedt {
39667a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
39677a8e76a3SSteven Rostedt 	unsigned long flags;
39687a8e76a3SSteven Rostedt 
39699e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
39708aabee57SSteven Rostedt 		return;
39717a8e76a3SSteven Rostedt 
397283f40318SVaibhav Nagarnaik 	atomic_inc(&buffer->resize_disabled);
397341ede23eSSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
397441ede23eSSteven Rostedt 
397583f40318SVaibhav Nagarnaik 	/* Make sure all commits have finished */
397683f40318SVaibhav Nagarnaik 	synchronize_sched();
397783f40318SVaibhav Nagarnaik 
39785389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3979f83c9d0fSSteven Rostedt 
398041b6a95dSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
398141b6a95dSSteven Rostedt 		goto out;
398241b6a95dSSteven Rostedt 
39830199c4e6SThomas Gleixner 	arch_spin_lock(&cpu_buffer->lock);
39847a8e76a3SSteven Rostedt 
39857a8e76a3SSteven Rostedt 	rb_reset_cpu(cpu_buffer);
39867a8e76a3SSteven Rostedt 
39870199c4e6SThomas Gleixner 	arch_spin_unlock(&cpu_buffer->lock);
3988f83c9d0fSSteven Rostedt 
398941b6a95dSSteven Rostedt  out:
39905389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
399141ede23eSSteven Rostedt 
399241ede23eSSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
399383f40318SVaibhav Nagarnaik 	atomic_dec(&buffer->resize_disabled);
39947a8e76a3SSteven Rostedt }
3995c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
39967a8e76a3SSteven Rostedt 
39977a8e76a3SSteven Rostedt /**
39987a8e76a3SSteven Rostedt  * ring_buffer_reset - reset a ring buffer
39997a8e76a3SSteven Rostedt  * @buffer: The ring buffer to reset all cpu buffers
40007a8e76a3SSteven Rostedt  */
40017a8e76a3SSteven Rostedt void ring_buffer_reset(struct ring_buffer *buffer)
40027a8e76a3SSteven Rostedt {
40037a8e76a3SSteven Rostedt 	int cpu;
40047a8e76a3SSteven Rostedt 
40057a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu)
4006d769041fSSteven Rostedt 		ring_buffer_reset_cpu(buffer, cpu);
40077a8e76a3SSteven Rostedt }
4008c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset);
40097a8e76a3SSteven Rostedt 
40107a8e76a3SSteven Rostedt /**
40117a8e76a3SSteven Rostedt  * ring_buffer_empty - is the ring buffer empty?
40127a8e76a3SSteven Rostedt  * @buffer: The ring buffer to test
40137a8e76a3SSteven Rostedt  */
40147a8e76a3SSteven Rostedt int ring_buffer_empty(struct ring_buffer *buffer)
40157a8e76a3SSteven Rostedt {
40167a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
4017d4788207SSteven Rostedt 	unsigned long flags;
40188d707e8eSSteven Rostedt 	int dolock;
40197a8e76a3SSteven Rostedt 	int cpu;
4020d4788207SSteven Rostedt 	int ret;
40217a8e76a3SSteven Rostedt 
40228d707e8eSSteven Rostedt 	dolock = rb_ok_to_lock();
40237a8e76a3SSteven Rostedt 
40247a8e76a3SSteven Rostedt 	/* yes this is racy, but if you don't like the race, lock the buffer */
40257a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
40267a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
40278d707e8eSSteven Rostedt 		local_irq_save(flags);
40288d707e8eSSteven Rostedt 		if (dolock)
40295389f6faSThomas Gleixner 			raw_spin_lock(&cpu_buffer->reader_lock);
4030d4788207SSteven Rostedt 		ret = rb_per_cpu_empty(cpu_buffer);
40318d707e8eSSteven Rostedt 		if (dolock)
40325389f6faSThomas Gleixner 			raw_spin_unlock(&cpu_buffer->reader_lock);
40338d707e8eSSteven Rostedt 		local_irq_restore(flags);
40348d707e8eSSteven Rostedt 
4035d4788207SSteven Rostedt 		if (!ret)
40367a8e76a3SSteven Rostedt 			return 0;
40377a8e76a3SSteven Rostedt 	}
4038554f786eSSteven Rostedt 
40397a8e76a3SSteven Rostedt 	return 1;
40407a8e76a3SSteven Rostedt }
4041c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty);
40427a8e76a3SSteven Rostedt 
40437a8e76a3SSteven Rostedt /**
40447a8e76a3SSteven Rostedt  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
40457a8e76a3SSteven Rostedt  * @buffer: The ring buffer
40467a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to test
40477a8e76a3SSteven Rostedt  */
40487a8e76a3SSteven Rostedt int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
40497a8e76a3SSteven Rostedt {
40507a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
4051d4788207SSteven Rostedt 	unsigned long flags;
40528d707e8eSSteven Rostedt 	int dolock;
40538aabee57SSteven Rostedt 	int ret;
40547a8e76a3SSteven Rostedt 
40559e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
40568aabee57SSteven Rostedt 		return 1;
40577a8e76a3SSteven Rostedt 
40588d707e8eSSteven Rostedt 	dolock = rb_ok_to_lock();
4059554f786eSSteven Rostedt 
40607a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
40618d707e8eSSteven Rostedt 	local_irq_save(flags);
40628d707e8eSSteven Rostedt 	if (dolock)
40635389f6faSThomas Gleixner 		raw_spin_lock(&cpu_buffer->reader_lock);
4064554f786eSSteven Rostedt 	ret = rb_per_cpu_empty(cpu_buffer);
40658d707e8eSSteven Rostedt 	if (dolock)
40665389f6faSThomas Gleixner 		raw_spin_unlock(&cpu_buffer->reader_lock);
40678d707e8eSSteven Rostedt 	local_irq_restore(flags);
4068554f786eSSteven Rostedt 
4069554f786eSSteven Rostedt 	return ret;
40707a8e76a3SSteven Rostedt }
4071c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
40727a8e76a3SSteven Rostedt 
407385bac32cSSteven Rostedt #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
40747a8e76a3SSteven Rostedt /**
40757a8e76a3SSteven Rostedt  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
40767a8e76a3SSteven Rostedt  * @buffer_a: One buffer to swap with
40777a8e76a3SSteven Rostedt  * @buffer_b: The other buffer to swap with
40787a8e76a3SSteven Rostedt  *
40797a8e76a3SSteven Rostedt  * This function is useful for tracers that want to take a "snapshot"
40807a8e76a3SSteven Rostedt  * of a CPU buffer and have another backup buffer lying around.
40817a8e76a3SSteven Rostedt  * It is expected that the tracer handles the cpu buffer not being
40827a8e76a3SSteven Rostedt  * used at the moment.
40837a8e76a3SSteven Rostedt  */
40847a8e76a3SSteven Rostedt int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
40857a8e76a3SSteven Rostedt 			 struct ring_buffer *buffer_b, int cpu)
40867a8e76a3SSteven Rostedt {
40877a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer_a;
40887a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer_b;
4089554f786eSSteven Rostedt 	int ret = -EINVAL;
4090554f786eSSteven Rostedt 
40919e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
40929e01c1b7SRusty Russell 	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
4093554f786eSSteven Rostedt 		goto out;
40947a8e76a3SSteven Rostedt 
4095438ced17SVaibhav Nagarnaik 	cpu_buffer_a = buffer_a->buffers[cpu];
4096438ced17SVaibhav Nagarnaik 	cpu_buffer_b = buffer_b->buffers[cpu];
4097438ced17SVaibhav Nagarnaik 
40987a8e76a3SSteven Rostedt 	/* At least make sure the two buffers are somewhat the same */
4099438ced17SVaibhav Nagarnaik 	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4100554f786eSSteven Rostedt 		goto out;
4101554f786eSSteven Rostedt 
4102554f786eSSteven Rostedt 	ret = -EAGAIN;
41037a8e76a3SSteven Rostedt 
410497b17efeSSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
4105554f786eSSteven Rostedt 		goto out;
410697b17efeSSteven Rostedt 
410797b17efeSSteven Rostedt 	if (atomic_read(&buffer_a->record_disabled))
4108554f786eSSteven Rostedt 		goto out;
410997b17efeSSteven Rostedt 
411097b17efeSSteven Rostedt 	if (atomic_read(&buffer_b->record_disabled))
4111554f786eSSteven Rostedt 		goto out;
411297b17efeSSteven Rostedt 
411397b17efeSSteven Rostedt 	if (atomic_read(&cpu_buffer_a->record_disabled))
4114554f786eSSteven Rostedt 		goto out;
411597b17efeSSteven Rostedt 
411697b17efeSSteven Rostedt 	if (atomic_read(&cpu_buffer_b->record_disabled))
4117554f786eSSteven Rostedt 		goto out;
411897b17efeSSteven Rostedt 
41197a8e76a3SSteven Rostedt 	/*
41207a8e76a3SSteven Rostedt 	 * We can't do a synchronize_sched here because this
41217a8e76a3SSteven Rostedt 	 * function can be called in atomic context.
41227a8e76a3SSteven Rostedt 	 * Normally this will be called from the same CPU as cpu.
41237a8e76a3SSteven Rostedt 	 * If not it's up to the caller to protect this.
41247a8e76a3SSteven Rostedt 	 */
41257a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer_a->record_disabled);
41267a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer_b->record_disabled);
41277a8e76a3SSteven Rostedt 
412898277991SSteven Rostedt 	ret = -EBUSY;
412998277991SSteven Rostedt 	if (local_read(&cpu_buffer_a->committing))
413098277991SSteven Rostedt 		goto out_dec;
413198277991SSteven Rostedt 	if (local_read(&cpu_buffer_b->committing))
413298277991SSteven Rostedt 		goto out_dec;
413398277991SSteven Rostedt 
41347a8e76a3SSteven Rostedt 	buffer_a->buffers[cpu] = cpu_buffer_b;
41357a8e76a3SSteven Rostedt 	buffer_b->buffers[cpu] = cpu_buffer_a;
41367a8e76a3SSteven Rostedt 
41377a8e76a3SSteven Rostedt 	cpu_buffer_b->buffer = buffer_a;
41387a8e76a3SSteven Rostedt 	cpu_buffer_a->buffer = buffer_b;
41397a8e76a3SSteven Rostedt 
414098277991SSteven Rostedt 	ret = 0;
414198277991SSteven Rostedt 
414298277991SSteven Rostedt out_dec:
41437a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer_a->record_disabled);
41447a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer_b->record_disabled);
4145554f786eSSteven Rostedt out:
4146554f786eSSteven Rostedt 	return ret;
41477a8e76a3SSteven Rostedt }
4148c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
414985bac32cSSteven Rostedt #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
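/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the "snapshot" pattern the kernel-doc above describes.
 *
 *	if (ring_buffer_swap_cpu(live_buffer, snapshot_buffer, cpu) == 0)
 *		read_snapshot(snapshot_buffer, cpu);
 *
 * read_snapshot() is a hypothetical consumer; on success, the cpu's
 * events recorded so far sit in snapshot_buffer, while new writes go to
 * the page set that previously backed snapshot_buffer.
 */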
41507a8e76a3SSteven Rostedt 
41518789a9e7SSteven Rostedt /**
41528789a9e7SSteven Rostedt  * ring_buffer_alloc_read_page - allocate a page to read from buffer
41538789a9e7SSteven Rostedt  * @buffer: the buffer to allocate for.
41548789a9e7SSteven Rostedt  *
41558789a9e7SSteven Rostedt  * This function is used in conjunction with ring_buffer_read_page.
41568789a9e7SSteven Rostedt  * When reading a full page from the ring buffer, these functions
41578789a9e7SSteven Rostedt  * can be used to speed up the process. The calling function should
41588789a9e7SSteven Rostedt  * allocate a few pages first with this function. Then when it
41598789a9e7SSteven Rostedt  * needs to get pages from the ring buffer, it passes the result
41608789a9e7SSteven Rostedt  * of this function into ring_buffer_read_page, which will swap
41618789a9e7SSteven Rostedt  * the page that was allocated with the read page of the buffer.
41628789a9e7SSteven Rostedt  *
41638789a9e7SSteven Rostedt  * Returns:
41648789a9e7SSteven Rostedt  *  The page allocated, or NULL on error.
41658789a9e7SSteven Rostedt  */
41667ea59064SVaibhav Nagarnaik void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
41678789a9e7SSteven Rostedt {
4168044fa782SSteven Rostedt 	struct buffer_data_page *bpage;
41697ea59064SVaibhav Nagarnaik 	struct page *page;
41708789a9e7SSteven Rostedt 
4171d7ec4bfeSVaibhav Nagarnaik 	page = alloc_pages_node(cpu_to_node(cpu),
4172d7ec4bfeSVaibhav Nagarnaik 				GFP_KERNEL | __GFP_NORETRY, 0);
41737ea59064SVaibhav Nagarnaik 	if (!page)
41748789a9e7SSteven Rostedt 		return NULL;
41758789a9e7SSteven Rostedt 
41767ea59064SVaibhav Nagarnaik 	bpage = page_address(page);
41778789a9e7SSteven Rostedt 
4178ef7a4a16SSteven Rostedt 	rb_init_page(bpage);
4179ef7a4a16SSteven Rostedt 
4180044fa782SSteven Rostedt 	return bpage;
41818789a9e7SSteven Rostedt }
4182d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
41838789a9e7SSteven Rostedt 
41848789a9e7SSteven Rostedt /**
41858789a9e7SSteven Rostedt  * ring_buffer_free_read_page - free an allocated read page
41868789a9e7SSteven Rostedt  * @buffer: the buffer the page was allocate for
41878789a9e7SSteven Rostedt  * @data: the page to free
41888789a9e7SSteven Rostedt  *
41898789a9e7SSteven Rostedt  * Free a page allocated from ring_buffer_alloc_read_page.
41908789a9e7SSteven Rostedt  */
41918789a9e7SSteven Rostedt void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
41928789a9e7SSteven Rostedt {
41938789a9e7SSteven Rostedt 	free_page((unsigned long)data);
41948789a9e7SSteven Rostedt }
4195d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
41968789a9e7SSteven Rostedt 
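/*
 * Illustrative sketch, not part of the original file: the expected
 * lifetime of a read page. The helper name example_with_read_page and
 * the use of PAGE_SIZE for @len are assumptions; the point is that a
 * page obtained from ring_buffer_alloc_read_page() must always be
 * returned through ring_buffer_free_read_page(), even after
 * ring_buffer_read_page() below has swapped the underlying page.
 */
static int example_with_read_page(struct ring_buffer *buffer, int cpu)
{
	void *rpage;
	int ret;

	rpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (!rpage)
		return -ENOMEM;

	/* pass the address of the page pointer, as the page may be swapped */
	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);

	/* whichever page we hold now, it goes back to the allocator */
	ring_buffer_free_read_page(buffer, rpage);

	return ret < 0 ? ret : 0;
}
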
41978789a9e7SSteven Rostedt /**
41988789a9e7SSteven Rostedt  * ring_buffer_read_page - extract a page from the ring buffer
41998789a9e7SSteven Rostedt  * @buffer: buffer to extract from
42008789a9e7SSteven Rostedt  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4201ef7a4a16SSteven Rostedt  * @len: amount to extract
42028789a9e7SSteven Rostedt  * @cpu: the cpu of the buffer to extract
42038789a9e7SSteven Rostedt  * @full: should the extraction only happen when the page is full.
42048789a9e7SSteven Rostedt  *
42058789a9e7SSteven Rostedt  * This function will pull out a page from the ring buffer and consume it.
42068789a9e7SSteven Rostedt  * @data_page must be the address of the variable that was returned
42078789a9e7SSteven Rostedt  * from ring_buffer_alloc_read_page. This is because the page might be used
42088789a9e7SSteven Rostedt  * to swap with a page in the ring buffer.
42098789a9e7SSteven Rostedt  *
42108789a9e7SSteven Rostedt  * for example:
4211b85fa01eSLai Jiangshan  *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
42128789a9e7SSteven Rostedt  *	if (!rpage)
42138789a9e7SSteven Rostedt  *		return error;
4214ef7a4a16SSteven Rostedt  *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4215667d2412SLai Jiangshan  *	if (ret >= 0)
4216667d2412SLai Jiangshan  *		process_page(rpage, ret);
42178789a9e7SSteven Rostedt  *
42188789a9e7SSteven Rostedt  * When @full is set, the function will not succeed unless the
42198789a9e7SSteven Rostedt  * writer is completely off the reader page.
42208789a9e7SSteven Rostedt  *
42218789a9e7SSteven Rostedt  * Note: it is up to the calling functions to handle sleeps and wakeups.
42228789a9e7SSteven Rostedt  *  The ring buffer can be used anywhere in the kernel and cannot
42238789a9e7SSteven Rostedt  *  blindly call wake_up. The layer that uses the ring buffer must be
42248789a9e7SSteven Rostedt  *  responsible for that.
42258789a9e7SSteven Rostedt  *
42268789a9e7SSteven Rostedt  * Returns:
4227667d2412SLai Jiangshan  *  >=0 if data has been transferred; the value is the offset within the page where the consumed data begins.
4228667d2412SLai Jiangshan  *  <0 if no data has been transferred.
42298789a9e7SSteven Rostedt  */
42308789a9e7SSteven Rostedt int ring_buffer_read_page(struct ring_buffer *buffer,
4231ef7a4a16SSteven Rostedt 			  void **data_page, size_t len, int cpu, int full)
42328789a9e7SSteven Rostedt {
42338789a9e7SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
42348789a9e7SSteven Rostedt 	struct ring_buffer_event *event;
4235044fa782SSteven Rostedt 	struct buffer_data_page *bpage;
4236ef7a4a16SSteven Rostedt 	struct buffer_page *reader;
4237ff0ff84aSSteven Rostedt 	unsigned long missed_events;
42388789a9e7SSteven Rostedt 	unsigned long flags;
4239ef7a4a16SSteven Rostedt 	unsigned int commit;
4240667d2412SLai Jiangshan 	unsigned int read;
42414f3640f8SSteven Rostedt 	u64 save_timestamp;
4242667d2412SLai Jiangshan 	int ret = -1;
42438789a9e7SSteven Rostedt 
4244554f786eSSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4245554f786eSSteven Rostedt 		goto out;
4246554f786eSSteven Rostedt 
4247474d32b6SSteven Rostedt 	/*
4248474d32b6SSteven Rostedt 	 * If len is not big enough to hold the page header, then
4249474d32b6SSteven Rostedt 	 * we cannot copy anything.
4250474d32b6SSteven Rostedt 	 */
4251474d32b6SSteven Rostedt 	if (len <= BUF_PAGE_HDR_SIZE)
4252554f786eSSteven Rostedt 		goto out;
4253474d32b6SSteven Rostedt 
4254474d32b6SSteven Rostedt 	len -= BUF_PAGE_HDR_SIZE;
4255474d32b6SSteven Rostedt 
42568789a9e7SSteven Rostedt 	if (!data_page)
4257554f786eSSteven Rostedt 		goto out;
42588789a9e7SSteven Rostedt 
4259044fa782SSteven Rostedt 	bpage = *data_page;
4260044fa782SSteven Rostedt 	if (!bpage)
4261554f786eSSteven Rostedt 		goto out;
42628789a9e7SSteven Rostedt 
42635389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
42648789a9e7SSteven Rostedt 
4265ef7a4a16SSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
4266ef7a4a16SSteven Rostedt 	if (!reader)
4267554f786eSSteven Rostedt 		goto out_unlock;
42688789a9e7SSteven Rostedt 
4269ef7a4a16SSteven Rostedt 	event = rb_reader_event(cpu_buffer);
4270667d2412SLai Jiangshan 
4271ef7a4a16SSteven Rostedt 	read = reader->read;
4272ef7a4a16SSteven Rostedt 	commit = rb_page_commit(reader);
4273ef7a4a16SSteven Rostedt 
427466a8cb95SSteven Rostedt 	/* Check if any events were dropped */
4275ff0ff84aSSteven Rostedt 	missed_events = cpu_buffer->lost_events;
427666a8cb95SSteven Rostedt 
42778789a9e7SSteven Rostedt 	/*
4278474d32b6SSteven Rostedt 	 * If this page has been partially read or
4279474d32b6SSteven Rostedt 	 * if len is not big enough to read the rest of the page or
4280474d32b6SSteven Rostedt 	 * a writer is still on the page, then
4281474d32b6SSteven Rostedt 	 * we must copy the data from the page to the buffer.
4282474d32b6SSteven Rostedt 	 * Otherwise, we can simply swap the page with the one passed in.
42838789a9e7SSteven Rostedt 	 */
4284474d32b6SSteven Rostedt 	if (read || (len < (commit - read)) ||
4285ef7a4a16SSteven Rostedt 	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
4286667d2412SLai Jiangshan 		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4287474d32b6SSteven Rostedt 		unsigned int rpos = read;
4288474d32b6SSteven Rostedt 		unsigned int pos = 0;
4289ef7a4a16SSteven Rostedt 		unsigned int size;
42908789a9e7SSteven Rostedt 
42918789a9e7SSteven Rostedt 		if (full)
4292554f786eSSteven Rostedt 			goto out_unlock;
42938789a9e7SSteven Rostedt 
4294ef7a4a16SSteven Rostedt 		if (len > (commit - read))
4295ef7a4a16SSteven Rostedt 			len = (commit - read);
4296ef7a4a16SSteven Rostedt 
429769d1b839SSteven Rostedt 		/* Always keep the time extend and data together */
429869d1b839SSteven Rostedt 		size = rb_event_ts_length(event);
4299ef7a4a16SSteven Rostedt 
4300ef7a4a16SSteven Rostedt 		if (len < size)
4301554f786eSSteven Rostedt 			goto out_unlock;
4302ef7a4a16SSteven Rostedt 
43034f3640f8SSteven Rostedt 		/* save the current timestamp, since the user will need it */
43044f3640f8SSteven Rostedt 		save_timestamp = cpu_buffer->read_stamp;
43054f3640f8SSteven Rostedt 
4306ef7a4a16SSteven Rostedt 		/* Need to copy one event at a time */
4307ef7a4a16SSteven Rostedt 		do {
4308e1e35927SDavid Sharp 			/* We need the size of one event, because
4309e1e35927SDavid Sharp 			 * rb_advance_reader only advances by one event,
4310e1e35927SDavid Sharp 			 * whereas rb_event_ts_length may include the size of
4311e1e35927SDavid Sharp 			 * one or two events.
4312e1e35927SDavid Sharp 			 * We have already ensured there's enough space if this
4313e1e35927SDavid Sharp 			 * is a time extend. */
4314e1e35927SDavid Sharp 			size = rb_event_length(event);
4315474d32b6SSteven Rostedt 			memcpy(bpage->data + pos, rpage->data + rpos, size);
4316ef7a4a16SSteven Rostedt 
4317ef7a4a16SSteven Rostedt 			len -= size;
4318ef7a4a16SSteven Rostedt 
4319ef7a4a16SSteven Rostedt 			rb_advance_reader(cpu_buffer);
4320474d32b6SSteven Rostedt 			rpos = reader->read;
4321474d32b6SSteven Rostedt 			pos += size;
4322ef7a4a16SSteven Rostedt 
432318fab912SHuang Ying 			if (rpos >= commit)
432418fab912SHuang Ying 				break;
432518fab912SHuang Ying 
4326ef7a4a16SSteven Rostedt 			event = rb_reader_event(cpu_buffer);
432769d1b839SSteven Rostedt 			/* Always keep the time extend and data together */
432869d1b839SSteven Rostedt 			size = rb_event_ts_length(event);
4329e1e35927SDavid Sharp 		} while (len >= size);
4330667d2412SLai Jiangshan 
4331667d2412SLai Jiangshan 		/* update bpage */
4332ef7a4a16SSteven Rostedt 		local_set(&bpage->commit, pos);
43334f3640f8SSteven Rostedt 		bpage->time_stamp = save_timestamp;
4334ef7a4a16SSteven Rostedt 
4335474d32b6SSteven Rostedt 		/* we copied everything to the beginning */
4336474d32b6SSteven Rostedt 		read = 0;
43378789a9e7SSteven Rostedt 	} else {
4338afbab76aSSteven Rostedt 		/* update the entry counter */
433977ae365eSSteven Rostedt 		cpu_buffer->read += rb_page_entries(reader);
4340c64e148aSVaibhav Nagarnaik 		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4341afbab76aSSteven Rostedt 
43428789a9e7SSteven Rostedt 		/* swap the pages */
4343044fa782SSteven Rostedt 		rb_init_page(bpage);
4344ef7a4a16SSteven Rostedt 		bpage = reader->page;
4345ef7a4a16SSteven Rostedt 		reader->page = *data_page;
4346ef7a4a16SSteven Rostedt 		local_set(&reader->write, 0);
4347778c55d4SSteven Rostedt 		local_set(&reader->entries, 0);
4348ef7a4a16SSteven Rostedt 		reader->read = 0;
4349044fa782SSteven Rostedt 		*data_page = bpage;
4350ff0ff84aSSteven Rostedt 
4351ff0ff84aSSteven Rostedt 		/*
4352ff0ff84aSSteven Rostedt 		 * Use the real_end for the data size.
4353ff0ff84aSSteven Rostedt 		 * This gives us a chance to store the lost events
4354ff0ff84aSSteven Rostedt 		 * on the page.
4355ff0ff84aSSteven Rostedt 		 */
4356ff0ff84aSSteven Rostedt 		if (reader->real_end)
4357ff0ff84aSSteven Rostedt 			local_set(&bpage->commit, reader->real_end);
4358ef7a4a16SSteven Rostedt 	}
4359ef7a4a16SSteven Rostedt 	ret = read;
4360ef7a4a16SSteven Rostedt 
436166a8cb95SSteven Rostedt 	cpu_buffer->lost_events = 0;
43622711ca23SSteven Rostedt 
43632711ca23SSteven Rostedt 	commit = local_read(&bpage->commit);
436466a8cb95SSteven Rostedt 	/*
436566a8cb95SSteven Rostedt 	 * Set a flag in the commit field if we lost events
436666a8cb95SSteven Rostedt 	 */
4367ff0ff84aSSteven Rostedt 	if (missed_events) {
4368ff0ff84aSSteven Rostedt 		/* If there is room at the end of the page to save the
4369ff0ff84aSSteven Rostedt 		 * missed events, then record it there.
4370ff0ff84aSSteven Rostedt 		 */
4371ff0ff84aSSteven Rostedt 		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4372ff0ff84aSSteven Rostedt 			memcpy(&bpage->data[commit], &missed_events,
4373ff0ff84aSSteven Rostedt 			       sizeof(missed_events));
4374ff0ff84aSSteven Rostedt 			local_add(RB_MISSED_STORED, &bpage->commit);
43752711ca23SSteven Rostedt 			commit += sizeof(missed_events);
4376ff0ff84aSSteven Rostedt 		}
437766a8cb95SSteven Rostedt 		local_add(RB_MISSED_EVENTS, &bpage->commit);
4378ff0ff84aSSteven Rostedt 	}
437966a8cb95SSteven Rostedt 
43802711ca23SSteven Rostedt 	/*
43812711ca23SSteven Rostedt 	 * This page may be off to user land. Zero it out here.
43822711ca23SSteven Rostedt 	 */
43832711ca23SSteven Rostedt 	if (commit < BUF_PAGE_SIZE)
43842711ca23SSteven Rostedt 		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
43852711ca23SSteven Rostedt 
4386554f786eSSteven Rostedt  out_unlock:
43875389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
43888789a9e7SSteven Rostedt 
4389554f786eSSteven Rostedt  out:
43908789a9e7SSteven Rostedt 	return ret;
43918789a9e7SSteven Rostedt }
4392d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_read_page);
43938789a9e7SSteven Rostedt 
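/*
 * Illustrative sketch, not part of the original file: how a consumer
 * could decode the commit field of a page filled by
 * ring_buffer_read_page() above. RB_MISSED_EVENTS and RB_MISSED_STORED
 * are the flag bits set in that function; the helper name
 * example_page_lost_events is an assumption for illustration.
 */
static unsigned long example_page_lost_events(struct buffer_data_page *bpage)
{
	unsigned int commit = local_read(&bpage->commit);
	unsigned int len = commit & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);
	unsigned long missed = 0;

	if (!(commit & RB_MISSED_EVENTS))
		return 0;		/* nothing was dropped */

	if (commit & RB_MISSED_STORED)
		/* the dropped-event count was appended after the data */
		memcpy(&missed, &bpage->data[len], sizeof(missed));
	else
		missed = -1UL;		/* events dropped, count unknown */

	return missed;
}
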
439459222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
439509c9e84dSFrederic Weisbecker static int rb_cpu_notify(struct notifier_block *self,
4396554f786eSSteven Rostedt 			 unsigned long action, void *hcpu)
4397554f786eSSteven Rostedt {
4398554f786eSSteven Rostedt 	struct ring_buffer *buffer =
4399554f786eSSteven Rostedt 		container_of(self, struct ring_buffer, cpu_notify);
4400554f786eSSteven Rostedt 	long cpu = (long)hcpu;
4401438ced17SVaibhav Nagarnaik 	int cpu_i, nr_pages_same;
4402438ced17SVaibhav Nagarnaik 	unsigned int nr_pages;
4403554f786eSSteven Rostedt 
4404554f786eSSteven Rostedt 	switch (action) {
4405554f786eSSteven Rostedt 	case CPU_UP_PREPARE:
4406554f786eSSteven Rostedt 	case CPU_UP_PREPARE_FROZEN:
44073f237a79SRusty Russell 		if (cpumask_test_cpu(cpu, buffer->cpumask))
4408554f786eSSteven Rostedt 			return NOTIFY_OK;
4409554f786eSSteven Rostedt 
4410438ced17SVaibhav Nagarnaik 		nr_pages = 0;
4411438ced17SVaibhav Nagarnaik 		nr_pages_same = 1;
4412438ced17SVaibhav Nagarnaik 		/* check if all cpu buffer sizes are the same */
4413438ced17SVaibhav Nagarnaik 		for_each_buffer_cpu(buffer, cpu_i) {
4414438ced17SVaibhav Nagarnaik 			/* fill in the size from the first enabled cpu */
4415438ced17SVaibhav Nagarnaik 			if (nr_pages == 0)
4416438ced17SVaibhav Nagarnaik 				nr_pages = buffer->buffers[cpu_i]->nr_pages;
4417438ced17SVaibhav Nagarnaik 			if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4418438ced17SVaibhav Nagarnaik 				nr_pages_same = 0;
4419438ced17SVaibhav Nagarnaik 				break;
4420438ced17SVaibhav Nagarnaik 			}
4421438ced17SVaibhav Nagarnaik 		}
4422438ced17SVaibhav Nagarnaik 		/* allocate the minimum number of pages; the user can expand it later */
4423438ced17SVaibhav Nagarnaik 		if (!nr_pages_same)
4424438ced17SVaibhav Nagarnaik 			nr_pages = 2;
4425554f786eSSteven Rostedt 		buffer->buffers[cpu] =
4426438ced17SVaibhav Nagarnaik 			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4427554f786eSSteven Rostedt 		if (!buffer->buffers[cpu]) {
4428554f786eSSteven Rostedt 			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4429554f786eSSteven Rostedt 			     cpu);
4430554f786eSSteven Rostedt 			return NOTIFY_OK;
4431554f786eSSteven Rostedt 		}
4432554f786eSSteven Rostedt 		smp_wmb();
44333f237a79SRusty Russell 		cpumask_set_cpu(cpu, buffer->cpumask);
4434554f786eSSteven Rostedt 		break;
4435554f786eSSteven Rostedt 	case CPU_DOWN_PREPARE:
4436554f786eSSteven Rostedt 	case CPU_DOWN_PREPARE_FROZEN:
4437554f786eSSteven Rostedt 		/*
4438554f786eSSteven Rostedt 		 * Do nothing.
4439554f786eSSteven Rostedt 		 *  If we were to free the buffer, then the user would
4440554f786eSSteven Rostedt 		 *  lose any trace that was in the buffer.
4441554f786eSSteven Rostedt 		 */
4442554f786eSSteven Rostedt 		break;
4443554f786eSSteven Rostedt 	default:
4444554f786eSSteven Rostedt 		break;
4445554f786eSSteven Rostedt 	}
4446554f786eSSteven Rostedt 	return NOTIFY_OK;
4447554f786eSSteven Rostedt }
4448554f786eSSteven Rostedt #endif
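
/*
 * Illustrative sketch, not part of the original file: the generic
 * notifier_block pattern from <linux/cpu.h> that hooks rb_cpu_notify()
 * above into CPU hotplug. The real registration is done when the ring
 * buffer is allocated and may differ in detail; the helper name
 * example_register_hotplug is an assumption for illustration.
 */
#ifdef CONFIG_HOTPLUG_CPU
static void example_register_hotplug(struct ring_buffer *buffer)
{
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
}
#endif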
4449