xref: /linux-6.15/kernel/trace/ring_buffer.c (revision 464e85eb)
17a8e76a3SSteven Rostedt /*
27a8e76a3SSteven Rostedt  * Generic ring buffer
37a8e76a3SSteven Rostedt  *
47a8e76a3SSteven Rostedt  * Copyright (C) 2008 Steven Rostedt <[email protected]>
57a8e76a3SSteven Rostedt  */
67a8e76a3SSteven Rostedt #include <linux/ring_buffer.h>
714131f2fSIngo Molnar #include <linux/trace_clock.h>
878d904b4SSteven Rostedt #include <linux/ftrace_irq.h>
97a8e76a3SSteven Rostedt #include <linux/spinlock.h>
107a8e76a3SSteven Rostedt #include <linux/debugfs.h>
117a8e76a3SSteven Rostedt #include <linux/uaccess.h>
12a81bd80aSSteven Rostedt #include <linux/hardirq.h>
131744a21dSVegard Nossum #include <linux/kmemcheck.h>
147a8e76a3SSteven Rostedt #include <linux/module.h>
157a8e76a3SSteven Rostedt #include <linux/percpu.h>
167a8e76a3SSteven Rostedt #include <linux/mutex.h>
177a8e76a3SSteven Rostedt #include <linux/init.h>
187a8e76a3SSteven Rostedt #include <linux/hash.h>
197a8e76a3SSteven Rostedt #include <linux/list.h>
20554f786eSSteven Rostedt #include <linux/cpu.h>
217a8e76a3SSteven Rostedt #include <linux/fs.h>
227a8e76a3SSteven Rostedt 
23182e9f5fSSteven Rostedt #include "trace.h"
24182e9f5fSSteven Rostedt 
25033601a3SSteven Rostedt /*
26d1b182a8SSteven Rostedt  * The ring buffer header is special. We must keep it up to date manually.
27d1b182a8SSteven Rostedt  */
28d1b182a8SSteven Rostedt int ring_buffer_print_entry_header(struct trace_seq *s)
29d1b182a8SSteven Rostedt {
30d1b182a8SSteven Rostedt 	int ret;
31d1b182a8SSteven Rostedt 
32334d4169SLai Jiangshan 	ret = trace_seq_printf(s, "# compressed entry header\n");
33334d4169SLai Jiangshan 	ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
34d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
35d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
36d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\n");
37d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
38d1b182a8SSteven Rostedt 			       RINGBUF_TYPE_PADDING);
39d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
40d1b182a8SSteven Rostedt 			       RINGBUF_TYPE_TIME_EXTEND);
41334d4169SLai Jiangshan 	ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
42334d4169SLai Jiangshan 			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
43d1b182a8SSteven Rostedt 
44d1b182a8SSteven Rostedt 	return ret;
45d1b182a8SSteven Rostedt }
46d1b182a8SSteven Rostedt 
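/*
 * For reference, the event layout that the compressed header above
 * describes is roughly the following (the real definition, with its
 * kmemcheck annotations, lives in <linux/ring_buffer.h>):
 */
#if 0
struct ring_buffer_event {
	u32	type_len:5, time_delta:27;
	u32	array[];
};
#endif
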
47d1b182a8SSteven Rostedt /*
485cc98548SSteven Rostedt  * The ring buffer is made up of a list of pages. A separate list of pages is
495cc98548SSteven Rostedt  * allocated for each CPU. A writer may only write to a buffer that is
505cc98548SSteven Rostedt  * associated with the CPU it is currently executing on.  A reader may read
515cc98548SSteven Rostedt  * from any per cpu buffer.
525cc98548SSteven Rostedt  *
535cc98548SSteven Rostedt  * The reader is special. For each per cpu buffer, the reader has its own
545cc98548SSteven Rostedt  * reader page. When a reader has read the entire reader page, this reader
555cc98548SSteven Rostedt  * page is swapped with another page in the ring buffer.
565cc98548SSteven Rostedt  *
575cc98548SSteven Rostedt  * Now, as long as the writer is off the reader page, the reader can do
585cc98548SSteven Rostedt  * whatever it wants with that page. The writer will never write to that page
595cc98548SSteven Rostedt  * again (as long as it is out of the ring buffer).
605cc98548SSteven Rostedt  *
615cc98548SSteven Rostedt  * Here's some silly ASCII art.
625cc98548SSteven Rostedt  *
635cc98548SSteven Rostedt  *   +------+
645cc98548SSteven Rostedt  *   |reader|          RING BUFFER
655cc98548SSteven Rostedt  *   |page  |
665cc98548SSteven Rostedt  *   +------+        +---+   +---+   +---+
675cc98548SSteven Rostedt  *                   |   |-->|   |-->|   |
685cc98548SSteven Rostedt  *                   +---+   +---+   +---+
695cc98548SSteven Rostedt  *                     ^               |
705cc98548SSteven Rostedt  *                     |               |
715cc98548SSteven Rostedt  *                     +---------------+
725cc98548SSteven Rostedt  *
735cc98548SSteven Rostedt  *
745cc98548SSteven Rostedt  *   +------+
755cc98548SSteven Rostedt  *   |reader|          RING BUFFER
765cc98548SSteven Rostedt  *   |page  |------------------v
775cc98548SSteven Rostedt  *   +------+        +---+   +---+   +---+
785cc98548SSteven Rostedt  *                   |   |-->|   |-->|   |
795cc98548SSteven Rostedt  *                   +---+   +---+   +---+
805cc98548SSteven Rostedt  *                     ^               |
815cc98548SSteven Rostedt  *                     |               |
825cc98548SSteven Rostedt  *                     +---------------+
835cc98548SSteven Rostedt  *
845cc98548SSteven Rostedt  *
855cc98548SSteven Rostedt  *   +------+
865cc98548SSteven Rostedt  *   |reader|          RING BUFFER
875cc98548SSteven Rostedt  *   |page  |------------------v
885cc98548SSteven Rostedt  *   +------+        +---+   +---+   +---+
895cc98548SSteven Rostedt  *      ^            |   |-->|   |-->|   |
905cc98548SSteven Rostedt  *      |            +---+   +---+   +---+
915cc98548SSteven Rostedt  *      |                              |
925cc98548SSteven Rostedt  *      |                              |
935cc98548SSteven Rostedt  *      +------------------------------+
945cc98548SSteven Rostedt  *
955cc98548SSteven Rostedt  *
965cc98548SSteven Rostedt  *   +------+
975cc98548SSteven Rostedt  *   |buffer|          RING BUFFER
985cc98548SSteven Rostedt  *   |page  |------------------v
995cc98548SSteven Rostedt  *   +------+        +---+   +---+   +---+
1005cc98548SSteven Rostedt  *      ^            |   |   |   |-->|   |
1015cc98548SSteven Rostedt  *      |   New      +---+   +---+   +---+
1025cc98548SSteven Rostedt  *      |  Reader------^               |
1035cc98548SSteven Rostedt  *      |   page                       |
1045cc98548SSteven Rostedt  *      +------------------------------+
1055cc98548SSteven Rostedt  *
1065cc98548SSteven Rostedt  *
1075cc98548SSteven Rostedt  * After we make this swap, the reader can hand this page off to the splice
1085cc98548SSteven Rostedt  * code and be done with it. It can even allocate a new page if it needs to
1095cc98548SSteven Rostedt  * and swap that into the ring buffer.
1105cc98548SSteven Rostedt  *
1115cc98548SSteven Rostedt  * We will be using cmpxchg soon to make all this lockless.
1125cc98548SSteven Rostedt  *
1135cc98548SSteven Rostedt  */
1145cc98548SSteven Rostedt 
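/*
 * A condensed sketch of the swap described above (hypothetical helper,
 * using the types defined later in this file; it ignores the list-head
 * sentinel, locking and the commit-page checks that the real reader
 * code must handle):
 */
#if 0
static void rb_swap_reader_sketch(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *old_reader = cpu_buffer->reader_page;
	struct buffer_page *old_head   = cpu_buffer->head_page;

	/* splice the (empty) old reader page into the ring where head was */
	list_add(&old_reader->list, &old_head->list);
	list_del_init(&old_head->list);

	/* the old head page is handed over to the reader ... */
	cpu_buffer->reader_page = old_head;

	/* ... and the head moves on to the page after the spliced-in one */
	cpu_buffer->head_page = list_entry(old_reader->list.next,
					   struct buffer_page, list);
}
#endif
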
1155cc98548SSteven Rostedt /*
116033601a3SSteven Rostedt  * A fast way to enable or disable all ring buffers is to
117033601a3SSteven Rostedt  * call tracing_on or tracing_off. Turning off the ring buffers
118033601a3SSteven Rostedt  * prevents all ring buffers from being recorded to.
119033601a3SSteven Rostedt  * Turning this switch on makes it OK to write to the
120033601a3SSteven Rostedt  * ring buffer, provided the ring buffer itself is enabled.
121033601a3SSteven Rostedt  *
122033601a3SSteven Rostedt  * There are three layers that must be on in order to write
123033601a3SSteven Rostedt  * to the ring buffer.
124033601a3SSteven Rostedt  *
125033601a3SSteven Rostedt  * 1) This global flag must be set.
126033601a3SSteven Rostedt  * 2) The ring buffer must be enabled for recording.
127033601a3SSteven Rostedt  * 3) The per cpu buffer must be enabled for recording.
128033601a3SSteven Rostedt  *
129033601a3SSteven Rostedt  * In case of an anomaly, this global flag has a bit set that
130033601a3SSteven Rostedt  * will permanently disable all ring buffers.
131033601a3SSteven Rostedt  */
132033601a3SSteven Rostedt 
133033601a3SSteven Rostedt /*
134033601a3SSteven Rostedt  * Global flag to disable all recording to ring buffers
135033601a3SSteven Rostedt  *  This has two bits: ON, DISABLED
136033601a3SSteven Rostedt  *
137033601a3SSteven Rostedt  *  ON   DISABLED
138033601a3SSteven Rostedt  * ---- ----------
139033601a3SSteven Rostedt  *   0      0        : ring buffers are off
140033601a3SSteven Rostedt  *   1      0        : ring buffers are on
141033601a3SSteven Rostedt  *   X      1        : ring buffers are permanently disabled
142033601a3SSteven Rostedt  */
143033601a3SSteven Rostedt 
144033601a3SSteven Rostedt enum {
145033601a3SSteven Rostedt 	RB_BUFFERS_ON_BIT	= 0,
146033601a3SSteven Rostedt 	RB_BUFFERS_DISABLED_BIT	= 1,
147033601a3SSteven Rostedt };
148033601a3SSteven Rostedt 
149033601a3SSteven Rostedt enum {
150033601a3SSteven Rostedt 	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
151033601a3SSteven Rostedt 	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
152033601a3SSteven Rostedt };
153033601a3SSteven Rostedt 
1545e39841cSHannes Eder static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
155a3583244SSteven Rostedt 
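/*
 * A write is only allowed when all three layers described above are
 * enabled. As a sketch, using the names the reserve path later in this
 * file uses for the buffer and its per-cpu buffer:
 */
#if 0
	if (ring_buffer_flags == RB_BUFFERS_ON &&		/* 1) global */
	    !atomic_read(&buffer->record_disabled) &&		/* 2) buffer */
	    !atomic_read(&cpu_buffer->record_disabled))		/* 3) per cpu */
		/* OK to reserve and write an event */;
#endif
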
156474d32b6SSteven Rostedt #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
157474d32b6SSteven Rostedt 
158a3583244SSteven Rostedt /**
159a3583244SSteven Rostedt  * tracing_on - enable all tracing buffers
160a3583244SSteven Rostedt  *
161a3583244SSteven Rostedt  * This function enables all tracing buffers that may have been
162a3583244SSteven Rostedt  * disabled with tracing_off.
163a3583244SSteven Rostedt  */
164a3583244SSteven Rostedt void tracing_on(void)
165a3583244SSteven Rostedt {
166033601a3SSteven Rostedt 	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
167a3583244SSteven Rostedt }
168c4f50183SRobert Richter EXPORT_SYMBOL_GPL(tracing_on);
169a3583244SSteven Rostedt 
170a3583244SSteven Rostedt /**
171a3583244SSteven Rostedt  * tracing_off - turn off all tracing buffers
172a3583244SSteven Rostedt  *
173a3583244SSteven Rostedt  * This function stops all tracing buffers from recording data.
174a3583244SSteven Rostedt  * It does not disable any overhead the tracers themselves may
175a3583244SSteven Rostedt  * be causing. This function simply causes all recording to
176a3583244SSteven Rostedt  * the ring buffers to fail.
177a3583244SSteven Rostedt  */
178a3583244SSteven Rostedt void tracing_off(void)
179a3583244SSteven Rostedt {
180033601a3SSteven Rostedt 	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
181033601a3SSteven Rostedt }
182c4f50183SRobert Richter EXPORT_SYMBOL_GPL(tracing_off);
183033601a3SSteven Rostedt 
184033601a3SSteven Rostedt /**
185033601a3SSteven Rostedt  * tracing_off_permanent - permanently disable ring buffers
186033601a3SSteven Rostedt  *
187033601a3SSteven Rostedt  * This function, once called, will disable all ring buffers
188c3706f00SWenji Huang  * permanently.
189033601a3SSteven Rostedt  */
190033601a3SSteven Rostedt void tracing_off_permanent(void)
191033601a3SSteven Rostedt {
192033601a3SSteven Rostedt 	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
193a3583244SSteven Rostedt }
194a3583244SSteven Rostedt 
195988ae9d6SSteven Rostedt /**
196988ae9d6SSteven Rostedt  * tracing_is_on - show whether the ring buffers are enabled
197988ae9d6SSteven Rostedt  */
198988ae9d6SSteven Rostedt int tracing_is_on(void)
199988ae9d6SSteven Rostedt {
200988ae9d6SSteven Rostedt 	return ring_buffer_flags == RB_BUFFERS_ON;
201988ae9d6SSteven Rostedt }
202988ae9d6SSteven Rostedt EXPORT_SYMBOL_GPL(tracing_is_on);
203988ae9d6SSteven Rostedt 
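/*
 * Minimal usage sketch of the switch above (hypothetical debug path;
 * turning tracing off preserves whatever the buffers already hold):
 */
#if 0
	if (something_went_wrong) {		/* hypothetical condition */
		tracing_off();			/* freeze all ring buffers */
		WARN_ON(tracing_is_on());
	}
#endif
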
206e3d6bf0aSSteven Rostedt #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
20767d34724SAndrew Morton #define RB_ALIGNMENT		4U
208334d4169SLai Jiangshan #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
209c7b09308SSteven Rostedt #define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
210334d4169SLai Jiangshan 
211334d4169SLai Jiangshan /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
212334d4169SLai Jiangshan #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
2137a8e76a3SSteven Rostedt 
2147a8e76a3SSteven Rostedt enum {
2157a8e76a3SSteven Rostedt 	RB_LEN_TIME_EXTEND = 8,
2167a8e76a3SSteven Rostedt 	RB_LEN_TIME_STAMP = 16,
2177a8e76a3SSteven Rostedt };
2187a8e76a3SSteven Rostedt 
2192d622719STom Zanussi static inline int rb_null_event(struct ring_buffer_event *event)
2202d622719STom Zanussi {
221334d4169SLai Jiangshan 	return event->type_len == RINGBUF_TYPE_PADDING
222334d4169SLai Jiangshan 			&& event->time_delta == 0;
2232d622719STom Zanussi }
2242d622719STom Zanussi 
2252d622719STom Zanussi static inline int rb_discarded_event(struct ring_buffer_event *event)
2262d622719STom Zanussi {
227334d4169SLai Jiangshan 	return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
2282d622719STom Zanussi }
2292d622719STom Zanussi 
2302d622719STom Zanussi static void rb_event_set_padding(struct ring_buffer_event *event)
2312d622719STom Zanussi {
232334d4169SLai Jiangshan 	event->type_len = RINGBUF_TYPE_PADDING;
2332d622719STom Zanussi 	event->time_delta = 0;
2342d622719STom Zanussi }
2352d622719STom Zanussi 
2362d622719STom Zanussi static unsigned
2372d622719STom Zanussi rb_event_data_length(struct ring_buffer_event *event)
2382d622719STom Zanussi {
2392d622719STom Zanussi 	unsigned length;
2402d622719STom Zanussi 
241334d4169SLai Jiangshan 	if (event->type_len)
242334d4169SLai Jiangshan 		length = event->type_len * RB_ALIGNMENT;
2432d622719STom Zanussi 	else
2442d622719STom Zanussi 		length = event->array[0];
2452d622719STom Zanussi 	return length + RB_EVNT_HDR_SIZE;
2462d622719STom Zanussi }
2472d622719STom Zanussi 
2487a8e76a3SSteven Rostedt /* inline for ring buffer fast paths */
24934a148bfSAndrew Morton static unsigned
2507a8e76a3SSteven Rostedt rb_event_length(struct ring_buffer_event *event)
2517a8e76a3SSteven Rostedt {
252334d4169SLai Jiangshan 	switch (event->type_len) {
2537a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
2542d622719STom Zanussi 		if (rb_null_event(event))
2557a8e76a3SSteven Rostedt 			/* undefined */
2567a8e76a3SSteven Rostedt 			return -1;
257334d4169SLai Jiangshan 		return  event->array[0] + RB_EVNT_HDR_SIZE;
2587a8e76a3SSteven Rostedt 
2597a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
2607a8e76a3SSteven Rostedt 		return RB_LEN_TIME_EXTEND;
2617a8e76a3SSteven Rostedt 
2627a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
2637a8e76a3SSteven Rostedt 		return RB_LEN_TIME_STAMP;
2647a8e76a3SSteven Rostedt 
2657a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
2662d622719STom Zanussi 		return rb_event_data_length(event);
2677a8e76a3SSteven Rostedt 	default:
2687a8e76a3SSteven Rostedt 		BUG();
2697a8e76a3SSteven Rostedt 	}
2707a8e76a3SSteven Rostedt 	/* not hit */
2717a8e76a3SSteven Rostedt 	return 0;
2727a8e76a3SSteven Rostedt }
2737a8e76a3SSteven Rostedt 
2747a8e76a3SSteven Rostedt /**
2757a8e76a3SSteven Rostedt  * ring_buffer_event_length - return the length of the event
2767a8e76a3SSteven Rostedt  * @event: the event to get the length of
2777a8e76a3SSteven Rostedt  */
2787a8e76a3SSteven Rostedt unsigned ring_buffer_event_length(struct ring_buffer_event *event)
2797a8e76a3SSteven Rostedt {
280465634adSRobert Richter 	unsigned length = rb_event_length(event);
281334d4169SLai Jiangshan 	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
282465634adSRobert Richter 		return length;
283465634adSRobert Richter 	length -= RB_EVNT_HDR_SIZE;
284465634adSRobert Richter 	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
285465634adSRobert Richter 		length -= sizeof(event->array[0]);
286465634adSRobert Richter 	return length;
2877a8e76a3SSteven Rostedt }
288c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_event_length);
2897a8e76a3SSteven Rostedt 
2907a8e76a3SSteven Rostedt /* inline for ring buffer fast paths */
29134a148bfSAndrew Morton static void *
2927a8e76a3SSteven Rostedt rb_event_data(struct ring_buffer_event *event)
2937a8e76a3SSteven Rostedt {
294334d4169SLai Jiangshan 	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
2957a8e76a3SSteven Rostedt 	/* If length is in len field, then array[0] has the data */
296334d4169SLai Jiangshan 	if (event->type_len)
2977a8e76a3SSteven Rostedt 		return (void *)&event->array[0];
2987a8e76a3SSteven Rostedt 	/* Otherwise length is in array[0] and array[1] has the data */
2997a8e76a3SSteven Rostedt 	return (void *)&event->array[1];
3007a8e76a3SSteven Rostedt }
3017a8e76a3SSteven Rostedt 
3027a8e76a3SSteven Rostedt /**
3037a8e76a3SSteven Rostedt  * ring_buffer_event_data - return the data of the event
3047a8e76a3SSteven Rostedt  * @event: the event to get the data from
3057a8e76a3SSteven Rostedt  */
3067a8e76a3SSteven Rostedt void *ring_buffer_event_data(struct ring_buffer_event *event)
3077a8e76a3SSteven Rostedt {
3087a8e76a3SSteven Rostedt 	return rb_event_data(event);
3097a8e76a3SSteven Rostedt }
310c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_event_data);
3117a8e76a3SSteven Rostedt 
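/*
 * Sketch of how a consumer pairs the two exported helpers above
 * (hypothetical caller; "event" would come from one of the read or
 * peek interfaces later in this file):
 */
#if 0
	unsigned length = ring_buffer_event_length(event);
	void *body = ring_buffer_event_data(event);

	/* length is the payload size only, without the event header */
	memcpy(copy, body, length);
#endif
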
3127a8e76a3SSteven Rostedt #define for_each_buffer_cpu(buffer, cpu)		\
3139e01c1b7SRusty Russell 	for_each_cpu(cpu, buffer->cpumask)
3147a8e76a3SSteven Rostedt 
3157a8e76a3SSteven Rostedt #define TS_SHIFT	27
3167a8e76a3SSteven Rostedt #define TS_MASK		((1ULL << TS_SHIFT) - 1)
3177a8e76a3SSteven Rostedt #define TS_DELTA_TEST	(~TS_MASK)
3187a8e76a3SSteven Rostedt 
319abc9b56dSSteven Rostedt struct buffer_data_page {
3207a8e76a3SSteven Rostedt 	u64		 time_stamp;	/* page time stamp */
321c3706f00SWenji Huang 	local_t		 commit;	/* write committed index */
322abc9b56dSSteven Rostedt 	unsigned char	 data[];	/* data of buffer page */
323abc9b56dSSteven Rostedt };
324abc9b56dSSteven Rostedt 
325abc9b56dSSteven Rostedt struct buffer_page {
326778c55d4SSteven Rostedt 	struct list_head list;		/* list of buffer pages */
327abc9b56dSSteven Rostedt 	local_t		 write;		/* index for next write */
3286f807acdSSteven Rostedt 	unsigned	 read;		/* index for next read */
329778c55d4SSteven Rostedt 	local_t		 entries;	/* entries on this page */
330abc9b56dSSteven Rostedt 	struct buffer_data_page *page;	/* Actual data page */
3317a8e76a3SSteven Rostedt };
3327a8e76a3SSteven Rostedt 
333044fa782SSteven Rostedt static void rb_init_page(struct buffer_data_page *bpage)
334abc9b56dSSteven Rostedt {
335044fa782SSteven Rostedt 	local_set(&bpage->commit, 0);
336abc9b56dSSteven Rostedt }
337abc9b56dSSteven Rostedt 
338474d32b6SSteven Rostedt /**
339474d32b6SSteven Rostedt  * ring_buffer_page_len - the size of data on the page.
340474d32b6SSteven Rostedt  * @page: The page to read
341474d32b6SSteven Rostedt  *
342474d32b6SSteven Rostedt  * Returns the amount of data on the page, including buffer page header.
343474d32b6SSteven Rostedt  */
344ef7a4a16SSteven Rostedt size_t ring_buffer_page_len(void *page)
345ef7a4a16SSteven Rostedt {
346474d32b6SSteven Rostedt 	return local_read(&((struct buffer_data_page *)page)->commit)
347474d32b6SSteven Rostedt 		+ BUF_PAGE_HDR_SIZE;
348ef7a4a16SSteven Rostedt }
349ef7a4a16SSteven Rostedt 
3507a8e76a3SSteven Rostedt /*
351ed56829cSSteven Rostedt  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
352ed56829cSSteven Rostedt  * this issue out.
353ed56829cSSteven Rostedt  */
35434a148bfSAndrew Morton static void free_buffer_page(struct buffer_page *bpage)
355ed56829cSSteven Rostedt {
3566ae2a076SSteven Rostedt 	free_page((unsigned long)bpage->page);
357e4c2ce82SSteven Rostedt 	kfree(bpage);
358ed56829cSSteven Rostedt }
359ed56829cSSteven Rostedt 
360ed56829cSSteven Rostedt /*
3617a8e76a3SSteven Rostedt  * We need to fit the time_stamp delta into 27 bits.
3627a8e76a3SSteven Rostedt  */
3637a8e76a3SSteven Rostedt static inline int test_time_stamp(u64 delta)
3647a8e76a3SSteven Rostedt {
3657a8e76a3SSteven Rostedt 	if (delta & TS_DELTA_TEST)
3667a8e76a3SSteven Rostedt 		return 1;
3677a8e76a3SSteven Rostedt 	return 0;
3687a8e76a3SSteven Rostedt }
3697a8e76a3SSteven Rostedt 
370474d32b6SSteven Rostedt #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
3717a8e76a3SSteven Rostedt 
372be957c44SSteven Rostedt /* Max payload is BUF_PAGE_SIZE - header (8bytes) */
373be957c44SSteven Rostedt #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
374be957c44SSteven Rostedt 
375ea05b57cSSteven Rostedt /* Max number of timestamps that can fit on a page */
376ea05b57cSSteven Rostedt #define RB_TIMESTAMPS_PER_PAGE	(BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
377ea05b57cSSteven Rostedt 
378d1b182a8SSteven Rostedt int ring_buffer_print_page_header(struct trace_seq *s)
379d1b182a8SSteven Rostedt {
380d1b182a8SSteven Rostedt 	struct buffer_data_page field;
381d1b182a8SSteven Rostedt 	int ret;
382d1b182a8SSteven Rostedt 
383d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
384d1b182a8SSteven Rostedt 			       "offset:0;\tsize:%u;\n",
385d1b182a8SSteven Rostedt 			       (unsigned int)sizeof(field.time_stamp));
386d1b182a8SSteven Rostedt 
387d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
388d1b182a8SSteven Rostedt 			       "offset:%u;\tsize:%u;\n",
389d1b182a8SSteven Rostedt 			       (unsigned int)offsetof(typeof(field), commit),
390d1b182a8SSteven Rostedt 			       (unsigned int)sizeof(field.commit));
391d1b182a8SSteven Rostedt 
392d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tfield: char data;\t"
393d1b182a8SSteven Rostedt 			       "offset:%u;\tsize:%u;\n",
394d1b182a8SSteven Rostedt 			       (unsigned int)offsetof(typeof(field), data),
395d1b182a8SSteven Rostedt 			       (unsigned int)BUF_PAGE_SIZE);
396d1b182a8SSteven Rostedt 
397d1b182a8SSteven Rostedt 	return ret;
398d1b182a8SSteven Rostedt }
399d1b182a8SSteven Rostedt 
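/*
 * Example of the text emitted by the function above, assuming a 64-bit
 * kernel with 4096-byte pages (local_t is then 8 bytes and
 * BUF_PAGE_SIZE is 4080):
 *
 *	field: u64 timestamp;	offset:0;	size:8;
 *	field: local_t commit;	offset:8;	size:8;
 *	field: char data;	offset:16;	size:4080;
 */
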
4007a8e76a3SSteven Rostedt /*
4017a8e76a3SSteven Rostedt  * When head_page == tail_page && head == tail, the buffer is empty.
4027a8e76a3SSteven Rostedt  */
4037a8e76a3SSteven Rostedt struct ring_buffer_per_cpu {
4047a8e76a3SSteven Rostedt 	int				cpu;
4057a8e76a3SSteven Rostedt 	struct ring_buffer		*buffer;
406f83c9d0fSSteven Rostedt 	spinlock_t			reader_lock; /* serialize readers */
4073e03fb7fSSteven Rostedt 	raw_spinlock_t			lock;
4087a8e76a3SSteven Rostedt 	struct lock_class_key		lock_key;
4097a8e76a3SSteven Rostedt 	struct list_head		pages;
4106f807acdSSteven Rostedt 	struct buffer_page		*head_page;	/* read from head */
4116f807acdSSteven Rostedt 	struct buffer_page		*tail_page;	/* write to tail */
412c3706f00SWenji Huang 	struct buffer_page		*commit_page;	/* committed pages */
413d769041fSSteven Rostedt 	struct buffer_page		*reader_page;
414f0d2c681SSteven Rostedt 	unsigned long			nmi_dropped;
415f0d2c681SSteven Rostedt 	unsigned long			commit_overrun;
4167a8e76a3SSteven Rostedt 	unsigned long			overrun;
417e4906effSSteven Rostedt 	unsigned long			read;
418e4906effSSteven Rostedt 	local_t				entries;
419fa743953SSteven Rostedt 	local_t				committing;
420fa743953SSteven Rostedt 	local_t				commits;
4217a8e76a3SSteven Rostedt 	u64				write_stamp;
4227a8e76a3SSteven Rostedt 	u64				read_stamp;
4237a8e76a3SSteven Rostedt 	atomic_t			record_disabled;
4247a8e76a3SSteven Rostedt };
4257a8e76a3SSteven Rostedt 
4267a8e76a3SSteven Rostedt struct ring_buffer {
4277a8e76a3SSteven Rostedt 	unsigned			pages;
4287a8e76a3SSteven Rostedt 	unsigned			flags;
4297a8e76a3SSteven Rostedt 	int				cpus;
4307a8e76a3SSteven Rostedt 	atomic_t			record_disabled;
43100f62f61SArnaldo Carvalho de Melo 	cpumask_var_t			cpumask;
4327a8e76a3SSteven Rostedt 
4331f8a6a10SPeter Zijlstra 	struct lock_class_key		*reader_lock_key;
4341f8a6a10SPeter Zijlstra 
4357a8e76a3SSteven Rostedt 	struct mutex			mutex;
4367a8e76a3SSteven Rostedt 
4377a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu	**buffers;
438554f786eSSteven Rostedt 
43959222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
440554f786eSSteven Rostedt 	struct notifier_block		cpu_notify;
441554f786eSSteven Rostedt #endif
44237886f6aSSteven Rostedt 	u64				(*clock)(void);
4437a8e76a3SSteven Rostedt };
4447a8e76a3SSteven Rostedt 
4457a8e76a3SSteven Rostedt struct ring_buffer_iter {
4467a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu	*cpu_buffer;
4477a8e76a3SSteven Rostedt 	unsigned long			head;
4487a8e76a3SSteven Rostedt 	struct buffer_page		*head_page;
4497a8e76a3SSteven Rostedt 	u64				read_stamp;
4507a8e76a3SSteven Rostedt };
4517a8e76a3SSteven Rostedt 
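/*
 * Ownership sketch for the structures above: a ring_buffer holds one
 * ring_buffer_per_cpu per CPU, and each of those owns its list of
 * buffer pages plus a private reader page. A hypothetical walk:
 */
#if 0
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct buffer_page *bpage;
	unsigned long entries = 0;

	list_for_each_entry(bpage, &cpu_buffer->pages, list)
		entries += local_read(&bpage->entries);
#endif
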
452f536aafcSSteven Rostedt /* buffer may be either ring_buffer or ring_buffer_per_cpu */
4537a8e76a3SSteven Rostedt #define RB_WARN_ON(buffer, cond)				\
4543e89c7bbSSteven Rostedt 	({							\
4553e89c7bbSSteven Rostedt 		int _____ret = unlikely(cond);			\
4563e89c7bbSSteven Rostedt 		if (_____ret) {					\
457bf41a158SSteven Rostedt 			atomic_inc(&buffer->record_disabled);	\
458bf41a158SSteven Rostedt 			WARN_ON(1);				\
459bf41a158SSteven Rostedt 		}						\
4603e89c7bbSSteven Rostedt 		_____ret;					\
4613e89c7bbSSteven Rostedt 	})
462f536aafcSSteven Rostedt 
46337886f6aSSteven Rostedt /* Up this if you want to test the TIME_EXTENTS and normalization */
46437886f6aSSteven Rostedt #define DEBUG_SHIFT 0
46537886f6aSSteven Rostedt 
46688eb0125SSteven Rostedt static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
46788eb0125SSteven Rostedt {
46888eb0125SSteven Rostedt 	/* shift to debug/test normalization and TIME_EXTENTS */
46988eb0125SSteven Rostedt 	return buffer->clock() << DEBUG_SHIFT;
47088eb0125SSteven Rostedt }
47188eb0125SSteven Rostedt 
47237886f6aSSteven Rostedt u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
47337886f6aSSteven Rostedt {
47437886f6aSSteven Rostedt 	u64 time;
47537886f6aSSteven Rostedt 
47637886f6aSSteven Rostedt 	preempt_disable_notrace();
47788eb0125SSteven Rostedt 	time = rb_time_stamp(buffer, cpu);
47837886f6aSSteven Rostedt 	preempt_enable_no_resched_notrace();
47937886f6aSSteven Rostedt 
48037886f6aSSteven Rostedt 	return time;
48137886f6aSSteven Rostedt }
48237886f6aSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
48337886f6aSSteven Rostedt 
48437886f6aSSteven Rostedt void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
48537886f6aSSteven Rostedt 				      int cpu, u64 *ts)
48637886f6aSSteven Rostedt {
48737886f6aSSteven Rostedt 	/* Just stupid testing the normalize function and deltas */
48837886f6aSSteven Rostedt 	*ts >>= DEBUG_SHIFT;
48937886f6aSSteven Rostedt }
49037886f6aSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
49137886f6aSSteven Rostedt 
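/*
 * Sketch of how the two exported helpers above are meant to be paired
 * (hypothetical caller; with DEBUG_SHIFT at 0 the normalize step is a
 * no-op):
 */
#if 0
	u64 ts = ring_buffer_time_stamp(buffer, cpu);

	/* ... later, before handing the stamp to user space ... */
	ring_buffer_normalize_time_stamp(buffer, cpu, &ts);
#endif
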
4927a8e76a3SSteven Rostedt /**
4937a8e76a3SSteven Rostedt  * check_pages - integrity check of buffer pages
4947a8e76a3SSteven Rostedt  * @cpu_buffer: CPU buffer with pages to test
4957a8e76a3SSteven Rostedt  *
496c3706f00SWenji Huang  * As a safety measure we check to make sure the data pages have not
4977a8e76a3SSteven Rostedt  * been corrupted.
4987a8e76a3SSteven Rostedt  */
4997a8e76a3SSteven Rostedt static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
5007a8e76a3SSteven Rostedt {
5017a8e76a3SSteven Rostedt 	struct list_head *head = &cpu_buffer->pages;
502044fa782SSteven Rostedt 	struct buffer_page *bpage, *tmp;
5037a8e76a3SSteven Rostedt 
5043e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
5053e89c7bbSSteven Rostedt 		return -1;
5063e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
5073e89c7bbSSteven Rostedt 		return -1;
5087a8e76a3SSteven Rostedt 
509044fa782SSteven Rostedt 	list_for_each_entry_safe(bpage, tmp, head, list) {
5103e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer,
511044fa782SSteven Rostedt 			       bpage->list.next->prev != &bpage->list))
5123e89c7bbSSteven Rostedt 			return -1;
5133e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer,
514044fa782SSteven Rostedt 			       bpage->list.prev->next != &bpage->list))
5153e89c7bbSSteven Rostedt 			return -1;
5167a8e76a3SSteven Rostedt 	}
5177a8e76a3SSteven Rostedt 
5187a8e76a3SSteven Rostedt 	return 0;
5197a8e76a3SSteven Rostedt }
5207a8e76a3SSteven Rostedt 
5217a8e76a3SSteven Rostedt static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
5227a8e76a3SSteven Rostedt 			     unsigned nr_pages)
5237a8e76a3SSteven Rostedt {
5247a8e76a3SSteven Rostedt 	struct list_head *head = &cpu_buffer->pages;
525044fa782SSteven Rostedt 	struct buffer_page *bpage, *tmp;
5267a8e76a3SSteven Rostedt 	unsigned long addr;
5277a8e76a3SSteven Rostedt 	LIST_HEAD(pages);
5287a8e76a3SSteven Rostedt 	unsigned i;
5297a8e76a3SSteven Rostedt 
5307a8e76a3SSteven Rostedt 	for (i = 0; i < nr_pages; i++) {
531044fa782SSteven Rostedt 		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
532aa1e0e3bSSteven Rostedt 				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
533044fa782SSteven Rostedt 		if (!bpage)
534e4c2ce82SSteven Rostedt 			goto free_pages;
535044fa782SSteven Rostedt 		list_add(&bpage->list, &pages);
536e4c2ce82SSteven Rostedt 
5377a8e76a3SSteven Rostedt 		addr = __get_free_page(GFP_KERNEL);
5387a8e76a3SSteven Rostedt 		if (!addr)
5397a8e76a3SSteven Rostedt 			goto free_pages;
540044fa782SSteven Rostedt 		bpage->page = (void *)addr;
541044fa782SSteven Rostedt 		rb_init_page(bpage->page);
5427a8e76a3SSteven Rostedt 	}
5437a8e76a3SSteven Rostedt 
5447a8e76a3SSteven Rostedt 	list_splice(&pages, head);
5457a8e76a3SSteven Rostedt 
5467a8e76a3SSteven Rostedt 	rb_check_pages(cpu_buffer);
5477a8e76a3SSteven Rostedt 
5487a8e76a3SSteven Rostedt 	return 0;
5497a8e76a3SSteven Rostedt 
5507a8e76a3SSteven Rostedt  free_pages:
551044fa782SSteven Rostedt 	list_for_each_entry_safe(bpage, tmp, &pages, list) {
552044fa782SSteven Rostedt 		list_del_init(&bpage->list);
553044fa782SSteven Rostedt 		free_buffer_page(bpage);
5547a8e76a3SSteven Rostedt 	}
5557a8e76a3SSteven Rostedt 	return -ENOMEM;
5567a8e76a3SSteven Rostedt }
5577a8e76a3SSteven Rostedt 
5587a8e76a3SSteven Rostedt static struct ring_buffer_per_cpu *
5597a8e76a3SSteven Rostedt rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
5607a8e76a3SSteven Rostedt {
5617a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
562044fa782SSteven Rostedt 	struct buffer_page *bpage;
563d769041fSSteven Rostedt 	unsigned long addr;
5647a8e76a3SSteven Rostedt 	int ret;
5657a8e76a3SSteven Rostedt 
5667a8e76a3SSteven Rostedt 	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
5677a8e76a3SSteven Rostedt 				  GFP_KERNEL, cpu_to_node(cpu));
5687a8e76a3SSteven Rostedt 	if (!cpu_buffer)
5697a8e76a3SSteven Rostedt 		return NULL;
5707a8e76a3SSteven Rostedt 
5717a8e76a3SSteven Rostedt 	cpu_buffer->cpu = cpu;
5727a8e76a3SSteven Rostedt 	cpu_buffer->buffer = buffer;
573f83c9d0fSSteven Rostedt 	spin_lock_init(&cpu_buffer->reader_lock);
5741f8a6a10SPeter Zijlstra 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
5753e03fb7fSSteven Rostedt 	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
5767a8e76a3SSteven Rostedt 	INIT_LIST_HEAD(&cpu_buffer->pages);
5777a8e76a3SSteven Rostedt 
578044fa782SSteven Rostedt 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
579e4c2ce82SSteven Rostedt 			    GFP_KERNEL, cpu_to_node(cpu));
580044fa782SSteven Rostedt 	if (!bpage)
581e4c2ce82SSteven Rostedt 		goto fail_free_buffer;
582e4c2ce82SSteven Rostedt 
583044fa782SSteven Rostedt 	cpu_buffer->reader_page = bpage;
584d769041fSSteven Rostedt 	addr = __get_free_page(GFP_KERNEL);
585d769041fSSteven Rostedt 	if (!addr)
586e4c2ce82SSteven Rostedt 		goto fail_free_reader;
587044fa782SSteven Rostedt 	bpage->page = (void *)addr;
588044fa782SSteven Rostedt 	rb_init_page(bpage->page);
589e4c2ce82SSteven Rostedt 
590d769041fSSteven Rostedt 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
591d769041fSSteven Rostedt 
5927a8e76a3SSteven Rostedt 	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
5937a8e76a3SSteven Rostedt 	if (ret < 0)
594d769041fSSteven Rostedt 		goto fail_free_reader;
5957a8e76a3SSteven Rostedt 
5967a8e76a3SSteven Rostedt 	cpu_buffer->head_page
5977a8e76a3SSteven Rostedt 		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
598bf41a158SSteven Rostedt 	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
5997a8e76a3SSteven Rostedt 
6007a8e76a3SSteven Rostedt 	return cpu_buffer;
6017a8e76a3SSteven Rostedt 
602d769041fSSteven Rostedt  fail_free_reader:
603d769041fSSteven Rostedt 	free_buffer_page(cpu_buffer->reader_page);
604d769041fSSteven Rostedt 
6057a8e76a3SSteven Rostedt  fail_free_buffer:
6067a8e76a3SSteven Rostedt 	kfree(cpu_buffer);
6077a8e76a3SSteven Rostedt 	return NULL;
6087a8e76a3SSteven Rostedt }
6097a8e76a3SSteven Rostedt 
6107a8e76a3SSteven Rostedt static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
6117a8e76a3SSteven Rostedt {
6127a8e76a3SSteven Rostedt 	struct list_head *head = &cpu_buffer->pages;
613044fa782SSteven Rostedt 	struct buffer_page *bpage, *tmp;
6147a8e76a3SSteven Rostedt 
615d769041fSSteven Rostedt 	free_buffer_page(cpu_buffer->reader_page);
616d769041fSSteven Rostedt 
617044fa782SSteven Rostedt 	list_for_each_entry_safe(bpage, tmp, head, list) {
618044fa782SSteven Rostedt 		list_del_init(&bpage->list);
619044fa782SSteven Rostedt 		free_buffer_page(bpage);
6207a8e76a3SSteven Rostedt 	}
6217a8e76a3SSteven Rostedt 	kfree(cpu_buffer);
6227a8e76a3SSteven Rostedt }
6237a8e76a3SSteven Rostedt 
62459222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
62509c9e84dSFrederic Weisbecker static int rb_cpu_notify(struct notifier_block *self,
626554f786eSSteven Rostedt 			 unsigned long action, void *hcpu);
627554f786eSSteven Rostedt #endif
628554f786eSSteven Rostedt 
6297a8e76a3SSteven Rostedt /**
6307a8e76a3SSteven Rostedt  * ring_buffer_alloc - allocate a new ring_buffer
63168814b58SRobert Richter  * @size: the size in bytes per cpu that is needed.
6327a8e76a3SSteven Rostedt  * @flags: attributes to set for the ring buffer.
6337a8e76a3SSteven Rostedt  *
6347a8e76a3SSteven Rostedt  * Currently the only flag that is available is the RB_FL_OVERWRITE
6357a8e76a3SSteven Rostedt  * flag. This flag means that the buffer will overwrite old data
6367a8e76a3SSteven Rostedt  * when the buffer wraps. If this flag is not set, the buffer will
6377a8e76a3SSteven Rostedt  * drop data when the tail hits the head.
6387a8e76a3SSteven Rostedt  */
6391f8a6a10SPeter Zijlstra struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
6401f8a6a10SPeter Zijlstra 					struct lock_class_key *key)
6417a8e76a3SSteven Rostedt {
6427a8e76a3SSteven Rostedt 	struct ring_buffer *buffer;
6437a8e76a3SSteven Rostedt 	int bsize;
6447a8e76a3SSteven Rostedt 	int cpu;
6457a8e76a3SSteven Rostedt 
6467a8e76a3SSteven Rostedt 	/* keep it in its own cache line */
6477a8e76a3SSteven Rostedt 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
6487a8e76a3SSteven Rostedt 			 GFP_KERNEL);
6497a8e76a3SSteven Rostedt 	if (!buffer)
6507a8e76a3SSteven Rostedt 		return NULL;
6517a8e76a3SSteven Rostedt 
6529e01c1b7SRusty Russell 	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
6539e01c1b7SRusty Russell 		goto fail_free_buffer;
6549e01c1b7SRusty Russell 
6557a8e76a3SSteven Rostedt 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
6567a8e76a3SSteven Rostedt 	buffer->flags = flags;
65737886f6aSSteven Rostedt 	buffer->clock = trace_clock_local;
6581f8a6a10SPeter Zijlstra 	buffer->reader_lock_key = key;
6597a8e76a3SSteven Rostedt 
6607a8e76a3SSteven Rostedt 	/* need at least two pages */
6615f78abeeSSteven Rostedt 	if (buffer->pages < 2)
6625f78abeeSSteven Rostedt 		buffer->pages = 2;
6637a8e76a3SSteven Rostedt 
6643bf832ceSFrederic Weisbecker 	/*
6653bf832ceSFrederic Weisbecker 	 * In the non-hotplug-CPU case, if the ring buffer is allocated
6663bf832ceSFrederic Weisbecker 	 * from an early initcall, it will not be notified of secondary CPUs.
6673bf832ceSFrederic Weisbecker 	 * In that case, we need to allocate for all possible CPUs.
6683bf832ceSFrederic Weisbecker 	 */
6693bf832ceSFrederic Weisbecker #ifdef CONFIG_HOTPLUG_CPU
670554f786eSSteven Rostedt 	get_online_cpus();
671554f786eSSteven Rostedt 	cpumask_copy(buffer->cpumask, cpu_online_mask);
6723bf832ceSFrederic Weisbecker #else
6733bf832ceSFrederic Weisbecker 	cpumask_copy(buffer->cpumask, cpu_possible_mask);
6743bf832ceSFrederic Weisbecker #endif
6757a8e76a3SSteven Rostedt 	buffer->cpus = nr_cpu_ids;
6767a8e76a3SSteven Rostedt 
6777a8e76a3SSteven Rostedt 	bsize = sizeof(void *) * nr_cpu_ids;
6787a8e76a3SSteven Rostedt 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
6797a8e76a3SSteven Rostedt 				  GFP_KERNEL);
6807a8e76a3SSteven Rostedt 	if (!buffer->buffers)
6819e01c1b7SRusty Russell 		goto fail_free_cpumask;
6827a8e76a3SSteven Rostedt 
6837a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
6847a8e76a3SSteven Rostedt 		buffer->buffers[cpu] =
6857a8e76a3SSteven Rostedt 			rb_allocate_cpu_buffer(buffer, cpu);
6867a8e76a3SSteven Rostedt 		if (!buffer->buffers[cpu])
6877a8e76a3SSteven Rostedt 			goto fail_free_buffers;
6887a8e76a3SSteven Rostedt 	}
6897a8e76a3SSteven Rostedt 
69059222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
691554f786eSSteven Rostedt 	buffer->cpu_notify.notifier_call = rb_cpu_notify;
692554f786eSSteven Rostedt 	buffer->cpu_notify.priority = 0;
693554f786eSSteven Rostedt 	register_cpu_notifier(&buffer->cpu_notify);
694554f786eSSteven Rostedt #endif
695554f786eSSteven Rostedt 
696554f786eSSteven Rostedt 	put_online_cpus();
6977a8e76a3SSteven Rostedt 	mutex_init(&buffer->mutex);
6987a8e76a3SSteven Rostedt 
6997a8e76a3SSteven Rostedt 	return buffer;
7007a8e76a3SSteven Rostedt 
7017a8e76a3SSteven Rostedt  fail_free_buffers:
7027a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
7037a8e76a3SSteven Rostedt 		if (buffer->buffers[cpu])
7047a8e76a3SSteven Rostedt 			rb_free_cpu_buffer(buffer->buffers[cpu]);
7057a8e76a3SSteven Rostedt 	}
7067a8e76a3SSteven Rostedt 	kfree(buffer->buffers);
7077a8e76a3SSteven Rostedt 
7089e01c1b7SRusty Russell  fail_free_cpumask:
7099e01c1b7SRusty Russell 	free_cpumask_var(buffer->cpumask);
710554f786eSSteven Rostedt 	put_online_cpus();
7119e01c1b7SRusty Russell 
7127a8e76a3SSteven Rostedt  fail_free_buffer:
7137a8e76a3SSteven Rostedt 	kfree(buffer);
7147a8e76a3SSteven Rostedt 	return NULL;
7157a8e76a3SSteven Rostedt }
7161f8a6a10SPeter Zijlstra EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
7177a8e76a3SSteven Rostedt 
7187a8e76a3SSteven Rostedt /**
7197a8e76a3SSteven Rostedt  * ring_buffer_free - free a ring buffer.
7207a8e76a3SSteven Rostedt  * @buffer: the buffer to free.
7217a8e76a3SSteven Rostedt  */
7227a8e76a3SSteven Rostedt void
7237a8e76a3SSteven Rostedt ring_buffer_free(struct ring_buffer *buffer)
7247a8e76a3SSteven Rostedt {
7257a8e76a3SSteven Rostedt 	int cpu;
7267a8e76a3SSteven Rostedt 
727554f786eSSteven Rostedt 	get_online_cpus();
728554f786eSSteven Rostedt 
72959222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
730554f786eSSteven Rostedt 	unregister_cpu_notifier(&buffer->cpu_notify);
731554f786eSSteven Rostedt #endif
732554f786eSSteven Rostedt 
7337a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu)
7347a8e76a3SSteven Rostedt 		rb_free_cpu_buffer(buffer->buffers[cpu]);
7357a8e76a3SSteven Rostedt 
736554f786eSSteven Rostedt 	put_online_cpus();
737554f786eSSteven Rostedt 
7389e01c1b7SRusty Russell 	free_cpumask_var(buffer->cpumask);
7399e01c1b7SRusty Russell 
7407a8e76a3SSteven Rostedt 	kfree(buffer);
7417a8e76a3SSteven Rostedt }
742c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_free);
7437a8e76a3SSteven Rostedt 
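/*
 * Allocation/teardown sketch (hypothetical caller; ring_buffer_alloc()
 * is the wrapper in <linux/ring_buffer.h> that supplies the lock class
 * key to __ring_buffer_alloc() above):
 */
#if 0
	struct ring_buffer *rb;

	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);	/* ~1MB per cpu */
	if (!rb)
		return -ENOMEM;

	/* ... record and read events ... */

	ring_buffer_free(rb);
#endif
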
74437886f6aSSteven Rostedt void ring_buffer_set_clock(struct ring_buffer *buffer,
74537886f6aSSteven Rostedt 			   u64 (*clock)(void))
74637886f6aSSteven Rostedt {
74737886f6aSSteven Rostedt 	buffer->clock = clock;
74837886f6aSSteven Rostedt }
74937886f6aSSteven Rostedt 
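/*
 * Sketch: the clock defaults to trace_clock_local (set in
 * __ring_buffer_alloc() above); a caller that wants cross-CPU ordered
 * stamps could, for example, switch it:
 */
#if 0
	ring_buffer_set_clock(rb, trace_clock_global);
#endif
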
7507a8e76a3SSteven Rostedt static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
7517a8e76a3SSteven Rostedt 
7527a8e76a3SSteven Rostedt static void
7537a8e76a3SSteven Rostedt rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
7547a8e76a3SSteven Rostedt {
755044fa782SSteven Rostedt 	struct buffer_page *bpage;
7567a8e76a3SSteven Rostedt 	struct list_head *p;
7577a8e76a3SSteven Rostedt 	unsigned i;
7587a8e76a3SSteven Rostedt 
7597a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
7607a8e76a3SSteven Rostedt 	synchronize_sched();
7617a8e76a3SSteven Rostedt 
7627a8e76a3SSteven Rostedt 	for (i = 0; i < nr_pages; i++) {
7633e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
7643e89c7bbSSteven Rostedt 			return;
7657a8e76a3SSteven Rostedt 		p = cpu_buffer->pages.next;
766044fa782SSteven Rostedt 		bpage = list_entry(p, struct buffer_page, list);
767044fa782SSteven Rostedt 		list_del_init(&bpage->list);
768044fa782SSteven Rostedt 		free_buffer_page(bpage);
7697a8e76a3SSteven Rostedt 	}
7703e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
7713e89c7bbSSteven Rostedt 		return;
7727a8e76a3SSteven Rostedt 
7737a8e76a3SSteven Rostedt 	rb_reset_cpu(cpu_buffer);
7747a8e76a3SSteven Rostedt 
7757a8e76a3SSteven Rostedt 	rb_check_pages(cpu_buffer);
7767a8e76a3SSteven Rostedt 
7777a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
7787a8e76a3SSteven Rostedt 
7797a8e76a3SSteven Rostedt }
7807a8e76a3SSteven Rostedt 
7817a8e76a3SSteven Rostedt static void
7827a8e76a3SSteven Rostedt rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
7837a8e76a3SSteven Rostedt 		struct list_head *pages, unsigned nr_pages)
7847a8e76a3SSteven Rostedt {
785044fa782SSteven Rostedt 	struct buffer_page *bpage;
7867a8e76a3SSteven Rostedt 	struct list_head *p;
7877a8e76a3SSteven Rostedt 	unsigned i;
7887a8e76a3SSteven Rostedt 
7897a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
7907a8e76a3SSteven Rostedt 	synchronize_sched();
7917a8e76a3SSteven Rostedt 
7927a8e76a3SSteven Rostedt 	for (i = 0; i < nr_pages; i++) {
7933e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
7943e89c7bbSSteven Rostedt 			return;
7957a8e76a3SSteven Rostedt 		p = pages->next;
796044fa782SSteven Rostedt 		bpage = list_entry(p, struct buffer_page, list);
797044fa782SSteven Rostedt 		list_del_init(&bpage->list);
798044fa782SSteven Rostedt 		list_add_tail(&bpage->list, &cpu_buffer->pages);
7997a8e76a3SSteven Rostedt 	}
8007a8e76a3SSteven Rostedt 	rb_reset_cpu(cpu_buffer);
8017a8e76a3SSteven Rostedt 
8027a8e76a3SSteven Rostedt 	rb_check_pages(cpu_buffer);
8037a8e76a3SSteven Rostedt 
8047a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
8057a8e76a3SSteven Rostedt }
8067a8e76a3SSteven Rostedt 
8077a8e76a3SSteven Rostedt /**
8087a8e76a3SSteven Rostedt  * ring_buffer_resize - resize the ring buffer
8097a8e76a3SSteven Rostedt  * @buffer: the buffer to resize.
8107a8e76a3SSteven Rostedt  * @size: the new size.
8117a8e76a3SSteven Rostedt  *
8127a8e76a3SSteven Rostedt  * The tracer is responsible for making sure that the buffer is
8137a8e76a3SSteven Rostedt  * not being used while changing the size.
8147a8e76a3SSteven Rostedt  * Note: We may be able to change the above requirement by using
8157a8e76a3SSteven Rostedt  *  RCU synchronizations.
8167a8e76a3SSteven Rostedt  *
8177a8e76a3SSteven Rostedt  * Minimum size is 2 * BUF_PAGE_SIZE.
8187a8e76a3SSteven Rostedt  *
8197a8e76a3SSteven Rostedt  * Returns -1 on failure.
8207a8e76a3SSteven Rostedt  */
8217a8e76a3SSteven Rostedt int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
8227a8e76a3SSteven Rostedt {
8237a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
8247a8e76a3SSteven Rostedt 	unsigned nr_pages, rm_pages, new_pages;
825044fa782SSteven Rostedt 	struct buffer_page *bpage, *tmp;
8267a8e76a3SSteven Rostedt 	unsigned long buffer_size;
8277a8e76a3SSteven Rostedt 	unsigned long addr;
8287a8e76a3SSteven Rostedt 	LIST_HEAD(pages);
8297a8e76a3SSteven Rostedt 	int i, cpu;
8307a8e76a3SSteven Rostedt 
831ee51a1deSIngo Molnar 	/*
832ee51a1deSIngo Molnar 	 * Always succeed at resizing a non-existent buffer:
833ee51a1deSIngo Molnar 	 */
834ee51a1deSIngo Molnar 	if (!buffer)
835ee51a1deSIngo Molnar 		return size;
836ee51a1deSIngo Molnar 
8377a8e76a3SSteven Rostedt 	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
8387a8e76a3SSteven Rostedt 	size *= BUF_PAGE_SIZE;
8397a8e76a3SSteven Rostedt 	buffer_size = buffer->pages * BUF_PAGE_SIZE;
8407a8e76a3SSteven Rostedt 
8417a8e76a3SSteven Rostedt 	/* we need a minimum of two pages */
8427a8e76a3SSteven Rostedt 	if (size < BUF_PAGE_SIZE * 2)
8437a8e76a3SSteven Rostedt 		size = BUF_PAGE_SIZE * 2;
8447a8e76a3SSteven Rostedt 
8457a8e76a3SSteven Rostedt 	if (size == buffer_size)
8467a8e76a3SSteven Rostedt 		return size;
8477a8e76a3SSteven Rostedt 
8487a8e76a3SSteven Rostedt 	mutex_lock(&buffer->mutex);
849554f786eSSteven Rostedt 	get_online_cpus();
8507a8e76a3SSteven Rostedt 
8517a8e76a3SSteven Rostedt 	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
8527a8e76a3SSteven Rostedt 
8537a8e76a3SSteven Rostedt 	if (size < buffer_size) {
8547a8e76a3SSteven Rostedt 
8557a8e76a3SSteven Rostedt 		/* easy case, just free pages */
856554f786eSSteven Rostedt 		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
857554f786eSSteven Rostedt 			goto out_fail;
8587a8e76a3SSteven Rostedt 
8597a8e76a3SSteven Rostedt 		rm_pages = buffer->pages - nr_pages;
8607a8e76a3SSteven Rostedt 
8617a8e76a3SSteven Rostedt 		for_each_buffer_cpu(buffer, cpu) {
8627a8e76a3SSteven Rostedt 			cpu_buffer = buffer->buffers[cpu];
8637a8e76a3SSteven Rostedt 			rb_remove_pages(cpu_buffer, rm_pages);
8647a8e76a3SSteven Rostedt 		}
8657a8e76a3SSteven Rostedt 		goto out;
8667a8e76a3SSteven Rostedt 	}
8677a8e76a3SSteven Rostedt 
8687a8e76a3SSteven Rostedt 	/*
8697a8e76a3SSteven Rostedt 	 * This is a bit more difficult. We only want to add pages
8707a8e76a3SSteven Rostedt 	 * when we can allocate enough for all CPUs. We do this
8717a8e76a3SSteven Rostedt 	 * by allocating all the pages and storing them on a local
8727a8e76a3SSteven Rostedt 	 * linked list. If we succeed in our allocation, then we
8737a8e76a3SSteven Rostedt 	 * add these pages to the cpu_buffers. Otherwise we just free
8747a8e76a3SSteven Rostedt 	 * them all and return -ENOMEM;
8757a8e76a3SSteven Rostedt 	 */
876554f786eSSteven Rostedt 	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
877554f786eSSteven Rostedt 		goto out_fail;
878f536aafcSSteven Rostedt 
8797a8e76a3SSteven Rostedt 	new_pages = nr_pages - buffer->pages;
8807a8e76a3SSteven Rostedt 
8817a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
8827a8e76a3SSteven Rostedt 		for (i = 0; i < new_pages; i++) {
883044fa782SSteven Rostedt 			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
884e4c2ce82SSteven Rostedt 						  cache_line_size()),
885e4c2ce82SSteven Rostedt 					    GFP_KERNEL, cpu_to_node(cpu));
886044fa782SSteven Rostedt 			if (!bpage)
887e4c2ce82SSteven Rostedt 				goto free_pages;
888044fa782SSteven Rostedt 			list_add(&bpage->list, &pages);
8897a8e76a3SSteven Rostedt 			addr = __get_free_page(GFP_KERNEL);
8907a8e76a3SSteven Rostedt 			if (!addr)
8917a8e76a3SSteven Rostedt 				goto free_pages;
892044fa782SSteven Rostedt 			bpage->page = (void *)addr;
893044fa782SSteven Rostedt 			rb_init_page(bpage->page);
8947a8e76a3SSteven Rostedt 		}
8957a8e76a3SSteven Rostedt 	}
8967a8e76a3SSteven Rostedt 
8977a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
8987a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
8997a8e76a3SSteven Rostedt 		rb_insert_pages(cpu_buffer, &pages, new_pages);
9007a8e76a3SSteven Rostedt 	}
9017a8e76a3SSteven Rostedt 
902554f786eSSteven Rostedt 	if (RB_WARN_ON(buffer, !list_empty(&pages)))
903554f786eSSteven Rostedt 		goto out_fail;
9047a8e76a3SSteven Rostedt 
9057a8e76a3SSteven Rostedt  out:
9067a8e76a3SSteven Rostedt 	buffer->pages = nr_pages;
907554f786eSSteven Rostedt 	put_online_cpus();
9087a8e76a3SSteven Rostedt 	mutex_unlock(&buffer->mutex);
9097a8e76a3SSteven Rostedt 
9107a8e76a3SSteven Rostedt 	return size;
9117a8e76a3SSteven Rostedt 
9127a8e76a3SSteven Rostedt  free_pages:
913044fa782SSteven Rostedt 	list_for_each_entry_safe(bpage, tmp, &pages, list) {
914044fa782SSteven Rostedt 		list_del_init(&bpage->list);
915044fa782SSteven Rostedt 		free_buffer_page(bpage);
9167a8e76a3SSteven Rostedt 	}
917554f786eSSteven Rostedt 	put_online_cpus();
918641d2f63SVegard Nossum 	mutex_unlock(&buffer->mutex);
9197a8e76a3SSteven Rostedt 	return -ENOMEM;
920554f786eSSteven Rostedt 
921554f786eSSteven Rostedt 	/*
922554f786eSSteven Rostedt 	 * Something went totally wrong, and we are too paranoid
923554f786eSSteven Rostedt 	 * to even clean up the mess.
924554f786eSSteven Rostedt 	 */
925554f786eSSteven Rostedt  out_fail:
926554f786eSSteven Rostedt 	put_online_cpus();
927554f786eSSteven Rostedt 	mutex_unlock(&buffer->mutex);
928554f786eSSteven Rostedt 	return -1;
9297a8e76a3SSteven Rostedt }
930c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_resize);
9317a8e76a3SSteven Rostedt 
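/*
 * Resize sketch (hypothetical caller): the requested size is rounded
 * up to whole pages per CPU; the rounded byte count is returned on
 * success and a negative value on failure.
 */
#if 0
	int ret = ring_buffer_resize(rb, 2 * 1024 * 1024);	/* per cpu */

	if (ret < 0)
		return ret;
#endif
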
9328789a9e7SSteven Rostedt static inline void *
933044fa782SSteven Rostedt __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
9348789a9e7SSteven Rostedt {
935044fa782SSteven Rostedt 	return bpage->data + index;
9368789a9e7SSteven Rostedt }
9378789a9e7SSteven Rostedt 
938044fa782SSteven Rostedt static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
9397a8e76a3SSteven Rostedt {
940044fa782SSteven Rostedt 	return bpage->page->data + index;
9417a8e76a3SSteven Rostedt }
9427a8e76a3SSteven Rostedt 
9437a8e76a3SSteven Rostedt static inline struct ring_buffer_event *
944d769041fSSteven Rostedt rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
9457a8e76a3SSteven Rostedt {
9466f807acdSSteven Rostedt 	return __rb_page_index(cpu_buffer->reader_page,
9476f807acdSSteven Rostedt 			       cpu_buffer->reader_page->read);
9486f807acdSSteven Rostedt }
9496f807acdSSteven Rostedt 
9506f807acdSSteven Rostedt static inline struct ring_buffer_event *
9516f807acdSSteven Rostedt rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
9526f807acdSSteven Rostedt {
9536f807acdSSteven Rostedt 	return __rb_page_index(cpu_buffer->head_page,
9546f807acdSSteven Rostedt 			       cpu_buffer->head_page->read);
9557a8e76a3SSteven Rostedt }
9567a8e76a3SSteven Rostedt 
9577a8e76a3SSteven Rostedt static inline struct ring_buffer_event *
9587a8e76a3SSteven Rostedt rb_iter_head_event(struct ring_buffer_iter *iter)
9597a8e76a3SSteven Rostedt {
9606f807acdSSteven Rostedt 	return __rb_page_index(iter->head_page, iter->head);
9617a8e76a3SSteven Rostedt }
9627a8e76a3SSteven Rostedt 
963bf41a158SSteven Rostedt static inline unsigned rb_page_write(struct buffer_page *bpage)
964bf41a158SSteven Rostedt {
965bf41a158SSteven Rostedt 	return local_read(&bpage->write);
966bf41a158SSteven Rostedt }
967bf41a158SSteven Rostedt 
968bf41a158SSteven Rostedt static inline unsigned rb_page_commit(struct buffer_page *bpage)
969bf41a158SSteven Rostedt {
970abc9b56dSSteven Rostedt 	return local_read(&bpage->page->commit);
971bf41a158SSteven Rostedt }
972bf41a158SSteven Rostedt 
973bf41a158SSteven Rostedt /* Size is determined by what has been committed */
974bf41a158SSteven Rostedt static inline unsigned rb_page_size(struct buffer_page *bpage)
975bf41a158SSteven Rostedt {
976bf41a158SSteven Rostedt 	return rb_page_commit(bpage);
977bf41a158SSteven Rostedt }
978bf41a158SSteven Rostedt 
979bf41a158SSteven Rostedt static inline unsigned
980bf41a158SSteven Rostedt rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
981bf41a158SSteven Rostedt {
982bf41a158SSteven Rostedt 	return rb_page_commit(cpu_buffer->commit_page);
983bf41a158SSteven Rostedt }
984bf41a158SSteven Rostedt 
985bf41a158SSteven Rostedt static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
986bf41a158SSteven Rostedt {
987bf41a158SSteven Rostedt 	return rb_page_commit(cpu_buffer->head_page);
988bf41a158SSteven Rostedt }
989bf41a158SSteven Rostedt 
9907a8e76a3SSteven Rostedt static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
991044fa782SSteven Rostedt 			       struct buffer_page **bpage)
9927a8e76a3SSteven Rostedt {
993044fa782SSteven Rostedt 	struct list_head *p = (*bpage)->list.next;
9947a8e76a3SSteven Rostedt 
9957a8e76a3SSteven Rostedt 	if (p == &cpu_buffer->pages)
9967a8e76a3SSteven Rostedt 		p = p->next;
9977a8e76a3SSteven Rostedt 
998044fa782SSteven Rostedt 	*bpage = list_entry(p, struct buffer_page, list);
9997a8e76a3SSteven Rostedt }
10007a8e76a3SSteven Rostedt 
1001bf41a158SSteven Rostedt static inline unsigned
1002bf41a158SSteven Rostedt rb_event_index(struct ring_buffer_event *event)
10037a8e76a3SSteven Rostedt {
1004bf41a158SSteven Rostedt 	unsigned long addr = (unsigned long)event;
1005bf41a158SSteven Rostedt 
100622f470f8SSteven Rostedt 	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
10077a8e76a3SSteven Rostedt }
10087a8e76a3SSteven Rostedt 
10090f0c85fcSSteven Rostedt static inline int
1010fa743953SSteven Rostedt rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1011bf41a158SSteven Rostedt 		   struct ring_buffer_event *event)
10127a8e76a3SSteven Rostedt {
1013bf41a158SSteven Rostedt 	unsigned long addr = (unsigned long)event;
1014bf41a158SSteven Rostedt 	unsigned long index;
1015bf41a158SSteven Rostedt 
1016bf41a158SSteven Rostedt 	index = rb_event_index(event);
1017bf41a158SSteven Rostedt 	addr &= PAGE_MASK;
1018bf41a158SSteven Rostedt 
1019bf41a158SSteven Rostedt 	return cpu_buffer->commit_page->page == (void *)addr &&
1020bf41a158SSteven Rostedt 		rb_commit_index(cpu_buffer) == index;
1021bf41a158SSteven Rostedt }
1022bf41a158SSteven Rostedt 
102334a148bfSAndrew Morton static void
1024bf41a158SSteven Rostedt rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1025bf41a158SSteven Rostedt {
1026bf41a158SSteven Rostedt 	/*
1027bf41a158SSteven Rostedt 	 * We only race with interrupts and NMIs on this CPU.
1028bf41a158SSteven Rostedt 	 * If we own the commit event, then we can commit
1029bf41a158SSteven Rostedt 	 * all others that interrupted us, since the interruptions
1030bf41a158SSteven Rostedt 	 * are in stack format (they finish before they come
1031bf41a158SSteven Rostedt 	 * back to us). This allows us to do a simple loop to
1032bf41a158SSteven Rostedt 	 * assign the commit to the tail.
1033bf41a158SSteven Rostedt 	 */
1034a8ccf1d6SSteven Rostedt  again:
1035bf41a158SSteven Rostedt 	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1036abc9b56dSSteven Rostedt 		cpu_buffer->commit_page->page->commit =
1037bf41a158SSteven Rostedt 			cpu_buffer->commit_page->write;
1038bf41a158SSteven Rostedt 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1039abc9b56dSSteven Rostedt 		cpu_buffer->write_stamp =
1040abc9b56dSSteven Rostedt 			cpu_buffer->commit_page->page->time_stamp;
1041bf41a158SSteven Rostedt 		/* add barrier to keep gcc from optimizing too much */
1042bf41a158SSteven Rostedt 		barrier();
1043bf41a158SSteven Rostedt 	}
1044bf41a158SSteven Rostedt 	while (rb_commit_index(cpu_buffer) !=
1045bf41a158SSteven Rostedt 	       rb_page_write(cpu_buffer->commit_page)) {
1046abc9b56dSSteven Rostedt 		cpu_buffer->commit_page->page->commit =
1047bf41a158SSteven Rostedt 			cpu_buffer->commit_page->write;
1048bf41a158SSteven Rostedt 		barrier();
1049bf41a158SSteven Rostedt 	}
1050a8ccf1d6SSteven Rostedt 
1051a8ccf1d6SSteven Rostedt 	/* again, keep gcc from optimizing */
1052a8ccf1d6SSteven Rostedt 	barrier();
1053a8ccf1d6SSteven Rostedt 
1054a8ccf1d6SSteven Rostedt 	/*
1055a8ccf1d6SSteven Rostedt 	 * If an interrupt came in just after the first while loop
1056a8ccf1d6SSteven Rostedt 	 * and pushed the tail page forward, we will be left with
1057a8ccf1d6SSteven Rostedt 	 * a dangling commit that will never go forward.
1058a8ccf1d6SSteven Rostedt 	 */
1059a8ccf1d6SSteven Rostedt 	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1060a8ccf1d6SSteven Rostedt 		goto again;
10617a8e76a3SSteven Rostedt }
10627a8e76a3SSteven Rostedt 
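/*
 * Worked example for the commit walk above (hypothetical scenario):
 *
 *	A reserves			<- outermost, owns the commit
 *	  irq: B reserves, writes, commits
 *	    nmi: C reserves, writes, commits
 *	A writes, commits		<- only here do commit_page and the
 *					   commit index advance, covering
 *					   C's and B's events as well
 *
 * The nested writers finish before A resumes (stack ordering), so A can
 * safely walk commit_page forward until it catches up with tail_page.
 */
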
1063d769041fSSteven Rostedt static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
10647a8e76a3SSteven Rostedt {
1065abc9b56dSSteven Rostedt 	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
10666f807acdSSteven Rostedt 	cpu_buffer->reader_page->read = 0;
1067d769041fSSteven Rostedt }
1068d769041fSSteven Rostedt 
106934a148bfSAndrew Morton static void rb_inc_iter(struct ring_buffer_iter *iter)
1070d769041fSSteven Rostedt {
1071d769041fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1072d769041fSSteven Rostedt 
1073d769041fSSteven Rostedt 	/*
1074d769041fSSteven Rostedt 	 * The iterator could be on the reader page (it starts there).
1075d769041fSSteven Rostedt 	 * But the head could have moved, since the reader was
1076d769041fSSteven Rostedt 	 * found. Check for this case and assign the iterator
1077d769041fSSteven Rostedt 	 * to the head page instead of next.
1078d769041fSSteven Rostedt 	 */
1079d769041fSSteven Rostedt 	if (iter->head_page == cpu_buffer->reader_page)
1080d769041fSSteven Rostedt 		iter->head_page = cpu_buffer->head_page;
1081d769041fSSteven Rostedt 	else
1082d769041fSSteven Rostedt 		rb_inc_page(cpu_buffer, &iter->head_page);
1083d769041fSSteven Rostedt 
1084abc9b56dSSteven Rostedt 	iter->read_stamp = iter->head_page->page->time_stamp;
10857a8e76a3SSteven Rostedt 	iter->head = 0;
10867a8e76a3SSteven Rostedt }
10877a8e76a3SSteven Rostedt 
10887a8e76a3SSteven Rostedt /**
10897a8e76a3SSteven Rostedt  * rb_update_event - update event type and data
10907a8e76a3SSteven Rostedt  * @event: the event to update
10917a8e76a3SSteven Rostedt  * @type: the type of event
10927a8e76a3SSteven Rostedt  * @length: the size of the event field in the ring buffer
10937a8e76a3SSteven Rostedt  *
10947a8e76a3SSteven Rostedt  * Update the type and data fields of the event. The length
10957a8e76a3SSteven Rostedt  * is the actual size that is written to the ring buffer,
10967a8e76a3SSteven Rostedt  * and with this, we can determine what to place into the
10977a8e76a3SSteven Rostedt  * data field.
10987a8e76a3SSteven Rostedt  */
109934a148bfSAndrew Morton static void
11007a8e76a3SSteven Rostedt rb_update_event(struct ring_buffer_event *event,
11017a8e76a3SSteven Rostedt 			 unsigned type, unsigned length)
11027a8e76a3SSteven Rostedt {
1103334d4169SLai Jiangshan 	event->type_len = type;
11047a8e76a3SSteven Rostedt 
11057a8e76a3SSteven Rostedt 	switch (type) {
11067a8e76a3SSteven Rostedt 
11077a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
11087a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
11097a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
11107a8e76a3SSteven Rostedt 		break;
11117a8e76a3SSteven Rostedt 
1112334d4169SLai Jiangshan 	case 0:
11137a8e76a3SSteven Rostedt 		length -= RB_EVNT_HDR_SIZE;
1114334d4169SLai Jiangshan 		if (length > RB_MAX_SMALL_DATA)
11157a8e76a3SSteven Rostedt 			event->array[0] = length;
1116334d4169SLai Jiangshan 		else
1117334d4169SLai Jiangshan 			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
11187a8e76a3SSteven Rostedt 		break;
11197a8e76a3SSteven Rostedt 	default:
11207a8e76a3SSteven Rostedt 		BUG();
11217a8e76a3SSteven Rostedt 	}
11227a8e76a3SSteven Rostedt }
11237a8e76a3SSteven Rostedt 
112434a148bfSAndrew Morton static unsigned rb_calculate_event_length(unsigned length)
11257a8e76a3SSteven Rostedt {
11267a8e76a3SSteven Rostedt 	struct ring_buffer_event event; /* Used only for sizeof array */
11277a8e76a3SSteven Rostedt 
11287a8e76a3SSteven Rostedt 	/* a zero length can cause confusion */
11297a8e76a3SSteven Rostedt 	if (!length)
11307a8e76a3SSteven Rostedt 		length = 1;
11317a8e76a3SSteven Rostedt 
11327a8e76a3SSteven Rostedt 	if (length > RB_MAX_SMALL_DATA)
11337a8e76a3SSteven Rostedt 		length += sizeof(event.array[0]);
11347a8e76a3SSteven Rostedt 
11357a8e76a3SSteven Rostedt 	length += RB_EVNT_HDR_SIZE;
11367a8e76a3SSteven Rostedt 	length = ALIGN(length, RB_ALIGNMENT);
11377a8e76a3SSteven Rostedt 
11387a8e76a3SSteven Rostedt 	return length;
11397a8e76a3SSteven Rostedt }
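
/*
 * A worked example of the sizing above (a sketch, assuming the usual
 * 4-byte RB_ALIGNMENT and 4-byte RB_EVNT_HDR_SIZE): a request for 10
 * bytes of data fits under RB_MAX_SMALL_DATA, so no array slot is
 * added; 10 + 4 = 14 is then rounded up to 16 by ALIGN(). Later,
 * rb_update_event() subtracts the header again and encodes the
 * remaining 12 bytes as type_len = DIV_ROUND_UP(12, 4) = 3. Only a
 * payload larger than RB_MAX_SMALL_DATA keeps type_len == 0 and
 * stores its length in array[0] instead.
 */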
11407a8e76a3SSteven Rostedt 
1141c7b09308SSteven Rostedt static inline void
1142c7b09308SSteven Rostedt rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1143c7b09308SSteven Rostedt 	      struct buffer_page *tail_page,
1144c7b09308SSteven Rostedt 	      unsigned long tail, unsigned long length)
1145c7b09308SSteven Rostedt {
1146c7b09308SSteven Rostedt 	struct ring_buffer_event *event;
1147c7b09308SSteven Rostedt 
1148c7b09308SSteven Rostedt 	/*
1149c7b09308SSteven Rostedt 	 * Only the event that crossed the page boundary
1150c7b09308SSteven Rostedt 	 * must fill the old tail_page with padding.
1151c7b09308SSteven Rostedt 	 */
1152c7b09308SSteven Rostedt 	if (tail >= BUF_PAGE_SIZE) {
1153c7b09308SSteven Rostedt 		local_sub(length, &tail_page->write);
1154c7b09308SSteven Rostedt 		return;
1155c7b09308SSteven Rostedt 	}
1156c7b09308SSteven Rostedt 
1157c7b09308SSteven Rostedt 	event = __rb_page_index(tail_page, tail);
1158b0b7065bSLinus Torvalds 	kmemcheck_annotate_bitfield(event, bitfield);
1159c7b09308SSteven Rostedt 
1160c7b09308SSteven Rostedt 	/*
1161c7b09308SSteven Rostedt 	 * If this event is bigger than the minimum size, then
1162c7b09308SSteven Rostedt 	 * we need to be careful that we don't subtract the
1163c7b09308SSteven Rostedt 	 * write counter enough to allow another writer to slip
1164c7b09308SSteven Rostedt 	 * in on this page.
1165c7b09308SSteven Rostedt 	 * We put in a discarded commit instead, to make sure
1166c7b09308SSteven Rostedt 	 * that this space is not used again.
1167c7b09308SSteven Rostedt 	 *
1168c7b09308SSteven Rostedt 	 * If we are less than the minimum size, we don't need to
1169c7b09308SSteven Rostedt 	 * worry about it.
1170c7b09308SSteven Rostedt 	 */
1171c7b09308SSteven Rostedt 	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
1172c7b09308SSteven Rostedt 		/* No room for any events */
1173c7b09308SSteven Rostedt 
1174c7b09308SSteven Rostedt 		/* Mark the rest of the page with padding */
1175c7b09308SSteven Rostedt 		rb_event_set_padding(event);
1176c7b09308SSteven Rostedt 
1177c7b09308SSteven Rostedt 		/* Set the write back to the previous setting */
1178c7b09308SSteven Rostedt 		local_sub(length, &tail_page->write);
1179c7b09308SSteven Rostedt 		return;
1180c7b09308SSteven Rostedt 	}
1181c7b09308SSteven Rostedt 
1182c7b09308SSteven Rostedt 	/* Put in a discarded event */
1183c7b09308SSteven Rostedt 	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
1184c7b09308SSteven Rostedt 	event->type_len = RINGBUF_TYPE_PADDING;
1185c7b09308SSteven Rostedt 	/* time delta must be non zero */
1186c7b09308SSteven Rostedt 	event->time_delta = 1;
1187c7b09308SSteven Rostedt 	/* Account for this as an entry */
1188c7b09308SSteven Rostedt 	local_inc(&tail_page->entries);
1189c7b09308SSteven Rostedt 	local_inc(&cpu_buffer->entries);
1190c7b09308SSteven Rostedt 
1191c7b09308SSteven Rostedt 	/* Set write to end of buffer */
1192c7b09308SSteven Rostedt 	length = (tail + length) - BUF_PAGE_SIZE;
1193c7b09308SSteven Rostedt 	local_sub(length, &tail_page->write);
1194c7b09308SSteven Rostedt }
11956634ff26SSteven Rostedt 
11967a8e76a3SSteven Rostedt static struct ring_buffer_event *
11976634ff26SSteven Rostedt rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
11986634ff26SSteven Rostedt 	     unsigned long length, unsigned long tail,
11996634ff26SSteven Rostedt 	     struct buffer_page *commit_page,
12006634ff26SSteven Rostedt 	     struct buffer_page *tail_page, u64 *ts)
12017a8e76a3SSteven Rostedt {
12026634ff26SSteven Rostedt 	struct buffer_page *next_page, *head_page, *reader_page;
12037a8e76a3SSteven Rostedt 	struct ring_buffer *buffer = cpu_buffer->buffer;
120478d904b4SSteven Rostedt 	bool lock_taken = false;
12056634ff26SSteven Rostedt 	unsigned long flags;
1206aa20ae84SSteven Rostedt 
1207aa20ae84SSteven Rostedt 	next_page = tail_page;
12087a8e76a3SSteven Rostedt 
12093e03fb7fSSteven Rostedt 	local_irq_save(flags);
121078d904b4SSteven Rostedt 	/*
1211a81bd80aSSteven Rostedt 	 * Since the write to the buffer is still not
1212a81bd80aSSteven Rostedt 	 * fully lockless, we must be careful with NMIs.
1213a81bd80aSSteven Rostedt 	 * The locks in the writers are taken when a write
1214a81bd80aSSteven Rostedt 	 * crosses to a new page. The locks protect against
1215a81bd80aSSteven Rostedt 	 * races with the readers (this will soon be fixed
1216a81bd80aSSteven Rostedt 	 * with a lockless solution).
1217a81bd80aSSteven Rostedt 	 *
1218a81bd80aSSteven Rostedt 	 * Because we can not protect against NMIs, and we
1219a81bd80aSSteven Rostedt 	 * want to keep traces reentrant, we need to manage
1220a81bd80aSSteven Rostedt 	 * what happens when we are in an NMI.
1221a81bd80aSSteven Rostedt 	 *
122278d904b4SSteven Rostedt 	 * NMIs can happen after we take the lock.
122378d904b4SSteven Rostedt 	 * If we are in an NMI, only take the lock
122478d904b4SSteven Rostedt 	 * if it is not already taken. Otherwise
122578d904b4SSteven Rostedt 	 * simply fail.
122678d904b4SSteven Rostedt 	 */
1227a81bd80aSSteven Rostedt 	if (unlikely(in_nmi())) {
1228f0d2c681SSteven Rostedt 		if (!__raw_spin_trylock(&cpu_buffer->lock)) {
1229f0d2c681SSteven Rostedt 			cpu_buffer->nmi_dropped++;
123045141d46SSteven Rostedt 			goto out_reset;
1231f0d2c681SSteven Rostedt 		}
123278d904b4SSteven Rostedt 	} else
12333e03fb7fSSteven Rostedt 		__raw_spin_lock(&cpu_buffer->lock);
1234bf41a158SSteven Rostedt 
123578d904b4SSteven Rostedt 	lock_taken = true;
123678d904b4SSteven Rostedt 
12377a8e76a3SSteven Rostedt 	rb_inc_page(cpu_buffer, &next_page);
12387a8e76a3SSteven Rostedt 
1239d769041fSSteven Rostedt 	head_page = cpu_buffer->head_page;
1240d769041fSSteven Rostedt 	reader_page = cpu_buffer->reader_page;
1241d769041fSSteven Rostedt 
1242d769041fSSteven Rostedt 	/* we grabbed the lock before incrementing */
12433e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
124445141d46SSteven Rostedt 		goto out_reset;
1245bf41a158SSteven Rostedt 
1246bf41a158SSteven Rostedt 	/*
1247bf41a158SSteven Rostedt 	 * If for some reason, we had an interrupt storm that made
1248bf41a158SSteven Rostedt 	 * it all the way around the buffer, bail, and warn
1249bf41a158SSteven Rostedt 	 * about it.
1250bf41a158SSteven Rostedt 	 */
125198db8df7SSteven Rostedt 	if (unlikely(next_page == commit_page)) {
1252f0d2c681SSteven Rostedt 		cpu_buffer->commit_overrun++;
125345141d46SSteven Rostedt 		goto out_reset;
1254bf41a158SSteven Rostedt 	}
1255d769041fSSteven Rostedt 
12567a8e76a3SSteven Rostedt 	if (next_page == head_page) {
12576f3b3440SLai Jiangshan 		if (!(buffer->flags & RB_FL_OVERWRITE))
125845141d46SSteven Rostedt 			goto out_reset;
12597a8e76a3SSteven Rostedt 
1260bf41a158SSteven Rostedt 		/* tail_page has not moved yet? */
1261bf41a158SSteven Rostedt 		if (tail_page == cpu_buffer->tail_page) {
12627a8e76a3SSteven Rostedt 			/* count overflows */
1263778c55d4SSteven Rostedt 			cpu_buffer->overrun +=
1264778c55d4SSteven Rostedt 				local_read(&head_page->entries);
12657a8e76a3SSteven Rostedt 
12667a8e76a3SSteven Rostedt 			rb_inc_page(cpu_buffer, &head_page);
12677a8e76a3SSteven Rostedt 			cpu_buffer->head_page = head_page;
1268bf41a158SSteven Rostedt 			cpu_buffer->head_page->read = 0;
1269bf41a158SSteven Rostedt 		}
12707a8e76a3SSteven Rostedt 	}
12717a8e76a3SSteven Rostedt 
1272bf41a158SSteven Rostedt 	/*
1273bf41a158SSteven Rostedt 	 * If the tail page is still the same as what we think
1274bf41a158SSteven Rostedt 	 * it is, then it is up to us to update the tail
1275bf41a158SSteven Rostedt 	 * pointer.
1276bf41a158SSteven Rostedt 	 */
1277bf41a158SSteven Rostedt 	if (tail_page == cpu_buffer->tail_page) {
1278bf41a158SSteven Rostedt 		local_set(&next_page->write, 0);
1279778c55d4SSteven Rostedt 		local_set(&next_page->entries, 0);
1280abc9b56dSSteven Rostedt 		local_set(&next_page->page->commit, 0);
1281bf41a158SSteven Rostedt 		cpu_buffer->tail_page = next_page;
1282bf41a158SSteven Rostedt 
1283bf41a158SSteven Rostedt 		/* reread the time stamp */
128488eb0125SSteven Rostedt 		*ts = rb_time_stamp(buffer, cpu_buffer->cpu);
1285abc9b56dSSteven Rostedt 		cpu_buffer->tail_page->page->time_stamp = *ts;
1286bf41a158SSteven Rostedt 	}
1287bf41a158SSteven Rostedt 
1288c7b09308SSteven Rostedt 	rb_reset_tail(cpu_buffer, tail_page, tail, length);
12897a8e76a3SSteven Rostedt 
12903e03fb7fSSteven Rostedt 	__raw_spin_unlock(&cpu_buffer->lock);
12913e03fb7fSSteven Rostedt 	local_irq_restore(flags);
1292bf41a158SSteven Rostedt 
1293bf41a158SSteven Rostedt 	/* fail and let the caller try again */
1294bf41a158SSteven Rostedt 	return ERR_PTR(-EAGAIN);
1295bf41a158SSteven Rostedt 
129645141d46SSteven Rostedt  out_reset:
12976f3b3440SLai Jiangshan 	/* reset write */
1298c7b09308SSteven Rostedt 	rb_reset_tail(cpu_buffer, tail_page, tail, length);
12996f3b3440SLai Jiangshan 
130078d904b4SSteven Rostedt 	if (likely(lock_taken))
13013e03fb7fSSteven Rostedt 		__raw_spin_unlock(&cpu_buffer->lock);
13023e03fb7fSSteven Rostedt 	local_irq_restore(flags);
1303bf41a158SSteven Rostedt 	return NULL;
13047a8e76a3SSteven Rostedt }
13057a8e76a3SSteven Rostedt 
13066634ff26SSteven Rostedt static struct ring_buffer_event *
13076634ff26SSteven Rostedt __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
13086634ff26SSteven Rostedt 		  unsigned type, unsigned long length, u64 *ts)
13096634ff26SSteven Rostedt {
13106634ff26SSteven Rostedt 	struct buffer_page *tail_page, *commit_page;
13116634ff26SSteven Rostedt 	struct ring_buffer_event *event;
13126634ff26SSteven Rostedt 	unsigned long tail, write;
13136634ff26SSteven Rostedt 
13146634ff26SSteven Rostedt 	commit_page = cpu_buffer->commit_page;
13156634ff26SSteven Rostedt 	/* we just need to protect against interrupts */
13166634ff26SSteven Rostedt 	barrier();
13176634ff26SSteven Rostedt 	tail_page = cpu_buffer->tail_page;
13186634ff26SSteven Rostedt 	write = local_add_return(length, &tail_page->write);
13196634ff26SSteven Rostedt 	tail = write - length;
13206634ff26SSteven Rostedt 
13216634ff26SSteven Rostedt 	/* See if we shot past the end of this buffer page */
13226634ff26SSteven Rostedt 	if (write > BUF_PAGE_SIZE)
13236634ff26SSteven Rostedt 		return rb_move_tail(cpu_buffer, length, tail,
13246634ff26SSteven Rostedt 				    commit_page, tail_page, ts);
13256634ff26SSteven Rostedt 
13266634ff26SSteven Rostedt 	/* We reserved something on the buffer */
13276634ff26SSteven Rostedt 
13286634ff26SSteven Rostedt 	event = __rb_page_index(tail_page, tail);
13291744a21dSVegard Nossum 	kmemcheck_annotate_bitfield(event, bitfield);
13306634ff26SSteven Rostedt 	rb_update_event(event, type, length);
13316634ff26SSteven Rostedt 
13326634ff26SSteven Rostedt 	/* The passed in type is zero for DATA */
13336634ff26SSteven Rostedt 	if (likely(!type))
13346634ff26SSteven Rostedt 		local_inc(&tail_page->entries);
13356634ff26SSteven Rostedt 
13366634ff26SSteven Rostedt 	/*
1337fa743953SSteven Rostedt 	 * If this is the first commit on the page, then update
1338fa743953SSteven Rostedt 	 * its timestamp.
13396634ff26SSteven Rostedt 	 */
1340fa743953SSteven Rostedt 	if (!tail)
1341fa743953SSteven Rostedt 		tail_page->page->time_stamp = *ts;
13426634ff26SSteven Rostedt 
13436634ff26SSteven Rostedt 	return event;
13446634ff26SSteven Rostedt }
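
/*
 * A worked example of the index math above (a sketch with made-up
 * numbers): if tail_page->write was 4000 and an 80-byte event is
 * reserved, local_add_return() makes write = 4080 and tail = 4000.
 * When write exceeds BUF_PAGE_SIZE the event did not fit on this page,
 * so rb_move_tail() is asked to pad out the old page and advance to the
 * next one; otherwise the event body simply starts at offset tail.
 */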
13456634ff26SSteven Rostedt 
1346edd813bfSSteven Rostedt static inline int
1347edd813bfSSteven Rostedt rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
1348edd813bfSSteven Rostedt 		  struct ring_buffer_event *event)
1349edd813bfSSteven Rostedt {
1350edd813bfSSteven Rostedt 	unsigned long new_index, old_index;
1351edd813bfSSteven Rostedt 	struct buffer_page *bpage;
1352edd813bfSSteven Rostedt 	unsigned long index;
1353edd813bfSSteven Rostedt 	unsigned long addr;
1354edd813bfSSteven Rostedt 
1355edd813bfSSteven Rostedt 	new_index = rb_event_index(event);
1356edd813bfSSteven Rostedt 	old_index = new_index + rb_event_length(event);
1357edd813bfSSteven Rostedt 	addr = (unsigned long)event;
1358edd813bfSSteven Rostedt 	addr &= PAGE_MASK;
1359edd813bfSSteven Rostedt 
1360edd813bfSSteven Rostedt 	bpage = cpu_buffer->tail_page;
1361edd813bfSSteven Rostedt 
1362edd813bfSSteven Rostedt 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
1363edd813bfSSteven Rostedt 		/*
1364edd813bfSSteven Rostedt 		 * This is on the tail page. It is possible that
1365edd813bfSSteven Rostedt 		 * a write could come in and move the tail page
1366edd813bfSSteven Rostedt 		 * and write to the next page. That is fine
1367edd813bfSSteven Rostedt 		 * because we just shorten what is on this page.
1368edd813bfSSteven Rostedt 		 */
1369edd813bfSSteven Rostedt 		index = local_cmpxchg(&bpage->write, old_index, new_index);
1370edd813bfSSteven Rostedt 		if (index == old_index)
1371edd813bfSSteven Rostedt 			return 1;
1372edd813bfSSteven Rostedt 	}
1373edd813bfSSteven Rostedt 
1374edd813bfSSteven Rostedt 	/* could not discard */
1375edd813bfSSteven Rostedt 	return 0;
1376edd813bfSSteven Rostedt }
1377edd813bfSSteven Rostedt 
13787a8e76a3SSteven Rostedt static int
13797a8e76a3SSteven Rostedt rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
13807a8e76a3SSteven Rostedt 		  u64 *ts, u64 *delta)
13817a8e76a3SSteven Rostedt {
13827a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
13837a8e76a3SSteven Rostedt 	static int once;
1384bf41a158SSteven Rostedt 	int ret;
13857a8e76a3SSteven Rostedt 
13867a8e76a3SSteven Rostedt 	if (unlikely(*delta > (1ULL << 59) && !once++)) {
13877a8e76a3SSteven Rostedt 		printk(KERN_WARNING "Delta way too big! %llu"
13887a8e76a3SSteven Rostedt 		       " ts=%llu write stamp = %llu\n",
1389e2862c94SStephen Rothwell 		       (unsigned long long)*delta,
1390e2862c94SStephen Rothwell 		       (unsigned long long)*ts,
1391e2862c94SStephen Rothwell 		       (unsigned long long)cpu_buffer->write_stamp);
13927a8e76a3SSteven Rostedt 		WARN_ON(1);
13937a8e76a3SSteven Rostedt 	}
13947a8e76a3SSteven Rostedt 
13957a8e76a3SSteven Rostedt 	/*
13967a8e76a3SSteven Rostedt 	 * The delta is too big; we need to add a
13977a8e76a3SSteven Rostedt 	 * new timestamp.
13987a8e76a3SSteven Rostedt 	 */
13997a8e76a3SSteven Rostedt 	event = __rb_reserve_next(cpu_buffer,
14007a8e76a3SSteven Rostedt 				  RINGBUF_TYPE_TIME_EXTEND,
14017a8e76a3SSteven Rostedt 				  RB_LEN_TIME_EXTEND,
14027a8e76a3SSteven Rostedt 				  ts);
14037a8e76a3SSteven Rostedt 	if (!event)
1404bf41a158SSteven Rostedt 		return -EBUSY;
14057a8e76a3SSteven Rostedt 
1406bf41a158SSteven Rostedt 	if (PTR_ERR(event) == -EAGAIN)
1407bf41a158SSteven Rostedt 		return -EAGAIN;
1408bf41a158SSteven Rostedt 
1409bf41a158SSteven Rostedt 	/* Only a committed time event can update the write stamp */
1410fa743953SSteven Rostedt 	if (rb_event_is_commit(cpu_buffer, event)) {
1411bf41a158SSteven Rostedt 		/*
1412fa743953SSteven Rostedt 		 * If this is the first event on the page, then it was
1413fa743953SSteven Rostedt 		 * already updated with the page's timestamp. Try to discard
1414fa743953SSteven Rostedt 		 * it, and if we can't, just zero it out.
1415bf41a158SSteven Rostedt 		 */
1416bf41a158SSteven Rostedt 		if (rb_event_index(event)) {
14177a8e76a3SSteven Rostedt 			event->time_delta = *delta & TS_MASK;
14187a8e76a3SSteven Rostedt 			event->array[0] = *delta >> TS_SHIFT;
1419bf41a158SSteven Rostedt 		} else {
1420ea05b57cSSteven Rostedt 			/* try to discard, since we do not need this */
1421ea05b57cSSteven Rostedt 			if (!rb_try_to_discard(cpu_buffer, event)) {
1422ea05b57cSSteven Rostedt 				/* nope, just zero it */
1423bf41a158SSteven Rostedt 				event->time_delta = 0;
1424bf41a158SSteven Rostedt 				event->array[0] = 0;
1425bf41a158SSteven Rostedt 			}
1426ea05b57cSSteven Rostedt 		}
14277a8e76a3SSteven Rostedt 		cpu_buffer->write_stamp = *ts;
1428bf41a158SSteven Rostedt 		/* let the caller know this was the commit */
1429bf41a158SSteven Rostedt 		ret = 1;
1430bf41a158SSteven Rostedt 	} else {
1431edd813bfSSteven Rostedt 		/* Try to discard the event */
1432edd813bfSSteven Rostedt 		if (!rb_try_to_discard(cpu_buffer, event)) {
1433bf41a158SSteven Rostedt 			/* Darn, this is just wasted space */
1434bf41a158SSteven Rostedt 			event->time_delta = 0;
1435bf41a158SSteven Rostedt 			event->array[0] = 0;
14367a8e76a3SSteven Rostedt 		}
1437f57a8a19SSteven Rostedt 		ret = 0;
1438edd813bfSSteven Rostedt 	}
14397a8e76a3SSteven Rostedt 
1440bf41a158SSteven Rostedt 	*delta = 0;
1441bf41a158SSteven Rostedt 
1442bf41a158SSteven Rostedt 	return ret;
14437a8e76a3SSteven Rostedt }
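
/*
 * A worked example of the time-extend split above: time_delta is only
 * 27 bits wide, so a delta of, say, 1 << 30 cannot be stored in an
 * ordinary event. A TIME_EXTEND event is reserved instead, carrying the
 * low 27 bits in time_delta (delta & TS_MASK) and the remaining high
 * bits in array[0] (delta >> TS_SHIFT).
 */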
14447a8e76a3SSteven Rostedt 
1445fa743953SSteven Rostedt static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
1446fa743953SSteven Rostedt {
1447fa743953SSteven Rostedt 	local_inc(&cpu_buffer->committing);
1448fa743953SSteven Rostedt 	local_inc(&cpu_buffer->commits);
1449fa743953SSteven Rostedt }
1450fa743953SSteven Rostedt 
1451fa743953SSteven Rostedt static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
1452fa743953SSteven Rostedt {
1453fa743953SSteven Rostedt 	unsigned long commits;
1454fa743953SSteven Rostedt 
1455fa743953SSteven Rostedt 	if (RB_WARN_ON(cpu_buffer,
1456fa743953SSteven Rostedt 		       !local_read(&cpu_buffer->committing)))
1457fa743953SSteven Rostedt 		return;
1458fa743953SSteven Rostedt 
1459fa743953SSteven Rostedt  again:
1460fa743953SSteven Rostedt 	commits = local_read(&cpu_buffer->commits);
1461fa743953SSteven Rostedt 	/* synchronize with interrupts */
1462fa743953SSteven Rostedt 	barrier();
1463fa743953SSteven Rostedt 	if (local_read(&cpu_buffer->committing) == 1)
1464fa743953SSteven Rostedt 		rb_set_commit_to_write(cpu_buffer);
1465fa743953SSteven Rostedt 
1466fa743953SSteven Rostedt 	local_dec(&cpu_buffer->committing);
1467fa743953SSteven Rostedt 
1468fa743953SSteven Rostedt 	/* synchronize with interrupts */
1469fa743953SSteven Rostedt 	barrier();
1470fa743953SSteven Rostedt 
1471fa743953SSteven Rostedt 	/*
1472fa743953SSteven Rostedt 	 * Need to account for interrupts coming in between the
1473fa743953SSteven Rostedt 	 * updating of the commit page and the clearing of the
1474fa743953SSteven Rostedt 	 * committing counter.
1475fa743953SSteven Rostedt 	 */
1476fa743953SSteven Rostedt 	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
1477fa743953SSteven Rostedt 	    !local_read(&cpu_buffer->committing)) {
1478fa743953SSteven Rostedt 		local_inc(&cpu_buffer->committing);
1479fa743953SSteven Rostedt 		goto again;
1480fa743953SSteven Rostedt 	}
1481fa743953SSteven Rostedt }
1482fa743953SSteven Rostedt 
14837a8e76a3SSteven Rostedt static struct ring_buffer_event *
14847a8e76a3SSteven Rostedt rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
14851cd8d735SSteven Rostedt 		      unsigned long length)
14867a8e76a3SSteven Rostedt {
14877a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
1488168b6b1dSSteven Rostedt 	u64 ts, delta = 0;
1489bf41a158SSteven Rostedt 	int commit = 0;
1490818e3dd3SSteven Rostedt 	int nr_loops = 0;
14917a8e76a3SSteven Rostedt 
1492fa743953SSteven Rostedt 	rb_start_commit(cpu_buffer);
1493fa743953SSteven Rostedt 
1494be957c44SSteven Rostedt 	length = rb_calculate_event_length(length);
1495bf41a158SSteven Rostedt  again:
1496818e3dd3SSteven Rostedt 	/*
1497818e3dd3SSteven Rostedt 	 * We allow for interrupts to reenter here and do a trace.
1498818e3dd3SSteven Rostedt 	 * If one does, it will cause this original code to loop
1499818e3dd3SSteven Rostedt 	 * back here. Even with heavy interrupts happening, this
1500818e3dd3SSteven Rostedt 	 * should only happen a few times in a row. If this happens
1501818e3dd3SSteven Rostedt 	 * 1000 times in a row, there must be either an interrupt
1502818e3dd3SSteven Rostedt 	 * storm or we have something buggy.
1503818e3dd3SSteven Rostedt 	 * Bail!
1504818e3dd3SSteven Rostedt 	 */
15053e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1506fa743953SSteven Rostedt 		goto out_fail;
1507818e3dd3SSteven Rostedt 
150888eb0125SSteven Rostedt 	ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
15097a8e76a3SSteven Rostedt 
1510bf41a158SSteven Rostedt 	/*
1511bf41a158SSteven Rostedt 	 * Only the first commit can update the timestamp.
1512bf41a158SSteven Rostedt 	 * Yes there is a race here. If an interrupt comes in
1513bf41a158SSteven Rostedt 	 * just after the conditional and it traces too, then it
1514bf41a158SSteven Rostedt 	 * will also check the deltas. More than one timestamp may
1515bf41a158SSteven Rostedt 	 * also be made. But only the entry that did the actual
1516bf41a158SSteven Rostedt 	 * commit will be something other than zero.
1517bf41a158SSteven Rostedt 	 */
15180f0c85fcSSteven Rostedt 	if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
1519bf41a158SSteven Rostedt 		   rb_page_write(cpu_buffer->tail_page) ==
15200f0c85fcSSteven Rostedt 		   rb_commit_index(cpu_buffer))) {
1521168b6b1dSSteven Rostedt 		u64 diff;
1522bf41a158SSteven Rostedt 
1523168b6b1dSSteven Rostedt 		diff = ts - cpu_buffer->write_stamp;
15247a8e76a3SSteven Rostedt 
1525168b6b1dSSteven Rostedt 		/* make sure this diff is calculated here */
1526bf41a158SSteven Rostedt 		barrier();
15277a8e76a3SSteven Rostedt 
1528bf41a158SSteven Rostedt 		/* Did the write stamp get updated already? */
1529bf41a158SSteven Rostedt 		if (unlikely(ts < cpu_buffer->write_stamp))
1530168b6b1dSSteven Rostedt 			goto get_event;
1531bf41a158SSteven Rostedt 
1532168b6b1dSSteven Rostedt 		delta = diff;
1533168b6b1dSSteven Rostedt 		if (unlikely(test_time_stamp(delta))) {
1534bf41a158SSteven Rostedt 
1535bf41a158SSteven Rostedt 			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1536bf41a158SSteven Rostedt 			if (commit == -EBUSY)
1537fa743953SSteven Rostedt 				goto out_fail;
1538bf41a158SSteven Rostedt 
1539bf41a158SSteven Rostedt 			if (commit == -EAGAIN)
1540bf41a158SSteven Rostedt 				goto again;
1541bf41a158SSteven Rostedt 
1542bf41a158SSteven Rostedt 			RB_WARN_ON(cpu_buffer, commit < 0);
15437a8e76a3SSteven Rostedt 		}
1544168b6b1dSSteven Rostedt 	}
15457a8e76a3SSteven Rostedt 
1546168b6b1dSSteven Rostedt  get_event:
15471cd8d735SSteven Rostedt 	event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
1548168b6b1dSSteven Rostedt 	if (unlikely(PTR_ERR(event) == -EAGAIN))
1549bf41a158SSteven Rostedt 		goto again;
15507a8e76a3SSteven Rostedt 
1551fa743953SSteven Rostedt 	if (!event)
1552fa743953SSteven Rostedt 		goto out_fail;
1553bf41a158SSteven Rostedt 
1554fa743953SSteven Rostedt 	if (!rb_event_is_commit(cpu_buffer, event))
15557a8e76a3SSteven Rostedt 		delta = 0;
15567a8e76a3SSteven Rostedt 
15577a8e76a3SSteven Rostedt 	event->time_delta = delta;
15587a8e76a3SSteven Rostedt 
15597a8e76a3SSteven Rostedt 	return event;
1560fa743953SSteven Rostedt 
1561fa743953SSteven Rostedt  out_fail:
1562fa743953SSteven Rostedt 	rb_end_commit(cpu_buffer);
1563fa743953SSteven Rostedt 	return NULL;
15647a8e76a3SSteven Rostedt }
15657a8e76a3SSteven Rostedt 
15661155de47SPaul Mundt #ifdef CONFIG_TRACING
15671155de47SPaul Mundt 
1568aa18efb2SSteven Rostedt #define TRACE_RECURSIVE_DEPTH 16
1569261842b7SSteven Rostedt 
1570261842b7SSteven Rostedt static int trace_recursive_lock(void)
1571261842b7SSteven Rostedt {
1572aa18efb2SSteven Rostedt 	current->trace_recursion++;
1573261842b7SSteven Rostedt 
1574aa18efb2SSteven Rostedt 	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
1575aa18efb2SSteven Rostedt 		return 0;
1576261842b7SSteven Rostedt 
1577261842b7SSteven Rostedt 	/* Disable all tracing before we do anything else */
1578261842b7SSteven Rostedt 	tracing_off_permanent();
1579e057a5e5SFrederic Weisbecker 
15807d7d2b80SSteven Rostedt 	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
1581e057a5e5SFrederic Weisbecker 		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
1582aa18efb2SSteven Rostedt 		    current->trace_recursion,
1583e057a5e5SFrederic Weisbecker 		    hardirq_count() >> HARDIRQ_SHIFT,
1584e057a5e5SFrederic Weisbecker 		    softirq_count() >> SOFTIRQ_SHIFT,
1585e057a5e5SFrederic Weisbecker 		    in_nmi());
1586e057a5e5SFrederic Weisbecker 
1587261842b7SSteven Rostedt 	WARN_ON_ONCE(1);
1588261842b7SSteven Rostedt 	return -1;
1589261842b7SSteven Rostedt }
1590261842b7SSteven Rostedt 
1591261842b7SSteven Rostedt static void trace_recursive_unlock(void)
1592261842b7SSteven Rostedt {
1593aa18efb2SSteven Rostedt 	WARN_ON_ONCE(!current->trace_recursion);
1594261842b7SSteven Rostedt 
1595aa18efb2SSteven Rostedt 	current->trace_recursion--;
1596261842b7SSteven Rostedt }
1597261842b7SSteven Rostedt 
15981155de47SPaul Mundt #else
15991155de47SPaul Mundt 
16001155de47SPaul Mundt #define trace_recursive_lock()		(0)
16011155de47SPaul Mundt #define trace_recursive_unlock()	do { } while (0)
16021155de47SPaul Mundt 
16031155de47SPaul Mundt #endif
16041155de47SPaul Mundt 
1605bf41a158SSteven Rostedt static DEFINE_PER_CPU(int, rb_need_resched);
1606bf41a158SSteven Rostedt 
16077a8e76a3SSteven Rostedt /**
16087a8e76a3SSteven Rostedt  * ring_buffer_lock_reserve - reserve a part of the buffer
16097a8e76a3SSteven Rostedt  * @buffer: the ring buffer to reserve from
16107a8e76a3SSteven Rostedt  * @length: the length of the data to reserve (excluding event header)
16117a8e76a3SSteven Rostedt  *
16127a8e76a3SSteven Rostedt  * Returns a reserved event on the ring buffer to copy data directly into.
16137a8e76a3SSteven Rostedt  * The user of this interface will need to get the body to write into
16147a8e76a3SSteven Rostedt  * and can use the ring_buffer_event_data() interface.
16157a8e76a3SSteven Rostedt  *
16167a8e76a3SSteven Rostedt  * The length is the length of the data needed, not the event length
16177a8e76a3SSteven Rostedt  * which also includes the event header.
16187a8e76a3SSteven Rostedt  *
16197a8e76a3SSteven Rostedt  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
16207a8e76a3SSteven Rostedt  * If NULL is returned, then nothing has been allocated or locked.
16217a8e76a3SSteven Rostedt  */
16227a8e76a3SSteven Rostedt struct ring_buffer_event *
16230a987751SArnaldo Carvalho de Melo ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
16247a8e76a3SSteven Rostedt {
16257a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
16267a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
1627bf41a158SSteven Rostedt 	int cpu, resched;
16287a8e76a3SSteven Rostedt 
1629033601a3SSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
1630a3583244SSteven Rostedt 		return NULL;
1631a3583244SSteven Rostedt 
16327a8e76a3SSteven Rostedt 	if (atomic_read(&buffer->record_disabled))
16337a8e76a3SSteven Rostedt 		return NULL;
16347a8e76a3SSteven Rostedt 
1635bf41a158SSteven Rostedt 	/* If we are tracing schedule, we don't want to recurse */
1636182e9f5fSSteven Rostedt 	resched = ftrace_preempt_disable();
1637bf41a158SSteven Rostedt 
1638261842b7SSteven Rostedt 	if (trace_recursive_lock())
1639261842b7SSteven Rostedt 		goto out_nocheck;
1640261842b7SSteven Rostedt 
16417a8e76a3SSteven Rostedt 	cpu = raw_smp_processor_id();
16427a8e76a3SSteven Rostedt 
16439e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1644d769041fSSteven Rostedt 		goto out;
16457a8e76a3SSteven Rostedt 
16467a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
16477a8e76a3SSteven Rostedt 
16487a8e76a3SSteven Rostedt 	if (atomic_read(&cpu_buffer->record_disabled))
1649d769041fSSteven Rostedt 		goto out;
16507a8e76a3SSteven Rostedt 
1651be957c44SSteven Rostedt 	if (length > BUF_MAX_DATA_SIZE)
1652bf41a158SSteven Rostedt 		goto out;
16537a8e76a3SSteven Rostedt 
16541cd8d735SSteven Rostedt 	event = rb_reserve_next_event(cpu_buffer, length);
16557a8e76a3SSteven Rostedt 	if (!event)
1656d769041fSSteven Rostedt 		goto out;
16577a8e76a3SSteven Rostedt 
1658bf41a158SSteven Rostedt 	/*
1659bf41a158SSteven Rostedt 	 * Need to store resched state on this cpu.
1660bf41a158SSteven Rostedt 	 * Only the first needs to.
1661bf41a158SSteven Rostedt 	 */
1662bf41a158SSteven Rostedt 
1663bf41a158SSteven Rostedt 	if (preempt_count() == 1)
1664bf41a158SSteven Rostedt 		per_cpu(rb_need_resched, cpu) = resched;
1665bf41a158SSteven Rostedt 
16667a8e76a3SSteven Rostedt 	return event;
16677a8e76a3SSteven Rostedt 
1668d769041fSSteven Rostedt  out:
1669261842b7SSteven Rostedt 	trace_recursive_unlock();
1670261842b7SSteven Rostedt 
1671261842b7SSteven Rostedt  out_nocheck:
1672182e9f5fSSteven Rostedt 	ftrace_preempt_enable(resched);
16737a8e76a3SSteven Rostedt 	return NULL;
16747a8e76a3SSteven Rostedt }
1675c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
16767a8e76a3SSteven Rostedt 
16777a8e76a3SSteven Rostedt static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
16787a8e76a3SSteven Rostedt 		      struct ring_buffer_event *event)
16797a8e76a3SSteven Rostedt {
1680e4906effSSteven Rostedt 	local_inc(&cpu_buffer->entries);
1681bf41a158SSteven Rostedt 
1682fa743953SSteven Rostedt 	/*
1683fa743953SSteven Rostedt 	 * The event first in the commit queue updates the
1684fa743953SSteven Rostedt 	 * time stamp.
1685fa743953SSteven Rostedt 	 */
1686fa743953SSteven Rostedt 	if (rb_event_is_commit(cpu_buffer, event))
1687bf41a158SSteven Rostedt 		cpu_buffer->write_stamp += event->time_delta;
1688bf41a158SSteven Rostedt 
1689fa743953SSteven Rostedt 	rb_end_commit(cpu_buffer);
16907a8e76a3SSteven Rostedt }
16917a8e76a3SSteven Rostedt 
16927a8e76a3SSteven Rostedt /**
16937a8e76a3SSteven Rostedt  * ring_buffer_unlock_commit - commit a reserved event
16947a8e76a3SSteven Rostedt  * @buffer: The buffer to commit to
16957a8e76a3SSteven Rostedt  * @event: The event pointer to commit.
16967a8e76a3SSteven Rostedt  *
16977a8e76a3SSteven Rostedt  * This commits the data to the ring buffer, and releases any locks held.
16987a8e76a3SSteven Rostedt  *
16997a8e76a3SSteven Rostedt  * Must be paired with ring_buffer_lock_reserve.
17007a8e76a3SSteven Rostedt  */
17017a8e76a3SSteven Rostedt int ring_buffer_unlock_commit(struct ring_buffer *buffer,
17020a987751SArnaldo Carvalho de Melo 			      struct ring_buffer_event *event)
17037a8e76a3SSteven Rostedt {
17047a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
17057a8e76a3SSteven Rostedt 	int cpu = raw_smp_processor_id();
17067a8e76a3SSteven Rostedt 
17077a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
17087a8e76a3SSteven Rostedt 
17097a8e76a3SSteven Rostedt 	rb_commit(cpu_buffer, event);
17107a8e76a3SSteven Rostedt 
1711261842b7SSteven Rostedt 	trace_recursive_unlock();
1712261842b7SSteven Rostedt 
1713bf41a158SSteven Rostedt 	/*
1714bf41a158SSteven Rostedt 	 * Only the last preempt count needs to restore preemption.
1715bf41a158SSteven Rostedt 	 */
1716182e9f5fSSteven Rostedt 	if (preempt_count() == 1)
1717182e9f5fSSteven Rostedt 		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1718bf41a158SSteven Rostedt 	else
1719bf41a158SSteven Rostedt 		preempt_enable_no_resched_notrace();
17207a8e76a3SSteven Rostedt 
17217a8e76a3SSteven Rostedt 	return 0;
17227a8e76a3SSteven Rostedt }
1723c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
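
/*
 * A minimal usage sketch of the reserve/commit pair above, assuming a
 * buffer obtained from ring_buffer_alloc(). The struct my_payload and
 * the entry it fills are illustrative only, not part of this file; a
 * failed reservation simply means recording is off or there is no room:
 *
 *	struct my_payload { u64 val; };
 *	struct ring_buffer_event *event;
 *	struct my_payload *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->val = 1;
 *	ring_buffer_unlock_commit(buffer, event);
 */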
17247a8e76a3SSteven Rostedt 
1725f3b9aae1SFrederic Weisbecker static inline void rb_event_discard(struct ring_buffer_event *event)
1726f3b9aae1SFrederic Weisbecker {
1727334d4169SLai Jiangshan 	/* array[0] holds the actual length for the discarded event */
1728334d4169SLai Jiangshan 	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
1729334d4169SLai Jiangshan 	event->type_len = RINGBUF_TYPE_PADDING;
1730f3b9aae1SFrederic Weisbecker 	/* time delta must be non zero */
1731f3b9aae1SFrederic Weisbecker 	if (!event->time_delta)
1732f3b9aae1SFrederic Weisbecker 		event->time_delta = 1;
1733f3b9aae1SFrederic Weisbecker }
1734f3b9aae1SFrederic Weisbecker 
17357a8e76a3SSteven Rostedt /**
1736fa1b47ddSSteven Rostedt  * ring_buffer_event_discard - discard any event in the ring buffer
1737fa1b47ddSSteven Rostedt  * @event: the event to discard
1738fa1b47ddSSteven Rostedt  *
1739fa1b47ddSSteven Rostedt  * Sometimes an event that is in the ring buffer needs to be ignored.
1740fa1b47ddSSteven Rostedt  * This function lets the user discard an event in the ring buffer
1741fa1b47ddSSteven Rostedt  * and then that event will not be read later.
1742fa1b47ddSSteven Rostedt  *
1743fa1b47ddSSteven Rostedt  * Note, it is up to the user to be careful with this, and protect
1744fa1b47ddSSteven Rostedt  * against races. If the user discards an event that has been consumed
1745fa1b47ddSSteven Rostedt  * it is possible that it could corrupt the ring buffer.
1746fa1b47ddSSteven Rostedt  */
1747fa1b47ddSSteven Rostedt void ring_buffer_event_discard(struct ring_buffer_event *event)
1748fa1b47ddSSteven Rostedt {
1749f3b9aae1SFrederic Weisbecker 	rb_event_discard(event);
1750fa1b47ddSSteven Rostedt }
1751fa1b47ddSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
1752fa1b47ddSSteven Rostedt 
1753fa1b47ddSSteven Rostedt /**
1754fa1b47ddSSteven Rostedt  * ring_buffer_commit_discard - discard an event that has not been committed
1755fa1b47ddSSteven Rostedt  * @buffer: the ring buffer
1756fa1b47ddSSteven Rostedt  * @event: non committed event to discard
1757fa1b47ddSSteven Rostedt  *
1758fa1b47ddSSteven Rostedt  * This is similar to ring_buffer_event_discard but must only be
1759fa1b47ddSSteven Rostedt  * performed on an event that has not been committed yet. The difference
1760fa1b47ddSSteven Rostedt  * is that this will also try to free the event from the ring buffer
1761fa1b47ddSSteven Rostedt  * if another event has not been added behind it.
1762fa1b47ddSSteven Rostedt  *
1763fa1b47ddSSteven Rostedt  * If another event has been added behind it, it will set the event
1764fa1b47ddSSteven Rostedt  * up as discarded, and perform the commit.
1765fa1b47ddSSteven Rostedt  *
1766fa1b47ddSSteven Rostedt  * If this function is called, do not call ring_buffer_unlock_commit on
1767fa1b47ddSSteven Rostedt  * the event.
1768fa1b47ddSSteven Rostedt  */
1769fa1b47ddSSteven Rostedt void ring_buffer_discard_commit(struct ring_buffer *buffer,
1770fa1b47ddSSteven Rostedt 				struct ring_buffer_event *event)
1771fa1b47ddSSteven Rostedt {
1772fa1b47ddSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
1773fa1b47ddSSteven Rostedt 	int cpu;
1774fa1b47ddSSteven Rostedt 
1775fa1b47ddSSteven Rostedt 	/* The event is discarded regardless */
1776f3b9aae1SFrederic Weisbecker 	rb_event_discard(event);
1777fa1b47ddSSteven Rostedt 
1778fa743953SSteven Rostedt 	cpu = smp_processor_id();
1779fa743953SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
1780fa743953SSteven Rostedt 
1781fa1b47ddSSteven Rostedt 	/*
1782fa1b47ddSSteven Rostedt 	 * This must only be called if the event has not been
1783fa1b47ddSSteven Rostedt 	 * committed yet. Thus we can assume that preemption
1784fa1b47ddSSteven Rostedt 	 * is still disabled.
1785fa1b47ddSSteven Rostedt 	 */
1786fa743953SSteven Rostedt 	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
1787fa1b47ddSSteven Rostedt 
17880f2541d2SSteven Rostedt 	if (rb_try_to_discard(cpu_buffer, event))
1789fa1b47ddSSteven Rostedt 		goto out;
1790fa1b47ddSSteven Rostedt 
1791fa1b47ddSSteven Rostedt 	/*
1792fa1b47ddSSteven Rostedt 	 * The commit is still visible by the reader, so we
1793fa1b47ddSSteven Rostedt 	 * must increment entries.
1794fa1b47ddSSteven Rostedt 	 */
1795e4906effSSteven Rostedt 	local_inc(&cpu_buffer->entries);
1796fa1b47ddSSteven Rostedt  out:
1797fa743953SSteven Rostedt 	rb_end_commit(cpu_buffer);
1798fa1b47ddSSteven Rostedt 
1799f3b9aae1SFrederic Weisbecker 	trace_recursive_unlock();
1800f3b9aae1SFrederic Weisbecker 
1801fa1b47ddSSteven Rostedt 	/*
1802fa1b47ddSSteven Rostedt 	 * Only the last preempt count needs to restore preemption.
1803fa1b47ddSSteven Rostedt 	 */
1804fa1b47ddSSteven Rostedt 	if (preempt_count() == 1)
1805fa1b47ddSSteven Rostedt 		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1806fa1b47ddSSteven Rostedt 	else
1807fa1b47ddSSteven Rostedt 		preempt_enable_no_resched_notrace();
1808fa1b47ddSSteven Rostedt 
1809fa1b47ddSSteven Rostedt }
1810fa1b47ddSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
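
/*
 * A minimal sketch of discarding a reservation that turns out not to be
 * needed, instead of committing it. fill_entry() is a hypothetical
 * caller helper, not part of this file:
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	if (fill_entry(entry))
 *		ring_buffer_unlock_commit(buffer, event);
 *	else
 *		ring_buffer_discard_commit(buffer, event);
 */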
1811fa1b47ddSSteven Rostedt 
1812fa1b47ddSSteven Rostedt /**
18137a8e76a3SSteven Rostedt  * ring_buffer_write - write data to the buffer without reserving
18147a8e76a3SSteven Rostedt  * @buffer: The ring buffer to write to.
18157a8e76a3SSteven Rostedt  * @length: The length of the data being written (excluding the event header)
18167a8e76a3SSteven Rostedt  * @data: The data to write to the buffer.
18177a8e76a3SSteven Rostedt  *
18187a8e76a3SSteven Rostedt  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
18197a8e76a3SSteven Rostedt  * one function. If you already have the data to write to the buffer, it
18207a8e76a3SSteven Rostedt  * may be easier to simply call this function.
18217a8e76a3SSteven Rostedt  *
18227a8e76a3SSteven Rostedt  * Note, like ring_buffer_lock_reserve, the length is the length of the data
18237a8e76a3SSteven Rostedt  * and not the length of the event which would hold the header.
18247a8e76a3SSteven Rostedt  */
18257a8e76a3SSteven Rostedt int ring_buffer_write(struct ring_buffer *buffer,
18267a8e76a3SSteven Rostedt 			unsigned long length,
18277a8e76a3SSteven Rostedt 			void *data)
18287a8e76a3SSteven Rostedt {
18297a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
18307a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
18317a8e76a3SSteven Rostedt 	void *body;
18327a8e76a3SSteven Rostedt 	int ret = -EBUSY;
1833bf41a158SSteven Rostedt 	int cpu, resched;
18347a8e76a3SSteven Rostedt 
1835033601a3SSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
1836a3583244SSteven Rostedt 		return -EBUSY;
1837a3583244SSteven Rostedt 
18387a8e76a3SSteven Rostedt 	if (atomic_read(&buffer->record_disabled))
18397a8e76a3SSteven Rostedt 		return -EBUSY;
18407a8e76a3SSteven Rostedt 
1841182e9f5fSSteven Rostedt 	resched = ftrace_preempt_disable();
1842bf41a158SSteven Rostedt 
18437a8e76a3SSteven Rostedt 	cpu = raw_smp_processor_id();
18447a8e76a3SSteven Rostedt 
18459e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1846d769041fSSteven Rostedt 		goto out;
18477a8e76a3SSteven Rostedt 
18487a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
18497a8e76a3SSteven Rostedt 
18507a8e76a3SSteven Rostedt 	if (atomic_read(&cpu_buffer->record_disabled))
18517a8e76a3SSteven Rostedt 		goto out;
18527a8e76a3SSteven Rostedt 
1853be957c44SSteven Rostedt 	if (length > BUF_MAX_DATA_SIZE)
1854be957c44SSteven Rostedt 		goto out;
1855be957c44SSteven Rostedt 
1856be957c44SSteven Rostedt 	event = rb_reserve_next_event(cpu_buffer, length);
18577a8e76a3SSteven Rostedt 	if (!event)
18587a8e76a3SSteven Rostedt 		goto out;
18597a8e76a3SSteven Rostedt 
18607a8e76a3SSteven Rostedt 	body = rb_event_data(event);
18617a8e76a3SSteven Rostedt 
18627a8e76a3SSteven Rostedt 	memcpy(body, data, length);
18637a8e76a3SSteven Rostedt 
18647a8e76a3SSteven Rostedt 	rb_commit(cpu_buffer, event);
18657a8e76a3SSteven Rostedt 
18667a8e76a3SSteven Rostedt 	ret = 0;
18677a8e76a3SSteven Rostedt  out:
1868182e9f5fSSteven Rostedt 	ftrace_preempt_enable(resched);
18697a8e76a3SSteven Rostedt 
18707a8e76a3SSteven Rostedt 	return ret;
18717a8e76a3SSteven Rostedt }
1872c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_write);
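
/*
 * A minimal sketch of the one-shot write path above; my_payload is a
 * hypothetical caller-defined struct, and a non-zero return indicates
 * the write was refused:
 *
 *	struct my_payload data = { .val = 1 };
 *
 *	if (ring_buffer_write(buffer, sizeof(data), &data))
 *		printk(KERN_WARNING "ring buffer write failed\n");
 */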
18737a8e76a3SSteven Rostedt 
187434a148bfSAndrew Morton static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1875bf41a158SSteven Rostedt {
1876bf41a158SSteven Rostedt 	struct buffer_page *reader = cpu_buffer->reader_page;
1877bf41a158SSteven Rostedt 	struct buffer_page *head = cpu_buffer->head_page;
1878bf41a158SSteven Rostedt 	struct buffer_page *commit = cpu_buffer->commit_page;
1879bf41a158SSteven Rostedt 
1880bf41a158SSteven Rostedt 	return reader->read == rb_page_commit(reader) &&
1881bf41a158SSteven Rostedt 		(commit == reader ||
1882bf41a158SSteven Rostedt 		 (commit == head &&
1883bf41a158SSteven Rostedt 		  head->read == rb_page_commit(commit)));
1884bf41a158SSteven Rostedt }
1885bf41a158SSteven Rostedt 
18867a8e76a3SSteven Rostedt /**
18877a8e76a3SSteven Rostedt  * ring_buffer_record_disable - stop all writes into the buffer
18887a8e76a3SSteven Rostedt  * @buffer: The ring buffer to stop writes to.
18897a8e76a3SSteven Rostedt  *
18907a8e76a3SSteven Rostedt  * This prevents all writes to the buffer. Any attempt to write
18917a8e76a3SSteven Rostedt  * to the buffer after this will fail and return NULL.
18927a8e76a3SSteven Rostedt  *
18937a8e76a3SSteven Rostedt  * The caller should call synchronize_sched() after this.
18947a8e76a3SSteven Rostedt  */
18957a8e76a3SSteven Rostedt void ring_buffer_record_disable(struct ring_buffer *buffer)
18967a8e76a3SSteven Rostedt {
18977a8e76a3SSteven Rostedt 	atomic_inc(&buffer->record_disabled);
18987a8e76a3SSteven Rostedt }
1899c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
19007a8e76a3SSteven Rostedt 
19017a8e76a3SSteven Rostedt /**
19027a8e76a3SSteven Rostedt  * ring_buffer_record_enable - enable writes to the buffer
19037a8e76a3SSteven Rostedt  * @buffer: The ring buffer to enable writes
19047a8e76a3SSteven Rostedt  *
19057a8e76a3SSteven Rostedt  * Note, multiple disables will need the same number of enables
19067a8e76a3SSteven Rostedt  * to truly enable the writing (much like preempt_disable).
19077a8e76a3SSteven Rostedt  */
19087a8e76a3SSteven Rostedt void ring_buffer_record_enable(struct ring_buffer *buffer)
19097a8e76a3SSteven Rostedt {
19107a8e76a3SSteven Rostedt 	atomic_dec(&buffer->record_disabled);
19117a8e76a3SSteven Rostedt }
1912c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
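
/*
 * A minimal sketch of pairing the two calls above around a read of the
 * buffer, as the comments suggest; synchronize_sched() lets writers
 * already in flight finish before the buffer is inspected (illustrative
 * only):
 *
 *	unsigned long entries;
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();
 *	entries = ring_buffer_entries(buffer);
 *	ring_buffer_record_enable(buffer);
 */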
19137a8e76a3SSteven Rostedt 
19147a8e76a3SSteven Rostedt /**
19157a8e76a3SSteven Rostedt  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
19167a8e76a3SSteven Rostedt  * @buffer: The ring buffer to stop writes to.
19177a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to stop
19187a8e76a3SSteven Rostedt  *
19197a8e76a3SSteven Rostedt  * This prevents all writes to the buffer. Any attempt to write
19207a8e76a3SSteven Rostedt  * to the buffer after this will fail and return NULL.
19217a8e76a3SSteven Rostedt  *
19227a8e76a3SSteven Rostedt  * The caller should call synchronize_sched() after this.
19237a8e76a3SSteven Rostedt  */
19247a8e76a3SSteven Rostedt void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
19257a8e76a3SSteven Rostedt {
19267a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
19277a8e76a3SSteven Rostedt 
19289e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
19298aabee57SSteven Rostedt 		return;
19307a8e76a3SSteven Rostedt 
19317a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
19327a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
19337a8e76a3SSteven Rostedt }
1934c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
19357a8e76a3SSteven Rostedt 
19367a8e76a3SSteven Rostedt /**
19377a8e76a3SSteven Rostedt  * ring_buffer_record_enable_cpu - enable writes to the buffer
19387a8e76a3SSteven Rostedt  * @buffer: The ring buffer to enable writes
19397a8e76a3SSteven Rostedt  * @cpu: The CPU to enable.
19407a8e76a3SSteven Rostedt  *
19417a8e76a3SSteven Rostedt  * Note, multiple disables will need the same number of enables
19427a8e76a3SSteven Rostedt  * to truly enable the writing (much like preempt_disable).
19437a8e76a3SSteven Rostedt  */
19447a8e76a3SSteven Rostedt void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
19457a8e76a3SSteven Rostedt {
19467a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
19477a8e76a3SSteven Rostedt 
19489e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
19498aabee57SSteven Rostedt 		return;
19507a8e76a3SSteven Rostedt 
19517a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
19527a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
19537a8e76a3SSteven Rostedt }
1954c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
19557a8e76a3SSteven Rostedt 
19567a8e76a3SSteven Rostedt /**
19577a8e76a3SSteven Rostedt  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
19587a8e76a3SSteven Rostedt  * @buffer: The ring buffer
19597a8e76a3SSteven Rostedt  * @cpu: The per CPU buffer to get the entries from.
19607a8e76a3SSteven Rostedt  */
19617a8e76a3SSteven Rostedt unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
19627a8e76a3SSteven Rostedt {
19637a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
19648aabee57SSteven Rostedt 	unsigned long ret;
19657a8e76a3SSteven Rostedt 
19669e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
19678aabee57SSteven Rostedt 		return 0;
19687a8e76a3SSteven Rostedt 
19697a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
1970e4906effSSteven Rostedt 	ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
1971e4906effSSteven Rostedt 		- cpu_buffer->read;
1972554f786eSSteven Rostedt 
1973554f786eSSteven Rostedt 	return ret;
19747a8e76a3SSteven Rostedt }
1975c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
19767a8e76a3SSteven Rostedt 
19777a8e76a3SSteven Rostedt /**
19787a8e76a3SSteven Rostedt  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
19797a8e76a3SSteven Rostedt  * @buffer: The ring buffer
19807a8e76a3SSteven Rostedt  * @cpu: The per CPU buffer to get the number of overruns from
19817a8e76a3SSteven Rostedt  */
19827a8e76a3SSteven Rostedt unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
19837a8e76a3SSteven Rostedt {
19847a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
19858aabee57SSteven Rostedt 	unsigned long ret;
19867a8e76a3SSteven Rostedt 
19879e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
19888aabee57SSteven Rostedt 		return 0;
19897a8e76a3SSteven Rostedt 
19907a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
1991554f786eSSteven Rostedt 	ret = cpu_buffer->overrun;
1992554f786eSSteven Rostedt 
1993554f786eSSteven Rostedt 	return ret;
19947a8e76a3SSteven Rostedt }
1995c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
19967a8e76a3SSteven Rostedt 
19977a8e76a3SSteven Rostedt /**
1998f0d2c681SSteven Rostedt  * ring_buffer_nmi_dropped_cpu - get the number of NMI writes that were dropped
1999f0d2c681SSteven Rostedt  * @buffer: The ring buffer
2000f0d2c681SSteven Rostedt  * @cpu: The per CPU buffer to get the number of dropped NMI writes from
2001f0d2c681SSteven Rostedt  */
2002f0d2c681SSteven Rostedt unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
2003f0d2c681SSteven Rostedt {
2004f0d2c681SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
2005f0d2c681SSteven Rostedt 	unsigned long ret;
2006f0d2c681SSteven Rostedt 
2007f0d2c681SSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2008f0d2c681SSteven Rostedt 		return 0;
2009f0d2c681SSteven Rostedt 
2010f0d2c681SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
2011f0d2c681SSteven Rostedt 	ret = cpu_buffer->nmi_dropped;
2012f0d2c681SSteven Rostedt 
2013f0d2c681SSteven Rostedt 	return ret;
2014f0d2c681SSteven Rostedt }
2015f0d2c681SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);
2016f0d2c681SSteven Rostedt 
2017f0d2c681SSteven Rostedt /**
2018f0d2c681SSteven Rostedt  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
2019f0d2c681SSteven Rostedt  * @buffer: The ring buffer
2020f0d2c681SSteven Rostedt  * @cpu: The per CPU buffer to get the number of commit overruns from
2021f0d2c681SSteven Rostedt  */
2022f0d2c681SSteven Rostedt unsigned long
2023f0d2c681SSteven Rostedt ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
2024f0d2c681SSteven Rostedt {
2025f0d2c681SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
2026f0d2c681SSteven Rostedt 	unsigned long ret;
2027f0d2c681SSteven Rostedt 
2028f0d2c681SSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2029f0d2c681SSteven Rostedt 		return 0;
2030f0d2c681SSteven Rostedt 
2031f0d2c681SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
2032f0d2c681SSteven Rostedt 	ret = cpu_buffer->commit_overrun;
2033f0d2c681SSteven Rostedt 
2034f0d2c681SSteven Rostedt 	return ret;
2035f0d2c681SSteven Rostedt }
2036f0d2c681SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
2037f0d2c681SSteven Rostedt 
2038f0d2c681SSteven Rostedt /**
20397a8e76a3SSteven Rostedt  * ring_buffer_entries - get the number of entries in a buffer
20407a8e76a3SSteven Rostedt  * @buffer: The ring buffer
20417a8e76a3SSteven Rostedt  *
20427a8e76a3SSteven Rostedt  * Returns the total number of entries in the ring buffer
20437a8e76a3SSteven Rostedt  * (all CPU entries)
20447a8e76a3SSteven Rostedt  */
20457a8e76a3SSteven Rostedt unsigned long ring_buffer_entries(struct ring_buffer *buffer)
20467a8e76a3SSteven Rostedt {
20477a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
20487a8e76a3SSteven Rostedt 	unsigned long entries = 0;
20497a8e76a3SSteven Rostedt 	int cpu;
20507a8e76a3SSteven Rostedt 
20517a8e76a3SSteven Rostedt 	/* if you care about this being correct, lock the buffer */
20527a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
20537a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
2054e4906effSSteven Rostedt 		entries += (local_read(&cpu_buffer->entries) -
2055e4906effSSteven Rostedt 			    cpu_buffer->overrun) - cpu_buffer->read;
20567a8e76a3SSteven Rostedt 	}
20577a8e76a3SSteven Rostedt 
20587a8e76a3SSteven Rostedt 	return entries;
20597a8e76a3SSteven Rostedt }
2060c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries);
20617a8e76a3SSteven Rostedt 
20627a8e76a3SSteven Rostedt /**
20637a8e76a3SSteven Rostedt  * ring_buffer_overruns - get the number of overruns in the buffer
20647a8e76a3SSteven Rostedt  * @buffer: The ring buffer
20657a8e76a3SSteven Rostedt  *
20667a8e76a3SSteven Rostedt  * Returns the total number of overruns in the ring buffer
20677a8e76a3SSteven Rostedt  * (all CPU entries)
20687a8e76a3SSteven Rostedt  */
20697a8e76a3SSteven Rostedt unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
20707a8e76a3SSteven Rostedt {
20717a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
20727a8e76a3SSteven Rostedt 	unsigned long overruns = 0;
20737a8e76a3SSteven Rostedt 	int cpu;
20747a8e76a3SSteven Rostedt 
20757a8e76a3SSteven Rostedt 	/* if you care about this being correct, lock the buffer */
20767a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
20777a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
20787a8e76a3SSteven Rostedt 		overruns += cpu_buffer->overrun;
20797a8e76a3SSteven Rostedt 	}
20807a8e76a3SSteven Rostedt 
20817a8e76a3SSteven Rostedt 	return overruns;
20827a8e76a3SSteven Rostedt }
2083c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overruns);
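
/*
 * A minimal sketch of collecting the per-cpu statistics above from
 * outside this file, where the private for_each_buffer_cpu() iterator
 * is not available (illustrative only):
 *
 *	unsigned long entries = 0, overruns = 0;
 *	int cpu;
 *
 *	for_each_online_cpu(cpu) {
 *		entries  += ring_buffer_entries_cpu(buffer, cpu);
 *		overruns += ring_buffer_overrun_cpu(buffer, cpu);
 *	}
 */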
20847a8e76a3SSteven Rostedt 
2085642edba5SSteven Rostedt static void rb_iter_reset(struct ring_buffer_iter *iter)
20867a8e76a3SSteven Rostedt {
20877a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
20887a8e76a3SSteven Rostedt 
2089d769041fSSteven Rostedt 	/* Iterator usage is expected to have record disabled */
2090d769041fSSteven Rostedt 	if (list_empty(&cpu_buffer->reader_page->list)) {
20917a8e76a3SSteven Rostedt 		iter->head_page = cpu_buffer->head_page;
20926f807acdSSteven Rostedt 		iter->head = cpu_buffer->head_page->read;
2093d769041fSSteven Rostedt 	} else {
2094d769041fSSteven Rostedt 		iter->head_page = cpu_buffer->reader_page;
20956f807acdSSteven Rostedt 		iter->head = cpu_buffer->reader_page->read;
2096d769041fSSteven Rostedt 	}
2097d769041fSSteven Rostedt 	if (iter->head)
2098d769041fSSteven Rostedt 		iter->read_stamp = cpu_buffer->read_stamp;
2099d769041fSSteven Rostedt 	else
2100abc9b56dSSteven Rostedt 		iter->read_stamp = iter->head_page->page->time_stamp;
2101642edba5SSteven Rostedt }
2102f83c9d0fSSteven Rostedt 
2103642edba5SSteven Rostedt /**
2104642edba5SSteven Rostedt  * ring_buffer_iter_reset - reset an iterator
2105642edba5SSteven Rostedt  * @iter: The iterator to reset
2106642edba5SSteven Rostedt  *
2107642edba5SSteven Rostedt  * Resets the iterator, so that it will start from the beginning
2108642edba5SSteven Rostedt  * again.
2109642edba5SSteven Rostedt  */
2110642edba5SSteven Rostedt void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2111642edba5SSteven Rostedt {
2112554f786eSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
2113642edba5SSteven Rostedt 	unsigned long flags;
2114642edba5SSteven Rostedt 
2115554f786eSSteven Rostedt 	if (!iter)
2116554f786eSSteven Rostedt 		return;
2117554f786eSSteven Rostedt 
2118554f786eSSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
2119554f786eSSteven Rostedt 
2120642edba5SSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2121642edba5SSteven Rostedt 	rb_iter_reset(iter);
2122f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
21237a8e76a3SSteven Rostedt }
2124c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
21257a8e76a3SSteven Rostedt 
21267a8e76a3SSteven Rostedt /**
21277a8e76a3SSteven Rostedt  * ring_buffer_iter_empty - check if an iterator has no more to read
21287a8e76a3SSteven Rostedt  * @iter: The iterator to check
21297a8e76a3SSteven Rostedt  */
21307a8e76a3SSteven Rostedt int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
21317a8e76a3SSteven Rostedt {
21327a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
21337a8e76a3SSteven Rostedt 
21347a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
21357a8e76a3SSteven Rostedt 
2136bf41a158SSteven Rostedt 	return iter->head_page == cpu_buffer->commit_page &&
2137bf41a158SSteven Rostedt 		iter->head == rb_commit_index(cpu_buffer);
21387a8e76a3SSteven Rostedt }
2139c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
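
/*
 * A minimal iterator sketch, assuming ring_buffer_read_start(),
 * ring_buffer_read() and ring_buffer_read_finish() from elsewhere in
 * this file; ring_buffer_iter_reset() may be called at any point to
 * start the walk over from the beginning (illustrative only):
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while (!ring_buffer_iter_empty(iter))
 *		event = ring_buffer_read(iter, &ts);
 *	ring_buffer_read_finish(iter);
 */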
21407a8e76a3SSteven Rostedt 
21417a8e76a3SSteven Rostedt static void
21427a8e76a3SSteven Rostedt rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
21437a8e76a3SSteven Rostedt 		     struct ring_buffer_event *event)
21447a8e76a3SSteven Rostedt {
21457a8e76a3SSteven Rostedt 	u64 delta;
21467a8e76a3SSteven Rostedt 
2147334d4169SLai Jiangshan 	switch (event->type_len) {
21487a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
21497a8e76a3SSteven Rostedt 		return;
21507a8e76a3SSteven Rostedt 
21517a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
21527a8e76a3SSteven Rostedt 		delta = event->array[0];
21537a8e76a3SSteven Rostedt 		delta <<= TS_SHIFT;
21547a8e76a3SSteven Rostedt 		delta += event->time_delta;
21557a8e76a3SSteven Rostedt 		cpu_buffer->read_stamp += delta;
21567a8e76a3SSteven Rostedt 		return;
21577a8e76a3SSteven Rostedt 
21587a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
21597a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
21607a8e76a3SSteven Rostedt 		return;
21617a8e76a3SSteven Rostedt 
21627a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
21637a8e76a3SSteven Rostedt 		cpu_buffer->read_stamp += event->time_delta;
21647a8e76a3SSteven Rostedt 		return;
21657a8e76a3SSteven Rostedt 
21667a8e76a3SSteven Rostedt 	default:
21677a8e76a3SSteven Rostedt 		BUG();
21687a8e76a3SSteven Rostedt 	}
21697a8e76a3SSteven Rostedt 	return;
21707a8e76a3SSteven Rostedt }
21717a8e76a3SSteven Rostedt 
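/*
 * Worked example (editorial note, not part of the original source):
 * a time-extend event carries a delta too large for the 27-bit
 * time_delta field, so the high bits live in array[0].  Assuming
 * TS_SHIFT is 27, as defined earlier in this file, a stored delta of
 * 0x12345678 is split and rebuilt like so:
 *
 *	event->array[0]   == 0x12345678 >> 27             == 0x2
 *	event->time_delta == 0x12345678 & ((1 << 27) - 1) == 0x2345678
 *
 *	delta   = event->array[0];	/* delta == 0x2        */
 *	delta <<= TS_SHIFT;		/* delta == 0x10000000 */
 *	delta  += event->time_delta;	/* delta == 0x12345678 */
 */
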
21727a8e76a3SSteven Rostedt static void
21737a8e76a3SSteven Rostedt rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
21747a8e76a3SSteven Rostedt 			  struct ring_buffer_event *event)
21757a8e76a3SSteven Rostedt {
21767a8e76a3SSteven Rostedt 	u64 delta;
21777a8e76a3SSteven Rostedt 
2178334d4169SLai Jiangshan 	switch (event->type_len) {
21797a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
21807a8e76a3SSteven Rostedt 		return;
21817a8e76a3SSteven Rostedt 
21827a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
21837a8e76a3SSteven Rostedt 		delta = event->array[0];
21847a8e76a3SSteven Rostedt 		delta <<= TS_SHIFT;
21857a8e76a3SSteven Rostedt 		delta += event->time_delta;
21867a8e76a3SSteven Rostedt 		iter->read_stamp += delta;
21877a8e76a3SSteven Rostedt 		return;
21887a8e76a3SSteven Rostedt 
21897a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
21907a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
21917a8e76a3SSteven Rostedt 		return;
21927a8e76a3SSteven Rostedt 
21937a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
21947a8e76a3SSteven Rostedt 		iter->read_stamp += event->time_delta;
21957a8e76a3SSteven Rostedt 		return;
21967a8e76a3SSteven Rostedt 
21977a8e76a3SSteven Rostedt 	default:
21987a8e76a3SSteven Rostedt 		BUG();
21997a8e76a3SSteven Rostedt 	}
22007a8e76a3SSteven Rostedt 	return;
22017a8e76a3SSteven Rostedt }
22027a8e76a3SSteven Rostedt 
2203d769041fSSteven Rostedt static struct buffer_page *
2204d769041fSSteven Rostedt rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
22057a8e76a3SSteven Rostedt {
2206d769041fSSteven Rostedt 	struct buffer_page *reader = NULL;
2207d769041fSSteven Rostedt 	unsigned long flags;
2208818e3dd3SSteven Rostedt 	int nr_loops = 0;
2209d769041fSSteven Rostedt 
22103e03fb7fSSteven Rostedt 	local_irq_save(flags);
22113e03fb7fSSteven Rostedt 	__raw_spin_lock(&cpu_buffer->lock);
2212d769041fSSteven Rostedt 
2213d769041fSSteven Rostedt  again:
2214818e3dd3SSteven Rostedt 	/*
2215818e3dd3SSteven Rostedt 	 * This should normally only loop twice. But because the
2216818e3dd3SSteven Rostedt 	 * start of the reader inserts an empty page, it causes
2217818e3dd3SSteven Rostedt 	 * a case where we will loop three times. There should be no
2218818e3dd3SSteven Rostedt 	 * reason to loop four times (that I know of).
2219818e3dd3SSteven Rostedt 	 */
22203e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
2221818e3dd3SSteven Rostedt 		reader = NULL;
2222818e3dd3SSteven Rostedt 		goto out;
2223818e3dd3SSteven Rostedt 	}
2224818e3dd3SSteven Rostedt 
2225d769041fSSteven Rostedt 	reader = cpu_buffer->reader_page;
2226d769041fSSteven Rostedt 
2227d769041fSSteven Rostedt 	/* If there's more to read, return this page */
2228bf41a158SSteven Rostedt 	if (cpu_buffer->reader_page->read < rb_page_size(reader))
2229d769041fSSteven Rostedt 		goto out;
2230d769041fSSteven Rostedt 
2231d769041fSSteven Rostedt 	/* Never should we have an index greater than the size */
22323e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer,
22333e89c7bbSSteven Rostedt 		       cpu_buffer->reader_page->read > rb_page_size(reader)))
22343e89c7bbSSteven Rostedt 		goto out;
2235d769041fSSteven Rostedt 
2236d769041fSSteven Rostedt 	/* check if we caught up to the tail */
2237d769041fSSteven Rostedt 	reader = NULL;
2238bf41a158SSteven Rostedt 	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
2239d769041fSSteven Rostedt 		goto out;
22407a8e76a3SSteven Rostedt 
22417a8e76a3SSteven Rostedt 	/*
2242d769041fSSteven Rostedt 	 * Splice the empty reader page into the list around the head.
2243d769041fSSteven Rostedt 	 * Reset the reader page to size zero.
22447a8e76a3SSteven Rostedt 	 */
2245d769041fSSteven Rostedt 
2246d769041fSSteven Rostedt 	reader = cpu_buffer->head_page;
2247d769041fSSteven Rostedt 	cpu_buffer->reader_page->list.next = reader->list.next;
2248d769041fSSteven Rostedt 	cpu_buffer->reader_page->list.prev = reader->list.prev;
2249bf41a158SSteven Rostedt 
2250bf41a158SSteven Rostedt 	local_set(&cpu_buffer->reader_page->write, 0);
2251778c55d4SSteven Rostedt 	local_set(&cpu_buffer->reader_page->entries, 0);
2252abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->reader_page->page->commit, 0);
2253d769041fSSteven Rostedt 
2254d769041fSSteven Rostedt 	/* Make the reader page now replace the head */
2255d769041fSSteven Rostedt 	reader->list.prev->next = &cpu_buffer->reader_page->list;
2256d769041fSSteven Rostedt 	reader->list.next->prev = &cpu_buffer->reader_page->list;
2257d769041fSSteven Rostedt 
2258d769041fSSteven Rostedt 	/*
2259d769041fSSteven Rostedt 	 * If the tail is on the reader, then we must set the head
2260d769041fSSteven Rostedt 	 * to the inserted page, otherwise we set it one before.
2261d769041fSSteven Rostedt 	 */
2262d769041fSSteven Rostedt 	cpu_buffer->head_page = cpu_buffer->reader_page;
2263d769041fSSteven Rostedt 
2264bf41a158SSteven Rostedt 	if (cpu_buffer->commit_page != reader)
22657a8e76a3SSteven Rostedt 		rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2266d769041fSSteven Rostedt 
2267d769041fSSteven Rostedt 	/* Finally update the reader page to the new head */
2268d769041fSSteven Rostedt 	cpu_buffer->reader_page = reader;
2269d769041fSSteven Rostedt 	rb_reset_reader_page(cpu_buffer);
2270d769041fSSteven Rostedt 
2271d769041fSSteven Rostedt 	goto again;
2272d769041fSSteven Rostedt 
2273d769041fSSteven Rostedt  out:
22743e03fb7fSSteven Rostedt 	__raw_spin_unlock(&cpu_buffer->lock);
22753e03fb7fSSteven Rostedt 	local_irq_restore(flags);
2276d769041fSSteven Rostedt 
2277d769041fSSteven Rostedt 	return reader;
22787a8e76a3SSteven Rostedt }
22797a8e76a3SSteven Rostedt 
2280d769041fSSteven Rostedt static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2281d769041fSSteven Rostedt {
2282d769041fSSteven Rostedt 	struct ring_buffer_event *event;
2283d769041fSSteven Rostedt 	struct buffer_page *reader;
2284d769041fSSteven Rostedt 	unsigned length;
2285d769041fSSteven Rostedt 
2286d769041fSSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
2287d769041fSSteven Rostedt 
2288d769041fSSteven Rostedt 	/* This function should not be called when buffer is empty */
22893e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, !reader))
22903e89c7bbSSteven Rostedt 		return;
2291d769041fSSteven Rostedt 
2292d769041fSSteven Rostedt 	event = rb_reader_event(cpu_buffer);
22937a8e76a3SSteven Rostedt 
2294334d4169SLai Jiangshan 	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
2295334d4169SLai Jiangshan 			|| rb_discarded_event(event))
2296e4906effSSteven Rostedt 		cpu_buffer->read++;
22977a8e76a3SSteven Rostedt 
22987a8e76a3SSteven Rostedt 	rb_update_read_stamp(cpu_buffer, event);
22997a8e76a3SSteven Rostedt 
2300d769041fSSteven Rostedt 	length = rb_event_length(event);
23016f807acdSSteven Rostedt 	cpu_buffer->reader_page->read += length;
23027a8e76a3SSteven Rostedt }
23037a8e76a3SSteven Rostedt 
23047a8e76a3SSteven Rostedt static void rb_advance_iter(struct ring_buffer_iter *iter)
23057a8e76a3SSteven Rostedt {
23067a8e76a3SSteven Rostedt 	struct ring_buffer *buffer;
23077a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
23087a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
23097a8e76a3SSteven Rostedt 	unsigned length;
23107a8e76a3SSteven Rostedt 
23117a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
23127a8e76a3SSteven Rostedt 	buffer = cpu_buffer->buffer;
23137a8e76a3SSteven Rostedt 
23147a8e76a3SSteven Rostedt 	/*
23157a8e76a3SSteven Rostedt 	 * Check if we are at the end of the buffer.
23167a8e76a3SSteven Rostedt 	 */
2317bf41a158SSteven Rostedt 	if (iter->head >= rb_page_size(iter->head_page)) {
2318ea05b57cSSteven Rostedt 		/* discarded commits can make the page empty */
2319ea05b57cSSteven Rostedt 		if (iter->head_page == cpu_buffer->commit_page)
23203e89c7bbSSteven Rostedt 			return;
2321d769041fSSteven Rostedt 		rb_inc_iter(iter);
23227a8e76a3SSteven Rostedt 		return;
23237a8e76a3SSteven Rostedt 	}
23247a8e76a3SSteven Rostedt 
23257a8e76a3SSteven Rostedt 	event = rb_iter_head_event(iter);
23267a8e76a3SSteven Rostedt 
23277a8e76a3SSteven Rostedt 	length = rb_event_length(event);
23287a8e76a3SSteven Rostedt 
23297a8e76a3SSteven Rostedt 	/*
23307a8e76a3SSteven Rostedt 	 * This should not be called to advance the header if we are
23317a8e76a3SSteven Rostedt 	 * at the tail of the buffer.
23327a8e76a3SSteven Rostedt 	 */
23333e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer,
2334f536aafcSSteven Rostedt 		       (iter->head_page == cpu_buffer->commit_page) &&
23353e89c7bbSSteven Rostedt 		       (iter->head + length > rb_commit_index(cpu_buffer))))
23363e89c7bbSSteven Rostedt 		return;
23377a8e76a3SSteven Rostedt 
23387a8e76a3SSteven Rostedt 	rb_update_iter_read_stamp(iter, event);
23397a8e76a3SSteven Rostedt 
23407a8e76a3SSteven Rostedt 	iter->head += length;
23417a8e76a3SSteven Rostedt 
23427a8e76a3SSteven Rostedt 	/* check for end of page padding */
2343bf41a158SSteven Rostedt 	if ((iter->head >= rb_page_size(iter->head_page)) &&
2344bf41a158SSteven Rostedt 	    (iter->head_page != cpu_buffer->commit_page))
23457a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
23467a8e76a3SSteven Rostedt }
23477a8e76a3SSteven Rostedt 
2348f83c9d0fSSteven Rostedt static struct ring_buffer_event *
2349f83c9d0fSSteven Rostedt rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
23507a8e76a3SSteven Rostedt {
23517a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
23527a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
2353d769041fSSteven Rostedt 	struct buffer_page *reader;
2354818e3dd3SSteven Rostedt 	int nr_loops = 0;
23557a8e76a3SSteven Rostedt 
23567a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
23577a8e76a3SSteven Rostedt 
23587a8e76a3SSteven Rostedt  again:
2359818e3dd3SSteven Rostedt 	/*
2360818e3dd3SSteven Rostedt 	 * We repeat when a timestamp is encountered. It is possible
2361818e3dd3SSteven Rostedt 	 * to get multiple timestamps from an interrupt entering just
2362ea05b57cSSteven Rostedt 	 * as one timestamp is about to be written, or from discarded
2363ea05b57cSSteven Rostedt 	 * commits. The most that we can have is the number on a single page.
2364818e3dd3SSteven Rostedt 	 */
2365ea05b57cSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
2366818e3dd3SSteven Rostedt 		return NULL;
2367818e3dd3SSteven Rostedt 
2368d769041fSSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
2369d769041fSSteven Rostedt 	if (!reader)
23707a8e76a3SSteven Rostedt 		return NULL;
23717a8e76a3SSteven Rostedt 
2372d769041fSSteven Rostedt 	event = rb_reader_event(cpu_buffer);
23737a8e76a3SSteven Rostedt 
2374334d4169SLai Jiangshan 	switch (event->type_len) {
23757a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
23762d622719STom Zanussi 		if (rb_null_event(event))
2377bf41a158SSteven Rostedt 			RB_WARN_ON(cpu_buffer, 1);
23782d622719STom Zanussi 		/*
23792d622719STom Zanussi 		 * Because the writer could be discarding every
23802d622719STom Zanussi 		 * event it creates (which would probably be bad)
23812d622719STom Zanussi 		 * if we were to go back to "again" then we may never
23822d622719STom Zanussi 		 * catch up, and will trigger the warn on, or lock
23832d622719STom Zanussi 		 * the box. Return the padding, and we will release
23842d622719STom Zanussi 		 * the current locks, and try again.
23852d622719STom Zanussi 		 */
2386d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
23872d622719STom Zanussi 		return event;
23887a8e76a3SSteven Rostedt 
23897a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
23907a8e76a3SSteven Rostedt 		/* Internal data, OK to advance */
2391d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
23927a8e76a3SSteven Rostedt 		goto again;
23937a8e76a3SSteven Rostedt 
23947a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
23957a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
2396d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
23977a8e76a3SSteven Rostedt 		goto again;
23987a8e76a3SSteven Rostedt 
23997a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
24007a8e76a3SSteven Rostedt 		if (ts) {
24017a8e76a3SSteven Rostedt 			*ts = cpu_buffer->read_stamp + event->time_delta;
240237886f6aSSteven Rostedt 			ring_buffer_normalize_time_stamp(buffer,
240337886f6aSSteven Rostedt 							 cpu_buffer->cpu, ts);
24047a8e76a3SSteven Rostedt 		}
24057a8e76a3SSteven Rostedt 		return event;
24067a8e76a3SSteven Rostedt 
24077a8e76a3SSteven Rostedt 	default:
24087a8e76a3SSteven Rostedt 		BUG();
24097a8e76a3SSteven Rostedt 	}
24107a8e76a3SSteven Rostedt 
24117a8e76a3SSteven Rostedt 	return NULL;
24127a8e76a3SSteven Rostedt }
2413c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_peek);
24147a8e76a3SSteven Rostedt 
2415f83c9d0fSSteven Rostedt static struct ring_buffer_event *
2416f83c9d0fSSteven Rostedt rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
24177a8e76a3SSteven Rostedt {
24187a8e76a3SSteven Rostedt 	struct ring_buffer *buffer;
24197a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
24207a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
2421818e3dd3SSteven Rostedt 	int nr_loops = 0;
24227a8e76a3SSteven Rostedt 
24237a8e76a3SSteven Rostedt 	if (ring_buffer_iter_empty(iter))
24247a8e76a3SSteven Rostedt 		return NULL;
24257a8e76a3SSteven Rostedt 
24267a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
24277a8e76a3SSteven Rostedt 	buffer = cpu_buffer->buffer;
24287a8e76a3SSteven Rostedt 
24297a8e76a3SSteven Rostedt  again:
2430818e3dd3SSteven Rostedt 	/*
2431ea05b57cSSteven Rostedt 	 * We repeat when a timestamp is encountered.
2432ea05b57cSSteven Rostedt 	 * We can get multiple timestamps by nested interrupts or also
2433ea05b57cSSteven Rostedt 	 * if filtering is on (discarding commits). Since discarding
2434ea05b57cSSteven Rostedt 	 * commits can be frequent we can get a lot of timestamps.
2435ea05b57cSSteven Rostedt 	 * But we limit them by not adding timestamps if they begin
2436ea05b57cSSteven Rostedt 	 * at the start of a page.
2437818e3dd3SSteven Rostedt 	 */
2438ea05b57cSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
2439818e3dd3SSteven Rostedt 		return NULL;
2440818e3dd3SSteven Rostedt 
24417a8e76a3SSteven Rostedt 	if (rb_per_cpu_empty(cpu_buffer))
24427a8e76a3SSteven Rostedt 		return NULL;
24437a8e76a3SSteven Rostedt 
24447a8e76a3SSteven Rostedt 	event = rb_iter_head_event(iter);
24457a8e76a3SSteven Rostedt 
2446334d4169SLai Jiangshan 	switch (event->type_len) {
24477a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
24482d622719STom Zanussi 		if (rb_null_event(event)) {
2449d769041fSSteven Rostedt 			rb_inc_iter(iter);
24507a8e76a3SSteven Rostedt 			goto again;
24512d622719STom Zanussi 		}
24522d622719STom Zanussi 		rb_advance_iter(iter);
24532d622719STom Zanussi 		return event;
24547a8e76a3SSteven Rostedt 
24557a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
24567a8e76a3SSteven Rostedt 		/* Internal data, OK to advance */
24577a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
24587a8e76a3SSteven Rostedt 		goto again;
24597a8e76a3SSteven Rostedt 
24607a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
24617a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
24627a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
24637a8e76a3SSteven Rostedt 		goto again;
24647a8e76a3SSteven Rostedt 
24657a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
24667a8e76a3SSteven Rostedt 		if (ts) {
24677a8e76a3SSteven Rostedt 			*ts = iter->read_stamp + event->time_delta;
246837886f6aSSteven Rostedt 			ring_buffer_normalize_time_stamp(buffer,
246937886f6aSSteven Rostedt 							 cpu_buffer->cpu, ts);
24707a8e76a3SSteven Rostedt 		}
24717a8e76a3SSteven Rostedt 		return event;
24727a8e76a3SSteven Rostedt 
24737a8e76a3SSteven Rostedt 	default:
24747a8e76a3SSteven Rostedt 		BUG();
24757a8e76a3SSteven Rostedt 	}
24767a8e76a3SSteven Rostedt 
24777a8e76a3SSteven Rostedt 	return NULL;
24787a8e76a3SSteven Rostedt }
2479c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
24807a8e76a3SSteven Rostedt 
24818d707e8eSSteven Rostedt static inline int rb_ok_to_lock(void)
24828d707e8eSSteven Rostedt {
24838d707e8eSSteven Rostedt 	/*
24848d707e8eSSteven Rostedt 	 * If an NMI die dumps out the content of the ring buffer
24858d707e8eSSteven Rostedt 	 * do not grab locks. We also permanently disable the ring
24868d707e8eSSteven Rostedt 	 * buffer. A one time deal is all you get from reading
24878d707e8eSSteven Rostedt 	 * the ring buffer from an NMI.
24888d707e8eSSteven Rostedt 	 */
2489*464e85ebSSteven Rostedt 	if (likely(!in_nmi()))
24908d707e8eSSteven Rostedt 		return 1;
24918d707e8eSSteven Rostedt 
24928d707e8eSSteven Rostedt 	tracing_off_permanent();
24938d707e8eSSteven Rostedt 	return 0;
24948d707e8eSSteven Rostedt }
24958d707e8eSSteven Rostedt 
24967a8e76a3SSteven Rostedt /**
2497f83c9d0fSSteven Rostedt  * ring_buffer_peek - peek at the next event to be read
2498f83c9d0fSSteven Rostedt  * @buffer: The ring buffer to read
2499f83c9d0fSSteven Rostedt  * @cpu: The cpu to peek at
2500f83c9d0fSSteven Rostedt  * @ts: The timestamp counter of this event.
2501f83c9d0fSSteven Rostedt  *
2502f83c9d0fSSteven Rostedt  * This will return the event that will be read next, but does
2503f83c9d0fSSteven Rostedt  * not consume the data.
2504f83c9d0fSSteven Rostedt  */
2505f83c9d0fSSteven Rostedt struct ring_buffer_event *
2506f83c9d0fSSteven Rostedt ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2507f83c9d0fSSteven Rostedt {
2508f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
25098aabee57SSteven Rostedt 	struct ring_buffer_event *event;
2510f83c9d0fSSteven Rostedt 	unsigned long flags;
25118d707e8eSSteven Rostedt 	int dolock;
2512f83c9d0fSSteven Rostedt 
2513554f786eSSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
25148aabee57SSteven Rostedt 		return NULL;
2515554f786eSSteven Rostedt 
25168d707e8eSSteven Rostedt 	dolock = rb_ok_to_lock();
25172d622719STom Zanussi  again:
25188d707e8eSSteven Rostedt 	local_irq_save(flags);
25198d707e8eSSteven Rostedt 	if (dolock)
25208d707e8eSSteven Rostedt 		spin_lock(&cpu_buffer->reader_lock);
2521f83c9d0fSSteven Rostedt 	event = rb_buffer_peek(buffer, cpu, ts);
25228d707e8eSSteven Rostedt 	if (dolock)
25238d707e8eSSteven Rostedt 		spin_unlock(&cpu_buffer->reader_lock);
25248d707e8eSSteven Rostedt 	local_irq_restore(flags);
2525f83c9d0fSSteven Rostedt 
2526334d4169SLai Jiangshan 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
25272d622719STom Zanussi 		cpu_relax();
25282d622719STom Zanussi 		goto again;
25292d622719STom Zanussi 	}
25302d622719STom Zanussi 
2531f83c9d0fSSteven Rostedt 	return event;
2532f83c9d0fSSteven Rostedt }
2533f83c9d0fSSteven Rostedt 
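/*
 * Usage sketch (editorial addition, not in the original source): peek
 * at the next event on a CPU without consuming it.  "buffer" and
 * "cpu" are assumed to be supplied by the caller, and
 * ring_buffer_event_data() is the payload accessor declared in
 * linux/ring_buffer.h.
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts);
 *	if (event)
 *		pr_debug("next event ts=%llu data=%p\n",
 *			 (unsigned long long)ts,
 *			 ring_buffer_event_data(event));
 */
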
2534f83c9d0fSSteven Rostedt /**
2535f83c9d0fSSteven Rostedt  * ring_buffer_iter_peek - peek at the next event to be read
2536f83c9d0fSSteven Rostedt  * @iter: The ring buffer iterator
2537f83c9d0fSSteven Rostedt  * @ts: The timestamp counter of this event.
2538f83c9d0fSSteven Rostedt  *
2539f83c9d0fSSteven Rostedt  * This will return the event that will be read next, but does
2540f83c9d0fSSteven Rostedt  * not increment the iterator.
2541f83c9d0fSSteven Rostedt  */
2542f83c9d0fSSteven Rostedt struct ring_buffer_event *
2543f83c9d0fSSteven Rostedt ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2544f83c9d0fSSteven Rostedt {
2545f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2546f83c9d0fSSteven Rostedt 	struct ring_buffer_event *event;
2547f83c9d0fSSteven Rostedt 	unsigned long flags;
2548f83c9d0fSSteven Rostedt 
25492d622719STom Zanussi  again:
2550f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2551f83c9d0fSSteven Rostedt 	event = rb_iter_peek(iter, ts);
2552f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2553f83c9d0fSSteven Rostedt 
2554334d4169SLai Jiangshan 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
25552d622719STom Zanussi 		cpu_relax();
25562d622719STom Zanussi 		goto again;
25572d622719STom Zanussi 	}
25582d622719STom Zanussi 
2559f83c9d0fSSteven Rostedt 	return event;
2560f83c9d0fSSteven Rostedt }
2561f83c9d0fSSteven Rostedt 
2562f83c9d0fSSteven Rostedt /**
25637a8e76a3SSteven Rostedt  * ring_buffer_consume - return an event and consume it
25647a8e76a3SSteven Rostedt  * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the event's timestamp
25657a8e76a3SSteven Rostedt  *
25667a8e76a3SSteven Rostedt  * Returns the next event in the ring buffer, and that event is consumed.
25677a8e76a3SSteven Rostedt  * Meaning that sequential reads will keep returning a different event,
25687a8e76a3SSteven Rostedt  * and eventually empty the ring buffer if the producer is slower.
25697a8e76a3SSteven Rostedt  */
25707a8e76a3SSteven Rostedt struct ring_buffer_event *
25717a8e76a3SSteven Rostedt ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
25727a8e76a3SSteven Rostedt {
2573554f786eSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
2574554f786eSSteven Rostedt 	struct ring_buffer_event *event = NULL;
2575f83c9d0fSSteven Rostedt 	unsigned long flags;
25768d707e8eSSteven Rostedt 	int dolock;
25778d707e8eSSteven Rostedt 
25788d707e8eSSteven Rostedt 	dolock = rb_ok_to_lock();
25797a8e76a3SSteven Rostedt 
25802d622719STom Zanussi  again:
2581554f786eSSteven Rostedt 	/* might be called in atomic */
2582554f786eSSteven Rostedt 	preempt_disable();
25837a8e76a3SSteven Rostedt 
2584554f786eSSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2585554f786eSSteven Rostedt 		goto out;
2586554f786eSSteven Rostedt 
2587554f786eSSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
25888d707e8eSSteven Rostedt 	local_irq_save(flags);
25898d707e8eSSteven Rostedt 	if (dolock)
25908d707e8eSSteven Rostedt 		spin_lock(&cpu_buffer->reader_lock);
25917a8e76a3SSteven Rostedt 
2592f83c9d0fSSteven Rostedt 	event = rb_buffer_peek(buffer, cpu, ts);
2593f83c9d0fSSteven Rostedt 	if (!event)
2594554f786eSSteven Rostedt 		goto out_unlock;
2595f83c9d0fSSteven Rostedt 
2596d769041fSSteven Rostedt 	rb_advance_reader(cpu_buffer);
25977a8e76a3SSteven Rostedt 
2598554f786eSSteven Rostedt  out_unlock:
25998d707e8eSSteven Rostedt 	if (dolock)
26008d707e8eSSteven Rostedt 		spin_unlock(&cpu_buffer->reader_lock);
26018d707e8eSSteven Rostedt 	local_irq_restore(flags);
2602f83c9d0fSSteven Rostedt 
2603554f786eSSteven Rostedt  out:
2604554f786eSSteven Rostedt 	preempt_enable();
2605554f786eSSteven Rostedt 
2606334d4169SLai Jiangshan 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
26072d622719STom Zanussi 		cpu_relax();
26082d622719STom Zanussi 		goto again;
26092d622719STom Zanussi 	}
26102d622719STom Zanussi 
26117a8e76a3SSteven Rostedt 	return event;
26127a8e76a3SSteven Rostedt }
2613c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_consume);
26147a8e76a3SSteven Rostedt 
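/*
 * Usage sketch (editorial addition): drain everything currently
 * readable on one CPU with the consuming API.  "buffer" and "cpu" are
 * assumed inputs; process_event() is a hypothetical callback standing
 * in for whatever the reader does with the payload.
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *		process_event(ring_buffer_event_data(event), ts);
 */
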
26157a8e76a3SSteven Rostedt /**
26167a8e76a3SSteven Rostedt  * ring_buffer_read_start - start a non consuming read of the buffer
26177a8e76a3SSteven Rostedt  * @buffer: The ring buffer to read from
26187a8e76a3SSteven Rostedt  * @cpu: The cpu buffer to iterate over
26197a8e76a3SSteven Rostedt  *
26207a8e76a3SSteven Rostedt  * This starts up an iteration through the buffer. It also disables
26217a8e76a3SSteven Rostedt  * the recording to the buffer until the reading is finished.
26227a8e76a3SSteven Rostedt  * This prevents the reading from being corrupted. This is not
26237a8e76a3SSteven Rostedt  * a consuming read, so a producer is not expected.
26247a8e76a3SSteven Rostedt  *
26257a8e76a3SSteven Rostedt  * Must be paired with ring_buffer_read_finish.
26267a8e76a3SSteven Rostedt  */
26277a8e76a3SSteven Rostedt struct ring_buffer_iter *
26287a8e76a3SSteven Rostedt ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
26297a8e76a3SSteven Rostedt {
26307a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
26318aabee57SSteven Rostedt 	struct ring_buffer_iter *iter;
2632d769041fSSteven Rostedt 	unsigned long flags;
26337a8e76a3SSteven Rostedt 
26349e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
26358aabee57SSteven Rostedt 		return NULL;
26367a8e76a3SSteven Rostedt 
26377a8e76a3SSteven Rostedt 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
26387a8e76a3SSteven Rostedt 	if (!iter)
26398aabee57SSteven Rostedt 		return NULL;
26407a8e76a3SSteven Rostedt 
26417a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
26427a8e76a3SSteven Rostedt 
26437a8e76a3SSteven Rostedt 	iter->cpu_buffer = cpu_buffer;
26447a8e76a3SSteven Rostedt 
26457a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
26467a8e76a3SSteven Rostedt 	synchronize_sched();
26477a8e76a3SSteven Rostedt 
2648f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
26493e03fb7fSSteven Rostedt 	__raw_spin_lock(&cpu_buffer->lock);
2650642edba5SSteven Rostedt 	rb_iter_reset(iter);
26513e03fb7fSSteven Rostedt 	__raw_spin_unlock(&cpu_buffer->lock);
2652f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
26537a8e76a3SSteven Rostedt 
26547a8e76a3SSteven Rostedt 	return iter;
26557a8e76a3SSteven Rostedt }
2656c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_start);
26577a8e76a3SSteven Rostedt 
26587a8e76a3SSteven Rostedt /**
26597a8e76a3SSteven Rostedt  * ring_buffer_read_finish - finish reading the iterator of the buffer
26607a8e76a3SSteven Rostedt  * @iter: The iterator retrieved by ring_buffer_read_start
26617a8e76a3SSteven Rostedt  *
26627a8e76a3SSteven Rostedt  * This re-enables the recording to the buffer, and frees the
26637a8e76a3SSteven Rostedt  * iterator.
26647a8e76a3SSteven Rostedt  */
26657a8e76a3SSteven Rostedt void
26667a8e76a3SSteven Rostedt ring_buffer_read_finish(struct ring_buffer_iter *iter)
26677a8e76a3SSteven Rostedt {
26687a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
26697a8e76a3SSteven Rostedt 
26707a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
26717a8e76a3SSteven Rostedt 	kfree(iter);
26727a8e76a3SSteven Rostedt }
2673c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
26747a8e76a3SSteven Rostedt 
26757a8e76a3SSteven Rostedt /**
26767a8e76a3SSteven Rostedt  * ring_buffer_read - read the next item in the ring buffer by the iterator
26777a8e76a3SSteven Rostedt  * @iter: The ring buffer iterator
26787a8e76a3SSteven Rostedt  * @ts: The time stamp of the event read.
26797a8e76a3SSteven Rostedt  *
26807a8e76a3SSteven Rostedt  * This reads the next event in the ring buffer and increments the iterator.
26817a8e76a3SSteven Rostedt  */
26827a8e76a3SSteven Rostedt struct ring_buffer_event *
26837a8e76a3SSteven Rostedt ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
26847a8e76a3SSteven Rostedt {
26857a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
2686f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2687f83c9d0fSSteven Rostedt 	unsigned long flags;
26887a8e76a3SSteven Rostedt 
26892d622719STom Zanussi  again:
2690f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2691f83c9d0fSSteven Rostedt 	event = rb_iter_peek(iter, ts);
26927a8e76a3SSteven Rostedt 	if (!event)
2693f83c9d0fSSteven Rostedt 		goto out;
26947a8e76a3SSteven Rostedt 
26957a8e76a3SSteven Rostedt 	rb_advance_iter(iter);
2696f83c9d0fSSteven Rostedt  out:
2697f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
26987a8e76a3SSteven Rostedt 
2699334d4169SLai Jiangshan 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
27002d622719STom Zanussi 		cpu_relax();
27012d622719STom Zanussi 		goto again;
27022d622719STom Zanussi 	}
27032d622719STom Zanussi 
27047a8e76a3SSteven Rostedt 	return event;
27057a8e76a3SSteven Rostedt }
2706c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read);
27077a8e76a3SSteven Rostedt 
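/*
 * Usage sketch (editorial addition): a non-consuming walk of one CPU
 * buffer using the iterator API.  Recording on that CPU buffer stays
 * disabled between ring_buffer_read_start() and
 * ring_buffer_read_finish().  "buffer" and "cpu" are assumed inputs;
 * process_event() is a hypothetical callback.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		process_event(ring_buffer_event_data(event), ts);
 *
 *	ring_buffer_read_finish(iter);
 */
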
27087a8e76a3SSteven Rostedt /**
27097a8e76a3SSteven Rostedt  * ring_buffer_size - return the size of the ring buffer (in bytes)
27107a8e76a3SSteven Rostedt  * @buffer: The ring buffer.
27117a8e76a3SSteven Rostedt  */
27127a8e76a3SSteven Rostedt unsigned long ring_buffer_size(struct ring_buffer *buffer)
27137a8e76a3SSteven Rostedt {
27147a8e76a3SSteven Rostedt 	return BUF_PAGE_SIZE * buffer->pages;
27157a8e76a3SSteven Rostedt }
2716c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_size);
27177a8e76a3SSteven Rostedt 
27187a8e76a3SSteven Rostedt static void
27197a8e76a3SSteven Rostedt rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
27207a8e76a3SSteven Rostedt {
27217a8e76a3SSteven Rostedt 	cpu_buffer->head_page
27227a8e76a3SSteven Rostedt 		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
2723bf41a158SSteven Rostedt 	local_set(&cpu_buffer->head_page->write, 0);
2724778c55d4SSteven Rostedt 	local_set(&cpu_buffer->head_page->entries, 0);
2725abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->head_page->page->commit, 0);
27267a8e76a3SSteven Rostedt 
27276f807acdSSteven Rostedt 	cpu_buffer->head_page->read = 0;
2728bf41a158SSteven Rostedt 
2729bf41a158SSteven Rostedt 	cpu_buffer->tail_page = cpu_buffer->head_page;
2730bf41a158SSteven Rostedt 	cpu_buffer->commit_page = cpu_buffer->head_page;
2731bf41a158SSteven Rostedt 
2732bf41a158SSteven Rostedt 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2733bf41a158SSteven Rostedt 	local_set(&cpu_buffer->reader_page->write, 0);
2734778c55d4SSteven Rostedt 	local_set(&cpu_buffer->reader_page->entries, 0);
2735abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->reader_page->page->commit, 0);
27366f807acdSSteven Rostedt 	cpu_buffer->reader_page->read = 0;
2737d769041fSSteven Rostedt 
2738f0d2c681SSteven Rostedt 	cpu_buffer->nmi_dropped = 0;
2739f0d2c681SSteven Rostedt 	cpu_buffer->commit_overrun = 0;
27407a8e76a3SSteven Rostedt 	cpu_buffer->overrun = 0;
2741e4906effSSteven Rostedt 	cpu_buffer->read = 0;
2742e4906effSSteven Rostedt 	local_set(&cpu_buffer->entries, 0);
2743fa743953SSteven Rostedt 	local_set(&cpu_buffer->committing, 0);
2744fa743953SSteven Rostedt 	local_set(&cpu_buffer->commits, 0);
274569507c06SSteven Rostedt 
274669507c06SSteven Rostedt 	cpu_buffer->write_stamp = 0;
274769507c06SSteven Rostedt 	cpu_buffer->read_stamp = 0;
27487a8e76a3SSteven Rostedt }
27497a8e76a3SSteven Rostedt 
27507a8e76a3SSteven Rostedt /**
27517a8e76a3SSteven Rostedt  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
27527a8e76a3SSteven Rostedt  * @buffer: The ring buffer to reset a per cpu buffer of
27537a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to be reset
27547a8e76a3SSteven Rostedt  */
27557a8e76a3SSteven Rostedt void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
27567a8e76a3SSteven Rostedt {
27577a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
27587a8e76a3SSteven Rostedt 	unsigned long flags;
27597a8e76a3SSteven Rostedt 
27609e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
27618aabee57SSteven Rostedt 		return;
27627a8e76a3SSteven Rostedt 
276341ede23eSSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
276441ede23eSSteven Rostedt 
2765f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2766f83c9d0fSSteven Rostedt 
27673e03fb7fSSteven Rostedt 	__raw_spin_lock(&cpu_buffer->lock);
27687a8e76a3SSteven Rostedt 
27697a8e76a3SSteven Rostedt 	rb_reset_cpu(cpu_buffer);
27707a8e76a3SSteven Rostedt 
27713e03fb7fSSteven Rostedt 	__raw_spin_unlock(&cpu_buffer->lock);
2772f83c9d0fSSteven Rostedt 
2773f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
277441ede23eSSteven Rostedt 
277541ede23eSSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
27767a8e76a3SSteven Rostedt }
2777c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
27787a8e76a3SSteven Rostedt 
27797a8e76a3SSteven Rostedt /**
27807a8e76a3SSteven Rostedt  * ring_buffer_reset - reset a ring buffer
27817a8e76a3SSteven Rostedt  * @buffer: The ring buffer to reset all cpu buffers
27827a8e76a3SSteven Rostedt  */
27837a8e76a3SSteven Rostedt void ring_buffer_reset(struct ring_buffer *buffer)
27847a8e76a3SSteven Rostedt {
27857a8e76a3SSteven Rostedt 	int cpu;
27867a8e76a3SSteven Rostedt 
27877a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu)
2788d769041fSSteven Rostedt 		ring_buffer_reset_cpu(buffer, cpu);
27897a8e76a3SSteven Rostedt }
2790c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset);
27917a8e76a3SSteven Rostedt 
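/*
 * Usage sketch (editorial addition): one common way for a tracer to
 * clear a buffer while writers may still be active is to pause
 * recording around the reset.  ring_buffer_record_disable()/_enable()
 * come from earlier in this file; the synchronize_sched() is only an
 * illustrative way to wait out in-flight writers, not a required step.
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();
 *	ring_buffer_reset(buffer);
 *	ring_buffer_record_enable(buffer);
 */
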
27927a8e76a3SSteven Rostedt /**
27937a8e76a3SSteven Rostedt  * ring_buffer_empty - is the ring buffer empty?
27947a8e76a3SSteven Rostedt  * @buffer: The ring buffer to test
27957a8e76a3SSteven Rostedt  */
27967a8e76a3SSteven Rostedt int ring_buffer_empty(struct ring_buffer *buffer)
27977a8e76a3SSteven Rostedt {
27987a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
2799d4788207SSteven Rostedt 	unsigned long flags;
28008d707e8eSSteven Rostedt 	int dolock;
28017a8e76a3SSteven Rostedt 	int cpu;
2802d4788207SSteven Rostedt 	int ret;
28037a8e76a3SSteven Rostedt 
28048d707e8eSSteven Rostedt 	dolock = rb_ok_to_lock();
28057a8e76a3SSteven Rostedt 
28067a8e76a3SSteven Rostedt 	/* yes this is racy, but if you don't like the race, lock the buffer */
28077a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
28087a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
28098d707e8eSSteven Rostedt 		local_irq_save(flags);
28108d707e8eSSteven Rostedt 		if (dolock)
28118d707e8eSSteven Rostedt 			spin_lock(&cpu_buffer->reader_lock);
2812d4788207SSteven Rostedt 		ret = rb_per_cpu_empty(cpu_buffer);
28138d707e8eSSteven Rostedt 		if (dolock)
28148d707e8eSSteven Rostedt 			spin_unlock(&cpu_buffer->reader_lock);
28158d707e8eSSteven Rostedt 		local_irq_restore(flags);
28168d707e8eSSteven Rostedt 
2817d4788207SSteven Rostedt 		if (!ret)
28187a8e76a3SSteven Rostedt 			return 0;
28197a8e76a3SSteven Rostedt 	}
2820554f786eSSteven Rostedt 
28217a8e76a3SSteven Rostedt 	return 1;
28227a8e76a3SSteven Rostedt }
2823c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty);
28247a8e76a3SSteven Rostedt 
28257a8e76a3SSteven Rostedt /**
28267a8e76a3SSteven Rostedt  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
28277a8e76a3SSteven Rostedt  * @buffer: The ring buffer
28287a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to test
28297a8e76a3SSteven Rostedt  */
28307a8e76a3SSteven Rostedt int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
28317a8e76a3SSteven Rostedt {
28327a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
2833d4788207SSteven Rostedt 	unsigned long flags;
28348d707e8eSSteven Rostedt 	int dolock;
28358aabee57SSteven Rostedt 	int ret;
28367a8e76a3SSteven Rostedt 
28379e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
28388aabee57SSteven Rostedt 		return 1;
28397a8e76a3SSteven Rostedt 
28408d707e8eSSteven Rostedt 	dolock = rb_ok_to_lock();
2841554f786eSSteven Rostedt 
28427a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
28438d707e8eSSteven Rostedt 	local_irq_save(flags);
28448d707e8eSSteven Rostedt 	if (dolock)
28458d707e8eSSteven Rostedt 		spin_lock(&cpu_buffer->reader_lock);
2846554f786eSSteven Rostedt 	ret = rb_per_cpu_empty(cpu_buffer);
28478d707e8eSSteven Rostedt 	if (dolock)
28488d707e8eSSteven Rostedt 		spin_unlock(&cpu_buffer->reader_lock);
28498d707e8eSSteven Rostedt 	local_irq_restore(flags);
2850554f786eSSteven Rostedt 
2851554f786eSSteven Rostedt 	return ret;
28527a8e76a3SSteven Rostedt }
2853c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
28547a8e76a3SSteven Rostedt 
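/*
 * Usage sketch (editorial addition): a reader polling one CPU buffer
 * for data before consuming it.  "buffer" and "cpu" are assumed
 * inputs, and the sleep interval is arbitrary.
 *
 *	while (ring_buffer_empty_cpu(buffer, cpu))
 *		schedule_timeout_interruptible(HZ / 10);
 *
 *	event = ring_buffer_consume(buffer, cpu, &ts);
 */
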
28557a8e76a3SSteven Rostedt /**
28567a8e76a3SSteven Rostedt  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
28577a8e76a3SSteven Rostedt  * @buffer_a: One buffer to swap with
28587a8e76a3SSteven Rostedt  * @buffer_b: The other buffer to swap with
 * @cpu: the CPU buffer to swap
28597a8e76a3SSteven Rostedt  *
28607a8e76a3SSteven Rostedt  * This function is useful for tracers that want to take a "snapshot"
28617a8e76a3SSteven Rostedt  * of a CPU buffer and have another backup buffer lying around.
28627a8e76a3SSteven Rostedt  * It is expected that the tracer handles the cpu buffer not being
28637a8e76a3SSteven Rostedt  * used at the moment.
28647a8e76a3SSteven Rostedt  */
28657a8e76a3SSteven Rostedt int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
28667a8e76a3SSteven Rostedt 			 struct ring_buffer *buffer_b, int cpu)
28677a8e76a3SSteven Rostedt {
28687a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer_a;
28697a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer_b;
2870554f786eSSteven Rostedt 	int ret = -EINVAL;
2871554f786eSSteven Rostedt 
28729e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
28739e01c1b7SRusty Russell 	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
2874554f786eSSteven Rostedt 		goto out;
28757a8e76a3SSteven Rostedt 
28767a8e76a3SSteven Rostedt 	/* At least make sure the two buffers are somewhat the same */
28776d102bc6SLai Jiangshan 	if (buffer_a->pages != buffer_b->pages)
2878554f786eSSteven Rostedt 		goto out;
2879554f786eSSteven Rostedt 
2880554f786eSSteven Rostedt 	ret = -EAGAIN;
28817a8e76a3SSteven Rostedt 
288297b17efeSSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
2883554f786eSSteven Rostedt 		goto out;
288497b17efeSSteven Rostedt 
288597b17efeSSteven Rostedt 	if (atomic_read(&buffer_a->record_disabled))
2886554f786eSSteven Rostedt 		goto out;
288797b17efeSSteven Rostedt 
288897b17efeSSteven Rostedt 	if (atomic_read(&buffer_b->record_disabled))
2889554f786eSSteven Rostedt 		goto out;
289097b17efeSSteven Rostedt 
28917a8e76a3SSteven Rostedt 	cpu_buffer_a = buffer_a->buffers[cpu];
28927a8e76a3SSteven Rostedt 	cpu_buffer_b = buffer_b->buffers[cpu];
28937a8e76a3SSteven Rostedt 
289497b17efeSSteven Rostedt 	if (atomic_read(&cpu_buffer_a->record_disabled))
2895554f786eSSteven Rostedt 		goto out;
289697b17efeSSteven Rostedt 
289797b17efeSSteven Rostedt 	if (atomic_read(&cpu_buffer_b->record_disabled))
2898554f786eSSteven Rostedt 		goto out;
289997b17efeSSteven Rostedt 
29007a8e76a3SSteven Rostedt 	/*
29017a8e76a3SSteven Rostedt 	 * We can't do a synchronize_sched here because this
29027a8e76a3SSteven Rostedt 	 * function can be called in atomic context.
29037a8e76a3SSteven Rostedt 	 * Normally this will be called from the same CPU as cpu.
29047a8e76a3SSteven Rostedt 	 * If not it's up to the caller to protect this.
29057a8e76a3SSteven Rostedt 	 */
29067a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer_a->record_disabled);
29077a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer_b->record_disabled);
29087a8e76a3SSteven Rostedt 
29097a8e76a3SSteven Rostedt 	buffer_a->buffers[cpu] = cpu_buffer_b;
29107a8e76a3SSteven Rostedt 	buffer_b->buffers[cpu] = cpu_buffer_a;
29117a8e76a3SSteven Rostedt 
29127a8e76a3SSteven Rostedt 	cpu_buffer_b->buffer = buffer_a;
29137a8e76a3SSteven Rostedt 	cpu_buffer_a->buffer = buffer_b;
29147a8e76a3SSteven Rostedt 
29157a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer_a->record_disabled);
29167a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer_b->record_disabled);
29177a8e76a3SSteven Rostedt 
2918554f786eSSteven Rostedt 	ret = 0;
2919554f786eSSteven Rostedt out:
2920554f786eSSteven Rostedt 	return ret;
29217a8e76a3SSteven Rostedt }
2922c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
29237a8e76a3SSteven Rostedt 
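/*
 * Usage sketch (editorial addition): taking a "snapshot" by swapping
 * one CPU buffer into a spare ring buffer of the same size.
 * "snapshot_buffer" is a hypothetical spare allocated up front, and
 * read_snapshot() is a hypothetical reader; on failure the function
 * returns -EINVAL (mismatched buffers) or -EAGAIN (recording
 * disabled), per the code above.
 *
 *	if (ring_buffer_swap_cpu(buffer, snapshot_buffer, cpu) == 0)
 *		read_snapshot(snapshot_buffer, cpu);
 */
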
29248789a9e7SSteven Rostedt /**
29258789a9e7SSteven Rostedt  * ring_buffer_alloc_read_page - allocate a page to read from buffer
29268789a9e7SSteven Rostedt  * @buffer: the buffer to allocate for.
29278789a9e7SSteven Rostedt  *
29288789a9e7SSteven Rostedt  * This function is used in conjunction with ring_buffer_read_page.
29298789a9e7SSteven Rostedt  * When reading a full page from the ring buffer, these functions
29308789a9e7SSteven Rostedt  * can be used to speed up the process. The calling function should
29318789a9e7SSteven Rostedt  * allocate a few pages first with this function. Then when it
29328789a9e7SSteven Rostedt  * needs to get pages from the ring buffer, it passes the result
29338789a9e7SSteven Rostedt  * of this function into ring_buffer_read_page, which will swap
29348789a9e7SSteven Rostedt  * the page that was allocated, with the read page of the buffer.
29358789a9e7SSteven Rostedt  *
29368789a9e7SSteven Rostedt  * Returns:
29378789a9e7SSteven Rostedt  *  The page allocated, or NULL on error.
29388789a9e7SSteven Rostedt  */
29398789a9e7SSteven Rostedt void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
29408789a9e7SSteven Rostedt {
2941044fa782SSteven Rostedt 	struct buffer_data_page *bpage;
2942ef7a4a16SSteven Rostedt 	unsigned long addr;
29438789a9e7SSteven Rostedt 
29448789a9e7SSteven Rostedt 	addr = __get_free_page(GFP_KERNEL);
29458789a9e7SSteven Rostedt 	if (!addr)
29468789a9e7SSteven Rostedt 		return NULL;
29478789a9e7SSteven Rostedt 
2948044fa782SSteven Rostedt 	bpage = (void *)addr;
29498789a9e7SSteven Rostedt 
2950ef7a4a16SSteven Rostedt 	rb_init_page(bpage);
2951ef7a4a16SSteven Rostedt 
2952044fa782SSteven Rostedt 	return bpage;
29538789a9e7SSteven Rostedt }
2954d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
29558789a9e7SSteven Rostedt 
29568789a9e7SSteven Rostedt /**
29578789a9e7SSteven Rostedt  * ring_buffer_free_read_page - free an allocated read page
29588789a9e7SSteven Rostedt  * @buffer: the buffer the page was allocated for
29598789a9e7SSteven Rostedt  * @data: the page to free
29608789a9e7SSteven Rostedt  *
29618789a9e7SSteven Rostedt  * Free a page allocated from ring_buffer_alloc_read_page.
29628789a9e7SSteven Rostedt  */
29638789a9e7SSteven Rostedt void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
29648789a9e7SSteven Rostedt {
29658789a9e7SSteven Rostedt 	free_page((unsigned long)data);
29668789a9e7SSteven Rostedt }
2967d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
29688789a9e7SSteven Rostedt 
29698789a9e7SSteven Rostedt /**
29708789a9e7SSteven Rostedt  * ring_buffer_read_page - extract a page from the ring buffer
29718789a9e7SSteven Rostedt  * @buffer: buffer to extract from
29728789a9e7SSteven Rostedt  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2973ef7a4a16SSteven Rostedt  * @len: amount to extract
29748789a9e7SSteven Rostedt  * @cpu: the cpu of the buffer to extract
29758789a9e7SSteven Rostedt  * @full: should the extraction only happen when the page is full.
29768789a9e7SSteven Rostedt  *
29778789a9e7SSteven Rostedt  * This function will pull out a page from the ring buffer and consume it.
29788789a9e7SSteven Rostedt  * @data_page must be the address of the variable that was returned
29798789a9e7SSteven Rostedt  * from ring_buffer_alloc_read_page. This is because the page might be used
29808789a9e7SSteven Rostedt  * to swap with a page in the ring buffer.
29818789a9e7SSteven Rostedt  *
29828789a9e7SSteven Rostedt  * for example:
2983b85fa01eSLai Jiangshan  *	rpage = ring_buffer_alloc_read_page(buffer);
29848789a9e7SSteven Rostedt  *	if (!rpage)
29858789a9e7SSteven Rostedt  *		return error;
2986ef7a4a16SSteven Rostedt  *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
2987667d2412SLai Jiangshan  *	if (ret >= 0)
2988667d2412SLai Jiangshan  *		process_page(rpage, ret);
29898789a9e7SSteven Rostedt  *
29908789a9e7SSteven Rostedt  * When @full is set, the function will not return data unless
29918789a9e7SSteven Rostedt  * the writer is off the reader page.
29928789a9e7SSteven Rostedt  *
29938789a9e7SSteven Rostedt  * Note: it is up to the calling functions to handle sleeps and wakeups.
29948789a9e7SSteven Rostedt  *  The ring buffer can be used anywhere in the kernel and can not
29958789a9e7SSteven Rostedt  *  blindly call wake_up. The layer that uses the ring buffer must be
29968789a9e7SSteven Rostedt  *  responsible for that.
29978789a9e7SSteven Rostedt  *
29988789a9e7SSteven Rostedt  * Returns:
2999667d2412SLai Jiangshan  *  >=0 if data has been transferred, returns the offset of consumed data.
3000667d2412SLai Jiangshan  *  <0 if no data has been transferred.
30018789a9e7SSteven Rostedt  */
30028789a9e7SSteven Rostedt int ring_buffer_read_page(struct ring_buffer *buffer,
3003ef7a4a16SSteven Rostedt 			  void **data_page, size_t len, int cpu, int full)
30048789a9e7SSteven Rostedt {
30058789a9e7SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
30068789a9e7SSteven Rostedt 	struct ring_buffer_event *event;
3007044fa782SSteven Rostedt 	struct buffer_data_page *bpage;
3008ef7a4a16SSteven Rostedt 	struct buffer_page *reader;
30098789a9e7SSteven Rostedt 	unsigned long flags;
3010ef7a4a16SSteven Rostedt 	unsigned int commit;
3011667d2412SLai Jiangshan 	unsigned int read;
30124f3640f8SSteven Rostedt 	u64 save_timestamp;
3013667d2412SLai Jiangshan 	int ret = -1;
30148789a9e7SSteven Rostedt 
3015554f786eSSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3016554f786eSSteven Rostedt 		goto out;
3017554f786eSSteven Rostedt 
3018474d32b6SSteven Rostedt 	/*
3019474d32b6SSteven Rostedt 	 * If len is not big enough to hold the page header, then
3020474d32b6SSteven Rostedt 	 * we can not copy anything.
3021474d32b6SSteven Rostedt 	 */
3022474d32b6SSteven Rostedt 	if (len <= BUF_PAGE_HDR_SIZE)
3023554f786eSSteven Rostedt 		goto out;
3024474d32b6SSteven Rostedt 
3025474d32b6SSteven Rostedt 	len -= BUF_PAGE_HDR_SIZE;
3026474d32b6SSteven Rostedt 
30278789a9e7SSteven Rostedt 	if (!data_page)
3028554f786eSSteven Rostedt 		goto out;
30298789a9e7SSteven Rostedt 
3030044fa782SSteven Rostedt 	bpage = *data_page;
3031044fa782SSteven Rostedt 	if (!bpage)
3032554f786eSSteven Rostedt 		goto out;
30338789a9e7SSteven Rostedt 
30348789a9e7SSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
30358789a9e7SSteven Rostedt 
3036ef7a4a16SSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
3037ef7a4a16SSteven Rostedt 	if (!reader)
3038554f786eSSteven Rostedt 		goto out_unlock;
30398789a9e7SSteven Rostedt 
3040ef7a4a16SSteven Rostedt 	event = rb_reader_event(cpu_buffer);
3041667d2412SLai Jiangshan 
3042ef7a4a16SSteven Rostedt 	read = reader->read;
3043ef7a4a16SSteven Rostedt 	commit = rb_page_commit(reader);
3044ef7a4a16SSteven Rostedt 
30458789a9e7SSteven Rostedt 	/*
3046474d32b6SSteven Rostedt 	 * If this page has been partially read or
3047474d32b6SSteven Rostedt 	 * if len is not big enough to read the rest of the page or
3048474d32b6SSteven Rostedt 	 * a writer is still on the page, then
3049474d32b6SSteven Rostedt 	 * we must copy the data from the page to the buffer.
3050474d32b6SSteven Rostedt 	 * Otherwise, we can simply swap the page with the one passed in.
30518789a9e7SSteven Rostedt 	 */
3052474d32b6SSteven Rostedt 	if (read || (len < (commit - read)) ||
3053ef7a4a16SSteven Rostedt 	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
3054667d2412SLai Jiangshan 		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
3055474d32b6SSteven Rostedt 		unsigned int rpos = read;
3056474d32b6SSteven Rostedt 		unsigned int pos = 0;
3057ef7a4a16SSteven Rostedt 		unsigned int size;
30588789a9e7SSteven Rostedt 
30598789a9e7SSteven Rostedt 		if (full)
3060554f786eSSteven Rostedt 			goto out_unlock;
30618789a9e7SSteven Rostedt 
3062ef7a4a16SSteven Rostedt 		if (len > (commit - read))
3063ef7a4a16SSteven Rostedt 			len = (commit - read);
3064ef7a4a16SSteven Rostedt 
3065ef7a4a16SSteven Rostedt 		size = rb_event_length(event);
3066ef7a4a16SSteven Rostedt 
3067ef7a4a16SSteven Rostedt 		if (len < size)
3068554f786eSSteven Rostedt 			goto out_unlock;
3069ef7a4a16SSteven Rostedt 
30704f3640f8SSteven Rostedt 		/* save the current timestamp, since the user will need it */
30714f3640f8SSteven Rostedt 		save_timestamp = cpu_buffer->read_stamp;
30724f3640f8SSteven Rostedt 
3073ef7a4a16SSteven Rostedt 		/* Need to copy one event at a time */
3074ef7a4a16SSteven Rostedt 		do {
3075474d32b6SSteven Rostedt 			memcpy(bpage->data + pos, rpage->data + rpos, size);
3076ef7a4a16SSteven Rostedt 
3077ef7a4a16SSteven Rostedt 			len -= size;
3078ef7a4a16SSteven Rostedt 
3079ef7a4a16SSteven Rostedt 			rb_advance_reader(cpu_buffer);
3080474d32b6SSteven Rostedt 			rpos = reader->read;
3081474d32b6SSteven Rostedt 			pos += size;
3082ef7a4a16SSteven Rostedt 
3083ef7a4a16SSteven Rostedt 			event = rb_reader_event(cpu_buffer);
3084ef7a4a16SSteven Rostedt 			size = rb_event_length(event);
3085ef7a4a16SSteven Rostedt 		} while (len > size);
3086667d2412SLai Jiangshan 
3087667d2412SLai Jiangshan 		/* update bpage */
3088ef7a4a16SSteven Rostedt 		local_set(&bpage->commit, pos);
30894f3640f8SSteven Rostedt 		bpage->time_stamp = save_timestamp;
3090ef7a4a16SSteven Rostedt 
3091474d32b6SSteven Rostedt 		/* we copied everything to the beginning */
3092474d32b6SSteven Rostedt 		read = 0;
30938789a9e7SSteven Rostedt 	} else {
3094afbab76aSSteven Rostedt 		/* update the entry counter */
3095afbab76aSSteven Rostedt 		cpu_buffer->read += local_read(&reader->entries);
3096afbab76aSSteven Rostedt 
30978789a9e7SSteven Rostedt 		/* swap the pages */
3098044fa782SSteven Rostedt 		rb_init_page(bpage);
3099ef7a4a16SSteven Rostedt 		bpage = reader->page;
3100ef7a4a16SSteven Rostedt 		reader->page = *data_page;
3101ef7a4a16SSteven Rostedt 		local_set(&reader->write, 0);
3102778c55d4SSteven Rostedt 		local_set(&reader->entries, 0);
3103ef7a4a16SSteven Rostedt 		reader->read = 0;
3104044fa782SSteven Rostedt 		*data_page = bpage;
3105ef7a4a16SSteven Rostedt 	}
3106ef7a4a16SSteven Rostedt 	ret = read;
3107ef7a4a16SSteven Rostedt 
3108554f786eSSteven Rostedt  out_unlock:
31098789a9e7SSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
31108789a9e7SSteven Rostedt 
3111554f786eSSteven Rostedt  out:
31128789a9e7SSteven Rostedt 	return ret;
31138789a9e7SSteven Rostedt }
3114d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_read_page);
31158789a9e7SSteven Rostedt 
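/*
 * Usage sketch (editorial addition): the full life cycle of a page
 * read, extending the example in the comment above with allocation
 * and cleanup.  "buffer" and "cpu" are assumed inputs; process_page()
 * is a hypothetical consumer of the copied (or swapped-in) page.
 *
 *	void *rpage;
 *	int ret;
 *
 *	rpage = ring_buffer_alloc_read_page(buffer);
 *	if (!rpage)
 *		return -ENOMEM;
 *
 *	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 *	ring_buffer_free_read_page(buffer, rpage);
 */
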
31161155de47SPaul Mundt #ifdef CONFIG_TRACING
3117a3583244SSteven Rostedt static ssize_t
3118a3583244SSteven Rostedt rb_simple_read(struct file *filp, char __user *ubuf,
3119a3583244SSteven Rostedt 	       size_t cnt, loff_t *ppos)
3120a3583244SSteven Rostedt {
31215e39841cSHannes Eder 	unsigned long *p = filp->private_data;
3122a3583244SSteven Rostedt 	char buf[64];
3123a3583244SSteven Rostedt 	int r;
3124a3583244SSteven Rostedt 
3125033601a3SSteven Rostedt 	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3126033601a3SSteven Rostedt 		r = sprintf(buf, "permanently disabled\n");
3127033601a3SSteven Rostedt 	else
3128033601a3SSteven Rostedt 		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
3129a3583244SSteven Rostedt 
3130a3583244SSteven Rostedt 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3131a3583244SSteven Rostedt }
3132a3583244SSteven Rostedt 
3133a3583244SSteven Rostedt static ssize_t
3134a3583244SSteven Rostedt rb_simple_write(struct file *filp, const char __user *ubuf,
3135a3583244SSteven Rostedt 		size_t cnt, loff_t *ppos)
3136a3583244SSteven Rostedt {
31375e39841cSHannes Eder 	unsigned long *p = filp->private_data;
3138a3583244SSteven Rostedt 	char buf[64];
31395e39841cSHannes Eder 	unsigned long val;
3140a3583244SSteven Rostedt 	int ret;
3141a3583244SSteven Rostedt 
3142a3583244SSteven Rostedt 	if (cnt >= sizeof(buf))
3143a3583244SSteven Rostedt 		return -EINVAL;
3144a3583244SSteven Rostedt 
3145a3583244SSteven Rostedt 	if (copy_from_user(&buf, ubuf, cnt))
3146a3583244SSteven Rostedt 		return -EFAULT;
3147a3583244SSteven Rostedt 
3148a3583244SSteven Rostedt 	buf[cnt] = 0;
3149a3583244SSteven Rostedt 
3150a3583244SSteven Rostedt 	ret = strict_strtoul(buf, 10, &val);
3151a3583244SSteven Rostedt 	if (ret < 0)
3152a3583244SSteven Rostedt 		return ret;
3153a3583244SSteven Rostedt 
3154033601a3SSteven Rostedt 	if (val)
3155033601a3SSteven Rostedt 		set_bit(RB_BUFFERS_ON_BIT, p);
3156033601a3SSteven Rostedt 	else
3157033601a3SSteven Rostedt 		clear_bit(RB_BUFFERS_ON_BIT, p);
3158a3583244SSteven Rostedt 
3159a3583244SSteven Rostedt 	(*ppos)++;
3160a3583244SSteven Rostedt 
3161a3583244SSteven Rostedt 	return cnt;
3162a3583244SSteven Rostedt }
3163a3583244SSteven Rostedt 
31645e2336a0SSteven Rostedt static const struct file_operations rb_simple_fops = {
3165a3583244SSteven Rostedt 	.open		= tracing_open_generic,
3166a3583244SSteven Rostedt 	.read		= rb_simple_read,
3167a3583244SSteven Rostedt 	.write		= rb_simple_write,
3168a3583244SSteven Rostedt };
3169a3583244SSteven Rostedt 
3170a3583244SSteven Rostedt 
3171a3583244SSteven Rostedt static __init int rb_init_debugfs(void)
3172a3583244SSteven Rostedt {
3173a3583244SSteven Rostedt 	struct dentry *d_tracer;
3174a3583244SSteven Rostedt 
3175a3583244SSteven Rostedt 	d_tracer = tracing_init_dentry();
3176a3583244SSteven Rostedt 
31775452af66SFrederic Weisbecker 	trace_create_file("tracing_on", 0644, d_tracer,
3178033601a3SSteven Rostedt 			    &ring_buffer_flags, &rb_simple_fops);
3179a3583244SSteven Rostedt 
3180a3583244SSteven Rostedt 	return 0;
3181a3583244SSteven Rostedt }
3182a3583244SSteven Rostedt 
3183a3583244SSteven Rostedt fs_initcall(rb_init_debugfs);
31841155de47SPaul Mundt #endif
3185554f786eSSteven Rostedt 
318659222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
318709c9e84dSFrederic Weisbecker static int rb_cpu_notify(struct notifier_block *self,
3188554f786eSSteven Rostedt 			 unsigned long action, void *hcpu)
3189554f786eSSteven Rostedt {
3190554f786eSSteven Rostedt 	struct ring_buffer *buffer =
3191554f786eSSteven Rostedt 		container_of(self, struct ring_buffer, cpu_notify);
3192554f786eSSteven Rostedt 	long cpu = (long)hcpu;
3193554f786eSSteven Rostedt 
3194554f786eSSteven Rostedt 	switch (action) {
3195554f786eSSteven Rostedt 	case CPU_UP_PREPARE:
3196554f786eSSteven Rostedt 	case CPU_UP_PREPARE_FROZEN:
31973f237a79SRusty Russell 		if (cpumask_test_cpu(cpu, buffer->cpumask))
3198554f786eSSteven Rostedt 			return NOTIFY_OK;
3199554f786eSSteven Rostedt 
3200554f786eSSteven Rostedt 		buffer->buffers[cpu] =
3201554f786eSSteven Rostedt 			rb_allocate_cpu_buffer(buffer, cpu);
3202554f786eSSteven Rostedt 		if (!buffer->buffers[cpu]) {
3203554f786eSSteven Rostedt 			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
3204554f786eSSteven Rostedt 			     cpu);
3205554f786eSSteven Rostedt 			return NOTIFY_OK;
3206554f786eSSteven Rostedt 		}
3207554f786eSSteven Rostedt 		smp_wmb();
32083f237a79SRusty Russell 		cpumask_set_cpu(cpu, buffer->cpumask);
3209554f786eSSteven Rostedt 		break;
3210554f786eSSteven Rostedt 	case CPU_DOWN_PREPARE:
3211554f786eSSteven Rostedt 	case CPU_DOWN_PREPARE_FROZEN:
3212554f786eSSteven Rostedt 		/*
3213554f786eSSteven Rostedt 		 * Do nothing.
3214554f786eSSteven Rostedt 		 *  If we were to free the buffer, then the user would
3215554f786eSSteven Rostedt 		 *  lose any trace that was in the buffer.
3216554f786eSSteven Rostedt 		 */
3217554f786eSSteven Rostedt 		break;
3218554f786eSSteven Rostedt 	default:
3219554f786eSSteven Rostedt 		break;
3220554f786eSSteven Rostedt 	}
3221554f786eSSteven Rostedt 	return NOTIFY_OK;
3222554f786eSSteven Rostedt }
3223554f786eSSteven Rostedt #endif
3224