xref: /linux-6.15/kernel/trace/ring_buffer.c (revision edd813bf)
17a8e76a3SSteven Rostedt /*
27a8e76a3SSteven Rostedt  * Generic ring buffer
37a8e76a3SSteven Rostedt  *
47a8e76a3SSteven Rostedt  * Copyright (C) 2008 Steven Rostedt <[email protected]>
57a8e76a3SSteven Rostedt  */
67a8e76a3SSteven Rostedt #include <linux/ring_buffer.h>
714131f2fSIngo Molnar #include <linux/trace_clock.h>
878d904b4SSteven Rostedt #include <linux/ftrace_irq.h>
97a8e76a3SSteven Rostedt #include <linux/spinlock.h>
107a8e76a3SSteven Rostedt #include <linux/debugfs.h>
117a8e76a3SSteven Rostedt #include <linux/uaccess.h>
12a81bd80aSSteven Rostedt #include <linux/hardirq.h>
137a8e76a3SSteven Rostedt #include <linux/module.h>
147a8e76a3SSteven Rostedt #include <linux/percpu.h>
157a8e76a3SSteven Rostedt #include <linux/mutex.h>
167a8e76a3SSteven Rostedt #include <linux/init.h>
177a8e76a3SSteven Rostedt #include <linux/hash.h>
187a8e76a3SSteven Rostedt #include <linux/list.h>
19554f786eSSteven Rostedt #include <linux/cpu.h>
207a8e76a3SSteven Rostedt #include <linux/fs.h>
217a8e76a3SSteven Rostedt 
22182e9f5fSSteven Rostedt #include "trace.h"
23182e9f5fSSteven Rostedt 
24033601a3SSteven Rostedt /*
25d1b182a8SSteven Rostedt  * The ring buffer header is special. We must manually keep it up to date.
26d1b182a8SSteven Rostedt  */
27d1b182a8SSteven Rostedt int ring_buffer_print_entry_header(struct trace_seq *s)
28d1b182a8SSteven Rostedt {
29d1b182a8SSteven Rostedt 	int ret;
30d1b182a8SSteven Rostedt 
31334d4169SLai Jiangshan 	ret = trace_seq_printf(s, "# compressed entry header\n");
32334d4169SLai Jiangshan 	ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
33d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
34d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
35d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\n");
36d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
37d1b182a8SSteven Rostedt 			       RINGBUF_TYPE_PADDING);
38d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
39d1b182a8SSteven Rostedt 			       RINGBUF_TYPE_TIME_EXTEND);
40334d4169SLai Jiangshan 	ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
41334d4169SLai Jiangshan 			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
42d1b182a8SSteven Rostedt 
43d1b182a8SSteven Rostedt 	return ret;
44d1b182a8SSteven Rostedt }
45d1b182a8SSteven Rostedt 
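/*
 * Illustrative sketch (a hypothetical helper, not used by this file): the
 * compressed header above packs a 5-bit type_len and a 27-bit time_delta
 * into the first word of every event. type_len values up to
 * RINGBUF_TYPE_DATA_TYPE_LEN_MAX describe data events (0 meaning the
 * length lives in array[0]); the values above that range are padding,
 * time extends and time stamps. The real decoding is in rb_event_length()
 * and friends below.
 */
static inline int rb_example_is_data_event(struct ring_buffer_event *event)
{
	return event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX;
}
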
46d1b182a8SSteven Rostedt /*
475cc98548SSteven Rostedt  * The ring buffer is made up of a list of pages. A separate list of pages is
485cc98548SSteven Rostedt  * allocated for each CPU. A writer may only write to a buffer that is
495cc98548SSteven Rostedt  * associated with the CPU it is currently executing on.  A reader may read
505cc98548SSteven Rostedt  * from any per cpu buffer.
515cc98548SSteven Rostedt  *
525cc98548SSteven Rostedt  * The reader is special. For each per cpu buffer, the reader has its own
535cc98548SSteven Rostedt  * reader page. When a reader has read the entire reader page, this reader
545cc98548SSteven Rostedt  * page is swapped with another page in the ring buffer.
555cc98548SSteven Rostedt  *
565cc98548SSteven Rostedt  * Now, as long as the writer is off the reader page, the reader can do
575cc98548SSteven Rostedt  * whatever it wants with that page. The writer will never write to that page
585cc98548SSteven Rostedt  * again (as long as it is out of the ring buffer).
595cc98548SSteven Rostedt  *
605cc98548SSteven Rostedt  * Here's some silly ASCII art.
615cc98548SSteven Rostedt  *
625cc98548SSteven Rostedt  *   +------+
635cc98548SSteven Rostedt  *   |reader|          RING BUFFER
645cc98548SSteven Rostedt  *   |page  |
655cc98548SSteven Rostedt  *   +------+        +---+   +---+   +---+
665cc98548SSteven Rostedt  *                   |   |-->|   |-->|   |
675cc98548SSteven Rostedt  *                   +---+   +---+   +---+
685cc98548SSteven Rostedt  *                     ^               |
695cc98548SSteven Rostedt  *                     |               |
705cc98548SSteven Rostedt  *                     +---------------+
715cc98548SSteven Rostedt  *
725cc98548SSteven Rostedt  *
735cc98548SSteven Rostedt  *   +------+
745cc98548SSteven Rostedt  *   |reader|          RING BUFFER
755cc98548SSteven Rostedt  *   |page  |------------------v
765cc98548SSteven Rostedt  *   +------+        +---+   +---+   +---+
775cc98548SSteven Rostedt  *                   |   |-->|   |-->|   |
785cc98548SSteven Rostedt  *                   +---+   +---+   +---+
795cc98548SSteven Rostedt  *                     ^               |
805cc98548SSteven Rostedt  *                     |               |
815cc98548SSteven Rostedt  *                     +---------------+
825cc98548SSteven Rostedt  *
835cc98548SSteven Rostedt  *
845cc98548SSteven Rostedt  *   +------+
855cc98548SSteven Rostedt  *   |reader|          RING BUFFER
865cc98548SSteven Rostedt  *   |page  |------------------v
875cc98548SSteven Rostedt  *   +------+        +---+   +---+   +---+
885cc98548SSteven Rostedt  *      ^            |   |-->|   |-->|   |
895cc98548SSteven Rostedt  *      |            +---+   +---+   +---+
905cc98548SSteven Rostedt  *      |                              |
915cc98548SSteven Rostedt  *      |                              |
925cc98548SSteven Rostedt  *      +------------------------------+
935cc98548SSteven Rostedt  *
945cc98548SSteven Rostedt  *
955cc98548SSteven Rostedt  *   +------+
965cc98548SSteven Rostedt  *   |buffer|          RING BUFFER
975cc98548SSteven Rostedt  *   |page  |------------------v
985cc98548SSteven Rostedt  *   +------+        +---+   +---+   +---+
995cc98548SSteven Rostedt  *      ^            |   |   |   |-->|   |
1005cc98548SSteven Rostedt  *      |   New      +---+   +---+   +---+
1015cc98548SSteven Rostedt  *      |  Reader------^               |
1025cc98548SSteven Rostedt  *      |   page                       |
1035cc98548SSteven Rostedt  *      +------------------------------+
1045cc98548SSteven Rostedt  *
1055cc98548SSteven Rostedt  *
1065cc98548SSteven Rostedt  * After we make this swap, the reader can hand this page off to the splice
1075cc98548SSteven Rostedt  * code and be done with it. It can even allocate a new page if it needs to
1085cc98548SSteven Rostedt  * and swap that into the ring buffer.
1095cc98548SSteven Rostedt  *
1105cc98548SSteven Rostedt  * We will be using cmpxchg soon to make all this lockless.
1115cc98548SSteven Rostedt  *
1125cc98548SSteven Rostedt  */
1135cc98548SSteven Rostedt 
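/*
 * Illustrative reader-side sketch (a hypothetical helper, not part of the
 * ring buffer implementation; it assumes the three-argument
 * ring_buffer_consume() prototype declared in linux/ring_buffer.h at this
 * revision): a consuming reader only ever touches the per cpu reader page,
 * and each successful consume may trigger the page swap drawn above.
 */
static inline int rb_example_drain(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;
	int count = 0;

	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL)
		count++;

	return count;
}
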
1145cc98548SSteven Rostedt /*
115033601a3SSteven Rostedt  * A fast way to enable or disable all ring buffers is to
116033601a3SSteven Rostedt  * call tracing_on or tracing_off. Turning off the ring buffers
117033601a3SSteven Rostedt  * prevents all ring buffers from being recorded to.
118033601a3SSteven Rostedt  * Turning this switch on makes it OK to write to the
119033601a3SSteven Rostedt  * ring buffer, if the ring buffer is enabled itself.
120033601a3SSteven Rostedt  *
121033601a3SSteven Rostedt  * There are three layers that must be on in order to write
122033601a3SSteven Rostedt  * to the ring buffer.
123033601a3SSteven Rostedt  *
124033601a3SSteven Rostedt  * 1) This global flag must be set.
125033601a3SSteven Rostedt  * 2) The ring buffer must be enabled for recording.
126033601a3SSteven Rostedt  * 3) The per cpu buffer must be enabled for recording.
127033601a3SSteven Rostedt  *
128033601a3SSteven Rostedt  * In case of an anomaly, this global flag has a bit set that
129033601a3SSteven Rostedt  * will permanently disable all ring buffers.
130033601a3SSteven Rostedt  */
131033601a3SSteven Rostedt 
132033601a3SSteven Rostedt /*
133033601a3SSteven Rostedt  * Global flag to disable all recording to ring buffers
134033601a3SSteven Rostedt  *  This has two bits: ON, DISABLED
135033601a3SSteven Rostedt  *
136033601a3SSteven Rostedt  *  ON   DISABLED
137033601a3SSteven Rostedt  * ---- ----------
138033601a3SSteven Rostedt  *   0      0        : ring buffers are off
139033601a3SSteven Rostedt  *   1      0        : ring buffers are on
140033601a3SSteven Rostedt  *   X      1        : ring buffers are permanently disabled
141033601a3SSteven Rostedt  */
142033601a3SSteven Rostedt 
143033601a3SSteven Rostedt enum {
144033601a3SSteven Rostedt 	RB_BUFFERS_ON_BIT	= 0,
145033601a3SSteven Rostedt 	RB_BUFFERS_DISABLED_BIT	= 1,
146033601a3SSteven Rostedt };
147033601a3SSteven Rostedt 
148033601a3SSteven Rostedt enum {
149033601a3SSteven Rostedt 	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
150033601a3SSteven Rostedt 	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
151033601a3SSteven Rostedt };
152033601a3SSteven Rostedt 
1535e39841cSHannes Eder static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
154a3583244SSteven Rostedt 
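/*
 * Illustrative sketch of the truth table above (a hypothetical helper, not
 * used below): the DISABLED bit overrides the ON bit, which is why
 * tracing_off_permanent() only needs to set that single bit.
 */
static inline int rb_example_permanently_disabled(void)
{
	return test_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}
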
155474d32b6SSteven Rostedt #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
156474d32b6SSteven Rostedt 
157a3583244SSteven Rostedt /**
158a3583244SSteven Rostedt  * tracing_on - enable all tracing buffers
159a3583244SSteven Rostedt  *
160a3583244SSteven Rostedt  * This function enables all tracing buffers that may have been
161a3583244SSteven Rostedt  * disabled with tracing_off.
162a3583244SSteven Rostedt  */
163a3583244SSteven Rostedt void tracing_on(void)
164a3583244SSteven Rostedt {
165033601a3SSteven Rostedt 	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
166a3583244SSteven Rostedt }
167c4f50183SRobert Richter EXPORT_SYMBOL_GPL(tracing_on);
168a3583244SSteven Rostedt 
169a3583244SSteven Rostedt /**
170a3583244SSteven Rostedt  * tracing_off - turn off all tracing buffers
171a3583244SSteven Rostedt  *
172a3583244SSteven Rostedt  * This function stops all tracing buffers from recording data.
173a3583244SSteven Rostedt  * It does not disable any overhead the tracers themselves may
174a3583244SSteven Rostedt  * be causing. This function simply causes all recording to
175a3583244SSteven Rostedt  * the ring buffers to fail.
176a3583244SSteven Rostedt  */
177a3583244SSteven Rostedt void tracing_off(void)
178a3583244SSteven Rostedt {
179033601a3SSteven Rostedt 	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
180033601a3SSteven Rostedt }
181c4f50183SRobert Richter EXPORT_SYMBOL_GPL(tracing_off);
182033601a3SSteven Rostedt 
183033601a3SSteven Rostedt /**
184033601a3SSteven Rostedt  * tracing_off_permanent - permanently disable ring buffers
185033601a3SSteven Rostedt  *
186033601a3SSteven Rostedt  * This function, once called, will disable all ring buffers
187c3706f00SWenji Huang  * permanently.
188033601a3SSteven Rostedt  */
189033601a3SSteven Rostedt void tracing_off_permanent(void)
190033601a3SSteven Rostedt {
191033601a3SSteven Rostedt 	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
192a3583244SSteven Rostedt }
193a3583244SSteven Rostedt 
194988ae9d6SSteven Rostedt /**
195988ae9d6SSteven Rostedt  * tracing_is_on - show whether the ring buffers are enabled
196988ae9d6SSteven Rostedt  */
197988ae9d6SSteven Rostedt int tracing_is_on(void)
198988ae9d6SSteven Rostedt {
199988ae9d6SSteven Rostedt 	return ring_buffer_flags == RB_BUFFERS_ON;
200988ae9d6SSteven Rostedt }
201988ae9d6SSteven Rostedt EXPORT_SYMBOL_GPL(tracing_is_on);
202988ae9d6SSteven Rostedt 
205e3d6bf0aSSteven Rostedt #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
20667d34724SAndrew Morton #define RB_ALIGNMENT		4U
207334d4169SLai Jiangshan #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
208334d4169SLai Jiangshan 
209334d4169SLai Jiangshan /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
210334d4169SLai Jiangshan #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
2117a8e76a3SSteven Rostedt 
2127a8e76a3SSteven Rostedt enum {
2137a8e76a3SSteven Rostedt 	RB_LEN_TIME_EXTEND = 8,
2147a8e76a3SSteven Rostedt 	RB_LEN_TIME_STAMP = 16,
2157a8e76a3SSteven Rostedt };
2167a8e76a3SSteven Rostedt 
2172d622719STom Zanussi static inline int rb_null_event(struct ring_buffer_event *event)
2182d622719STom Zanussi {
219334d4169SLai Jiangshan 	return event->type_len == RINGBUF_TYPE_PADDING
220334d4169SLai Jiangshan 			&& event->time_delta == 0;
2212d622719STom Zanussi }
2222d622719STom Zanussi 
2232d622719STom Zanussi static inline int rb_discarded_event(struct ring_buffer_event *event)
2242d622719STom Zanussi {
225334d4169SLai Jiangshan 	return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
2262d622719STom Zanussi }
2272d622719STom Zanussi 
2282d622719STom Zanussi static void rb_event_set_padding(struct ring_buffer_event *event)
2292d622719STom Zanussi {
230334d4169SLai Jiangshan 	event->type_len = RINGBUF_TYPE_PADDING;
2312d622719STom Zanussi 	event->time_delta = 0;
2322d622719STom Zanussi }
2332d622719STom Zanussi 
2342d622719STom Zanussi static unsigned
2352d622719STom Zanussi rb_event_data_length(struct ring_buffer_event *event)
2362d622719STom Zanussi {
2372d622719STom Zanussi 	unsigned length;
2382d622719STom Zanussi 
239334d4169SLai Jiangshan 	if (event->type_len)
240334d4169SLai Jiangshan 		length = event->type_len * RB_ALIGNMENT;
2412d622719STom Zanussi 	else
2422d622719STom Zanussi 		length = event->array[0];
2432d622719STom Zanussi 	return length + RB_EVNT_HDR_SIZE;
2442d622719STom Zanussi }
2452d622719STom Zanussi 
2467a8e76a3SSteven Rostedt /* inline for ring buffer fast paths */
24734a148bfSAndrew Morton static unsigned
2487a8e76a3SSteven Rostedt rb_event_length(struct ring_buffer_event *event)
2497a8e76a3SSteven Rostedt {
250334d4169SLai Jiangshan 	switch (event->type_len) {
2517a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
2522d622719STom Zanussi 		if (rb_null_event(event))
2537a8e76a3SSteven Rostedt 			/* undefined */
2547a8e76a3SSteven Rostedt 			return -1;
255334d4169SLai Jiangshan 		return  event->array[0] + RB_EVNT_HDR_SIZE;
2567a8e76a3SSteven Rostedt 
2577a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
2587a8e76a3SSteven Rostedt 		return RB_LEN_TIME_EXTEND;
2597a8e76a3SSteven Rostedt 
2607a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
2617a8e76a3SSteven Rostedt 		return RB_LEN_TIME_STAMP;
2627a8e76a3SSteven Rostedt 
2637a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
2642d622719STom Zanussi 		return rb_event_data_length(event);
2657a8e76a3SSteven Rostedt 	default:
2667a8e76a3SSteven Rostedt 		BUG();
2677a8e76a3SSteven Rostedt 	}
2687a8e76a3SSteven Rostedt 	/* not hit */
2697a8e76a3SSteven Rostedt 	return 0;
2707a8e76a3SSteven Rostedt }
2717a8e76a3SSteven Rostedt 
2727a8e76a3SSteven Rostedt /**
2737a8e76a3SSteven Rostedt  * ring_buffer_event_length - return the length of the event
2747a8e76a3SSteven Rostedt  * @event: the event to get the length of
2757a8e76a3SSteven Rostedt  */
2767a8e76a3SSteven Rostedt unsigned ring_buffer_event_length(struct ring_buffer_event *event)
2777a8e76a3SSteven Rostedt {
278465634adSRobert Richter 	unsigned length = rb_event_length(event);
279334d4169SLai Jiangshan 	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
280465634adSRobert Richter 		return length;
281465634adSRobert Richter 	length -= RB_EVNT_HDR_SIZE;
282465634adSRobert Richter 	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
283465634adSRobert Richter 		length -= sizeof(event->array[0]);
284465634adSRobert Richter 	return length;
2857a8e76a3SSteven Rostedt }
286c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_event_length);
2877a8e76a3SSteven Rostedt 
2887a8e76a3SSteven Rostedt /* inline for ring buffer fast paths */
28934a148bfSAndrew Morton static void *
2907a8e76a3SSteven Rostedt rb_event_data(struct ring_buffer_event *event)
2917a8e76a3SSteven Rostedt {
292334d4169SLai Jiangshan 	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
2937a8e76a3SSteven Rostedt 	/* If length is in len field, then array[0] has the data */
294334d4169SLai Jiangshan 	if (event->type_len)
2957a8e76a3SSteven Rostedt 		return (void *)&event->array[0];
2967a8e76a3SSteven Rostedt 	/* Otherwise length is in array[0] and array[1] has the data */
2977a8e76a3SSteven Rostedt 	return (void *)&event->array[1];
2987a8e76a3SSteven Rostedt }
2997a8e76a3SSteven Rostedt 
3007a8e76a3SSteven Rostedt /**
3017a8e76a3SSteven Rostedt  * ring_buffer_event_data - return the data of the event
3027a8e76a3SSteven Rostedt  * @event: the event to get the data from
3037a8e76a3SSteven Rostedt  */
3047a8e76a3SSteven Rostedt void *ring_buffer_event_data(struct ring_buffer_event *event)
3057a8e76a3SSteven Rostedt {
3067a8e76a3SSteven Rostedt 	return rb_event_data(event);
3077a8e76a3SSteven Rostedt }
308c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_event_data);
3097a8e76a3SSteven Rostedt 
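/*
 * Illustrative usage of the two accessors above (a hypothetical helper):
 * ring_buffer_event_data() hides whether the payload starts at array[0]
 * (small events) or array[1] (large events), and ring_buffer_event_length()
 * reports only the payload size, not the header.
 */
static inline size_t rb_example_payload(struct ring_buffer_event *event,
					void **data)
{
	*data = ring_buffer_event_data(event);
	return ring_buffer_event_length(event);
}
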
3107a8e76a3SSteven Rostedt #define for_each_buffer_cpu(buffer, cpu)		\
3119e01c1b7SRusty Russell 	for_each_cpu(cpu, buffer->cpumask)
3127a8e76a3SSteven Rostedt 
3137a8e76a3SSteven Rostedt #define TS_SHIFT	27
3147a8e76a3SSteven Rostedt #define TS_MASK		((1ULL << TS_SHIFT) - 1)
3157a8e76a3SSteven Rostedt #define TS_DELTA_TEST	(~TS_MASK)
3167a8e76a3SSteven Rostedt 
317abc9b56dSSteven Rostedt struct buffer_data_page {
3187a8e76a3SSteven Rostedt 	u64		 time_stamp;	/* page time stamp */
319c3706f00SWenji Huang 	local_t		 commit;	/* write committed index */
320abc9b56dSSteven Rostedt 	unsigned char	 data[];	/* data of buffer page */
321abc9b56dSSteven Rostedt };
322abc9b56dSSteven Rostedt 
323abc9b56dSSteven Rostedt struct buffer_page {
324778c55d4SSteven Rostedt 	struct list_head list;		/* list of buffer pages */
325abc9b56dSSteven Rostedt 	local_t		 write;		/* index for next write */
3266f807acdSSteven Rostedt 	unsigned	 read;		/* index for next read */
327778c55d4SSteven Rostedt 	local_t		 entries;	/* entries on this page */
328abc9b56dSSteven Rostedt 	struct buffer_data_page *page;	/* Actual data page */
3297a8e76a3SSteven Rostedt };
3307a8e76a3SSteven Rostedt 
331044fa782SSteven Rostedt static void rb_init_page(struct buffer_data_page *bpage)
332abc9b56dSSteven Rostedt {
333044fa782SSteven Rostedt 	local_set(&bpage->commit, 0);
334abc9b56dSSteven Rostedt }
335abc9b56dSSteven Rostedt 
336474d32b6SSteven Rostedt /**
337474d32b6SSteven Rostedt  * ring_buffer_page_len - the size of data on the page.
338474d32b6SSteven Rostedt  * @page: The page to read
339474d32b6SSteven Rostedt  *
340474d32b6SSteven Rostedt  * Returns the amount of data on the page, including buffer page header.
341474d32b6SSteven Rostedt  */
342ef7a4a16SSteven Rostedt size_t ring_buffer_page_len(void *page)
343ef7a4a16SSteven Rostedt {
344474d32b6SSteven Rostedt 	return local_read(&((struct buffer_data_page *)page)->commit)
345474d32b6SSteven Rostedt 		+ BUF_PAGE_HDR_SIZE;
346ef7a4a16SSteven Rostedt }
347ef7a4a16SSteven Rostedt 
3487a8e76a3SSteven Rostedt /*
349ed56829cSSteven Rostedt  * Stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
350ed56829cSSteven Rostedt  * this issue out.
351ed56829cSSteven Rostedt  */
35234a148bfSAndrew Morton static void free_buffer_page(struct buffer_page *bpage)
353ed56829cSSteven Rostedt {
3546ae2a076SSteven Rostedt 	free_page((unsigned long)bpage->page);
355e4c2ce82SSteven Rostedt 	kfree(bpage);
356ed56829cSSteven Rostedt }
357ed56829cSSteven Rostedt 
358ed56829cSSteven Rostedt /*
3597a8e76a3SSteven Rostedt  * We need to fit the time_stamp delta into 27 bits.
3607a8e76a3SSteven Rostedt  */
3617a8e76a3SSteven Rostedt static inline int test_time_stamp(u64 delta)
3627a8e76a3SSteven Rostedt {
3637a8e76a3SSteven Rostedt 	if (delta & TS_DELTA_TEST)
3647a8e76a3SSteven Rostedt 		return 1;
3657a8e76a3SSteven Rostedt 	return 0;
3667a8e76a3SSteven Rostedt }
3677a8e76a3SSteven Rostedt 
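/*
 * Illustrative sketch of the 27-bit limit (a hypothetical helper): a delta
 * that fails test_time_stamp() cannot be carried by the event header alone,
 * so it would have to be split into the 27 bits that fit plus an overflow
 * stored in a RINGBUF_TYPE_TIME_EXTEND event.
 */
static inline void rb_example_split_delta(u64 delta, u32 *low27, u64 *high)
{
	*low27 = delta & TS_MASK;	/* fits the 27-bit time_delta field */
	*high  = delta >> TS_SHIFT;	/* overflow needing a time extend */
}
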
368474d32b6SSteven Rostedt #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
3697a8e76a3SSteven Rostedt 
370be957c44SSteven Rostedt /* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
371be957c44SSteven Rostedt #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
372be957c44SSteven Rostedt 
373d1b182a8SSteven Rostedt int ring_buffer_print_page_header(struct trace_seq *s)
374d1b182a8SSteven Rostedt {
375d1b182a8SSteven Rostedt 	struct buffer_data_page field;
376d1b182a8SSteven Rostedt 	int ret;
377d1b182a8SSteven Rostedt 
378d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
379d1b182a8SSteven Rostedt 			       "offset:0;\tsize:%u;\n",
380d1b182a8SSteven Rostedt 			       (unsigned int)sizeof(field.time_stamp));
381d1b182a8SSteven Rostedt 
382d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
383d1b182a8SSteven Rostedt 			       "offset:%u;\tsize:%u;\n",
384d1b182a8SSteven Rostedt 			       (unsigned int)offsetof(typeof(field), commit),
385d1b182a8SSteven Rostedt 			       (unsigned int)sizeof(field.commit));
386d1b182a8SSteven Rostedt 
387d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tfield: char data;\t"
388d1b182a8SSteven Rostedt 			       "offset:%u;\tsize:%u;\n",
389d1b182a8SSteven Rostedt 			       (unsigned int)offsetof(typeof(field), data),
390d1b182a8SSteven Rostedt 			       (unsigned int)BUF_PAGE_SIZE);
391d1b182a8SSteven Rostedt 
392d1b182a8SSteven Rostedt 	return ret;
393d1b182a8SSteven Rostedt }
394d1b182a8SSteven Rostedt 
3957a8e76a3SSteven Rostedt /*
3967a8e76a3SSteven Rostedt  * If head_page == tail_page && head == tail, then the buffer is empty.
3977a8e76a3SSteven Rostedt  */
3987a8e76a3SSteven Rostedt struct ring_buffer_per_cpu {
3997a8e76a3SSteven Rostedt 	int				cpu;
4007a8e76a3SSteven Rostedt 	struct ring_buffer		*buffer;
401f83c9d0fSSteven Rostedt 	spinlock_t			reader_lock; /* serialize readers */
4023e03fb7fSSteven Rostedt 	raw_spinlock_t			lock;
4037a8e76a3SSteven Rostedt 	struct lock_class_key		lock_key;
4047a8e76a3SSteven Rostedt 	struct list_head		pages;
4056f807acdSSteven Rostedt 	struct buffer_page		*head_page;	/* read from head */
4066f807acdSSteven Rostedt 	struct buffer_page		*tail_page;	/* write to tail */
407c3706f00SWenji Huang 	struct buffer_page		*commit_page;	/* committed pages */
408d769041fSSteven Rostedt 	struct buffer_page		*reader_page;
409f0d2c681SSteven Rostedt 	unsigned long			nmi_dropped;
410f0d2c681SSteven Rostedt 	unsigned long			commit_overrun;
4117a8e76a3SSteven Rostedt 	unsigned long			overrun;
412e4906effSSteven Rostedt 	unsigned long			read;
413e4906effSSteven Rostedt 	local_t				entries;
4147a8e76a3SSteven Rostedt 	u64				write_stamp;
4157a8e76a3SSteven Rostedt 	u64				read_stamp;
4167a8e76a3SSteven Rostedt 	atomic_t			record_disabled;
4177a8e76a3SSteven Rostedt };
4187a8e76a3SSteven Rostedt 
4197a8e76a3SSteven Rostedt struct ring_buffer {
4207a8e76a3SSteven Rostedt 	unsigned			pages;
4217a8e76a3SSteven Rostedt 	unsigned			flags;
4227a8e76a3SSteven Rostedt 	int				cpus;
4237a8e76a3SSteven Rostedt 	atomic_t			record_disabled;
42400f62f61SArnaldo Carvalho de Melo 	cpumask_var_t			cpumask;
4257a8e76a3SSteven Rostedt 
4267a8e76a3SSteven Rostedt 	struct mutex			mutex;
4277a8e76a3SSteven Rostedt 
4287a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu	**buffers;
429554f786eSSteven Rostedt 
43059222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
431554f786eSSteven Rostedt 	struct notifier_block		cpu_notify;
432554f786eSSteven Rostedt #endif
43337886f6aSSteven Rostedt 	u64				(*clock)(void);
4347a8e76a3SSteven Rostedt };
4357a8e76a3SSteven Rostedt 
4367a8e76a3SSteven Rostedt struct ring_buffer_iter {
4377a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu	*cpu_buffer;
4387a8e76a3SSteven Rostedt 	unsigned long			head;
4397a8e76a3SSteven Rostedt 	struct buffer_page		*head_page;
4407a8e76a3SSteven Rostedt 	u64				read_stamp;
4417a8e76a3SSteven Rostedt };
4427a8e76a3SSteven Rostedt 
443f536aafcSSteven Rostedt /* buffer may be either ring_buffer or ring_buffer_per_cpu */
4447a8e76a3SSteven Rostedt #define RB_WARN_ON(buffer, cond)				\
4453e89c7bbSSteven Rostedt 	({							\
4463e89c7bbSSteven Rostedt 		int _____ret = unlikely(cond);			\
4473e89c7bbSSteven Rostedt 		if (_____ret) {					\
448bf41a158SSteven Rostedt 			atomic_inc(&buffer->record_disabled);	\
449bf41a158SSteven Rostedt 			WARN_ON(1);				\
450bf41a158SSteven Rostedt 		}						\
4513e89c7bbSSteven Rostedt 		_____ret;					\
4523e89c7bbSSteven Rostedt 	})
453f536aafcSSteven Rostedt 
45437886f6aSSteven Rostedt /* Up this if you want to test the TIME_EXTENTS and normalization */
45537886f6aSSteven Rostedt #define DEBUG_SHIFT 0
45637886f6aSSteven Rostedt 
45788eb0125SSteven Rostedt static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu)
45888eb0125SSteven Rostedt {
45988eb0125SSteven Rostedt 	/* shift to debug/test normalization and TIME_EXTENTS */
46088eb0125SSteven Rostedt 	return buffer->clock() << DEBUG_SHIFT;
46188eb0125SSteven Rostedt }
46288eb0125SSteven Rostedt 
46337886f6aSSteven Rostedt u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
46437886f6aSSteven Rostedt {
46537886f6aSSteven Rostedt 	u64 time;
46637886f6aSSteven Rostedt 
46737886f6aSSteven Rostedt 	preempt_disable_notrace();
46888eb0125SSteven Rostedt 	time = rb_time_stamp(buffer, cpu);
46937886f6aSSteven Rostedt 	preempt_enable_no_resched_notrace();
47037886f6aSSteven Rostedt 
47137886f6aSSteven Rostedt 	return time;
47237886f6aSSteven Rostedt }
47337886f6aSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
47437886f6aSSteven Rostedt 
47537886f6aSSteven Rostedt void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
47637886f6aSSteven Rostedt 				      int cpu, u64 *ts)
47737886f6aSSteven Rostedt {
47837886f6aSSteven Rostedt 	/* Just stupid testing the normalize function and deltas */
47937886f6aSSteven Rostedt 	*ts >>= DEBUG_SHIFT;
48037886f6aSSteven Rostedt }
48137886f6aSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
48237886f6aSSteven Rostedt 
4837a8e76a3SSteven Rostedt /**
4847a8e76a3SSteven Rostedt  * rb_check_pages - integrity check of buffer pages
4857a8e76a3SSteven Rostedt  * @cpu_buffer: CPU buffer with pages to test
4867a8e76a3SSteven Rostedt  *
487c3706f00SWenji Huang  * As a safety measure we check to make sure the data pages have not
4887a8e76a3SSteven Rostedt  * been corrupted.
4897a8e76a3SSteven Rostedt  */
4907a8e76a3SSteven Rostedt static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
4917a8e76a3SSteven Rostedt {
4927a8e76a3SSteven Rostedt 	struct list_head *head = &cpu_buffer->pages;
493044fa782SSteven Rostedt 	struct buffer_page *bpage, *tmp;
4947a8e76a3SSteven Rostedt 
4953e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
4963e89c7bbSSteven Rostedt 		return -1;
4973e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
4983e89c7bbSSteven Rostedt 		return -1;
4997a8e76a3SSteven Rostedt 
500044fa782SSteven Rostedt 	list_for_each_entry_safe(bpage, tmp, head, list) {
5013e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer,
502044fa782SSteven Rostedt 			       bpage->list.next->prev != &bpage->list))
5033e89c7bbSSteven Rostedt 			return -1;
5043e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer,
505044fa782SSteven Rostedt 			       bpage->list.prev->next != &bpage->list))
5063e89c7bbSSteven Rostedt 			return -1;
5077a8e76a3SSteven Rostedt 	}
5087a8e76a3SSteven Rostedt 
5097a8e76a3SSteven Rostedt 	return 0;
5107a8e76a3SSteven Rostedt }
5117a8e76a3SSteven Rostedt 
5127a8e76a3SSteven Rostedt static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
5137a8e76a3SSteven Rostedt 			     unsigned nr_pages)
5147a8e76a3SSteven Rostedt {
5157a8e76a3SSteven Rostedt 	struct list_head *head = &cpu_buffer->pages;
516044fa782SSteven Rostedt 	struct buffer_page *bpage, *tmp;
5177a8e76a3SSteven Rostedt 	unsigned long addr;
5187a8e76a3SSteven Rostedt 	LIST_HEAD(pages);
5197a8e76a3SSteven Rostedt 	unsigned i;
5207a8e76a3SSteven Rostedt 
5217a8e76a3SSteven Rostedt 	for (i = 0; i < nr_pages; i++) {
522044fa782SSteven Rostedt 		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
523aa1e0e3bSSteven Rostedt 				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
524044fa782SSteven Rostedt 		if (!bpage)
525e4c2ce82SSteven Rostedt 			goto free_pages;
526044fa782SSteven Rostedt 		list_add(&bpage->list, &pages);
527e4c2ce82SSteven Rostedt 
5287a8e76a3SSteven Rostedt 		addr = __get_free_page(GFP_KERNEL);
5297a8e76a3SSteven Rostedt 		if (!addr)
5307a8e76a3SSteven Rostedt 			goto free_pages;
531044fa782SSteven Rostedt 		bpage->page = (void *)addr;
532044fa782SSteven Rostedt 		rb_init_page(bpage->page);
5337a8e76a3SSteven Rostedt 	}
5347a8e76a3SSteven Rostedt 
5357a8e76a3SSteven Rostedt 	list_splice(&pages, head);
5367a8e76a3SSteven Rostedt 
5377a8e76a3SSteven Rostedt 	rb_check_pages(cpu_buffer);
5387a8e76a3SSteven Rostedt 
5397a8e76a3SSteven Rostedt 	return 0;
5407a8e76a3SSteven Rostedt 
5417a8e76a3SSteven Rostedt  free_pages:
542044fa782SSteven Rostedt 	list_for_each_entry_safe(bpage, tmp, &pages, list) {
543044fa782SSteven Rostedt 		list_del_init(&bpage->list);
544044fa782SSteven Rostedt 		free_buffer_page(bpage);
5457a8e76a3SSteven Rostedt 	}
5467a8e76a3SSteven Rostedt 	return -ENOMEM;
5477a8e76a3SSteven Rostedt }
5487a8e76a3SSteven Rostedt 
5497a8e76a3SSteven Rostedt static struct ring_buffer_per_cpu *
5507a8e76a3SSteven Rostedt rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
5517a8e76a3SSteven Rostedt {
5527a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
553044fa782SSteven Rostedt 	struct buffer_page *bpage;
554d769041fSSteven Rostedt 	unsigned long addr;
5557a8e76a3SSteven Rostedt 	int ret;
5567a8e76a3SSteven Rostedt 
5577a8e76a3SSteven Rostedt 	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
5587a8e76a3SSteven Rostedt 				  GFP_KERNEL, cpu_to_node(cpu));
5597a8e76a3SSteven Rostedt 	if (!cpu_buffer)
5607a8e76a3SSteven Rostedt 		return NULL;
5617a8e76a3SSteven Rostedt 
5627a8e76a3SSteven Rostedt 	cpu_buffer->cpu = cpu;
5637a8e76a3SSteven Rostedt 	cpu_buffer->buffer = buffer;
564f83c9d0fSSteven Rostedt 	spin_lock_init(&cpu_buffer->reader_lock);
5653e03fb7fSSteven Rostedt 	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
5667a8e76a3SSteven Rostedt 	INIT_LIST_HEAD(&cpu_buffer->pages);
5677a8e76a3SSteven Rostedt 
568044fa782SSteven Rostedt 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
569e4c2ce82SSteven Rostedt 			    GFP_KERNEL, cpu_to_node(cpu));
570044fa782SSteven Rostedt 	if (!bpage)
571e4c2ce82SSteven Rostedt 		goto fail_free_buffer;
572e4c2ce82SSteven Rostedt 
573044fa782SSteven Rostedt 	cpu_buffer->reader_page = bpage;
574d769041fSSteven Rostedt 	addr = __get_free_page(GFP_KERNEL);
575d769041fSSteven Rostedt 	if (!addr)
576e4c2ce82SSteven Rostedt 		goto fail_free_reader;
577044fa782SSteven Rostedt 	bpage->page = (void *)addr;
578044fa782SSteven Rostedt 	rb_init_page(bpage->page);
579e4c2ce82SSteven Rostedt 
580d769041fSSteven Rostedt 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
581d769041fSSteven Rostedt 
5827a8e76a3SSteven Rostedt 	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
5837a8e76a3SSteven Rostedt 	if (ret < 0)
584d769041fSSteven Rostedt 		goto fail_free_reader;
5857a8e76a3SSteven Rostedt 
5867a8e76a3SSteven Rostedt 	cpu_buffer->head_page
5877a8e76a3SSteven Rostedt 		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
588bf41a158SSteven Rostedt 	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
5897a8e76a3SSteven Rostedt 
5907a8e76a3SSteven Rostedt 	return cpu_buffer;
5917a8e76a3SSteven Rostedt 
592d769041fSSteven Rostedt  fail_free_reader:
593d769041fSSteven Rostedt 	free_buffer_page(cpu_buffer->reader_page);
594d769041fSSteven Rostedt 
5957a8e76a3SSteven Rostedt  fail_free_buffer:
5967a8e76a3SSteven Rostedt 	kfree(cpu_buffer);
5977a8e76a3SSteven Rostedt 	return NULL;
5987a8e76a3SSteven Rostedt }
5997a8e76a3SSteven Rostedt 
6007a8e76a3SSteven Rostedt static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
6017a8e76a3SSteven Rostedt {
6027a8e76a3SSteven Rostedt 	struct list_head *head = &cpu_buffer->pages;
603044fa782SSteven Rostedt 	struct buffer_page *bpage, *tmp;
6047a8e76a3SSteven Rostedt 
605d769041fSSteven Rostedt 	free_buffer_page(cpu_buffer->reader_page);
606d769041fSSteven Rostedt 
607044fa782SSteven Rostedt 	list_for_each_entry_safe(bpage, tmp, head, list) {
608044fa782SSteven Rostedt 		list_del_init(&bpage->list);
609044fa782SSteven Rostedt 		free_buffer_page(bpage);
6107a8e76a3SSteven Rostedt 	}
6117a8e76a3SSteven Rostedt 	kfree(cpu_buffer);
6127a8e76a3SSteven Rostedt }
6137a8e76a3SSteven Rostedt 
614a7b13743SSteven Rostedt /*
615a7b13743SSteven Rostedt  * Causes compile errors if the struct buffer_page gets bigger
616a7b13743SSteven Rostedt  * than the struct page.
617a7b13743SSteven Rostedt  */
618a7b13743SSteven Rostedt extern int ring_buffer_page_too_big(void);
619a7b13743SSteven Rostedt 
62059222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
62109c9e84dSFrederic Weisbecker static int rb_cpu_notify(struct notifier_block *self,
622554f786eSSteven Rostedt 			 unsigned long action, void *hcpu);
623554f786eSSteven Rostedt #endif
624554f786eSSteven Rostedt 
6257a8e76a3SSteven Rostedt /**
6267a8e76a3SSteven Rostedt  * ring_buffer_alloc - allocate a new ring_buffer
62768814b58SRobert Richter  * @size: the size in bytes per cpu that is needed.
6287a8e76a3SSteven Rostedt  * @flags: attributes to set for the ring buffer.
6297a8e76a3SSteven Rostedt  *
6307a8e76a3SSteven Rostedt  * Currently the only flag that is available is the RB_FL_OVERWRITE
6317a8e76a3SSteven Rostedt  * flag. This flag means that the buffer will overwrite old data
6327a8e76a3SSteven Rostedt  * when the buffer wraps. If this flag is not set, the buffer will
6337a8e76a3SSteven Rostedt  * drop data when the tail hits the head.
6347a8e76a3SSteven Rostedt  */
6357a8e76a3SSteven Rostedt struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
6367a8e76a3SSteven Rostedt {
6377a8e76a3SSteven Rostedt 	struct ring_buffer *buffer;
6387a8e76a3SSteven Rostedt 	int bsize;
6397a8e76a3SSteven Rostedt 	int cpu;
6407a8e76a3SSteven Rostedt 
641a7b13743SSteven Rostedt 	/* Paranoid! Optimizes out when all is well */
642a7b13743SSteven Rostedt 	if (sizeof(struct buffer_page) > sizeof(struct page))
643a7b13743SSteven Rostedt 		ring_buffer_page_too_big();
644a7b13743SSteven Rostedt 
645a7b13743SSteven Rostedt 
6467a8e76a3SSteven Rostedt 	/* keep it in its own cache line */
6477a8e76a3SSteven Rostedt 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
6487a8e76a3SSteven Rostedt 			 GFP_KERNEL);
6497a8e76a3SSteven Rostedt 	if (!buffer)
6507a8e76a3SSteven Rostedt 		return NULL;
6517a8e76a3SSteven Rostedt 
6529e01c1b7SRusty Russell 	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
6539e01c1b7SRusty Russell 		goto fail_free_buffer;
6549e01c1b7SRusty Russell 
6557a8e76a3SSteven Rostedt 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
6567a8e76a3SSteven Rostedt 	buffer->flags = flags;
65737886f6aSSteven Rostedt 	buffer->clock = trace_clock_local;
6587a8e76a3SSteven Rostedt 
6597a8e76a3SSteven Rostedt 	/* need at least two pages */
6607a8e76a3SSteven Rostedt 	if (buffer->pages == 1)
6617a8e76a3SSteven Rostedt 		buffer->pages++;
6627a8e76a3SSteven Rostedt 
6633bf832ceSFrederic Weisbecker 	/*
6643bf832ceSFrederic Weisbecker 	 * In case of non-hotplug cpu, if the ring-buffer is allocated
6653bf832ceSFrederic Weisbecker 	 * in early initcall, it will not be notified of secondary cpus.
6663bf832ceSFrederic Weisbecker  * In that case, we need to allocate for all possible cpus.
6673bf832ceSFrederic Weisbecker 	 */
6683bf832ceSFrederic Weisbecker #ifdef CONFIG_HOTPLUG_CPU
669554f786eSSteven Rostedt 	get_online_cpus();
670554f786eSSteven Rostedt 	cpumask_copy(buffer->cpumask, cpu_online_mask);
6713bf832ceSFrederic Weisbecker #else
6723bf832ceSFrederic Weisbecker 	cpumask_copy(buffer->cpumask, cpu_possible_mask);
6733bf832ceSFrederic Weisbecker #endif
6747a8e76a3SSteven Rostedt 	buffer->cpus = nr_cpu_ids;
6757a8e76a3SSteven Rostedt 
6767a8e76a3SSteven Rostedt 	bsize = sizeof(void *) * nr_cpu_ids;
6777a8e76a3SSteven Rostedt 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
6787a8e76a3SSteven Rostedt 				  GFP_KERNEL);
6797a8e76a3SSteven Rostedt 	if (!buffer->buffers)
6809e01c1b7SRusty Russell 		goto fail_free_cpumask;
6817a8e76a3SSteven Rostedt 
6827a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
6837a8e76a3SSteven Rostedt 		buffer->buffers[cpu] =
6847a8e76a3SSteven Rostedt 			rb_allocate_cpu_buffer(buffer, cpu);
6857a8e76a3SSteven Rostedt 		if (!buffer->buffers[cpu])
6867a8e76a3SSteven Rostedt 			goto fail_free_buffers;
6877a8e76a3SSteven Rostedt 	}
6887a8e76a3SSteven Rostedt 
68959222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
690554f786eSSteven Rostedt 	buffer->cpu_notify.notifier_call = rb_cpu_notify;
691554f786eSSteven Rostedt 	buffer->cpu_notify.priority = 0;
692554f786eSSteven Rostedt 	register_cpu_notifier(&buffer->cpu_notify);
693554f786eSSteven Rostedt #endif
694554f786eSSteven Rostedt 
695554f786eSSteven Rostedt 	put_online_cpus();
6967a8e76a3SSteven Rostedt 	mutex_init(&buffer->mutex);
6977a8e76a3SSteven Rostedt 
6987a8e76a3SSteven Rostedt 	return buffer;
6997a8e76a3SSteven Rostedt 
7007a8e76a3SSteven Rostedt  fail_free_buffers:
7017a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
7027a8e76a3SSteven Rostedt 		if (buffer->buffers[cpu])
7037a8e76a3SSteven Rostedt 			rb_free_cpu_buffer(buffer->buffers[cpu]);
7047a8e76a3SSteven Rostedt 	}
7057a8e76a3SSteven Rostedt 	kfree(buffer->buffers);
7067a8e76a3SSteven Rostedt 
7079e01c1b7SRusty Russell  fail_free_cpumask:
7089e01c1b7SRusty Russell 	free_cpumask_var(buffer->cpumask);
709554f786eSSteven Rostedt 	put_online_cpus();
7109e01c1b7SRusty Russell 
7117a8e76a3SSteven Rostedt  fail_free_buffer:
7127a8e76a3SSteven Rostedt 	kfree(buffer);
7137a8e76a3SSteven Rostedt 	return NULL;
7147a8e76a3SSteven Rostedt }
715c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_alloc);
7167a8e76a3SSteven Rostedt 
7177a8e76a3SSteven Rostedt /**
7187a8e76a3SSteven Rostedt  * ring_buffer_free - free a ring buffer.
7197a8e76a3SSteven Rostedt  * @buffer: the buffer to free.
7207a8e76a3SSteven Rostedt  */
7217a8e76a3SSteven Rostedt void
7227a8e76a3SSteven Rostedt ring_buffer_free(struct ring_buffer *buffer)
7237a8e76a3SSteven Rostedt {
7247a8e76a3SSteven Rostedt 	int cpu;
7257a8e76a3SSteven Rostedt 
726554f786eSSteven Rostedt 	get_online_cpus();
727554f786eSSteven Rostedt 
72859222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
729554f786eSSteven Rostedt 	unregister_cpu_notifier(&buffer->cpu_notify);
730554f786eSSteven Rostedt #endif
731554f786eSSteven Rostedt 
7327a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu)
7337a8e76a3SSteven Rostedt 		rb_free_cpu_buffer(buffer->buffers[cpu]);
7347a8e76a3SSteven Rostedt 
735554f786eSSteven Rostedt 	put_online_cpus();
736554f786eSSteven Rostedt 
7379e01c1b7SRusty Russell 	free_cpumask_var(buffer->cpumask);
7389e01c1b7SRusty Russell 
7397a8e76a3SSteven Rostedt 	kfree(buffer);
7407a8e76a3SSteven Rostedt }
741c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_free);
7427a8e76a3SSteven Rostedt 
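/*
 * Illustrative usage of ring_buffer_alloc()/ring_buffer_free() above (a
 * hypothetical helper with error handling trimmed): the size is per cpu,
 * rounded up to whole buffer pages, and at least two pages are kept.
 */
static inline void rb_example_alloc_free(void)
{
	struct ring_buffer *buffer;

	buffer = ring_buffer_alloc(128 * 1024, RB_FL_OVERWRITE);
	if (!buffer)
		return;
	/* ... reserve, commit and consume events here ... */
	ring_buffer_free(buffer);
}
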
74337886f6aSSteven Rostedt void ring_buffer_set_clock(struct ring_buffer *buffer,
74437886f6aSSteven Rostedt 			   u64 (*clock)(void))
74537886f6aSSteven Rostedt {
74637886f6aSSteven Rostedt 	buffer->clock = clock;
74737886f6aSSteven Rostedt }
74837886f6aSSteven Rostedt 
7497a8e76a3SSteven Rostedt static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
7507a8e76a3SSteven Rostedt 
7517a8e76a3SSteven Rostedt static void
7527a8e76a3SSteven Rostedt rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
7537a8e76a3SSteven Rostedt {
754044fa782SSteven Rostedt 	struct buffer_page *bpage;
7557a8e76a3SSteven Rostedt 	struct list_head *p;
7567a8e76a3SSteven Rostedt 	unsigned i;
7577a8e76a3SSteven Rostedt 
7587a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
7597a8e76a3SSteven Rostedt 	synchronize_sched();
7607a8e76a3SSteven Rostedt 
7617a8e76a3SSteven Rostedt 	for (i = 0; i < nr_pages; i++) {
7623e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
7633e89c7bbSSteven Rostedt 			return;
7647a8e76a3SSteven Rostedt 		p = cpu_buffer->pages.next;
765044fa782SSteven Rostedt 		bpage = list_entry(p, struct buffer_page, list);
766044fa782SSteven Rostedt 		list_del_init(&bpage->list);
767044fa782SSteven Rostedt 		free_buffer_page(bpage);
7687a8e76a3SSteven Rostedt 	}
7693e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
7703e89c7bbSSteven Rostedt 		return;
7717a8e76a3SSteven Rostedt 
7727a8e76a3SSteven Rostedt 	rb_reset_cpu(cpu_buffer);
7737a8e76a3SSteven Rostedt 
7747a8e76a3SSteven Rostedt 	rb_check_pages(cpu_buffer);
7757a8e76a3SSteven Rostedt 
7767a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
7777a8e76a3SSteven Rostedt 
7787a8e76a3SSteven Rostedt }
7797a8e76a3SSteven Rostedt 
7807a8e76a3SSteven Rostedt static void
7817a8e76a3SSteven Rostedt rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
7827a8e76a3SSteven Rostedt 		struct list_head *pages, unsigned nr_pages)
7837a8e76a3SSteven Rostedt {
784044fa782SSteven Rostedt 	struct buffer_page *bpage;
7857a8e76a3SSteven Rostedt 	struct list_head *p;
7867a8e76a3SSteven Rostedt 	unsigned i;
7877a8e76a3SSteven Rostedt 
7887a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
7897a8e76a3SSteven Rostedt 	synchronize_sched();
7907a8e76a3SSteven Rostedt 
7917a8e76a3SSteven Rostedt 	for (i = 0; i < nr_pages; i++) {
7923e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
7933e89c7bbSSteven Rostedt 			return;
7947a8e76a3SSteven Rostedt 		p = pages->next;
795044fa782SSteven Rostedt 		bpage = list_entry(p, struct buffer_page, list);
796044fa782SSteven Rostedt 		list_del_init(&bpage->list);
797044fa782SSteven Rostedt 		list_add_tail(&bpage->list, &cpu_buffer->pages);
7987a8e76a3SSteven Rostedt 	}
7997a8e76a3SSteven Rostedt 	rb_reset_cpu(cpu_buffer);
8007a8e76a3SSteven Rostedt 
8017a8e76a3SSteven Rostedt 	rb_check_pages(cpu_buffer);
8027a8e76a3SSteven Rostedt 
8037a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
8047a8e76a3SSteven Rostedt }
8057a8e76a3SSteven Rostedt 
8067a8e76a3SSteven Rostedt /**
8077a8e76a3SSteven Rostedt  * ring_buffer_resize - resize the ring buffer
8087a8e76a3SSteven Rostedt  * @buffer: the buffer to resize.
8097a8e76a3SSteven Rostedt  * @size: the new size.
8107a8e76a3SSteven Rostedt  *
8117a8e76a3SSteven Rostedt  * The tracer is responsible for making sure that the buffer is
8127a8e76a3SSteven Rostedt  * not being used while changing the size.
8137a8e76a3SSteven Rostedt  * Note: We may be able to change the above requirement by using
8147a8e76a3SSteven Rostedt  *  RCU synchronizations.
8157a8e76a3SSteven Rostedt  *
8167a8e76a3SSteven Rostedt  * Minimum size is 2 * BUF_PAGE_SIZE.
8177a8e76a3SSteven Rostedt  *
8187a8e76a3SSteven Rostedt  * Returns -1 on failure.
8197a8e76a3SSteven Rostedt  */
8207a8e76a3SSteven Rostedt int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
8217a8e76a3SSteven Rostedt {
8227a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
8237a8e76a3SSteven Rostedt 	unsigned nr_pages, rm_pages, new_pages;
824044fa782SSteven Rostedt 	struct buffer_page *bpage, *tmp;
8257a8e76a3SSteven Rostedt 	unsigned long buffer_size;
8267a8e76a3SSteven Rostedt 	unsigned long addr;
8277a8e76a3SSteven Rostedt 	LIST_HEAD(pages);
8287a8e76a3SSteven Rostedt 	int i, cpu;
8297a8e76a3SSteven Rostedt 
830ee51a1deSIngo Molnar 	/*
831ee51a1deSIngo Molnar 	 * Always succeed at resizing a non-existent buffer:
832ee51a1deSIngo Molnar 	 */
833ee51a1deSIngo Molnar 	if (!buffer)
834ee51a1deSIngo Molnar 		return size;
835ee51a1deSIngo Molnar 
8367a8e76a3SSteven Rostedt 	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
8377a8e76a3SSteven Rostedt 	size *= BUF_PAGE_SIZE;
8387a8e76a3SSteven Rostedt 	buffer_size = buffer->pages * BUF_PAGE_SIZE;
8397a8e76a3SSteven Rostedt 
8407a8e76a3SSteven Rostedt 	/* we need a minimum of two pages */
8417a8e76a3SSteven Rostedt 	if (size < BUF_PAGE_SIZE * 2)
8427a8e76a3SSteven Rostedt 		size = BUF_PAGE_SIZE * 2;
8437a8e76a3SSteven Rostedt 
8447a8e76a3SSteven Rostedt 	if (size == buffer_size)
8457a8e76a3SSteven Rostedt 		return size;
8467a8e76a3SSteven Rostedt 
8477a8e76a3SSteven Rostedt 	mutex_lock(&buffer->mutex);
848554f786eSSteven Rostedt 	get_online_cpus();
8497a8e76a3SSteven Rostedt 
8507a8e76a3SSteven Rostedt 	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
8517a8e76a3SSteven Rostedt 
8527a8e76a3SSteven Rostedt 	if (size < buffer_size) {
8537a8e76a3SSteven Rostedt 
8547a8e76a3SSteven Rostedt 		/* easy case, just free pages */
855554f786eSSteven Rostedt 		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
856554f786eSSteven Rostedt 			goto out_fail;
8577a8e76a3SSteven Rostedt 
8587a8e76a3SSteven Rostedt 		rm_pages = buffer->pages - nr_pages;
8597a8e76a3SSteven Rostedt 
8607a8e76a3SSteven Rostedt 		for_each_buffer_cpu(buffer, cpu) {
8617a8e76a3SSteven Rostedt 			cpu_buffer = buffer->buffers[cpu];
8627a8e76a3SSteven Rostedt 			rb_remove_pages(cpu_buffer, rm_pages);
8637a8e76a3SSteven Rostedt 		}
8647a8e76a3SSteven Rostedt 		goto out;
8657a8e76a3SSteven Rostedt 	}
8667a8e76a3SSteven Rostedt 
8677a8e76a3SSteven Rostedt 	/*
8687a8e76a3SSteven Rostedt 	 * This is a bit more difficult. We only want to add pages
8697a8e76a3SSteven Rostedt 	 * when we can allocate enough for all CPUs. We do this
8707a8e76a3SSteven Rostedt 	 * by allocating all the pages and storing them on a local
8717a8e76a3SSteven Rostedt 	 * link list. If we succeed in our allocation, then we
8727a8e76a3SSteven Rostedt 	 * add these pages to the cpu_buffers. Otherwise we just free
8737a8e76a3SSteven Rostedt 	 * them all and return -ENOMEM;
8747a8e76a3SSteven Rostedt 	 */
875554f786eSSteven Rostedt 	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
876554f786eSSteven Rostedt 		goto out_fail;
877f536aafcSSteven Rostedt 
8787a8e76a3SSteven Rostedt 	new_pages = nr_pages - buffer->pages;
8797a8e76a3SSteven Rostedt 
8807a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
8817a8e76a3SSteven Rostedt 		for (i = 0; i < new_pages; i++) {
882044fa782SSteven Rostedt 			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
883e4c2ce82SSteven Rostedt 						  cache_line_size()),
884e4c2ce82SSteven Rostedt 					    GFP_KERNEL, cpu_to_node(cpu));
885044fa782SSteven Rostedt 			if (!bpage)
886e4c2ce82SSteven Rostedt 				goto free_pages;
887044fa782SSteven Rostedt 			list_add(&bpage->list, &pages);
8887a8e76a3SSteven Rostedt 			addr = __get_free_page(GFP_KERNEL);
8897a8e76a3SSteven Rostedt 			if (!addr)
8907a8e76a3SSteven Rostedt 				goto free_pages;
891044fa782SSteven Rostedt 			bpage->page = (void *)addr;
892044fa782SSteven Rostedt 			rb_init_page(bpage->page);
8937a8e76a3SSteven Rostedt 		}
8947a8e76a3SSteven Rostedt 	}
8957a8e76a3SSteven Rostedt 
8967a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
8977a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
8987a8e76a3SSteven Rostedt 		rb_insert_pages(cpu_buffer, &pages, new_pages);
8997a8e76a3SSteven Rostedt 	}
9007a8e76a3SSteven Rostedt 
901554f786eSSteven Rostedt 	if (RB_WARN_ON(buffer, !list_empty(&pages)))
902554f786eSSteven Rostedt 		goto out_fail;
9037a8e76a3SSteven Rostedt 
9047a8e76a3SSteven Rostedt  out:
9057a8e76a3SSteven Rostedt 	buffer->pages = nr_pages;
906554f786eSSteven Rostedt 	put_online_cpus();
9077a8e76a3SSteven Rostedt 	mutex_unlock(&buffer->mutex);
9087a8e76a3SSteven Rostedt 
9097a8e76a3SSteven Rostedt 	return size;
9107a8e76a3SSteven Rostedt 
9117a8e76a3SSteven Rostedt  free_pages:
912044fa782SSteven Rostedt 	list_for_each_entry_safe(bpage, tmp, &pages, list) {
913044fa782SSteven Rostedt 		list_del_init(&bpage->list);
914044fa782SSteven Rostedt 		free_buffer_page(bpage);
9157a8e76a3SSteven Rostedt 	}
916554f786eSSteven Rostedt 	put_online_cpus();
917641d2f63SVegard Nossum 	mutex_unlock(&buffer->mutex);
9187a8e76a3SSteven Rostedt 	return -ENOMEM;
919554f786eSSteven Rostedt 
920554f786eSSteven Rostedt 	/*
921554f786eSSteven Rostedt 	 * Something went totally wrong, and we are too paranoid
922554f786eSSteven Rostedt 	 * to even clean up the mess.
923554f786eSSteven Rostedt 	 */
924554f786eSSteven Rostedt  out_fail:
925554f786eSSteven Rostedt 	put_online_cpus();
926554f786eSSteven Rostedt 	mutex_unlock(&buffer->mutex);
927554f786eSSteven Rostedt 	return -1;
9287a8e76a3SSteven Rostedt }
929c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_resize);
9307a8e76a3SSteven Rostedt 
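/*
 * Illustrative usage of ring_buffer_resize() above (a hypothetical helper):
 * the caller must make sure the buffer is idle, requests are rounded up to
 * whole pages with a floor of two pages, and the size actually applied (in
 * bytes) is returned, or -1 on failure.
 */
static inline int rb_example_shrink_to_minimum(struct ring_buffer *buffer)
{
	/* asking for a single page still leaves the two page minimum */
	return ring_buffer_resize(buffer, BUF_PAGE_SIZE);
}
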
9318789a9e7SSteven Rostedt static inline void *
932044fa782SSteven Rostedt __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
9338789a9e7SSteven Rostedt {
934044fa782SSteven Rostedt 	return bpage->data + index;
9358789a9e7SSteven Rostedt }
9368789a9e7SSteven Rostedt 
937044fa782SSteven Rostedt static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
9387a8e76a3SSteven Rostedt {
939044fa782SSteven Rostedt 	return bpage->page->data + index;
9407a8e76a3SSteven Rostedt }
9417a8e76a3SSteven Rostedt 
9427a8e76a3SSteven Rostedt static inline struct ring_buffer_event *
943d769041fSSteven Rostedt rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
9447a8e76a3SSteven Rostedt {
9456f807acdSSteven Rostedt 	return __rb_page_index(cpu_buffer->reader_page,
9466f807acdSSteven Rostedt 			       cpu_buffer->reader_page->read);
9476f807acdSSteven Rostedt }
9486f807acdSSteven Rostedt 
9496f807acdSSteven Rostedt static inline struct ring_buffer_event *
9506f807acdSSteven Rostedt rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
9516f807acdSSteven Rostedt {
9526f807acdSSteven Rostedt 	return __rb_page_index(cpu_buffer->head_page,
9536f807acdSSteven Rostedt 			       cpu_buffer->head_page->read);
9547a8e76a3SSteven Rostedt }
9557a8e76a3SSteven Rostedt 
9567a8e76a3SSteven Rostedt static inline struct ring_buffer_event *
9577a8e76a3SSteven Rostedt rb_iter_head_event(struct ring_buffer_iter *iter)
9587a8e76a3SSteven Rostedt {
9596f807acdSSteven Rostedt 	return __rb_page_index(iter->head_page, iter->head);
9607a8e76a3SSteven Rostedt }
9617a8e76a3SSteven Rostedt 
962bf41a158SSteven Rostedt static inline unsigned rb_page_write(struct buffer_page *bpage)
963bf41a158SSteven Rostedt {
964bf41a158SSteven Rostedt 	return local_read(&bpage->write);
965bf41a158SSteven Rostedt }
966bf41a158SSteven Rostedt 
967bf41a158SSteven Rostedt static inline unsigned rb_page_commit(struct buffer_page *bpage)
968bf41a158SSteven Rostedt {
969abc9b56dSSteven Rostedt 	return local_read(&bpage->page->commit);
970bf41a158SSteven Rostedt }
971bf41a158SSteven Rostedt 
972bf41a158SSteven Rostedt /* Size is determined by what has been committed */
973bf41a158SSteven Rostedt static inline unsigned rb_page_size(struct buffer_page *bpage)
974bf41a158SSteven Rostedt {
975bf41a158SSteven Rostedt 	return rb_page_commit(bpage);
976bf41a158SSteven Rostedt }
977bf41a158SSteven Rostedt 
978bf41a158SSteven Rostedt static inline unsigned
979bf41a158SSteven Rostedt rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
980bf41a158SSteven Rostedt {
981bf41a158SSteven Rostedt 	return rb_page_commit(cpu_buffer->commit_page);
982bf41a158SSteven Rostedt }
983bf41a158SSteven Rostedt 
984bf41a158SSteven Rostedt static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
985bf41a158SSteven Rostedt {
986bf41a158SSteven Rostedt 	return rb_page_commit(cpu_buffer->head_page);
987bf41a158SSteven Rostedt }
988bf41a158SSteven Rostedt 
9897a8e76a3SSteven Rostedt static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
990044fa782SSteven Rostedt 			       struct buffer_page **bpage)
9917a8e76a3SSteven Rostedt {
992044fa782SSteven Rostedt 	struct list_head *p = (*bpage)->list.next;
9937a8e76a3SSteven Rostedt 
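	/* the list head itself is not a buffer_page, so skip it when wrapping */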
9947a8e76a3SSteven Rostedt 	if (p == &cpu_buffer->pages)
9957a8e76a3SSteven Rostedt 		p = p->next;
9967a8e76a3SSteven Rostedt 
997044fa782SSteven Rostedt 	*bpage = list_entry(p, struct buffer_page, list);
9987a8e76a3SSteven Rostedt }
9997a8e76a3SSteven Rostedt 
1000bf41a158SSteven Rostedt static inline unsigned
1001bf41a158SSteven Rostedt rb_event_index(struct ring_buffer_event *event)
10027a8e76a3SSteven Rostedt {
1003bf41a158SSteven Rostedt 	unsigned long addr = (unsigned long)event;
1004bf41a158SSteven Rostedt 
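	/* offset within the physical page, minus the buffer_data_page header */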
1005bf41a158SSteven Rostedt 	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
10067a8e76a3SSteven Rostedt }
10077a8e76a3SSteven Rostedt 
10080f0c85fcSSteven Rostedt static inline int
1009bf41a158SSteven Rostedt rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1010bf41a158SSteven Rostedt 	     struct ring_buffer_event *event)
10117a8e76a3SSteven Rostedt {
1012bf41a158SSteven Rostedt 	unsigned long addr = (unsigned long)event;
1013bf41a158SSteven Rostedt 	unsigned long index;
1014bf41a158SSteven Rostedt 
1015bf41a158SSteven Rostedt 	index = rb_event_index(event);
1016bf41a158SSteven Rostedt 	addr &= PAGE_MASK;
1017bf41a158SSteven Rostedt 
1018bf41a158SSteven Rostedt 	return cpu_buffer->commit_page->page == (void *)addr &&
1019bf41a158SSteven Rostedt 		rb_commit_index(cpu_buffer) == index;
1020bf41a158SSteven Rostedt }
1021bf41a158SSteven Rostedt 
102234a148bfSAndrew Morton static void
1023bf41a158SSteven Rostedt rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
1024bf41a158SSteven Rostedt 		    struct ring_buffer_event *event)
1025bf41a158SSteven Rostedt {
1026bf41a158SSteven Rostedt 	unsigned long addr = (unsigned long)event;
1027bf41a158SSteven Rostedt 	unsigned long index;
1028bf41a158SSteven Rostedt 
1029bf41a158SSteven Rostedt 	index = rb_event_index(event);
1030bf41a158SSteven Rostedt 	addr &= PAGE_MASK;
1031bf41a158SSteven Rostedt 
1032bf41a158SSteven Rostedt 	while (cpu_buffer->commit_page->page != (void *)addr) {
10333e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer,
10343e89c7bbSSteven Rostedt 			  cpu_buffer->commit_page == cpu_buffer->tail_page))
10353e89c7bbSSteven Rostedt 			return;
1036abc9b56dSSteven Rostedt 		cpu_buffer->commit_page->page->commit =
1037bf41a158SSteven Rostedt 			cpu_buffer->commit_page->write;
1038bf41a158SSteven Rostedt 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1039abc9b56dSSteven Rostedt 		cpu_buffer->write_stamp =
1040abc9b56dSSteven Rostedt 			cpu_buffer->commit_page->page->time_stamp;
1041bf41a158SSteven Rostedt 	}
1042bf41a158SSteven Rostedt 
1043bf41a158SSteven Rostedt 	/* Now set the commit to the event's index */
1044abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->commit_page->page->commit, index);
1045bf41a158SSteven Rostedt }
1046bf41a158SSteven Rostedt 
104734a148bfSAndrew Morton static void
1048bf41a158SSteven Rostedt rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1049bf41a158SSteven Rostedt {
1050bf41a158SSteven Rostedt 	/*
1051bf41a158SSteven Rostedt 	 * We only race with interrupts and NMIs on this CPU.
1052bf41a158SSteven Rostedt 	 * If we own the commit event, then we can commit
1053bf41a158SSteven Rostedt 	 * all others that interrupted us, since the interruptions
1054bf41a158SSteven Rostedt 	 * are in stack format (they finish before they come
1055bf41a158SSteven Rostedt 	 * back to us). This allows us to do a simple loop to
1056bf41a158SSteven Rostedt 	 * assign the commit to the tail.
1057bf41a158SSteven Rostedt 	 */
1058a8ccf1d6SSteven Rostedt  again:
1059bf41a158SSteven Rostedt 	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1060abc9b56dSSteven Rostedt 		cpu_buffer->commit_page->page->commit =
1061bf41a158SSteven Rostedt 			cpu_buffer->commit_page->write;
1062bf41a158SSteven Rostedt 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1063abc9b56dSSteven Rostedt 		cpu_buffer->write_stamp =
1064abc9b56dSSteven Rostedt 			cpu_buffer->commit_page->page->time_stamp;
1065bf41a158SSteven Rostedt 		/* add barrier to keep gcc from optimizing too much */
1066bf41a158SSteven Rostedt 		barrier();
1067bf41a158SSteven Rostedt 	}
1068bf41a158SSteven Rostedt 	while (rb_commit_index(cpu_buffer) !=
1069bf41a158SSteven Rostedt 	       rb_page_write(cpu_buffer->commit_page)) {
1070abc9b56dSSteven Rostedt 		cpu_buffer->commit_page->page->commit =
1071bf41a158SSteven Rostedt 			cpu_buffer->commit_page->write;
1072bf41a158SSteven Rostedt 		barrier();
1073bf41a158SSteven Rostedt 	}
1074a8ccf1d6SSteven Rostedt 
1075a8ccf1d6SSteven Rostedt 	/* again, keep gcc from optimizing */
1076a8ccf1d6SSteven Rostedt 	barrier();
1077a8ccf1d6SSteven Rostedt 
1078a8ccf1d6SSteven Rostedt 	/*
1079a8ccf1d6SSteven Rostedt 	 * If an interrupt came in just after the first while loop
1080a8ccf1d6SSteven Rostedt 	 * and pushed the tail page forward, we will be left with
1081a8ccf1d6SSteven Rostedt 	 * a dangling commit that will never go forward.
1082a8ccf1d6SSteven Rostedt 	 */
1083a8ccf1d6SSteven Rostedt 	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1084a8ccf1d6SSteven Rostedt 		goto again;
10857a8e76a3SSteven Rostedt }
10867a8e76a3SSteven Rostedt 
1087d769041fSSteven Rostedt static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
10887a8e76a3SSteven Rostedt {
1089abc9b56dSSteven Rostedt 	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
10906f807acdSSteven Rostedt 	cpu_buffer->reader_page->read = 0;
1091d769041fSSteven Rostedt }
1092d769041fSSteven Rostedt 
109334a148bfSAndrew Morton static void rb_inc_iter(struct ring_buffer_iter *iter)
1094d769041fSSteven Rostedt {
1095d769041fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1096d769041fSSteven Rostedt 
1097d769041fSSteven Rostedt 	/*
1098d769041fSSteven Rostedt 	 * The iterator could be on the reader page (it starts there).
1099d769041fSSteven Rostedt 	 * But the head could have moved, since the reader was
1100d769041fSSteven Rostedt 	 * found. Check for this case and assign the iterator
1101d769041fSSteven Rostedt 	 * to the head page instead of next.
1102d769041fSSteven Rostedt 	 */
1103d769041fSSteven Rostedt 	if (iter->head_page == cpu_buffer->reader_page)
1104d769041fSSteven Rostedt 		iter->head_page = cpu_buffer->head_page;
1105d769041fSSteven Rostedt 	else
1106d769041fSSteven Rostedt 		rb_inc_page(cpu_buffer, &iter->head_page);
1107d769041fSSteven Rostedt 
1108abc9b56dSSteven Rostedt 	iter->read_stamp = iter->head_page->page->time_stamp;
11097a8e76a3SSteven Rostedt 	iter->head = 0;
11107a8e76a3SSteven Rostedt }
11117a8e76a3SSteven Rostedt 
11127a8e76a3SSteven Rostedt /**
11137a8e76a3SSteven Rostedt  * rb_update_event - update event type and data
11147a8e76a3SSteven Rostedt  * @event: the event to update
11157a8e76a3SSteven Rostedt  * @type: the type of event
11167a8e76a3SSteven Rostedt  * @length: the size of the event field in the ring buffer
11177a8e76a3SSteven Rostedt  *
11187a8e76a3SSteven Rostedt  * Update the type and data fields of the event. The length
11197a8e76a3SSteven Rostedt  * is the actual size that is written to the ring buffer,
11207a8e76a3SSteven Rostedt  * and with this, we can determine what to place into the
11217a8e76a3SSteven Rostedt  * data field.
11227a8e76a3SSteven Rostedt  */
112334a148bfSAndrew Morton static void
11247a8e76a3SSteven Rostedt rb_update_event(struct ring_buffer_event *event,
11257a8e76a3SSteven Rostedt 			 unsigned type, unsigned length)
11267a8e76a3SSteven Rostedt {
1127334d4169SLai Jiangshan 	event->type_len = type;
11287a8e76a3SSteven Rostedt 
11297a8e76a3SSteven Rostedt 	switch (type) {
11307a8e76a3SSteven Rostedt 
11317a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
11327a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
11337a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
11347a8e76a3SSteven Rostedt 		break;
11357a8e76a3SSteven Rostedt 
1136334d4169SLai Jiangshan 	case 0:
11377a8e76a3SSteven Rostedt 		length -= RB_EVNT_HDR_SIZE;
1138334d4169SLai Jiangshan 		if (length > RB_MAX_SMALL_DATA)
11397a8e76a3SSteven Rostedt 			event->array[0] = length;
1140334d4169SLai Jiangshan 		else
1141334d4169SLai Jiangshan 			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
11427a8e76a3SSteven Rostedt 		break;
11437a8e76a3SSteven Rostedt 	default:
11447a8e76a3SSteven Rostedt 		BUG();
11457a8e76a3SSteven Rostedt 	}
11467a8e76a3SSteven Rostedt }
11477a8e76a3SSteven Rostedt 
114834a148bfSAndrew Morton static unsigned rb_calculate_event_length(unsigned length)
11497a8e76a3SSteven Rostedt {
11507a8e76a3SSteven Rostedt 	struct ring_buffer_event event; /* Used only for sizeof array */
11517a8e76a3SSteven Rostedt 
11527a8e76a3SSteven Rostedt 	/* zero length can cause confusion */
11537a8e76a3SSteven Rostedt 	if (!length)
11547a8e76a3SSteven Rostedt 		length = 1;
11557a8e76a3SSteven Rostedt 
11567a8e76a3SSteven Rostedt 	if (length > RB_MAX_SMALL_DATA)
11577a8e76a3SSteven Rostedt 		length += sizeof(event.array[0]);
11587a8e76a3SSteven Rostedt 
11597a8e76a3SSteven Rostedt 	length += RB_EVNT_HDR_SIZE;
11607a8e76a3SSteven Rostedt 	length = ALIGN(length, RB_ALIGNMENT);
11617a8e76a3SSteven Rostedt 
11627a8e76a3SSteven Rostedt 	return length;
11637a8e76a3SSteven Rostedt }
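/*
 * Worked example for the two helpers above (illustrative; assumes
 * RB_EVNT_HDR_SIZE and RB_ALIGNMENT are both 4 in this configuration):
 * asking to store 10 bytes of data gives
 *
 *	rb_calculate_event_length(10) = ALIGN(10 + 4, 4) = 16
 *
 * bytes reserved on the page, and rb_update_event() then encodes
 * type_len = DIV_ROUND_UP(16 - 4, 4) = 3, i.e. 3 * RB_ALIGNMENT = 12
 * data bytes (10 of payload plus 2 of alignment padding). Payloads
 * larger than RB_MAX_SMALL_DATA keep their exact size in array[0]
 * instead.
 */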
11647a8e76a3SSteven Rostedt 
11656634ff26SSteven Rostedt 
11667a8e76a3SSteven Rostedt static struct ring_buffer_event *
11676634ff26SSteven Rostedt rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
11686634ff26SSteven Rostedt 	     unsigned long length, unsigned long tail,
11696634ff26SSteven Rostedt 	     struct buffer_page *commit_page,
11706634ff26SSteven Rostedt 	     struct buffer_page *tail_page, u64 *ts)
11717a8e76a3SSteven Rostedt {
11726634ff26SSteven Rostedt 	struct buffer_page *next_page, *head_page, *reader_page;
11737a8e76a3SSteven Rostedt 	struct ring_buffer *buffer = cpu_buffer->buffer;
11747a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
117578d904b4SSteven Rostedt 	bool lock_taken = false;
11766634ff26SSteven Rostedt 	unsigned long flags;
1177aa20ae84SSteven Rostedt 
1178aa20ae84SSteven Rostedt 	next_page = tail_page;
11797a8e76a3SSteven Rostedt 
11803e03fb7fSSteven Rostedt 	local_irq_save(flags);
118178d904b4SSteven Rostedt 	/*
1182a81bd80aSSteven Rostedt 	 * Since the write to the buffer is still not
1183a81bd80aSSteven Rostedt 	 * fully lockless, we must be careful with NMIs.
1184a81bd80aSSteven Rostedt 	 * The locks in the writers are taken when a write
1185a81bd80aSSteven Rostedt 	 * crosses to a new page. The locks protect against
1186a81bd80aSSteven Rostedt 	 * races with the readers (this will soon be fixed
1187a81bd80aSSteven Rostedt 	 * with a lockless solution).
1188a81bd80aSSteven Rostedt 	 *
1189a81bd80aSSteven Rostedt 	 * Because we can not protect against NMIs, and we
1190a81bd80aSSteven Rostedt 	 * want to keep traces reentrant, we need to manage
1191a81bd80aSSteven Rostedt 	 * what happens when we are in an NMI.
1192a81bd80aSSteven Rostedt 	 *
119378d904b4SSteven Rostedt 	 * NMIs can happen after we take the lock.
119478d904b4SSteven Rostedt 	 * If we are in an NMI, only take the lock
119578d904b4SSteven Rostedt 	 * if it is not already taken. Otherwise
119678d904b4SSteven Rostedt 	 * simply fail.
119778d904b4SSteven Rostedt 	 */
1198a81bd80aSSteven Rostedt 	if (unlikely(in_nmi())) {
1199f0d2c681SSteven Rostedt 		if (!__raw_spin_trylock(&cpu_buffer->lock)) {
1200f0d2c681SSteven Rostedt 			cpu_buffer->nmi_dropped++;
120145141d46SSteven Rostedt 			goto out_reset;
1202f0d2c681SSteven Rostedt 		}
120378d904b4SSteven Rostedt 	} else
12043e03fb7fSSteven Rostedt 		__raw_spin_lock(&cpu_buffer->lock);
1205bf41a158SSteven Rostedt 
120678d904b4SSteven Rostedt 	lock_taken = true;
120778d904b4SSteven Rostedt 
12087a8e76a3SSteven Rostedt 	rb_inc_page(cpu_buffer, &next_page);
12097a8e76a3SSteven Rostedt 
1210d769041fSSteven Rostedt 	head_page = cpu_buffer->head_page;
1211d769041fSSteven Rostedt 	reader_page = cpu_buffer->reader_page;
1212d769041fSSteven Rostedt 
1213d769041fSSteven Rostedt 	/* we grabbed the lock before incrementing */
12143e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
121545141d46SSteven Rostedt 		goto out_reset;
1216bf41a158SSteven Rostedt 
1217bf41a158SSteven Rostedt 	/*
1218bf41a158SSteven Rostedt 	 * If, for some reason, we had an interrupt storm that made
1219bf41a158SSteven Rostedt 	 * it all the way around the buffer, bail, and warn
1220bf41a158SSteven Rostedt 	 * about it.
1221bf41a158SSteven Rostedt 	 */
122298db8df7SSteven Rostedt 	if (unlikely(next_page == commit_page)) {
1223f0d2c681SSteven Rostedt 		cpu_buffer->commit_overrun++;
122445141d46SSteven Rostedt 		goto out_reset;
1225bf41a158SSteven Rostedt 	}
1226d769041fSSteven Rostedt 
12277a8e76a3SSteven Rostedt 	if (next_page == head_page) {
12286f3b3440SLai Jiangshan 		if (!(buffer->flags & RB_FL_OVERWRITE))
122945141d46SSteven Rostedt 			goto out_reset;
12307a8e76a3SSteven Rostedt 
1231bf41a158SSteven Rostedt 		/* tail_page has not moved yet? */
1232bf41a158SSteven Rostedt 		if (tail_page == cpu_buffer->tail_page) {
12337a8e76a3SSteven Rostedt 			/* count overflows */
1234778c55d4SSteven Rostedt 			cpu_buffer->overrun +=
1235778c55d4SSteven Rostedt 				local_read(&head_page->entries);
12367a8e76a3SSteven Rostedt 
12377a8e76a3SSteven Rostedt 			rb_inc_page(cpu_buffer, &head_page);
12387a8e76a3SSteven Rostedt 			cpu_buffer->head_page = head_page;
1239bf41a158SSteven Rostedt 			cpu_buffer->head_page->read = 0;
1240bf41a158SSteven Rostedt 		}
12417a8e76a3SSteven Rostedt 	}
12427a8e76a3SSteven Rostedt 
1243bf41a158SSteven Rostedt 	/*
1244bf41a158SSteven Rostedt 	 * If the tail page is still the same as what we think
1245bf41a158SSteven Rostedt 	 * it is, then it is up to us to update the tail
1246bf41a158SSteven Rostedt 	 * pointer.
1247bf41a158SSteven Rostedt 	 */
1248bf41a158SSteven Rostedt 	if (tail_page == cpu_buffer->tail_page) {
1249bf41a158SSteven Rostedt 		local_set(&next_page->write, 0);
1250778c55d4SSteven Rostedt 		local_set(&next_page->entries, 0);
1251abc9b56dSSteven Rostedt 		local_set(&next_page->page->commit, 0);
1252bf41a158SSteven Rostedt 		cpu_buffer->tail_page = next_page;
1253bf41a158SSteven Rostedt 
1254bf41a158SSteven Rostedt 		/* reread the time stamp */
125588eb0125SSteven Rostedt 		*ts = rb_time_stamp(buffer, cpu_buffer->cpu);
1256abc9b56dSSteven Rostedt 		cpu_buffer->tail_page->page->time_stamp = *ts;
1257bf41a158SSteven Rostedt 	}
1258bf41a158SSteven Rostedt 
1259bf41a158SSteven Rostedt 	/*
1260bf41a158SSteven Rostedt 	 * The actual tail page has moved forward.
1261bf41a158SSteven Rostedt 	 */
1262bf41a158SSteven Rostedt 	if (tail < BUF_PAGE_SIZE) {
1263bf41a158SSteven Rostedt 		/* Mark the rest of the page with padding */
12646f807acdSSteven Rostedt 		event = __rb_page_index(tail_page, tail);
12652d622719STom Zanussi 		rb_event_set_padding(event);
12667a8e76a3SSteven Rostedt 	}
12677a8e76a3SSteven Rostedt 
1268bf41a158SSteven Rostedt 	/* Set the write back to the previous setting */
12698e7abf1cSSteven Rostedt 	local_sub(length, &tail_page->write);
1270bf41a158SSteven Rostedt 
1271bf41a158SSteven Rostedt 	/*
1272bf41a158SSteven Rostedt 	 * If this was a commit entry that failed,
1273bf41a158SSteven Rostedt 	 * increment that too
1274bf41a158SSteven Rostedt 	 */
1275bf41a158SSteven Rostedt 	if (tail_page == cpu_buffer->commit_page &&
1276bf41a158SSteven Rostedt 	    tail == rb_commit_index(cpu_buffer)) {
1277bf41a158SSteven Rostedt 		rb_set_commit_to_write(cpu_buffer);
12787a8e76a3SSteven Rostedt 	}
12797a8e76a3SSteven Rostedt 
12803e03fb7fSSteven Rostedt 	__raw_spin_unlock(&cpu_buffer->lock);
12813e03fb7fSSteven Rostedt 	local_irq_restore(flags);
1282bf41a158SSteven Rostedt 
1283bf41a158SSteven Rostedt 	/* fail and let the caller try again */
1284bf41a158SSteven Rostedt 	return ERR_PTR(-EAGAIN);
1285bf41a158SSteven Rostedt 
128645141d46SSteven Rostedt  out_reset:
12876f3b3440SLai Jiangshan 	/* reset write */
12888e7abf1cSSteven Rostedt 	local_sub(length, &tail_page->write);
12896f3b3440SLai Jiangshan 
129078d904b4SSteven Rostedt 	if (likely(lock_taken))
12913e03fb7fSSteven Rostedt 		__raw_spin_unlock(&cpu_buffer->lock);
12923e03fb7fSSteven Rostedt 	local_irq_restore(flags);
1293bf41a158SSteven Rostedt 	return NULL;
12947a8e76a3SSteven Rostedt }
12957a8e76a3SSteven Rostedt 
12966634ff26SSteven Rostedt static struct ring_buffer_event *
12976634ff26SSteven Rostedt __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
12986634ff26SSteven Rostedt 		  unsigned type, unsigned long length, u64 *ts)
12996634ff26SSteven Rostedt {
13006634ff26SSteven Rostedt 	struct buffer_page *tail_page, *commit_page;
13016634ff26SSteven Rostedt 	struct ring_buffer_event *event;
13026634ff26SSteven Rostedt 	unsigned long tail, write;
13036634ff26SSteven Rostedt 
13046634ff26SSteven Rostedt 	commit_page = cpu_buffer->commit_page;
13056634ff26SSteven Rostedt 	/* we just need to protect against interrupts */
13066634ff26SSteven Rostedt 	barrier();
13076634ff26SSteven Rostedt 	tail_page = cpu_buffer->tail_page;
13086634ff26SSteven Rostedt 	write = local_add_return(length, &tail_page->write);
13096634ff26SSteven Rostedt 	tail = write - length;
13106634ff26SSteven Rostedt 
13116634ff26SSteven Rostedt 	/* See if we shot past the end of this buffer page */
13126634ff26SSteven Rostedt 	if (write > BUF_PAGE_SIZE)
13136634ff26SSteven Rostedt 		return rb_move_tail(cpu_buffer, length, tail,
13146634ff26SSteven Rostedt 				    commit_page, tail_page, ts);
13156634ff26SSteven Rostedt 
13166634ff26SSteven Rostedt 	/* We reserved something on the buffer */
13176634ff26SSteven Rostedt 
13186634ff26SSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
13196634ff26SSteven Rostedt 		return NULL;
13206634ff26SSteven Rostedt 
13216634ff26SSteven Rostedt 	event = __rb_page_index(tail_page, tail);
13226634ff26SSteven Rostedt 	rb_update_event(event, type, length);
13236634ff26SSteven Rostedt 
13246634ff26SSteven Rostedt 	/* The passed in type is zero for DATA */
13256634ff26SSteven Rostedt 	if (likely(!type))
13266634ff26SSteven Rostedt 		local_inc(&tail_page->entries);
13276634ff26SSteven Rostedt 
13286634ff26SSteven Rostedt 	/*
13296634ff26SSteven Rostedt 	 * If this is a commit and the tail is zero, then update
13306634ff26SSteven Rostedt 	 * this page's time stamp.
13316634ff26SSteven Rostedt 	 */
13326634ff26SSteven Rostedt 	if (!tail && rb_is_commit(cpu_buffer, event))
13336634ff26SSteven Rostedt 		cpu_buffer->commit_page->page->time_stamp = *ts;
13346634ff26SSteven Rostedt 
13356634ff26SSteven Rostedt 	return event;
13366634ff26SSteven Rostedt }
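/*
 * Illustrative sketch of the reservation above (sizes made up): two
 * events reserved back to back on the same page, the second from an
 * interrupt that fires in between:
 *
 *	write = local_add_return(16, &tail_page->write);  -> 16, tail =  0
 *	write = local_add_return(24, &tail_page->write);  -> 40, tail = 16
 *
 * Each writer gets a disjoint [tail, write) slice of the page because
 * local_add_return() is atomic with respect to interrupts on this CPU,
 * which is what keeps the fast path free of locks.
 */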
13376634ff26SSteven Rostedt 
1338*edd813bfSSteven Rostedt static inline int
1339*edd813bfSSteven Rostedt rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
1340*edd813bfSSteven Rostedt 		  struct ring_buffer_event *event)
1341*edd813bfSSteven Rostedt {
1342*edd813bfSSteven Rostedt 	unsigned long new_index, old_index;
1343*edd813bfSSteven Rostedt 	struct buffer_page *bpage;
1344*edd813bfSSteven Rostedt 	unsigned long index;
1345*edd813bfSSteven Rostedt 	unsigned long addr;
1346*edd813bfSSteven Rostedt 
1347*edd813bfSSteven Rostedt 	new_index = rb_event_index(event);
1348*edd813bfSSteven Rostedt 	old_index = new_index + rb_event_length(event);
1349*edd813bfSSteven Rostedt 	addr = (unsigned long)event;
1350*edd813bfSSteven Rostedt 	addr &= PAGE_MASK;
1351*edd813bfSSteven Rostedt 
1352*edd813bfSSteven Rostedt 	bpage = cpu_buffer->tail_page;
1353*edd813bfSSteven Rostedt 
1354*edd813bfSSteven Rostedt 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
1355*edd813bfSSteven Rostedt 		/*
1356*edd813bfSSteven Rostedt 		 * This is on the tail page. It is possible that
1357*edd813bfSSteven Rostedt 		 * a write could come in and move the tail page
1358*edd813bfSSteven Rostedt 		 * and write to the next page. That is fine
1359*edd813bfSSteven Rostedt 		 * because we just shorten what is on this page.
1360*edd813bfSSteven Rostedt 		 */
1361*edd813bfSSteven Rostedt 		index = local_cmpxchg(&bpage->write, old_index, new_index);
1362*edd813bfSSteven Rostedt 		if (index == old_index)
1363*edd813bfSSteven Rostedt 			return 1;
1364*edd813bfSSteven Rostedt 	}
1365*edd813bfSSteven Rostedt 
1366*edd813bfSSteven Rostedt 	/* could not discard */
1367*edd813bfSSteven Rostedt 	return 0;
1368*edd813bfSSteven Rostedt }
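/*
 * Illustrative sketch of the discard above (offsets made up): if the
 * event starts at offset 40 on the tail page and is 16 bytes long, then
 * new_index is 40 and old_index is 56. If bpage->write still reads 56,
 * nothing was written after the event and the cmpxchg pulls write back
 * to 40, reclaiming the space. If another writer slipped in, the
 * cmpxchg is skipped or fails and the caller must leave the event in
 * place as padding instead.
 */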
1369*edd813bfSSteven Rostedt 
13707a8e76a3SSteven Rostedt static int
13717a8e76a3SSteven Rostedt rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
13727a8e76a3SSteven Rostedt 		  u64 *ts, u64 *delta)
13737a8e76a3SSteven Rostedt {
13747a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
13757a8e76a3SSteven Rostedt 	static int once;
1376bf41a158SSteven Rostedt 	int ret;
13777a8e76a3SSteven Rostedt 
13787a8e76a3SSteven Rostedt 	if (unlikely(*delta > (1ULL << 59) && !once++)) {
13797a8e76a3SSteven Rostedt 		printk(KERN_WARNING "Delta way too big! %llu"
13807a8e76a3SSteven Rostedt 		       " ts=%llu write stamp = %llu\n",
1381e2862c94SStephen Rothwell 		       (unsigned long long)*delta,
1382e2862c94SStephen Rothwell 		       (unsigned long long)*ts,
1383e2862c94SStephen Rothwell 		       (unsigned long long)cpu_buffer->write_stamp);
13847a8e76a3SSteven Rostedt 		WARN_ON(1);
13857a8e76a3SSteven Rostedt 	}
13867a8e76a3SSteven Rostedt 
13877a8e76a3SSteven Rostedt 	/*
13887a8e76a3SSteven Rostedt 	 * The delta is too big, we need to add a
13897a8e76a3SSteven Rostedt 	 * new timestamp.
13907a8e76a3SSteven Rostedt 	 */
13917a8e76a3SSteven Rostedt 	event = __rb_reserve_next(cpu_buffer,
13927a8e76a3SSteven Rostedt 				  RINGBUF_TYPE_TIME_EXTEND,
13937a8e76a3SSteven Rostedt 				  RB_LEN_TIME_EXTEND,
13947a8e76a3SSteven Rostedt 				  ts);
13957a8e76a3SSteven Rostedt 	if (!event)
1396bf41a158SSteven Rostedt 		return -EBUSY;
13977a8e76a3SSteven Rostedt 
1398bf41a158SSteven Rostedt 	if (PTR_ERR(event) == -EAGAIN)
1399bf41a158SSteven Rostedt 		return -EAGAIN;
1400bf41a158SSteven Rostedt 
1401bf41a158SSteven Rostedt 	/* Only a committed time event can update the write stamp */
1402bf41a158SSteven Rostedt 	if (rb_is_commit(cpu_buffer, event)) {
1403bf41a158SSteven Rostedt 		/*
1404bf41a158SSteven Rostedt 		 * If this is the first on the page, then we need to
1405bf41a158SSteven Rostedt 		 * update the page itself, and just put in a zero.
1406bf41a158SSteven Rostedt 		 */
1407bf41a158SSteven Rostedt 		if (rb_event_index(event)) {
14087a8e76a3SSteven Rostedt 			event->time_delta = *delta & TS_MASK;
14097a8e76a3SSteven Rostedt 			event->array[0] = *delta >> TS_SHIFT;
1410bf41a158SSteven Rostedt 		} else {
1411abc9b56dSSteven Rostedt 			cpu_buffer->commit_page->page->time_stamp = *ts;
1412bf41a158SSteven Rostedt 			event->time_delta = 0;
1413bf41a158SSteven Rostedt 			event->array[0] = 0;
1414bf41a158SSteven Rostedt 		}
14157a8e76a3SSteven Rostedt 		cpu_buffer->write_stamp = *ts;
1416bf41a158SSteven Rostedt 		/* let the caller know this was the commit */
1417bf41a158SSteven Rostedt 		ret = 1;
1418bf41a158SSteven Rostedt 	} else {
1419*edd813bfSSteven Rostedt 		/* Try to discard the event */
1420*edd813bfSSteven Rostedt 		if (!rb_try_to_discard(cpu_buffer, event)) {
1421bf41a158SSteven Rostedt 			/* Darn, this is just wasted space */
1422bf41a158SSteven Rostedt 			event->time_delta = 0;
1423bf41a158SSteven Rostedt 			event->array[0] = 0;
1424bf41a158SSteven Rostedt 			ret = 0;
14257a8e76a3SSteven Rostedt 		}
1426*edd813bfSSteven Rostedt 	}
14277a8e76a3SSteven Rostedt 
1428bf41a158SSteven Rostedt 	*delta = 0;
1429bf41a158SSteven Rostedt 
1430bf41a158SSteven Rostedt 	return ret;
14317a8e76a3SSteven Rostedt }
14327a8e76a3SSteven Rostedt 
14337a8e76a3SSteven Rostedt static struct ring_buffer_event *
14347a8e76a3SSteven Rostedt rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
14351cd8d735SSteven Rostedt 		      unsigned long length)
14367a8e76a3SSteven Rostedt {
14377a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
1438168b6b1dSSteven Rostedt 	u64 ts, delta = 0;
1439bf41a158SSteven Rostedt 	int commit = 0;
1440818e3dd3SSteven Rostedt 	int nr_loops = 0;
14417a8e76a3SSteven Rostedt 
1442be957c44SSteven Rostedt 	length = rb_calculate_event_length(length);
1443bf41a158SSteven Rostedt  again:
1444818e3dd3SSteven Rostedt 	/*
1445818e3dd3SSteven Rostedt 	 * We allow for interrupts to reenter here and do a trace.
1446818e3dd3SSteven Rostedt 	 * If one does, it will cause this original code to loop
1447818e3dd3SSteven Rostedt 	 * back here. Even with heavy interrupts happening, this
1448818e3dd3SSteven Rostedt 	 * should only happen a few times in a row. If this happens
1449818e3dd3SSteven Rostedt 	 * 1000 times in a row, there must be either an interrupt
1450818e3dd3SSteven Rostedt 	 * storm or we have something buggy.
1451818e3dd3SSteven Rostedt 	 * Bail!
1452818e3dd3SSteven Rostedt 	 */
14533e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1454818e3dd3SSteven Rostedt 		return NULL;
1455818e3dd3SSteven Rostedt 
145688eb0125SSteven Rostedt 	ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
14577a8e76a3SSteven Rostedt 
1458bf41a158SSteven Rostedt 	/*
1459bf41a158SSteven Rostedt 	 * Only the first commit can update the timestamp.
1460bf41a158SSteven Rostedt 	 * Yes there is a race here. If an interrupt comes in
1461bf41a158SSteven Rostedt 	 * just after the conditional and it traces too, then it
1462bf41a158SSteven Rostedt 	 * will also check the deltas. More than one timestamp may
1463bf41a158SSteven Rostedt 	 * also be made. But only the entry that did the actual
1464bf41a158SSteven Rostedt 	 * commit will be something other than zero.
1465bf41a158SSteven Rostedt 	 */
14660f0c85fcSSteven Rostedt 	if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
1467bf41a158SSteven Rostedt 		   rb_page_write(cpu_buffer->tail_page) ==
14680f0c85fcSSteven Rostedt 		   rb_commit_index(cpu_buffer))) {
1469168b6b1dSSteven Rostedt 		u64 diff;
1470bf41a158SSteven Rostedt 
1471168b6b1dSSteven Rostedt 		diff = ts - cpu_buffer->write_stamp;
14727a8e76a3SSteven Rostedt 
1473168b6b1dSSteven Rostedt 		/* make sure this diff is calculated here */
1474bf41a158SSteven Rostedt 		barrier();
14757a8e76a3SSteven Rostedt 
1476bf41a158SSteven Rostedt 		/* Did the write stamp get updated already? */
1477bf41a158SSteven Rostedt 		if (unlikely(ts < cpu_buffer->write_stamp))
1478168b6b1dSSteven Rostedt 			goto get_event;
1479bf41a158SSteven Rostedt 
1480168b6b1dSSteven Rostedt 		delta = diff;
1481168b6b1dSSteven Rostedt 		if (unlikely(test_time_stamp(delta))) {
1482bf41a158SSteven Rostedt 
1483bf41a158SSteven Rostedt 			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1484bf41a158SSteven Rostedt 			if (commit == -EBUSY)
14857a8e76a3SSteven Rostedt 				return NULL;
1486bf41a158SSteven Rostedt 
1487bf41a158SSteven Rostedt 			if (commit == -EAGAIN)
1488bf41a158SSteven Rostedt 				goto again;
1489bf41a158SSteven Rostedt 
1490bf41a158SSteven Rostedt 			RB_WARN_ON(cpu_buffer, commit < 0);
14917a8e76a3SSteven Rostedt 		}
1492168b6b1dSSteven Rostedt 	}
14937a8e76a3SSteven Rostedt 
1494168b6b1dSSteven Rostedt  get_event:
14951cd8d735SSteven Rostedt 	event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
1496168b6b1dSSteven Rostedt 	if (unlikely(PTR_ERR(event) == -EAGAIN))
1497bf41a158SSteven Rostedt 		goto again;
14987a8e76a3SSteven Rostedt 
1499bf41a158SSteven Rostedt 	if (!event) {
1500bf41a158SSteven Rostedt 		if (unlikely(commit))
1501bf41a158SSteven Rostedt 			/*
1502bf41a158SSteven Rostedt 			 * Ouch! We needed a timestamp and it was committed. But
1503bf41a158SSteven Rostedt 			 * we didn't get our event reserved.
1504bf41a158SSteven Rostedt 			 */
1505bf41a158SSteven Rostedt 			rb_set_commit_to_write(cpu_buffer);
1506bf41a158SSteven Rostedt 		return NULL;
1507bf41a158SSteven Rostedt 	}
1508bf41a158SSteven Rostedt 
1509bf41a158SSteven Rostedt 	/*
1510bf41a158SSteven Rostedt 	 * If the timestamp was committed, make the commit our entry
1511bf41a158SSteven Rostedt 	 * now so that we will update it when needed.
1512bf41a158SSteven Rostedt 	 */
15130f0c85fcSSteven Rostedt 	if (unlikely(commit))
1514bf41a158SSteven Rostedt 		rb_set_commit_event(cpu_buffer, event);
1515bf41a158SSteven Rostedt 	else if (!rb_is_commit(cpu_buffer, event))
15167a8e76a3SSteven Rostedt 		delta = 0;
15177a8e76a3SSteven Rostedt 
15187a8e76a3SSteven Rostedt 	event->time_delta = delta;
15197a8e76a3SSteven Rostedt 
15207a8e76a3SSteven Rostedt 	return event;
15217a8e76a3SSteven Rostedt }
15227a8e76a3SSteven Rostedt 
1523aa18efb2SSteven Rostedt #define TRACE_RECURSIVE_DEPTH 16
1524261842b7SSteven Rostedt 
1525261842b7SSteven Rostedt static int trace_recursive_lock(void)
1526261842b7SSteven Rostedt {
1527aa18efb2SSteven Rostedt 	current->trace_recursion++;
1528261842b7SSteven Rostedt 
1529aa18efb2SSteven Rostedt 	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
1530aa18efb2SSteven Rostedt 		return 0;
1531261842b7SSteven Rostedt 
1532261842b7SSteven Rostedt 	/* Disable all tracing before we do anything else */
1533261842b7SSteven Rostedt 	tracing_off_permanent();
1534e057a5e5SFrederic Weisbecker 
15357d7d2b80SSteven Rostedt 	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
1536e057a5e5SFrederic Weisbecker 		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
1537aa18efb2SSteven Rostedt 		    current->trace_recursion,
1538e057a5e5SFrederic Weisbecker 		    hardirq_count() >> HARDIRQ_SHIFT,
1539e057a5e5SFrederic Weisbecker 		    softirq_count() >> SOFTIRQ_SHIFT,
1540e057a5e5SFrederic Weisbecker 		    in_nmi());
1541e057a5e5SFrederic Weisbecker 
1542261842b7SSteven Rostedt 	WARN_ON_ONCE(1);
1543261842b7SSteven Rostedt 	return -1;
1544261842b7SSteven Rostedt }
1545261842b7SSteven Rostedt 
1546261842b7SSteven Rostedt static void trace_recursive_unlock(void)
1547261842b7SSteven Rostedt {
1548aa18efb2SSteven Rostedt 	WARN_ON_ONCE(!current->trace_recursion);
1549261842b7SSteven Rostedt 
1550aa18efb2SSteven Rostedt 	current->trace_recursion--;
1551261842b7SSteven Rostedt }
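/*
 * Usage sketch (illustrative): the reserve path takes the recursion
 * guard before touching the buffer and every exit path drops it:
 *
 *	if (trace_recursive_lock())
 *		goto out_nocheck;
 *	...
 *	trace_recursive_unlock();
 *
 * A depth beyond TRACE_RECURSIVE_DEPTH means the buffer code re-entered
 * itself more times than irq/softirq/NMI nesting can explain, so
 * tracing is shut off permanently rather than looping forever.
 */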
1552261842b7SSteven Rostedt 
1553bf41a158SSteven Rostedt static DEFINE_PER_CPU(int, rb_need_resched);
1554bf41a158SSteven Rostedt 
15557a8e76a3SSteven Rostedt /**
15567a8e76a3SSteven Rostedt  * ring_buffer_lock_reserve - reserve a part of the buffer
15577a8e76a3SSteven Rostedt  * @buffer: the ring buffer to reserve from
15587a8e76a3SSteven Rostedt  * @length: the length of the data to reserve (excluding event header)
15597a8e76a3SSteven Rostedt  *
15607a8e76a3SSteven Rostedt  * Returns a reserved event on the ring buffer to copy directly to.
15617a8e76a3SSteven Rostedt  * The user of this interface will need to get the body to write into
15627a8e76a3SSteven Rostedt  * and can use the ring_buffer_event_data() interface.
15637a8e76a3SSteven Rostedt  *
15647a8e76a3SSteven Rostedt  * The length is the length of the data needed, not the event length
15657a8e76a3SSteven Rostedt  * which also includes the event header.
15667a8e76a3SSteven Rostedt  *
15677a8e76a3SSteven Rostedt  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
15687a8e76a3SSteven Rostedt  * If NULL is returned, then nothing has been allocated or locked.
15697a8e76a3SSteven Rostedt  */
15707a8e76a3SSteven Rostedt struct ring_buffer_event *
15710a987751SArnaldo Carvalho de Melo ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
15727a8e76a3SSteven Rostedt {
15737a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
15747a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
1575bf41a158SSteven Rostedt 	int cpu, resched;
15767a8e76a3SSteven Rostedt 
1577033601a3SSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
1578a3583244SSteven Rostedt 		return NULL;
1579a3583244SSteven Rostedt 
15807a8e76a3SSteven Rostedt 	if (atomic_read(&buffer->record_disabled))
15817a8e76a3SSteven Rostedt 		return NULL;
15827a8e76a3SSteven Rostedt 
1583bf41a158SSteven Rostedt 	/* If we are tracing schedule, we don't want to recurse */
1584182e9f5fSSteven Rostedt 	resched = ftrace_preempt_disable();
1585bf41a158SSteven Rostedt 
1586261842b7SSteven Rostedt 	if (trace_recursive_lock())
1587261842b7SSteven Rostedt 		goto out_nocheck;
1588261842b7SSteven Rostedt 
15897a8e76a3SSteven Rostedt 	cpu = raw_smp_processor_id();
15907a8e76a3SSteven Rostedt 
15919e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1592d769041fSSteven Rostedt 		goto out;
15937a8e76a3SSteven Rostedt 
15947a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
15957a8e76a3SSteven Rostedt 
15967a8e76a3SSteven Rostedt 	if (atomic_read(&cpu_buffer->record_disabled))
1597d769041fSSteven Rostedt 		goto out;
15987a8e76a3SSteven Rostedt 
1599be957c44SSteven Rostedt 	if (length > BUF_MAX_DATA_SIZE)
1600bf41a158SSteven Rostedt 		goto out;
16017a8e76a3SSteven Rostedt 
16021cd8d735SSteven Rostedt 	event = rb_reserve_next_event(cpu_buffer, length);
16037a8e76a3SSteven Rostedt 	if (!event)
1604d769041fSSteven Rostedt 		goto out;
16057a8e76a3SSteven Rostedt 
1606bf41a158SSteven Rostedt 	/*
1607bf41a158SSteven Rostedt 	 * Need to store resched state on this cpu.
1608bf41a158SSteven Rostedt 	 * Only the first needs to.
1609bf41a158SSteven Rostedt 	 */
1610bf41a158SSteven Rostedt 
1611bf41a158SSteven Rostedt 	if (preempt_count() == 1)
1612bf41a158SSteven Rostedt 		per_cpu(rb_need_resched, cpu) = resched;
1613bf41a158SSteven Rostedt 
16147a8e76a3SSteven Rostedt 	return event;
16157a8e76a3SSteven Rostedt 
1616d769041fSSteven Rostedt  out:
1617261842b7SSteven Rostedt 	trace_recursive_unlock();
1618261842b7SSteven Rostedt 
1619261842b7SSteven Rostedt  out_nocheck:
1620182e9f5fSSteven Rostedt 	ftrace_preempt_enable(resched);
16217a8e76a3SSteven Rostedt 	return NULL;
16227a8e76a3SSteven Rostedt }
1623c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
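/*
 * Usage sketch (illustrative; 'struct my_entry' is a made-up payload):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event);
 *
 * Preemption stays disabled between the two calls, which is why every
 * successful reserve must be paired with a commit (or with
 * ring_buffer_discard_commit() below).
 */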
16247a8e76a3SSteven Rostedt 
16257a8e76a3SSteven Rostedt static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
16267a8e76a3SSteven Rostedt 		      struct ring_buffer_event *event)
16277a8e76a3SSteven Rostedt {
1628e4906effSSteven Rostedt 	local_inc(&cpu_buffer->entries);
1629bf41a158SSteven Rostedt 
1630bf41a158SSteven Rostedt 	/* Only process further if we own the commit */
1631bf41a158SSteven Rostedt 	if (!rb_is_commit(cpu_buffer, event))
1632bf41a158SSteven Rostedt 		return;
1633bf41a158SSteven Rostedt 
1634bf41a158SSteven Rostedt 	cpu_buffer->write_stamp += event->time_delta;
1635bf41a158SSteven Rostedt 
1636bf41a158SSteven Rostedt 	rb_set_commit_to_write(cpu_buffer);
16377a8e76a3SSteven Rostedt }
16387a8e76a3SSteven Rostedt 
16397a8e76a3SSteven Rostedt /**
16407a8e76a3SSteven Rostedt  * ring_buffer_unlock_commit - commit a reserved event
16417a8e76a3SSteven Rostedt  * @buffer: The buffer to commit to
16427a8e76a3SSteven Rostedt  * @event: The event pointer to commit.
16437a8e76a3SSteven Rostedt  *
16447a8e76a3SSteven Rostedt  * This commits the data to the ring buffer, and releases any locks held.
16457a8e76a3SSteven Rostedt  *
16467a8e76a3SSteven Rostedt  * Must be paired with ring_buffer_lock_reserve.
16477a8e76a3SSteven Rostedt  */
16487a8e76a3SSteven Rostedt int ring_buffer_unlock_commit(struct ring_buffer *buffer,
16490a987751SArnaldo Carvalho de Melo 			      struct ring_buffer_event *event)
16507a8e76a3SSteven Rostedt {
16517a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
16527a8e76a3SSteven Rostedt 	int cpu = raw_smp_processor_id();
16537a8e76a3SSteven Rostedt 
16547a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
16557a8e76a3SSteven Rostedt 
16567a8e76a3SSteven Rostedt 	rb_commit(cpu_buffer, event);
16577a8e76a3SSteven Rostedt 
1658261842b7SSteven Rostedt 	trace_recursive_unlock();
1659261842b7SSteven Rostedt 
1660bf41a158SSteven Rostedt 	/*
1661bf41a158SSteven Rostedt 	 * Only the last preempt count needs to restore preemption.
1662bf41a158SSteven Rostedt 	 */
1663182e9f5fSSteven Rostedt 	if (preempt_count() == 1)
1664182e9f5fSSteven Rostedt 		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1665bf41a158SSteven Rostedt 	else
1666bf41a158SSteven Rostedt 		preempt_enable_no_resched_notrace();
16677a8e76a3SSteven Rostedt 
16687a8e76a3SSteven Rostedt 	return 0;
16697a8e76a3SSteven Rostedt }
1670c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
16717a8e76a3SSteven Rostedt 
1672f3b9aae1SFrederic Weisbecker static inline void rb_event_discard(struct ring_buffer_event *event)
1673f3b9aae1SFrederic Weisbecker {
1674334d4169SLai Jiangshan 	/* array[0] holds the actual length for the discarded event */
1675334d4169SLai Jiangshan 	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
1676334d4169SLai Jiangshan 	event->type_len = RINGBUF_TYPE_PADDING;
1677f3b9aae1SFrederic Weisbecker 	/* time delta must be non zero */
1678f3b9aae1SFrederic Weisbecker 	if (!event->time_delta)
1679f3b9aae1SFrederic Weisbecker 		event->time_delta = 1;
1680f3b9aae1SFrederic Weisbecker }
1681f3b9aae1SFrederic Weisbecker 
16827a8e76a3SSteven Rostedt /**
1683fa1b47ddSSteven Rostedt  * ring_buffer_event_discard - discard any event in the ring buffer
1684fa1b47ddSSteven Rostedt  * @event: the event to discard
1685fa1b47ddSSteven Rostedt  *
1686fa1b47ddSSteven Rostedt  * Sometimes an event that is in the ring buffer needs to be ignored.
1687fa1b47ddSSteven Rostedt  * This function lets the user discard an event in the ring buffer
1688fa1b47ddSSteven Rostedt  * and then that event will not be read later.
1689fa1b47ddSSteven Rostedt  *
1690fa1b47ddSSteven Rostedt  * Note, it is up to the user to be careful with this, and protect
1691fa1b47ddSSteven Rostedt  * against races. If the user discards an event that has been consumed
1692fa1b47ddSSteven Rostedt  * it is possible that it could corrupt the ring buffer.
1693fa1b47ddSSteven Rostedt  */
1694fa1b47ddSSteven Rostedt void ring_buffer_event_discard(struct ring_buffer_event *event)
1695fa1b47ddSSteven Rostedt {
1696f3b9aae1SFrederic Weisbecker 	rb_event_discard(event);
1697fa1b47ddSSteven Rostedt }
1698fa1b47ddSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
1699fa1b47ddSSteven Rostedt 
1700fa1b47ddSSteven Rostedt /**
1701fa1b47ddSSteven Rostedt  * ring_buffer_commit_discard - discard an event that has not been committed
1702fa1b47ddSSteven Rostedt  * @buffer: the ring buffer
1703fa1b47ddSSteven Rostedt  * @event: non committed event to discard
1704fa1b47ddSSteven Rostedt  *
1705fa1b47ddSSteven Rostedt  * This is similar to ring_buffer_event_discard but must only be
1706fa1b47ddSSteven Rostedt  * performed on an event that has not been committed yet. The difference
1707fa1b47ddSSteven Rostedt  * is that this will also try to free the event from the ring buffer
1708fa1b47ddSSteven Rostedt  * if another event has not been added behind it.
1709fa1b47ddSSteven Rostedt  *
1710fa1b47ddSSteven Rostedt  * If another event has been added behind it, it will set the event
1711fa1b47ddSSteven Rostedt  * up as discarded, and perform the commit.
1712fa1b47ddSSteven Rostedt  *
1713fa1b47ddSSteven Rostedt  * If this function is called, do not call ring_buffer_unlock_commit on
1714fa1b47ddSSteven Rostedt  * the event.
1715fa1b47ddSSteven Rostedt  */
1716fa1b47ddSSteven Rostedt void ring_buffer_discard_commit(struct ring_buffer *buffer,
1717fa1b47ddSSteven Rostedt 				struct ring_buffer_event *event)
1718fa1b47ddSSteven Rostedt {
1719fa1b47ddSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
1720fa1b47ddSSteven Rostedt 	int cpu;
1721fa1b47ddSSteven Rostedt 
1722fa1b47ddSSteven Rostedt 	/* The event is discarded regardless */
1723f3b9aae1SFrederic Weisbecker 	rb_event_discard(event);
1724fa1b47ddSSteven Rostedt 
1725fa1b47ddSSteven Rostedt 	/*
1726fa1b47ddSSteven Rostedt 	 * This must only be called if the event has not been
1727fa1b47ddSSteven Rostedt 	 * committed yet. Thus we can assume that preemption
1728fa1b47ddSSteven Rostedt 	 * is still disabled.
1729fa1b47ddSSteven Rostedt 	 */
173074f4fd21SSteven Rostedt 	RB_WARN_ON(buffer, preemptible());
1731fa1b47ddSSteven Rostedt 
1732fa1b47ddSSteven Rostedt 	cpu = smp_processor_id();
1733fa1b47ddSSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
1734fa1b47ddSSteven Rostedt 
1735*edd813bfSSteven Rostedt 	if (!rb_try_to_discard(cpu_buffer, event))
1736fa1b47ddSSteven Rostedt 		goto out;
1737fa1b47ddSSteven Rostedt 
1738fa1b47ddSSteven Rostedt 	/*
1739fa1b47ddSSteven Rostedt 	 * The commit is still visible to the reader, so we
1740fa1b47ddSSteven Rostedt 	 * must increment entries.
1741fa1b47ddSSteven Rostedt 	 */
1742e4906effSSteven Rostedt 	local_inc(&cpu_buffer->entries);
1743fa1b47ddSSteven Rostedt  out:
1744fa1b47ddSSteven Rostedt 	/*
1745fa1b47ddSSteven Rostedt 	 * If a write came in and pushed the tail page,
1746fa1b47ddSSteven Rostedt 	 * we still need to update the commit pointer
1747fa1b47ddSSteven Rostedt 	 * if we were the commit.
1748fa1b47ddSSteven Rostedt 	 */
1749fa1b47ddSSteven Rostedt 	if (rb_is_commit(cpu_buffer, event))
1750fa1b47ddSSteven Rostedt 		rb_set_commit_to_write(cpu_buffer);
1751fa1b47ddSSteven Rostedt 
1752f3b9aae1SFrederic Weisbecker 	trace_recursive_unlock();
1753f3b9aae1SFrederic Weisbecker 
1754fa1b47ddSSteven Rostedt 	/*
1755fa1b47ddSSteven Rostedt 	 * Only the last preempt count needs to restore preemption.
1756fa1b47ddSSteven Rostedt 	 */
1757fa1b47ddSSteven Rostedt 	if (preempt_count() == 1)
1758fa1b47ddSSteven Rostedt 		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1759fa1b47ddSSteven Rostedt 	else
1760fa1b47ddSSteven Rostedt 		preempt_enable_no_resched_notrace();
1761fa1b47ddSSteven Rostedt 
1762fa1b47ddSSteven Rostedt }
1763fa1b47ddSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
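/*
 * Usage sketch (illustrative; fill_entry() and filtered_out() stand in
 * for tracer-specific logic): reserve an event, then drop it if a late
 * check decides it should not be recorded:
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	fill_entry(entry);
 *	if (filtered_out(entry))
 *		ring_buffer_discard_commit(buffer, event);
 *	else
 *		ring_buffer_unlock_commit(buffer, event);
 */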
1764fa1b47ddSSteven Rostedt 
1765fa1b47ddSSteven Rostedt /**
17667a8e76a3SSteven Rostedt  * ring_buffer_write - write data to the buffer without reserving
17677a8e76a3SSteven Rostedt  * @buffer: The ring buffer to write to.
17687a8e76a3SSteven Rostedt  * @length: The length of the data being written (excluding the event header)
17697a8e76a3SSteven Rostedt  * @data: The data to write to the buffer.
17707a8e76a3SSteven Rostedt  *
17717a8e76a3SSteven Rostedt  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
17727a8e76a3SSteven Rostedt  * one function. If you already have the data to write to the buffer, it
17737a8e76a3SSteven Rostedt  * may be easier to simply call this function.
17747a8e76a3SSteven Rostedt  *
17757a8e76a3SSteven Rostedt  * Note, like ring_buffer_lock_reserve, the length is the length of the data
17767a8e76a3SSteven Rostedt  * and not the length of the event which would hold the header.
17777a8e76a3SSteven Rostedt  */
17787a8e76a3SSteven Rostedt int ring_buffer_write(struct ring_buffer *buffer,
17797a8e76a3SSteven Rostedt 			unsigned long length,
17807a8e76a3SSteven Rostedt 			void *data)
17817a8e76a3SSteven Rostedt {
17827a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
17837a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
17847a8e76a3SSteven Rostedt 	void *body;
17857a8e76a3SSteven Rostedt 	int ret = -EBUSY;
1786bf41a158SSteven Rostedt 	int cpu, resched;
17877a8e76a3SSteven Rostedt 
1788033601a3SSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
1789a3583244SSteven Rostedt 		return -EBUSY;
1790a3583244SSteven Rostedt 
17917a8e76a3SSteven Rostedt 	if (atomic_read(&buffer->record_disabled))
17927a8e76a3SSteven Rostedt 		return -EBUSY;
17937a8e76a3SSteven Rostedt 
1794182e9f5fSSteven Rostedt 	resched = ftrace_preempt_disable();
1795bf41a158SSteven Rostedt 
17967a8e76a3SSteven Rostedt 	cpu = raw_smp_processor_id();
17977a8e76a3SSteven Rostedt 
17989e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1799d769041fSSteven Rostedt 		goto out;
18007a8e76a3SSteven Rostedt 
18017a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
18027a8e76a3SSteven Rostedt 
18037a8e76a3SSteven Rostedt 	if (atomic_read(&cpu_buffer->record_disabled))
18047a8e76a3SSteven Rostedt 		goto out;
18057a8e76a3SSteven Rostedt 
1806be957c44SSteven Rostedt 	if (length > BUF_MAX_DATA_SIZE)
1807be957c44SSteven Rostedt 		goto out;
1808be957c44SSteven Rostedt 
1809be957c44SSteven Rostedt 	event = rb_reserve_next_event(cpu_buffer, length);
18107a8e76a3SSteven Rostedt 	if (!event)
18117a8e76a3SSteven Rostedt 		goto out;
18127a8e76a3SSteven Rostedt 
18137a8e76a3SSteven Rostedt 	body = rb_event_data(event);
18147a8e76a3SSteven Rostedt 
18157a8e76a3SSteven Rostedt 	memcpy(body, data, length);
18167a8e76a3SSteven Rostedt 
18177a8e76a3SSteven Rostedt 	rb_commit(cpu_buffer, event);
18187a8e76a3SSteven Rostedt 
18197a8e76a3SSteven Rostedt 	ret = 0;
18207a8e76a3SSteven Rostedt  out:
1821182e9f5fSSteven Rostedt 	ftrace_preempt_enable(resched);
18227a8e76a3SSteven Rostedt 
18237a8e76a3SSteven Rostedt 	return ret;
18247a8e76a3SSteven Rostedt }
1825c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_write);
18267a8e76a3SSteven Rostedt 
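/*
 * A per-cpu buffer is empty when the reader page has been fully read
 * and either the commit page is still the reader page, or the commit
 * page is the head page and everything committed there has already
 * been read as well.
 */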
182734a148bfSAndrew Morton static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1828bf41a158SSteven Rostedt {
1829bf41a158SSteven Rostedt 	struct buffer_page *reader = cpu_buffer->reader_page;
1830bf41a158SSteven Rostedt 	struct buffer_page *head = cpu_buffer->head_page;
1831bf41a158SSteven Rostedt 	struct buffer_page *commit = cpu_buffer->commit_page;
1832bf41a158SSteven Rostedt 
1833bf41a158SSteven Rostedt 	return reader->read == rb_page_commit(reader) &&
1834bf41a158SSteven Rostedt 		(commit == reader ||
1835bf41a158SSteven Rostedt 		 (commit == head &&
1836bf41a158SSteven Rostedt 		  head->read == rb_page_commit(commit)));
1837bf41a158SSteven Rostedt }
1838bf41a158SSteven Rostedt 
18397a8e76a3SSteven Rostedt /**
18407a8e76a3SSteven Rostedt  * ring_buffer_record_disable - stop all writes into the buffer
18417a8e76a3SSteven Rostedt  * @buffer: The ring buffer to stop writes to.
18427a8e76a3SSteven Rostedt  *
18437a8e76a3SSteven Rostedt  * This prevents all writes to the buffer. Any attempt to write
18447a8e76a3SSteven Rostedt  * to the buffer after this will fail and return NULL.
18457a8e76a3SSteven Rostedt  *
18467a8e76a3SSteven Rostedt  * The caller should call synchronize_sched() after this.
18477a8e76a3SSteven Rostedt  */
18487a8e76a3SSteven Rostedt void ring_buffer_record_disable(struct ring_buffer *buffer)
18497a8e76a3SSteven Rostedt {
18507a8e76a3SSteven Rostedt 	atomic_inc(&buffer->record_disabled);
18517a8e76a3SSteven Rostedt }
1852c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
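/*
 * Usage sketch (illustrative): quiescing the buffer before reading it
 * destructively, as the comment above suggests:
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();	<- wait for writers already in flight
 *	... read or reset the buffer ...
 *	ring_buffer_record_enable(buffer);
 */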
18537a8e76a3SSteven Rostedt 
18547a8e76a3SSteven Rostedt /**
18557a8e76a3SSteven Rostedt  * ring_buffer_record_enable - enable writes to the buffer
18567a8e76a3SSteven Rostedt  * @buffer: The ring buffer to enable writes
18577a8e76a3SSteven Rostedt  *
18587a8e76a3SSteven Rostedt  * Note, multiple disables will need the same number of enables
18597a8e76a3SSteven Rostedt  * to truly enable the writing (much like preempt_disable).
18607a8e76a3SSteven Rostedt  */
18617a8e76a3SSteven Rostedt void ring_buffer_record_enable(struct ring_buffer *buffer)
18627a8e76a3SSteven Rostedt {
18637a8e76a3SSteven Rostedt 	atomic_dec(&buffer->record_disabled);
18647a8e76a3SSteven Rostedt }
1865c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
18667a8e76a3SSteven Rostedt 
18677a8e76a3SSteven Rostedt /**
18687a8e76a3SSteven Rostedt  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
18697a8e76a3SSteven Rostedt  * @buffer: The ring buffer to stop writes to.
18707a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to stop
18717a8e76a3SSteven Rostedt  *
18727a8e76a3SSteven Rostedt  * This prevents all writes to the buffer. Any attempt to write
18737a8e76a3SSteven Rostedt  * to the buffer after this will fail and return NULL.
18747a8e76a3SSteven Rostedt  *
18757a8e76a3SSteven Rostedt  * The caller should call synchronize_sched() after this.
18767a8e76a3SSteven Rostedt  */
18777a8e76a3SSteven Rostedt void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
18787a8e76a3SSteven Rostedt {
18797a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
18807a8e76a3SSteven Rostedt 
18819e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
18828aabee57SSteven Rostedt 		return;
18837a8e76a3SSteven Rostedt 
18847a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
18857a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
18867a8e76a3SSteven Rostedt }
1887c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
18887a8e76a3SSteven Rostedt 
18897a8e76a3SSteven Rostedt /**
18907a8e76a3SSteven Rostedt  * ring_buffer_record_enable_cpu - enable writes to the buffer
18917a8e76a3SSteven Rostedt  * @buffer: The ring buffer to enable writes
18927a8e76a3SSteven Rostedt  * @cpu: The CPU to enable.
18937a8e76a3SSteven Rostedt  *
18947a8e76a3SSteven Rostedt  * Note, multiple disables will need the same number of enables
18957a8e76a3SSteven Rostedt  * to truly enable the writing (much like preempt_disable).
18967a8e76a3SSteven Rostedt  */
18977a8e76a3SSteven Rostedt void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
18987a8e76a3SSteven Rostedt {
18997a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
19007a8e76a3SSteven Rostedt 
19019e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
19028aabee57SSteven Rostedt 		return;
19037a8e76a3SSteven Rostedt 
19047a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
19057a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
19067a8e76a3SSteven Rostedt }
1907c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
19087a8e76a3SSteven Rostedt 
19097a8e76a3SSteven Rostedt /**
19107a8e76a3SSteven Rostedt  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
19117a8e76a3SSteven Rostedt  * @buffer: The ring buffer
19127a8e76a3SSteven Rostedt  * @cpu: The per CPU buffer to get the entries from.
19137a8e76a3SSteven Rostedt  */
19147a8e76a3SSteven Rostedt unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
19157a8e76a3SSteven Rostedt {
19167a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
19178aabee57SSteven Rostedt 	unsigned long ret;
19187a8e76a3SSteven Rostedt 
19199e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
19208aabee57SSteven Rostedt 		return 0;
19217a8e76a3SSteven Rostedt 
19227a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
1923e4906effSSteven Rostedt 	ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
1924e4906effSSteven Rostedt 		- cpu_buffer->read;
1925554f786eSSteven Rostedt 
1926554f786eSSteven Rostedt 	return ret;
19277a8e76a3SSteven Rostedt }
1928c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
19297a8e76a3SSteven Rostedt 
19307a8e76a3SSteven Rostedt /**
19317a8e76a3SSteven Rostedt  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
19327a8e76a3SSteven Rostedt  * @buffer: The ring buffer
19337a8e76a3SSteven Rostedt  * @cpu: The per CPU buffer to get the number of overruns from
19347a8e76a3SSteven Rostedt  */
19357a8e76a3SSteven Rostedt unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
19367a8e76a3SSteven Rostedt {
19377a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
19388aabee57SSteven Rostedt 	unsigned long ret;
19397a8e76a3SSteven Rostedt 
19409e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
19418aabee57SSteven Rostedt 		return 0;
19427a8e76a3SSteven Rostedt 
19437a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
1944554f786eSSteven Rostedt 	ret = cpu_buffer->overrun;
1945554f786eSSteven Rostedt 
1946554f786eSSteven Rostedt 	return ret;
19477a8e76a3SSteven Rostedt }
1948c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
19497a8e76a3SSteven Rostedt 
19507a8e76a3SSteven Rostedt /**
1951f0d2c681SSteven Rostedt  * ring_buffer_nmi_dropped_cpu - get the number of NMIs that were dropped
1952f0d2c681SSteven Rostedt  * @buffer: The ring buffer
1953f0d2c681SSteven Rostedt  * @cpu: The per CPU buffer to get the number of dropped NMIs from
1954f0d2c681SSteven Rostedt  */
1955f0d2c681SSteven Rostedt unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
1956f0d2c681SSteven Rostedt {
1957f0d2c681SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
1958f0d2c681SSteven Rostedt 	unsigned long ret;
1959f0d2c681SSteven Rostedt 
1960f0d2c681SSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1961f0d2c681SSteven Rostedt 		return 0;
1962f0d2c681SSteven Rostedt 
1963f0d2c681SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
1964f0d2c681SSteven Rostedt 	ret = cpu_buffer->nmi_dropped;
1965f0d2c681SSteven Rostedt 
1966f0d2c681SSteven Rostedt 	return ret;
1967f0d2c681SSteven Rostedt }
1968f0d2c681SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);
1969f0d2c681SSteven Rostedt 
1970f0d2c681SSteven Rostedt /**
1971f0d2c681SSteven Rostedt  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
1972f0d2c681SSteven Rostedt  * @buffer: The ring buffer
1973f0d2c681SSteven Rostedt  * @cpu: The per CPU buffer to get the number of overruns from
1974f0d2c681SSteven Rostedt  */
1975f0d2c681SSteven Rostedt unsigned long
1976f0d2c681SSteven Rostedt ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
1977f0d2c681SSteven Rostedt {
1978f0d2c681SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
1979f0d2c681SSteven Rostedt 	unsigned long ret;
1980f0d2c681SSteven Rostedt 
1981f0d2c681SSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1982f0d2c681SSteven Rostedt 		return 0;
1983f0d2c681SSteven Rostedt 
1984f0d2c681SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
1985f0d2c681SSteven Rostedt 	ret = cpu_buffer->commit_overrun;
1986f0d2c681SSteven Rostedt 
1987f0d2c681SSteven Rostedt 	return ret;
1988f0d2c681SSteven Rostedt }
1989f0d2c681SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
1990f0d2c681SSteven Rostedt 
1991f0d2c681SSteven Rostedt /**
19927a8e76a3SSteven Rostedt  * ring_buffer_entries - get the number of entries in a buffer
19937a8e76a3SSteven Rostedt  * @buffer: The ring buffer
19947a8e76a3SSteven Rostedt  *
19957a8e76a3SSteven Rostedt  * Returns the total number of entries in the ring buffer
19967a8e76a3SSteven Rostedt  * (all CPU entries)
19977a8e76a3SSteven Rostedt  */
19987a8e76a3SSteven Rostedt unsigned long ring_buffer_entries(struct ring_buffer *buffer)
19997a8e76a3SSteven Rostedt {
20007a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
20017a8e76a3SSteven Rostedt 	unsigned long entries = 0;
20027a8e76a3SSteven Rostedt 	int cpu;
20037a8e76a3SSteven Rostedt 
20047a8e76a3SSteven Rostedt 	/* if you care about this being correct, lock the buffer */
20057a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
20067a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
2007e4906effSSteven Rostedt 		entries += (local_read(&cpu_buffer->entries) -
2008e4906effSSteven Rostedt 			    cpu_buffer->overrun) - cpu_buffer->read;
20097a8e76a3SSteven Rostedt 	}
20107a8e76a3SSteven Rostedt 
20117a8e76a3SSteven Rostedt 	return entries;
20127a8e76a3SSteven Rostedt }
2013c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries);
20147a8e76a3SSteven Rostedt 
20157a8e76a3SSteven Rostedt /**
20167a8e76a3SSteven Rostedt  * ring_buffer_overruns - get the number of overruns in the buffer
20177a8e76a3SSteven Rostedt  * @buffer: The ring buffer
20187a8e76a3SSteven Rostedt  *
20197a8e76a3SSteven Rostedt  * Returns the total number of overruns in the ring buffer
20207a8e76a3SSteven Rostedt  * (all CPU entries)
20217a8e76a3SSteven Rostedt  */
20227a8e76a3SSteven Rostedt unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
20237a8e76a3SSteven Rostedt {
20247a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
20257a8e76a3SSteven Rostedt 	unsigned long overruns = 0;
20267a8e76a3SSteven Rostedt 	int cpu;
20277a8e76a3SSteven Rostedt 
20287a8e76a3SSteven Rostedt 	/* if you care about this being correct, lock the buffer */
20297a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
20307a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
20317a8e76a3SSteven Rostedt 		overruns += cpu_buffer->overrun;
20327a8e76a3SSteven Rostedt 	}
20337a8e76a3SSteven Rostedt 
20347a8e76a3SSteven Rostedt 	return overruns;
20357a8e76a3SSteven Rostedt }
2036c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overruns);
20377a8e76a3SSteven Rostedt 
2038642edba5SSteven Rostedt static void rb_iter_reset(struct ring_buffer_iter *iter)
20397a8e76a3SSteven Rostedt {
20407a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
20417a8e76a3SSteven Rostedt 
2042d769041fSSteven Rostedt 	/* Iterator usage is expected to have record disabled */
2043d769041fSSteven Rostedt 	if (list_empty(&cpu_buffer->reader_page->list)) {
20447a8e76a3SSteven Rostedt 		iter->head_page = cpu_buffer->head_page;
20456f807acdSSteven Rostedt 		iter->head = cpu_buffer->head_page->read;
2046d769041fSSteven Rostedt 	} else {
2047d769041fSSteven Rostedt 		iter->head_page = cpu_buffer->reader_page;
20486f807acdSSteven Rostedt 		iter->head = cpu_buffer->reader_page->read;
2049d769041fSSteven Rostedt 	}
2050d769041fSSteven Rostedt 	if (iter->head)
2051d769041fSSteven Rostedt 		iter->read_stamp = cpu_buffer->read_stamp;
2052d769041fSSteven Rostedt 	else
2053abc9b56dSSteven Rostedt 		iter->read_stamp = iter->head_page->page->time_stamp;
2054642edba5SSteven Rostedt }
2055f83c9d0fSSteven Rostedt 
2056642edba5SSteven Rostedt /**
2057642edba5SSteven Rostedt  * ring_buffer_iter_reset - reset an iterator
2058642edba5SSteven Rostedt  * @iter: The iterator to reset
2059642edba5SSteven Rostedt  *
2060642edba5SSteven Rostedt  * Resets the iterator, so that it will start from the beginning
2061642edba5SSteven Rostedt  * again.
2062642edba5SSteven Rostedt  */
2063642edba5SSteven Rostedt void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2064642edba5SSteven Rostedt {
2065554f786eSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
2066642edba5SSteven Rostedt 	unsigned long flags;
2067642edba5SSteven Rostedt 
2068554f786eSSteven Rostedt 	if (!iter)
2069554f786eSSteven Rostedt 		return;
2070554f786eSSteven Rostedt 
2071554f786eSSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
2072554f786eSSteven Rostedt 
2073642edba5SSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2074642edba5SSteven Rostedt 	rb_iter_reset(iter);
2075f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
20767a8e76a3SSteven Rostedt }
2077c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
20787a8e76a3SSteven Rostedt 
20797a8e76a3SSteven Rostedt /**
20807a8e76a3SSteven Rostedt  * ring_buffer_iter_empty - check if an iterator has no more to read
20817a8e76a3SSteven Rostedt  * @iter: The iterator to check
20827a8e76a3SSteven Rostedt  */
20837a8e76a3SSteven Rostedt int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
20847a8e76a3SSteven Rostedt {
20857a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
20867a8e76a3SSteven Rostedt 
20877a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
20887a8e76a3SSteven Rostedt 
2089bf41a158SSteven Rostedt 	return iter->head_page == cpu_buffer->commit_page &&
2090bf41a158SSteven Rostedt 		iter->head == rb_commit_index(cpu_buffer);
20917a8e76a3SSteven Rostedt }
2092c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
20937a8e76a3SSteven Rostedt 
20947a8e76a3SSteven Rostedt static void
20957a8e76a3SSteven Rostedt rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
20967a8e76a3SSteven Rostedt 		     struct ring_buffer_event *event)
20977a8e76a3SSteven Rostedt {
20987a8e76a3SSteven Rostedt 	u64 delta;
20997a8e76a3SSteven Rostedt 
2100334d4169SLai Jiangshan 	switch (event->type_len) {
21017a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
21027a8e76a3SSteven Rostedt 		return;
21037a8e76a3SSteven Rostedt 
21047a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
21057a8e76a3SSteven Rostedt 		delta = event->array[0];
21067a8e76a3SSteven Rostedt 		delta <<= TS_SHIFT;
21077a8e76a3SSteven Rostedt 		delta += event->time_delta;
21087a8e76a3SSteven Rostedt 		cpu_buffer->read_stamp += delta;
21097a8e76a3SSteven Rostedt 		return;
21107a8e76a3SSteven Rostedt 
21117a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
21127a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
21137a8e76a3SSteven Rostedt 		return;
21147a8e76a3SSteven Rostedt 
21157a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
21167a8e76a3SSteven Rostedt 		cpu_buffer->read_stamp += event->time_delta;
21177a8e76a3SSteven Rostedt 		return;
21187a8e76a3SSteven Rostedt 
21197a8e76a3SSteven Rostedt 	default:
21207a8e76a3SSteven Rostedt 		BUG();
21217a8e76a3SSteven Rostedt 	}
21227a8e76a3SSteven Rostedt 	return;
21237a8e76a3SSteven Rostedt }
21247a8e76a3SSteven Rostedt 
21257a8e76a3SSteven Rostedt static void
21267a8e76a3SSteven Rostedt rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
21277a8e76a3SSteven Rostedt 			  struct ring_buffer_event *event)
21287a8e76a3SSteven Rostedt {
21297a8e76a3SSteven Rostedt 	u64 delta;
21307a8e76a3SSteven Rostedt 
2131334d4169SLai Jiangshan 	switch (event->type_len) {
21327a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
21337a8e76a3SSteven Rostedt 		return;
21347a8e76a3SSteven Rostedt 
21357a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
21367a8e76a3SSteven Rostedt 		delta = event->array[0];
21377a8e76a3SSteven Rostedt 		delta <<= TS_SHIFT;
21387a8e76a3SSteven Rostedt 		delta += event->time_delta;
21397a8e76a3SSteven Rostedt 		iter->read_stamp += delta;
21407a8e76a3SSteven Rostedt 		return;
21417a8e76a3SSteven Rostedt 
21427a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
21437a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
21447a8e76a3SSteven Rostedt 		return;
21457a8e76a3SSteven Rostedt 
21467a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
21477a8e76a3SSteven Rostedt 		iter->read_stamp += event->time_delta;
21487a8e76a3SSteven Rostedt 		return;
21497a8e76a3SSteven Rostedt 
21507a8e76a3SSteven Rostedt 	default:
21517a8e76a3SSteven Rostedt 		BUG();
21527a8e76a3SSteven Rostedt 	}
21537a8e76a3SSteven Rostedt 	return;
21547a8e76a3SSteven Rostedt }
21557a8e76a3SSteven Rostedt 
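/*
 * Return the page the reader should read from next.  If the current
 * reader page still has unread data it is returned as is.  If the reader
 * has caught up with the writer, NULL is returned.  Otherwise the empty
 * reader page is spliced in where the head page was, the old head page
 * becomes the new reader page, and the code loops to return it.
 */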
2156d769041fSSteven Rostedt static struct buffer_page *
2157d769041fSSteven Rostedt rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
21587a8e76a3SSteven Rostedt {
2159d769041fSSteven Rostedt 	struct buffer_page *reader = NULL;
2160d769041fSSteven Rostedt 	unsigned long flags;
2161818e3dd3SSteven Rostedt 	int nr_loops = 0;
2162d769041fSSteven Rostedt 
21633e03fb7fSSteven Rostedt 	local_irq_save(flags);
21643e03fb7fSSteven Rostedt 	__raw_spin_lock(&cpu_buffer->lock);
2165d769041fSSteven Rostedt 
2166d769041fSSteven Rostedt  again:
2167818e3dd3SSteven Rostedt 	/*
2168818e3dd3SSteven Rostedt 	 * This should normally only loop twice. But because the
2169818e3dd3SSteven Rostedt 	 * start of the reader inserts an empty page, it causes
2170818e3dd3SSteven Rostedt 	 * a case where we will loop three times. There should be no
2171818e3dd3SSteven Rostedt 	 * reason to loop four times (that I know of).
2172818e3dd3SSteven Rostedt 	 */
21733e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
2174818e3dd3SSteven Rostedt 		reader = NULL;
2175818e3dd3SSteven Rostedt 		goto out;
2176818e3dd3SSteven Rostedt 	}
2177818e3dd3SSteven Rostedt 
2178d769041fSSteven Rostedt 	reader = cpu_buffer->reader_page;
2179d769041fSSteven Rostedt 
2180d769041fSSteven Rostedt 	/* If there's more to read, return this page */
2181bf41a158SSteven Rostedt 	if (cpu_buffer->reader_page->read < rb_page_size(reader))
2182d769041fSSteven Rostedt 		goto out;
2183d769041fSSteven Rostedt 
2184d769041fSSteven Rostedt 	/* Never should we have an index greater than the size */
21853e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer,
21863e89c7bbSSteven Rostedt 		       cpu_buffer->reader_page->read > rb_page_size(reader)))
21873e89c7bbSSteven Rostedt 		goto out;
2188d769041fSSteven Rostedt 
2189d769041fSSteven Rostedt 	/* check if we caught up to the tail */
2190d769041fSSteven Rostedt 	reader = NULL;
2191bf41a158SSteven Rostedt 	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
2192d769041fSSteven Rostedt 		goto out;
21937a8e76a3SSteven Rostedt 
21947a8e76a3SSteven Rostedt 	/*
2195d769041fSSteven Rostedt 	 * Splice the empty reader page into the list around the head.
2196d769041fSSteven Rostedt 	 * Reset the reader page to size zero.
21977a8e76a3SSteven Rostedt 	 */
2198d769041fSSteven Rostedt 
2199d769041fSSteven Rostedt 	reader = cpu_buffer->head_page;
2200d769041fSSteven Rostedt 	cpu_buffer->reader_page->list.next = reader->list.next;
2201d769041fSSteven Rostedt 	cpu_buffer->reader_page->list.prev = reader->list.prev;
2202bf41a158SSteven Rostedt 
2203bf41a158SSteven Rostedt 	local_set(&cpu_buffer->reader_page->write, 0);
2204778c55d4SSteven Rostedt 	local_set(&cpu_buffer->reader_page->entries, 0);
2205abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->reader_page->page->commit, 0);
2206d769041fSSteven Rostedt 
2207d769041fSSteven Rostedt 	/* Make the reader page now replace the head */
2208d769041fSSteven Rostedt 	reader->list.prev->next = &cpu_buffer->reader_page->list;
2209d769041fSSteven Rostedt 	reader->list.next->prev = &cpu_buffer->reader_page->list;
2210d769041fSSteven Rostedt 
2211d769041fSSteven Rostedt 	/*
2212d769041fSSteven Rostedt 	 * If the tail is on the reader, then we must set the head
2213d769041fSSteven Rostedt 	 * to the inserted page, otherwise we set it one before.
2214d769041fSSteven Rostedt 	 */
2215d769041fSSteven Rostedt 	cpu_buffer->head_page = cpu_buffer->reader_page;
2216d769041fSSteven Rostedt 
2217bf41a158SSteven Rostedt 	if (cpu_buffer->commit_page != reader)
22187a8e76a3SSteven Rostedt 		rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2219d769041fSSteven Rostedt 
2220d769041fSSteven Rostedt 	/* Finally update the reader page to the new head */
2221d769041fSSteven Rostedt 	cpu_buffer->reader_page = reader;
2222d769041fSSteven Rostedt 	rb_reset_reader_page(cpu_buffer);
2223d769041fSSteven Rostedt 
2224d769041fSSteven Rostedt 	goto again;
2225d769041fSSteven Rostedt 
2226d769041fSSteven Rostedt  out:
22273e03fb7fSSteven Rostedt 	__raw_spin_unlock(&cpu_buffer->lock);
22283e03fb7fSSteven Rostedt 	local_irq_restore(flags);
2229d769041fSSteven Rostedt 
2230d769041fSSteven Rostedt 	return reader;
22317a8e76a3SSteven Rostedt }
22327a8e76a3SSteven Rostedt 
2233d769041fSSteven Rostedt static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2234d769041fSSteven Rostedt {
2235d769041fSSteven Rostedt 	struct ring_buffer_event *event;
2236d769041fSSteven Rostedt 	struct buffer_page *reader;
2237d769041fSSteven Rostedt 	unsigned length;
2238d769041fSSteven Rostedt 
2239d769041fSSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
2240d769041fSSteven Rostedt 
2241d769041fSSteven Rostedt 	/* This function should not be called when buffer is empty */
22423e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, !reader))
22433e89c7bbSSteven Rostedt 		return;
2244d769041fSSteven Rostedt 
2245d769041fSSteven Rostedt 	event = rb_reader_event(cpu_buffer);
22467a8e76a3SSteven Rostedt 
2247334d4169SLai Jiangshan 	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
2248334d4169SLai Jiangshan 			|| rb_discarded_event(event))
2249e4906effSSteven Rostedt 		cpu_buffer->read++;
22507a8e76a3SSteven Rostedt 
22517a8e76a3SSteven Rostedt 	rb_update_read_stamp(cpu_buffer, event);
22527a8e76a3SSteven Rostedt 
2253d769041fSSteven Rostedt 	length = rb_event_length(event);
22546f807acdSSteven Rostedt 	cpu_buffer->reader_page->read += length;
22557a8e76a3SSteven Rostedt }
22567a8e76a3SSteven Rostedt 
22577a8e76a3SSteven Rostedt static void rb_advance_iter(struct ring_buffer_iter *iter)
22587a8e76a3SSteven Rostedt {
22597a8e76a3SSteven Rostedt 	struct ring_buffer *buffer;
22607a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
22617a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
22627a8e76a3SSteven Rostedt 	unsigned length;
22637a8e76a3SSteven Rostedt 
22647a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
22657a8e76a3SSteven Rostedt 	buffer = cpu_buffer->buffer;
22667a8e76a3SSteven Rostedt 
22677a8e76a3SSteven Rostedt 	/*
22687a8e76a3SSteven Rostedt 	 * Check if we are at the end of the buffer.
22697a8e76a3SSteven Rostedt 	 */
2270bf41a158SSteven Rostedt 	if (iter->head >= rb_page_size(iter->head_page)) {
22713e89c7bbSSteven Rostedt 		if (RB_WARN_ON(buffer,
22723e89c7bbSSteven Rostedt 			       iter->head_page == cpu_buffer->commit_page))
22733e89c7bbSSteven Rostedt 			return;
2274d769041fSSteven Rostedt 		rb_inc_iter(iter);
22757a8e76a3SSteven Rostedt 		return;
22767a8e76a3SSteven Rostedt 	}
22777a8e76a3SSteven Rostedt 
22787a8e76a3SSteven Rostedt 	event = rb_iter_head_event(iter);
22797a8e76a3SSteven Rostedt 
22807a8e76a3SSteven Rostedt 	length = rb_event_length(event);
22817a8e76a3SSteven Rostedt 
22827a8e76a3SSteven Rostedt 	/*
22837a8e76a3SSteven Rostedt 	 * This should not be called to advance the header if we are
22847a8e76a3SSteven Rostedt 	 * at the tail of the buffer.
22857a8e76a3SSteven Rostedt 	 */
22863e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer,
2287f536aafcSSteven Rostedt 		       (iter->head_page == cpu_buffer->commit_page) &&
22883e89c7bbSSteven Rostedt 		       (iter->head + length > rb_commit_index(cpu_buffer))))
22893e89c7bbSSteven Rostedt 		return;
22907a8e76a3SSteven Rostedt 
22917a8e76a3SSteven Rostedt 	rb_update_iter_read_stamp(iter, event);
22927a8e76a3SSteven Rostedt 
22937a8e76a3SSteven Rostedt 	iter->head += length;
22947a8e76a3SSteven Rostedt 
22957a8e76a3SSteven Rostedt 	/* check for end of page padding */
2296bf41a158SSteven Rostedt 	if ((iter->head >= rb_page_size(iter->head_page)) &&
2297bf41a158SSteven Rostedt 	    (iter->head_page != cpu_buffer->commit_page))
22987a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
22997a8e76a3SSteven Rostedt }
23007a8e76a3SSteven Rostedt 
2301f83c9d0fSSteven Rostedt static struct ring_buffer_event *
2302f83c9d0fSSteven Rostedt rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
23037a8e76a3SSteven Rostedt {
23047a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
23057a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
2306d769041fSSteven Rostedt 	struct buffer_page *reader;
2307818e3dd3SSteven Rostedt 	int nr_loops = 0;
23087a8e76a3SSteven Rostedt 
23097a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
23107a8e76a3SSteven Rostedt 
23117a8e76a3SSteven Rostedt  again:
2312818e3dd3SSteven Rostedt 	/*
2313818e3dd3SSteven Rostedt 	 * We repeat when a timestamp is encountered. It is possible
2314818e3dd3SSteven Rostedt 	 * to get multiple timestamps from an interrupt entering just
2315818e3dd3SSteven Rostedt 	 * as one timestamp is about to be written. The max times
2316818e3dd3SSteven Rostedt 	 * that this can happen is the number of nested interrupts we
2317818e3dd3SSteven Rostedt 	 * can have.  Nesting 10 deep of interrupts is clearly
2318818e3dd3SSteven Rostedt 	 * an anomaly.
2319818e3dd3SSteven Rostedt 	 */
23203e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
2321818e3dd3SSteven Rostedt 		return NULL;
2322818e3dd3SSteven Rostedt 
2323d769041fSSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
2324d769041fSSteven Rostedt 	if (!reader)
23257a8e76a3SSteven Rostedt 		return NULL;
23267a8e76a3SSteven Rostedt 
2327d769041fSSteven Rostedt 	event = rb_reader_event(cpu_buffer);
23287a8e76a3SSteven Rostedt 
2329334d4169SLai Jiangshan 	switch (event->type_len) {
23307a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
23312d622719STom Zanussi 		if (rb_null_event(event))
2332bf41a158SSteven Rostedt 			RB_WARN_ON(cpu_buffer, 1);
23332d622719STom Zanussi 		/*
23342d622719STom Zanussi 		 * Because the writer could be discarding every
23352d622719STom Zanussi 		 * event it creates (which would probably be bad)
23362d622719STom Zanussi 		 * if we were to go back to "again" then we may never
23372d622719STom Zanussi 		 * catch up, and will trigger the warn on, or lock
23382d622719STom Zanussi 		 * the box. Return the padding, and we will release
23392d622719STom Zanussi 		 * the current locks, and try again.
23402d622719STom Zanussi 		 */
2341d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
23422d622719STom Zanussi 		return event;
23437a8e76a3SSteven Rostedt 
23447a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
23457a8e76a3SSteven Rostedt 		/* Internal data, OK to advance */
2346d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
23477a8e76a3SSteven Rostedt 		goto again;
23487a8e76a3SSteven Rostedt 
23497a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
23507a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
2351d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
23527a8e76a3SSteven Rostedt 		goto again;
23537a8e76a3SSteven Rostedt 
23547a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
23557a8e76a3SSteven Rostedt 		if (ts) {
23567a8e76a3SSteven Rostedt 			*ts = cpu_buffer->read_stamp + event->time_delta;
235737886f6aSSteven Rostedt 			ring_buffer_normalize_time_stamp(buffer,
235837886f6aSSteven Rostedt 							 cpu_buffer->cpu, ts);
23597a8e76a3SSteven Rostedt 		}
23607a8e76a3SSteven Rostedt 		return event;
23617a8e76a3SSteven Rostedt 
23627a8e76a3SSteven Rostedt 	default:
23637a8e76a3SSteven Rostedt 		BUG();
23647a8e76a3SSteven Rostedt 	}
23657a8e76a3SSteven Rostedt 
23667a8e76a3SSteven Rostedt 	return NULL;
23677a8e76a3SSteven Rostedt }
2368c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_peek);
23697a8e76a3SSteven Rostedt 
2370f83c9d0fSSteven Rostedt static struct ring_buffer_event *
2371f83c9d0fSSteven Rostedt rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
23727a8e76a3SSteven Rostedt {
23737a8e76a3SSteven Rostedt 	struct ring_buffer *buffer;
23747a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
23757a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
2376818e3dd3SSteven Rostedt 	int nr_loops = 0;
23777a8e76a3SSteven Rostedt 
23787a8e76a3SSteven Rostedt 	if (ring_buffer_iter_empty(iter))
23797a8e76a3SSteven Rostedt 		return NULL;
23807a8e76a3SSteven Rostedt 
23817a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
23827a8e76a3SSteven Rostedt 	buffer = cpu_buffer->buffer;
23837a8e76a3SSteven Rostedt 
23847a8e76a3SSteven Rostedt  again:
2385818e3dd3SSteven Rostedt 	/*
2386818e3dd3SSteven Rostedt 	 * We repeat when a timestamp is encountered. It is possible
2387818e3dd3SSteven Rostedt 	 * to get multiple timestamps from an interrupt entering just
2388818e3dd3SSteven Rostedt 	 * as one timestamp is about to be written. The max times
2389818e3dd3SSteven Rostedt 	 * that this can happen is the number of nested interrupts we
2390818e3dd3SSteven Rostedt 	 * can have. Nesting 10 deep of interrupts is clearly
2391818e3dd3SSteven Rostedt 	 * an anomaly.
2392818e3dd3SSteven Rostedt 	 */
23933e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
2394818e3dd3SSteven Rostedt 		return NULL;
2395818e3dd3SSteven Rostedt 
23967a8e76a3SSteven Rostedt 	if (rb_per_cpu_empty(cpu_buffer))
23977a8e76a3SSteven Rostedt 		return NULL;
23987a8e76a3SSteven Rostedt 
23997a8e76a3SSteven Rostedt 	event = rb_iter_head_event(iter);
24007a8e76a3SSteven Rostedt 
2401334d4169SLai Jiangshan 	switch (event->type_len) {
24027a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
24032d622719STom Zanussi 		if (rb_null_event(event)) {
2404d769041fSSteven Rostedt 			rb_inc_iter(iter);
24057a8e76a3SSteven Rostedt 			goto again;
24062d622719STom Zanussi 		}
24072d622719STom Zanussi 		rb_advance_iter(iter);
24082d622719STom Zanussi 		return event;
24097a8e76a3SSteven Rostedt 
24107a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
24117a8e76a3SSteven Rostedt 		/* Internal data, OK to advance */
24127a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
24137a8e76a3SSteven Rostedt 		goto again;
24147a8e76a3SSteven Rostedt 
24157a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
24167a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
24177a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
24187a8e76a3SSteven Rostedt 		goto again;
24197a8e76a3SSteven Rostedt 
24207a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
24217a8e76a3SSteven Rostedt 		if (ts) {
24227a8e76a3SSteven Rostedt 			*ts = iter->read_stamp + event->time_delta;
242337886f6aSSteven Rostedt 			ring_buffer_normalize_time_stamp(buffer,
242437886f6aSSteven Rostedt 							 cpu_buffer->cpu, ts);
24257a8e76a3SSteven Rostedt 		}
24267a8e76a3SSteven Rostedt 		return event;
24277a8e76a3SSteven Rostedt 
24287a8e76a3SSteven Rostedt 	default:
24297a8e76a3SSteven Rostedt 		BUG();
24307a8e76a3SSteven Rostedt 	}
24317a8e76a3SSteven Rostedt 
24327a8e76a3SSteven Rostedt 	return NULL;
24337a8e76a3SSteven Rostedt }
2434c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
24357a8e76a3SSteven Rostedt 
24367a8e76a3SSteven Rostedt /**
2437f83c9d0fSSteven Rostedt  * ring_buffer_peek - peek at the next event to be read
2438f83c9d0fSSteven Rostedt  * @buffer: The ring buffer to read
2439f83c9d0fSSteven Rostedt  * @cpu: The cpu to peek at
2440f83c9d0fSSteven Rostedt  * @ts: The timestamp counter of this event.
2441f83c9d0fSSteven Rostedt  *
2442f83c9d0fSSteven Rostedt  * This will return the event that will be read next, but does
2443f83c9d0fSSteven Rostedt  * not consume the data.
2444f83c9d0fSSteven Rostedt  */
2445f83c9d0fSSteven Rostedt struct ring_buffer_event *
2446f83c9d0fSSteven Rostedt ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2447f83c9d0fSSteven Rostedt {
2448f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
24498aabee57SSteven Rostedt 	struct ring_buffer_event *event;
2450f83c9d0fSSteven Rostedt 	unsigned long flags;
2451f83c9d0fSSteven Rostedt 
2452554f786eSSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
24538aabee57SSteven Rostedt 		return NULL;
2454554f786eSSteven Rostedt 
24552d622719STom Zanussi  again:
2456f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2457f83c9d0fSSteven Rostedt 	event = rb_buffer_peek(buffer, cpu, ts);
2458f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2459f83c9d0fSSteven Rostedt 
2460334d4169SLai Jiangshan 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
24612d622719STom Zanussi 		cpu_relax();
24622d622719STom Zanussi 		goto again;
24632d622719STom Zanussi 	}
24642d622719STom Zanussi 
2465f83c9d0fSSteven Rostedt 	return event;
2466f83c9d0fSSteven Rostedt }
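
/*
 * Illustrative usage sketch (not part of the original source; the
 * process_data() helper is hypothetical): inspect the next event on a
 * cpu without consuming it.
 *
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts);
 *	if (event)
 *		process_data(ring_buffer_event_data(event), ts);
 */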
2467f83c9d0fSSteven Rostedt 
2468f83c9d0fSSteven Rostedt /**
2469f83c9d0fSSteven Rostedt  * ring_buffer_iter_peek - peek at the next event to be read
2470f83c9d0fSSteven Rostedt  * @iter: The ring buffer iterator
2471f83c9d0fSSteven Rostedt  * @ts: The timestamp counter of this event.
2472f83c9d0fSSteven Rostedt  *
2473f83c9d0fSSteven Rostedt  * This will return the event that will be read next, but does
2474f83c9d0fSSteven Rostedt  * not increment the iterator.
2475f83c9d0fSSteven Rostedt  */
2476f83c9d0fSSteven Rostedt struct ring_buffer_event *
2477f83c9d0fSSteven Rostedt ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2478f83c9d0fSSteven Rostedt {
2479f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2480f83c9d0fSSteven Rostedt 	struct ring_buffer_event *event;
2481f83c9d0fSSteven Rostedt 	unsigned long flags;
2482f83c9d0fSSteven Rostedt 
24832d622719STom Zanussi  again:
2484f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2485f83c9d0fSSteven Rostedt 	event = rb_iter_peek(iter, ts);
2486f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2487f83c9d0fSSteven Rostedt 
2488334d4169SLai Jiangshan 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
24892d622719STom Zanussi 		cpu_relax();
24902d622719STom Zanussi 		goto again;
24912d622719STom Zanussi 	}
24922d622719STom Zanussi 
2493f83c9d0fSSteven Rostedt 	return event;
2494f83c9d0fSSteven Rostedt }
2495f83c9d0fSSteven Rostedt 
2496f83c9d0fSSteven Rostedt /**
24977a8e76a3SSteven Rostedt  * ring_buffer_consume - return an event and consume it
24987a8e76a3SSteven Rostedt  * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the timestamp (may be NULL)
24997a8e76a3SSteven Rostedt  *
25007a8e76a3SSteven Rostedt  * Returns the next event in the ring buffer, and that event is consumed.
25017a8e76a3SSteven Rostedt  * Meaning that sequential reads will keep returning a different event,
25027a8e76a3SSteven Rostedt  * and eventually empty the ring buffer if the producer is slower.
25037a8e76a3SSteven Rostedt  */
25047a8e76a3SSteven Rostedt struct ring_buffer_event *
25057a8e76a3SSteven Rostedt ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
25067a8e76a3SSteven Rostedt {
2507554f786eSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
2508554f786eSSteven Rostedt 	struct ring_buffer_event *event = NULL;
2509f83c9d0fSSteven Rostedt 	unsigned long flags;
25107a8e76a3SSteven Rostedt 
25112d622719STom Zanussi  again:
2512554f786eSSteven Rostedt 	/* might be called in atomic */
2513554f786eSSteven Rostedt 	preempt_disable();
25147a8e76a3SSteven Rostedt 
2515554f786eSSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2516554f786eSSteven Rostedt 		goto out;
2517554f786eSSteven Rostedt 
2518554f786eSSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
2519f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
25207a8e76a3SSteven Rostedt 
2521f83c9d0fSSteven Rostedt 	event = rb_buffer_peek(buffer, cpu, ts);
2522f83c9d0fSSteven Rostedt 	if (!event)
2523554f786eSSteven Rostedt 		goto out_unlock;
2524f83c9d0fSSteven Rostedt 
2525d769041fSSteven Rostedt 	rb_advance_reader(cpu_buffer);
25267a8e76a3SSteven Rostedt 
2527554f786eSSteven Rostedt  out_unlock:
2528f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2529f83c9d0fSSteven Rostedt 
2530554f786eSSteven Rostedt  out:
2531554f786eSSteven Rostedt 	preempt_enable();
2532554f786eSSteven Rostedt 
2533334d4169SLai Jiangshan 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
25342d622719STom Zanussi 		cpu_relax();
25352d622719STom Zanussi 		goto again;
25362d622719STom Zanussi 	}
25372d622719STom Zanussi 
25387a8e76a3SSteven Rostedt 	return event;
25397a8e76a3SSteven Rostedt }
2540c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_consume);
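
/*
 * Illustrative consuming-read sketch (not part of the original source;
 * process_event() is hypothetical): drain every event currently in one
 * cpu buffer.
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
 *		process_event(ring_buffer_event_data(event), ts);
 */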
25417a8e76a3SSteven Rostedt 
25427a8e76a3SSteven Rostedt /**
25437a8e76a3SSteven Rostedt  * ring_buffer_read_start - start a non consuming read of the buffer
25447a8e76a3SSteven Rostedt  * @buffer: The ring buffer to read from
25457a8e76a3SSteven Rostedt  * @cpu: The cpu buffer to iterate over
25467a8e76a3SSteven Rostedt  *
25477a8e76a3SSteven Rostedt  * This starts up an iteration through the buffer. It also disables
25487a8e76a3SSteven Rostedt  * the recording to the buffer until the reading is finished.
25497a8e76a3SSteven Rostedt  * This prevents the reading from being corrupted. This is not
25507a8e76a3SSteven Rostedt  * a consuming read, so a producer is not expected.
25517a8e76a3SSteven Rostedt  *
25527a8e76a3SSteven Rostedt  * Must be paired with ring_buffer_read_finish.
25537a8e76a3SSteven Rostedt  */
25547a8e76a3SSteven Rostedt struct ring_buffer_iter *
25557a8e76a3SSteven Rostedt ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
25567a8e76a3SSteven Rostedt {
25577a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
25588aabee57SSteven Rostedt 	struct ring_buffer_iter *iter;
2559d769041fSSteven Rostedt 	unsigned long flags;
25607a8e76a3SSteven Rostedt 
25619e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
25628aabee57SSteven Rostedt 		return NULL;
25637a8e76a3SSteven Rostedt 
25647a8e76a3SSteven Rostedt 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
25657a8e76a3SSteven Rostedt 	if (!iter)
25668aabee57SSteven Rostedt 		return NULL;
25677a8e76a3SSteven Rostedt 
25687a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
25697a8e76a3SSteven Rostedt 
25707a8e76a3SSteven Rostedt 	iter->cpu_buffer = cpu_buffer;
25717a8e76a3SSteven Rostedt 
25727a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
25737a8e76a3SSteven Rostedt 	synchronize_sched();
25747a8e76a3SSteven Rostedt 
2575f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
25763e03fb7fSSteven Rostedt 	__raw_spin_lock(&cpu_buffer->lock);
2577642edba5SSteven Rostedt 	rb_iter_reset(iter);
25783e03fb7fSSteven Rostedt 	__raw_spin_unlock(&cpu_buffer->lock);
2579f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
25807a8e76a3SSteven Rostedt 
25817a8e76a3SSteven Rostedt 	return iter;
25827a8e76a3SSteven Rostedt }
2583c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_start);
25847a8e76a3SSteven Rostedt 
25857a8e76a3SSteven Rostedt /**
25867a8e76a3SSteven Rostedt  * ring_buffer_finish - finish reading the iterator of the buffer
25877a8e76a3SSteven Rostedt  * @iter: The iterator retrieved by ring_buffer_read_start
25887a8e76a3SSteven Rostedt  *
25897a8e76a3SSteven Rostedt  * This re-enables the recording to the buffer, and frees the
25907a8e76a3SSteven Rostedt  * iterator.
25917a8e76a3SSteven Rostedt  */
25927a8e76a3SSteven Rostedt void
25937a8e76a3SSteven Rostedt ring_buffer_read_finish(struct ring_buffer_iter *iter)
25947a8e76a3SSteven Rostedt {
25957a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
25967a8e76a3SSteven Rostedt 
25977a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
25987a8e76a3SSteven Rostedt 	kfree(iter);
25997a8e76a3SSteven Rostedt }
2600c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
26017a8e76a3SSteven Rostedt 
26027a8e76a3SSteven Rostedt /**
26037a8e76a3SSteven Rostedt  * ring_buffer_read - read the next item in the ring buffer by the iterator
26047a8e76a3SSteven Rostedt  * @iter: The ring buffer iterator
26057a8e76a3SSteven Rostedt  * @ts: The time stamp of the event read.
26067a8e76a3SSteven Rostedt  *
26077a8e76a3SSteven Rostedt  * This reads the next event in the ring buffer and increments the iterator.
26087a8e76a3SSteven Rostedt  */
26097a8e76a3SSteven Rostedt struct ring_buffer_event *
26107a8e76a3SSteven Rostedt ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
26117a8e76a3SSteven Rostedt {
26127a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
2613f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2614f83c9d0fSSteven Rostedt 	unsigned long flags;
26157a8e76a3SSteven Rostedt 
26162d622719STom Zanussi  again:
2617f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2618f83c9d0fSSteven Rostedt 	event = rb_iter_peek(iter, ts);
26197a8e76a3SSteven Rostedt 	if (!event)
2620f83c9d0fSSteven Rostedt 		goto out;
26217a8e76a3SSteven Rostedt 
26227a8e76a3SSteven Rostedt 	rb_advance_iter(iter);
2623f83c9d0fSSteven Rostedt  out:
2624f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
26257a8e76a3SSteven Rostedt 
2626334d4169SLai Jiangshan 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
26272d622719STom Zanussi 		cpu_relax();
26282d622719STom Zanussi 		goto again;
26292d622719STom Zanussi 	}
26302d622719STom Zanussi 
26317a8e76a3SSteven Rostedt 	return event;
26327a8e76a3SSteven Rostedt }
2633c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read);
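
/*
 * Illustrative non-consuming iteration sketch (not part of the original
 * source; handle_event() is hypothetical).  Recording to the cpu buffer
 * is disabled between ring_buffer_read_start() and
 * ring_buffer_read_finish():
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		handle_event(ring_buffer_event_data(event), ts);
 *	ring_buffer_read_finish(iter);
 */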
26347a8e76a3SSteven Rostedt 
26357a8e76a3SSteven Rostedt /**
26367a8e76a3SSteven Rostedt  * ring_buffer_size - return the size of the ring buffer (in bytes)
26377a8e76a3SSteven Rostedt  * @buffer: The ring buffer.
26387a8e76a3SSteven Rostedt  */
26397a8e76a3SSteven Rostedt unsigned long ring_buffer_size(struct ring_buffer *buffer)
26407a8e76a3SSteven Rostedt {
26417a8e76a3SSteven Rostedt 	return BUF_PAGE_SIZE * buffer->pages;
26427a8e76a3SSteven Rostedt }
2643c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_size);
26447a8e76a3SSteven Rostedt 
26457a8e76a3SSteven Rostedt static void
26467a8e76a3SSteven Rostedt rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
26477a8e76a3SSteven Rostedt {
26487a8e76a3SSteven Rostedt 	cpu_buffer->head_page
26497a8e76a3SSteven Rostedt 		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
2650bf41a158SSteven Rostedt 	local_set(&cpu_buffer->head_page->write, 0);
2651778c55d4SSteven Rostedt 	local_set(&cpu_buffer->head_page->entries, 0);
2652abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->head_page->page->commit, 0);
26537a8e76a3SSteven Rostedt 
26546f807acdSSteven Rostedt 	cpu_buffer->head_page->read = 0;
2655bf41a158SSteven Rostedt 
2656bf41a158SSteven Rostedt 	cpu_buffer->tail_page = cpu_buffer->head_page;
2657bf41a158SSteven Rostedt 	cpu_buffer->commit_page = cpu_buffer->head_page;
2658bf41a158SSteven Rostedt 
2659bf41a158SSteven Rostedt 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2660bf41a158SSteven Rostedt 	local_set(&cpu_buffer->reader_page->write, 0);
2661778c55d4SSteven Rostedt 	local_set(&cpu_buffer->reader_page->entries, 0);
2662abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->reader_page->page->commit, 0);
26636f807acdSSteven Rostedt 	cpu_buffer->reader_page->read = 0;
2664d769041fSSteven Rostedt 
2665f0d2c681SSteven Rostedt 	cpu_buffer->nmi_dropped = 0;
2666f0d2c681SSteven Rostedt 	cpu_buffer->commit_overrun = 0;
26677a8e76a3SSteven Rostedt 	cpu_buffer->overrun = 0;
2668e4906effSSteven Rostedt 	cpu_buffer->read = 0;
2669e4906effSSteven Rostedt 	local_set(&cpu_buffer->entries, 0);
267069507c06SSteven Rostedt 
267169507c06SSteven Rostedt 	cpu_buffer->write_stamp = 0;
267269507c06SSteven Rostedt 	cpu_buffer->read_stamp = 0;
26737a8e76a3SSteven Rostedt }
26747a8e76a3SSteven Rostedt 
26757a8e76a3SSteven Rostedt /**
26767a8e76a3SSteven Rostedt  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
26777a8e76a3SSteven Rostedt  * @buffer: The ring buffer to reset a per cpu buffer of
26787a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to be reset
26797a8e76a3SSteven Rostedt  */
26807a8e76a3SSteven Rostedt void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
26817a8e76a3SSteven Rostedt {
26827a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
26837a8e76a3SSteven Rostedt 	unsigned long flags;
26847a8e76a3SSteven Rostedt 
26859e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
26868aabee57SSteven Rostedt 		return;
26877a8e76a3SSteven Rostedt 
268841ede23eSSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
268941ede23eSSteven Rostedt 
2690f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2691f83c9d0fSSteven Rostedt 
26923e03fb7fSSteven Rostedt 	__raw_spin_lock(&cpu_buffer->lock);
26937a8e76a3SSteven Rostedt 
26947a8e76a3SSteven Rostedt 	rb_reset_cpu(cpu_buffer);
26957a8e76a3SSteven Rostedt 
26963e03fb7fSSteven Rostedt 	__raw_spin_unlock(&cpu_buffer->lock);
2697f83c9d0fSSteven Rostedt 
2698f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
269941ede23eSSteven Rostedt 
270041ede23eSSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
27017a8e76a3SSteven Rostedt }
2702c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
27037a8e76a3SSteven Rostedt 
27047a8e76a3SSteven Rostedt /**
27057a8e76a3SSteven Rostedt  * ring_buffer_reset - reset a ring buffer
27067a8e76a3SSteven Rostedt  * @buffer: The ring buffer to reset all cpu buffers
27077a8e76a3SSteven Rostedt  */
27087a8e76a3SSteven Rostedt void ring_buffer_reset(struct ring_buffer *buffer)
27097a8e76a3SSteven Rostedt {
27107a8e76a3SSteven Rostedt 	int cpu;
27117a8e76a3SSteven Rostedt 
27127a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu)
2713d769041fSSteven Rostedt 		ring_buffer_reset_cpu(buffer, cpu);
27147a8e76a3SSteven Rostedt }
2715c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset);
27167a8e76a3SSteven Rostedt 
27177a8e76a3SSteven Rostedt /**
27187a8e76a3SSteven Rostedt  * ring_buffer_empty - is the ring buffer empty?
27197a8e76a3SSteven Rostedt  * @buffer: The ring buffer to test
27207a8e76a3SSteven Rostedt  */
27217a8e76a3SSteven Rostedt int ring_buffer_empty(struct ring_buffer *buffer)
27227a8e76a3SSteven Rostedt {
27237a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
27247a8e76a3SSteven Rostedt 	int cpu;
27257a8e76a3SSteven Rostedt 
27267a8e76a3SSteven Rostedt 	/* yes this is racy, but if you don't like the race, lock the buffer */
27277a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
27287a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
27297a8e76a3SSteven Rostedt 		if (!rb_per_cpu_empty(cpu_buffer))
27307a8e76a3SSteven Rostedt 			return 0;
27317a8e76a3SSteven Rostedt 	}
2732554f786eSSteven Rostedt 
27337a8e76a3SSteven Rostedt 	return 1;
27347a8e76a3SSteven Rostedt }
2735c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty);
27367a8e76a3SSteven Rostedt 
27377a8e76a3SSteven Rostedt /**
27387a8e76a3SSteven Rostedt  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
27397a8e76a3SSteven Rostedt  * @buffer: The ring buffer
27407a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to test
27417a8e76a3SSteven Rostedt  */
27427a8e76a3SSteven Rostedt int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
27437a8e76a3SSteven Rostedt {
27447a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
27458aabee57SSteven Rostedt 	int ret;
27467a8e76a3SSteven Rostedt 
27479e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
27488aabee57SSteven Rostedt 		return 1;
27497a8e76a3SSteven Rostedt 
27507a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
2751554f786eSSteven Rostedt 	ret = rb_per_cpu_empty(cpu_buffer);
2752554f786eSSteven Rostedt 
2754554f786eSSteven Rostedt 	return ret;
27557a8e76a3SSteven Rostedt }
2756c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
27577a8e76a3SSteven Rostedt 
27587a8e76a3SSteven Rostedt /**
27597a8e76a3SSteven Rostedt  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
27607a8e76a3SSteven Rostedt  * @buffer_a: One buffer to swap with
27617a8e76a3SSteven Rostedt  * @buffer_b: The other buffer to swap with
 * @cpu: the CPU buffer to swap
27627a8e76a3SSteven Rostedt  *
27637a8e76a3SSteven Rostedt  * This function is useful for tracers that want to take a "snapshot"
27647a8e76a3SSteven Rostedt  * of a CPU buffer and have another backup buffer lying around.
27657a8e76a3SSteven Rostedt  * It is expected that the tracer handles the cpu buffer not being
27667a8e76a3SSteven Rostedt  * used at the moment.
27677a8e76a3SSteven Rostedt  */
27687a8e76a3SSteven Rostedt int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
27697a8e76a3SSteven Rostedt 			 struct ring_buffer *buffer_b, int cpu)
27707a8e76a3SSteven Rostedt {
27717a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer_a;
27727a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer_b;
2773554f786eSSteven Rostedt 	int ret = -EINVAL;
2774554f786eSSteven Rostedt 
27759e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
27769e01c1b7SRusty Russell 	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
2777554f786eSSteven Rostedt 		goto out;
27787a8e76a3SSteven Rostedt 
27797a8e76a3SSteven Rostedt 	/* At least make sure the two buffers are somewhat the same */
27806d102bc6SLai Jiangshan 	if (buffer_a->pages != buffer_b->pages)
2781554f786eSSteven Rostedt 		goto out;
2782554f786eSSteven Rostedt 
2783554f786eSSteven Rostedt 	ret = -EAGAIN;
27847a8e76a3SSteven Rostedt 
278597b17efeSSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
2786554f786eSSteven Rostedt 		goto out;
278797b17efeSSteven Rostedt 
278897b17efeSSteven Rostedt 	if (atomic_read(&buffer_a->record_disabled))
2789554f786eSSteven Rostedt 		goto out;
279097b17efeSSteven Rostedt 
279197b17efeSSteven Rostedt 	if (atomic_read(&buffer_b->record_disabled))
2792554f786eSSteven Rostedt 		goto out;
279397b17efeSSteven Rostedt 
27947a8e76a3SSteven Rostedt 	cpu_buffer_a = buffer_a->buffers[cpu];
27957a8e76a3SSteven Rostedt 	cpu_buffer_b = buffer_b->buffers[cpu];
27967a8e76a3SSteven Rostedt 
279797b17efeSSteven Rostedt 	if (atomic_read(&cpu_buffer_a->record_disabled))
2798554f786eSSteven Rostedt 		goto out;
279997b17efeSSteven Rostedt 
280097b17efeSSteven Rostedt 	if (atomic_read(&cpu_buffer_b->record_disabled))
2801554f786eSSteven Rostedt 		goto out;
280297b17efeSSteven Rostedt 
28037a8e76a3SSteven Rostedt 	/*
28047a8e76a3SSteven Rostedt 	 * We can't do a synchronize_sched here because this
28057a8e76a3SSteven Rostedt 	 * function can be called in atomic context.
28067a8e76a3SSteven Rostedt 	 * Normally this will be called from the same CPU as cpu.
28077a8e76a3SSteven Rostedt 	 * If not it's up to the caller to protect this.
28087a8e76a3SSteven Rostedt 	 */
28097a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer_a->record_disabled);
28107a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer_b->record_disabled);
28117a8e76a3SSteven Rostedt 
28127a8e76a3SSteven Rostedt 	buffer_a->buffers[cpu] = cpu_buffer_b;
28137a8e76a3SSteven Rostedt 	buffer_b->buffers[cpu] = cpu_buffer_a;
28147a8e76a3SSteven Rostedt 
28157a8e76a3SSteven Rostedt 	cpu_buffer_b->buffer = buffer_a;
28167a8e76a3SSteven Rostedt 	cpu_buffer_a->buffer = buffer_b;
28177a8e76a3SSteven Rostedt 
28187a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer_a->record_disabled);
28197a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer_b->record_disabled);
28207a8e76a3SSteven Rostedt 
2821554f786eSSteven Rostedt 	ret = 0;
2822554f786eSSteven Rostedt out:
2823554f786eSSteven Rostedt 	return ret;
28247a8e76a3SSteven Rostedt }
2825c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
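
/*
 * Illustrative snapshot sketch (not part of the original source;
 * read_snapshot() is hypothetical): a tracer that keeps a spare buffer
 * around can swap the live cpu buffer out and then read the captured
 * data at leisure, since new writes go to the swapped-in buffer.
 *
 *	if (ring_buffer_swap_cpu(live_buffer, spare_buffer, cpu) == 0)
 *		read_snapshot(spare_buffer, cpu);
 */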
28267a8e76a3SSteven Rostedt 
28278789a9e7SSteven Rostedt /**
28288789a9e7SSteven Rostedt  * ring_buffer_alloc_read_page - allocate a page to read from buffer
28298789a9e7SSteven Rostedt  * @buffer: the buffer to allocate for.
28308789a9e7SSteven Rostedt  *
28318789a9e7SSteven Rostedt  * This function is used in conjunction with ring_buffer_read_page.
28328789a9e7SSteven Rostedt  * When reading a full page from the ring buffer, these functions
28338789a9e7SSteven Rostedt  * can be used to speed up the process. The calling function should
28348789a9e7SSteven Rostedt  * allocate a few pages first with this function. Then when it
28358789a9e7SSteven Rostedt  * needs to get pages from the ring buffer, it passes the result
28368789a9e7SSteven Rostedt  * of this function into ring_buffer_read_page, which will swap
28378789a9e7SSteven Rostedt  * the page that was allocated with the read page of the buffer.
28388789a9e7SSteven Rostedt  *
28398789a9e7SSteven Rostedt  * Returns:
28408789a9e7SSteven Rostedt  *  The page allocated, or NULL on error.
28418789a9e7SSteven Rostedt  */
28428789a9e7SSteven Rostedt void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
28438789a9e7SSteven Rostedt {
2844044fa782SSteven Rostedt 	struct buffer_data_page *bpage;
2845ef7a4a16SSteven Rostedt 	unsigned long addr;
28468789a9e7SSteven Rostedt 
28478789a9e7SSteven Rostedt 	addr = __get_free_page(GFP_KERNEL);
28488789a9e7SSteven Rostedt 	if (!addr)
28498789a9e7SSteven Rostedt 		return NULL;
28508789a9e7SSteven Rostedt 
2851044fa782SSteven Rostedt 	bpage = (void *)addr;
28528789a9e7SSteven Rostedt 
2853ef7a4a16SSteven Rostedt 	rb_init_page(bpage);
2854ef7a4a16SSteven Rostedt 
2855044fa782SSteven Rostedt 	return bpage;
28568789a9e7SSteven Rostedt }
2857d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
28588789a9e7SSteven Rostedt 
28598789a9e7SSteven Rostedt /**
28608789a9e7SSteven Rostedt  * ring_buffer_free_read_page - free an allocated read page
28618789a9e7SSteven Rostedt  * @buffer: the buffer the page was allocated for
28628789a9e7SSteven Rostedt  * @data: the page to free
28638789a9e7SSteven Rostedt  *
28648789a9e7SSteven Rostedt  * Free a page allocated from ring_buffer_alloc_read_page.
28658789a9e7SSteven Rostedt  */
28668789a9e7SSteven Rostedt void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
28678789a9e7SSteven Rostedt {
28688789a9e7SSteven Rostedt 	free_page((unsigned long)data);
28698789a9e7SSteven Rostedt }
2870d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
28718789a9e7SSteven Rostedt 
28728789a9e7SSteven Rostedt /**
28738789a9e7SSteven Rostedt  * ring_buffer_read_page - extract a page from the ring buffer
28748789a9e7SSteven Rostedt  * @buffer: buffer to extract from
28758789a9e7SSteven Rostedt  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2876ef7a4a16SSteven Rostedt  * @len: amount to extract
28778789a9e7SSteven Rostedt  * @cpu: the cpu of the buffer to extract
28788789a9e7SSteven Rostedt  * @full: should the extraction only happen when the page is full.
28798789a9e7SSteven Rostedt  *
28808789a9e7SSteven Rostedt  * This function will pull out a page from the ring buffer and consume it.
28818789a9e7SSteven Rostedt  * @data_page must be the address of the variable that was returned
28828789a9e7SSteven Rostedt  * from ring_buffer_alloc_read_page. This is because the page might be used
28838789a9e7SSteven Rostedt  * to swap with a page in the ring buffer.
28848789a9e7SSteven Rostedt  *
28858789a9e7SSteven Rostedt  * for example:
2886b85fa01eSLai Jiangshan  *	rpage = ring_buffer_alloc_read_page(buffer);
28878789a9e7SSteven Rostedt  *	if (!rpage)
28888789a9e7SSteven Rostedt  *		return error;
2889ef7a4a16SSteven Rostedt  *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
2890667d2412SLai Jiangshan  *	if (ret >= 0)
2891667d2412SLai Jiangshan  *		process_page(rpage, ret);
28928789a9e7SSteven Rostedt  *
28938789a9e7SSteven Rostedt  * When @full is set, the function will not return the data unless
28948789a9e7SSteven Rostedt  * the writer is off the reader page.
28958789a9e7SSteven Rostedt  *
28968789a9e7SSteven Rostedt  * Note: it is up to the calling functions to handle sleeps and wakeups.
28978789a9e7SSteven Rostedt  *  The ring buffer can be used anywhere in the kernel and can not
28988789a9e7SSteven Rostedt  *  blindly call wake_up. The layer that uses the ring buffer must be
28998789a9e7SSteven Rostedt  *  responsible for that.
29008789a9e7SSteven Rostedt  *
29018789a9e7SSteven Rostedt  * Returns:
2902667d2412SLai Jiangshan  *  >=0 if data has been transferred, returns the offset of consumed data.
2903667d2412SLai Jiangshan  *  <0 if no data has been transferred.
29048789a9e7SSteven Rostedt  */
29058789a9e7SSteven Rostedt int ring_buffer_read_page(struct ring_buffer *buffer,
2906ef7a4a16SSteven Rostedt 			  void **data_page, size_t len, int cpu, int full)
29078789a9e7SSteven Rostedt {
29088789a9e7SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
29098789a9e7SSteven Rostedt 	struct ring_buffer_event *event;
2910044fa782SSteven Rostedt 	struct buffer_data_page *bpage;
2911ef7a4a16SSteven Rostedt 	struct buffer_page *reader;
29128789a9e7SSteven Rostedt 	unsigned long flags;
2913ef7a4a16SSteven Rostedt 	unsigned int commit;
2914667d2412SLai Jiangshan 	unsigned int read;
29154f3640f8SSteven Rostedt 	u64 save_timestamp;
2916667d2412SLai Jiangshan 	int ret = -1;
29178789a9e7SSteven Rostedt 
2918554f786eSSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2919554f786eSSteven Rostedt 		goto out;
2920554f786eSSteven Rostedt 
2921474d32b6SSteven Rostedt 	/*
2922474d32b6SSteven Rostedt 	 * If len is not big enough to hold the page header, then
2923474d32b6SSteven Rostedt 	 * we can not copy anything.
2924474d32b6SSteven Rostedt 	 */
2925474d32b6SSteven Rostedt 	if (len <= BUF_PAGE_HDR_SIZE)
2926554f786eSSteven Rostedt 		goto out;
2927474d32b6SSteven Rostedt 
2928474d32b6SSteven Rostedt 	len -= BUF_PAGE_HDR_SIZE;
2929474d32b6SSteven Rostedt 
29308789a9e7SSteven Rostedt 	if (!data_page)
2931554f786eSSteven Rostedt 		goto out;
29328789a9e7SSteven Rostedt 
2933044fa782SSteven Rostedt 	bpage = *data_page;
2934044fa782SSteven Rostedt 	if (!bpage)
2935554f786eSSteven Rostedt 		goto out;
29368789a9e7SSteven Rostedt 
29378789a9e7SSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
29388789a9e7SSteven Rostedt 
2939ef7a4a16SSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
2940ef7a4a16SSteven Rostedt 	if (!reader)
2941554f786eSSteven Rostedt 		goto out_unlock;
29428789a9e7SSteven Rostedt 
2943ef7a4a16SSteven Rostedt 	event = rb_reader_event(cpu_buffer);
2944667d2412SLai Jiangshan 
2945ef7a4a16SSteven Rostedt 	read = reader->read;
2946ef7a4a16SSteven Rostedt 	commit = rb_page_commit(reader);
2947ef7a4a16SSteven Rostedt 
29488789a9e7SSteven Rostedt 	/*
2949474d32b6SSteven Rostedt 	 * If this page has been partially read or
2950474d32b6SSteven Rostedt 	 * if len is not big enough to read the rest of the page or
2951474d32b6SSteven Rostedt 	 * a writer is still on the page, then
2952474d32b6SSteven Rostedt 	 * we must copy the data from the page to the buffer.
2953474d32b6SSteven Rostedt 	 * Otherwise, we can simply swap the page with the one passed in.
29548789a9e7SSteven Rostedt 	 */
2955474d32b6SSteven Rostedt 	if (read || (len < (commit - read)) ||
2956ef7a4a16SSteven Rostedt 	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
2957667d2412SLai Jiangshan 		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
2958474d32b6SSteven Rostedt 		unsigned int rpos = read;
2959474d32b6SSteven Rostedt 		unsigned int pos = 0;
2960ef7a4a16SSteven Rostedt 		unsigned int size;
29618789a9e7SSteven Rostedt 
29628789a9e7SSteven Rostedt 		if (full)
2963554f786eSSteven Rostedt 			goto out_unlock;
29648789a9e7SSteven Rostedt 
2965ef7a4a16SSteven Rostedt 		if (len > (commit - read))
2966ef7a4a16SSteven Rostedt 			len = (commit - read);
2967ef7a4a16SSteven Rostedt 
2968ef7a4a16SSteven Rostedt 		size = rb_event_length(event);
2969ef7a4a16SSteven Rostedt 
2970ef7a4a16SSteven Rostedt 		if (len < size)
2971554f786eSSteven Rostedt 			goto out_unlock;
2972ef7a4a16SSteven Rostedt 
29734f3640f8SSteven Rostedt 		/* save the current timestamp, since the user will need it */
29744f3640f8SSteven Rostedt 		save_timestamp = cpu_buffer->read_stamp;
29754f3640f8SSteven Rostedt 
2976ef7a4a16SSteven Rostedt 		/* Need to copy one event at a time */
2977ef7a4a16SSteven Rostedt 		do {
2978474d32b6SSteven Rostedt 			memcpy(bpage->data + pos, rpage->data + rpos, size);
2979ef7a4a16SSteven Rostedt 
2980ef7a4a16SSteven Rostedt 			len -= size;
2981ef7a4a16SSteven Rostedt 
2982ef7a4a16SSteven Rostedt 			rb_advance_reader(cpu_buffer);
2983474d32b6SSteven Rostedt 			rpos = reader->read;
2984474d32b6SSteven Rostedt 			pos += size;
2985ef7a4a16SSteven Rostedt 
2986ef7a4a16SSteven Rostedt 			event = rb_reader_event(cpu_buffer);
2987ef7a4a16SSteven Rostedt 			size = rb_event_length(event);
2988ef7a4a16SSteven Rostedt 		} while (len > size);
2989667d2412SLai Jiangshan 
2990667d2412SLai Jiangshan 		/* update bpage */
2991ef7a4a16SSteven Rostedt 		local_set(&bpage->commit, pos);
29924f3640f8SSteven Rostedt 		bpage->time_stamp = save_timestamp;
2993ef7a4a16SSteven Rostedt 
2994474d32b6SSteven Rostedt 		/* we copied everything to the beginning */
2995474d32b6SSteven Rostedt 		read = 0;
29968789a9e7SSteven Rostedt 	} else {
2997afbab76aSSteven Rostedt 		/* update the entry counter */
2998afbab76aSSteven Rostedt 		cpu_buffer->read += local_read(&reader->entries);
2999afbab76aSSteven Rostedt 
30008789a9e7SSteven Rostedt 		/* swap the pages */
3001044fa782SSteven Rostedt 		rb_init_page(bpage);
3002ef7a4a16SSteven Rostedt 		bpage = reader->page;
3003ef7a4a16SSteven Rostedt 		reader->page = *data_page;
3004ef7a4a16SSteven Rostedt 		local_set(&reader->write, 0);
3005778c55d4SSteven Rostedt 		local_set(&reader->entries, 0);
3006ef7a4a16SSteven Rostedt 		reader->read = 0;
3007044fa782SSteven Rostedt 		*data_page = bpage;
3008ef7a4a16SSteven Rostedt 	}
3009ef7a4a16SSteven Rostedt 	ret = read;
3010ef7a4a16SSteven Rostedt 
3011554f786eSSteven Rostedt  out_unlock:
30128789a9e7SSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
30138789a9e7SSteven Rostedt 
3014554f786eSSteven Rostedt  out:
30158789a9e7SSteven Rostedt 	return ret;
30168789a9e7SSteven Rostedt }
3017d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_read_page);
30188789a9e7SSteven Rostedt 
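/*
 * The rb_simple_read()/rb_simple_write() handlers below back the
 * "tracing_on" debugfs file created in rb_init_debugfs().  Illustrative
 * use from userspace, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	# cat /sys/kernel/debug/tracing/tracing_on
 *	1
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_on
 */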
3019a3583244SSteven Rostedt static ssize_t
3020a3583244SSteven Rostedt rb_simple_read(struct file *filp, char __user *ubuf,
3021a3583244SSteven Rostedt 	       size_t cnt, loff_t *ppos)
3022a3583244SSteven Rostedt {
30235e39841cSHannes Eder 	unsigned long *p = filp->private_data;
3024a3583244SSteven Rostedt 	char buf[64];
3025a3583244SSteven Rostedt 	int r;
3026a3583244SSteven Rostedt 
3027033601a3SSteven Rostedt 	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3028033601a3SSteven Rostedt 		r = sprintf(buf, "permanently disabled\n");
3029033601a3SSteven Rostedt 	else
3030033601a3SSteven Rostedt 		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
3031a3583244SSteven Rostedt 
3032a3583244SSteven Rostedt 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3033a3583244SSteven Rostedt }
3034a3583244SSteven Rostedt 
3035a3583244SSteven Rostedt static ssize_t
3036a3583244SSteven Rostedt rb_simple_write(struct file *filp, const char __user *ubuf,
3037a3583244SSteven Rostedt 		size_t cnt, loff_t *ppos)
3038a3583244SSteven Rostedt {
30395e39841cSHannes Eder 	unsigned long *p = filp->private_data;
3040a3583244SSteven Rostedt 	char buf[64];
30415e39841cSHannes Eder 	unsigned long val;
3042a3583244SSteven Rostedt 	int ret;
3043a3583244SSteven Rostedt 
3044a3583244SSteven Rostedt 	if (cnt >= sizeof(buf))
3045a3583244SSteven Rostedt 		return -EINVAL;
3046a3583244SSteven Rostedt 
3047a3583244SSteven Rostedt 	if (copy_from_user(&buf, ubuf, cnt))
3048a3583244SSteven Rostedt 		return -EFAULT;
3049a3583244SSteven Rostedt 
3050a3583244SSteven Rostedt 	buf[cnt] = 0;
3051a3583244SSteven Rostedt 
3052a3583244SSteven Rostedt 	ret = strict_strtoul(buf, 10, &val);
3053a3583244SSteven Rostedt 	if (ret < 0)
3054a3583244SSteven Rostedt 		return ret;
3055a3583244SSteven Rostedt 
3056033601a3SSteven Rostedt 	if (val)
3057033601a3SSteven Rostedt 		set_bit(RB_BUFFERS_ON_BIT, p);
3058033601a3SSteven Rostedt 	else
3059033601a3SSteven Rostedt 		clear_bit(RB_BUFFERS_ON_BIT, p);
3060a3583244SSteven Rostedt 
3061a3583244SSteven Rostedt 	(*ppos)++;
3062a3583244SSteven Rostedt 
3063a3583244SSteven Rostedt 	return cnt;
3064a3583244SSteven Rostedt }
3065a3583244SSteven Rostedt 
30665e2336a0SSteven Rostedt static const struct file_operations rb_simple_fops = {
3067a3583244SSteven Rostedt 	.open		= tracing_open_generic,
3068a3583244SSteven Rostedt 	.read		= rb_simple_read,
3069a3583244SSteven Rostedt 	.write		= rb_simple_write,
3070a3583244SSteven Rostedt };
3071a3583244SSteven Rostedt 
3072a3583244SSteven Rostedt 
3073a3583244SSteven Rostedt static __init int rb_init_debugfs(void)
3074a3583244SSteven Rostedt {
3075a3583244SSteven Rostedt 	struct dentry *d_tracer;
3076a3583244SSteven Rostedt 
3077a3583244SSteven Rostedt 	d_tracer = tracing_init_dentry();
3078a3583244SSteven Rostedt 
30795452af66SFrederic Weisbecker 	trace_create_file("tracing_on", 0644, d_tracer,
3080033601a3SSteven Rostedt 			    &ring_buffer_flags, &rb_simple_fops);
3081a3583244SSteven Rostedt 
3082a3583244SSteven Rostedt 	return 0;
3083a3583244SSteven Rostedt }
3084a3583244SSteven Rostedt 
3085a3583244SSteven Rostedt fs_initcall(rb_init_debugfs);
3086554f786eSSteven Rostedt 
308759222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
308809c9e84dSFrederic Weisbecker static int rb_cpu_notify(struct notifier_block *self,
3089554f786eSSteven Rostedt 			 unsigned long action, void *hcpu)
3090554f786eSSteven Rostedt {
3091554f786eSSteven Rostedt 	struct ring_buffer *buffer =
3092554f786eSSteven Rostedt 		container_of(self, struct ring_buffer, cpu_notify);
3093554f786eSSteven Rostedt 	long cpu = (long)hcpu;
3094554f786eSSteven Rostedt 
3095554f786eSSteven Rostedt 	switch (action) {
3096554f786eSSteven Rostedt 	case CPU_UP_PREPARE:
3097554f786eSSteven Rostedt 	case CPU_UP_PREPARE_FROZEN:
3098554f786eSSteven Rostedt 		if (cpu_isset(cpu, *buffer->cpumask))
3099554f786eSSteven Rostedt 			return NOTIFY_OK;
3100554f786eSSteven Rostedt 
3101554f786eSSteven Rostedt 		buffer->buffers[cpu] =
3102554f786eSSteven Rostedt 			rb_allocate_cpu_buffer(buffer, cpu);
3103554f786eSSteven Rostedt 		if (!buffer->buffers[cpu]) {
3104554f786eSSteven Rostedt 			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
3105554f786eSSteven Rostedt 			     cpu);
3106554f786eSSteven Rostedt 			return NOTIFY_OK;
3107554f786eSSteven Rostedt 		}
3108554f786eSSteven Rostedt 		smp_wmb();
3109554f786eSSteven Rostedt 		cpu_set(cpu, *buffer->cpumask);
3110554f786eSSteven Rostedt 		break;
3111554f786eSSteven Rostedt 	case CPU_DOWN_PREPARE:
3112554f786eSSteven Rostedt 	case CPU_DOWN_PREPARE_FROZEN:
3113554f786eSSteven Rostedt 		/*
3114554f786eSSteven Rostedt 		 * Do nothing.
3115554f786eSSteven Rostedt 		 *  If we were to free the buffer, then the user would
3116554f786eSSteven Rostedt 		 *  lose any trace that was in the buffer.
3117554f786eSSteven Rostedt 		 */
3118554f786eSSteven Rostedt 		break;
3119554f786eSSteven Rostedt 	default:
3120554f786eSSteven Rostedt 		break;
3121554f786eSSteven Rostedt 	}
3122554f786eSSteven Rostedt 	return NOTIFY_OK;
3123554f786eSSteven Rostedt }
3124554f786eSSteven Rostedt #endif
3125