xref: /linux-6.15/kernel/trace/ring_buffer.c (revision aa18efb2)
17a8e76a3SSteven Rostedt /*
27a8e76a3SSteven Rostedt  * Generic ring buffer
37a8e76a3SSteven Rostedt  *
47a8e76a3SSteven Rostedt  * Copyright (C) 2008 Steven Rostedt <[email protected]>
57a8e76a3SSteven Rostedt  */
67a8e76a3SSteven Rostedt #include <linux/ring_buffer.h>
714131f2fSIngo Molnar #include <linux/trace_clock.h>
878d904b4SSteven Rostedt #include <linux/ftrace_irq.h>
97a8e76a3SSteven Rostedt #include <linux/spinlock.h>
107a8e76a3SSteven Rostedt #include <linux/debugfs.h>
117a8e76a3SSteven Rostedt #include <linux/uaccess.h>
12a81bd80aSSteven Rostedt #include <linux/hardirq.h>
137a8e76a3SSteven Rostedt #include <linux/module.h>
147a8e76a3SSteven Rostedt #include <linux/percpu.h>
157a8e76a3SSteven Rostedt #include <linux/mutex.h>
167a8e76a3SSteven Rostedt #include <linux/init.h>
177a8e76a3SSteven Rostedt #include <linux/hash.h>
187a8e76a3SSteven Rostedt #include <linux/list.h>
19554f786eSSteven Rostedt #include <linux/cpu.h>
207a8e76a3SSteven Rostedt #include <linux/fs.h>
217a8e76a3SSteven Rostedt 
22182e9f5fSSteven Rostedt #include "trace.h"
23182e9f5fSSteven Rostedt 
24033601a3SSteven Rostedt /*
25d1b182a8SSteven Rostedt  * The ring buffer header is special. We must keep it up to date manually.
26d1b182a8SSteven Rostedt  */
27d1b182a8SSteven Rostedt int ring_buffer_print_entry_header(struct trace_seq *s)
28d1b182a8SSteven Rostedt {
29d1b182a8SSteven Rostedt 	int ret;
30d1b182a8SSteven Rostedt 
31d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\ttype        :    2 bits\n");
32d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tlen         :    3 bits\n");
33d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
34d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
35d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\n");
36d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
37d1b182a8SSteven Rostedt 			       RINGBUF_TYPE_PADDING);
38d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
39d1b182a8SSteven Rostedt 			       RINGBUF_TYPE_TIME_EXTEND);
40d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tdata        : type == %d\n",
41d1b182a8SSteven Rostedt 			       RINGBUF_TYPE_DATA);
42d1b182a8SSteven Rostedt 
43d1b182a8SSteven Rostedt 	return ret;
44d1b182a8SSteven Rostedt }
45d1b182a8SSteven Rostedt 
46d1b182a8SSteven Rostedt /*
475cc98548SSteven Rostedt  * The ring buffer is made up of a list of pages. A separate list of pages is
485cc98548SSteven Rostedt  * allocated for each CPU. A writer may only write to a buffer that is
495cc98548SSteven Rostedt  * associated with the CPU it is currently executing on.  A reader may read
505cc98548SSteven Rostedt  * from any per cpu buffer.
515cc98548SSteven Rostedt  *
525cc98548SSteven Rostedt  * The reader is special. For each per cpu buffer, the reader has its own
535cc98548SSteven Rostedt  * reader page. When a reader has read the entire reader page, this reader
545cc98548SSteven Rostedt  * page is swapped with another page in the ring buffer.
555cc98548SSteven Rostedt  *
565cc98548SSteven Rostedt  * Now, as long as the writer is off the reader page, the reader can do
575cc98548SSteven Rostedt  * whatever it wants with that page. The writer will never write to that page
585cc98548SSteven Rostedt  * again (as long as it is out of the ring buffer).
595cc98548SSteven Rostedt  *
605cc98548SSteven Rostedt  * Here's some silly ASCII art.
615cc98548SSteven Rostedt  *
625cc98548SSteven Rostedt  *   +------+
635cc98548SSteven Rostedt  *   |reader|          RING BUFFER
645cc98548SSteven Rostedt  *   |page  |
655cc98548SSteven Rostedt  *   +------+        +---+   +---+   +---+
665cc98548SSteven Rostedt  *                   |   |-->|   |-->|   |
675cc98548SSteven Rostedt  *                   +---+   +---+   +---+
685cc98548SSteven Rostedt  *                     ^               |
695cc98548SSteven Rostedt  *                     |               |
705cc98548SSteven Rostedt  *                     +---------------+
715cc98548SSteven Rostedt  *
725cc98548SSteven Rostedt  *
735cc98548SSteven Rostedt  *   +------+
745cc98548SSteven Rostedt  *   |reader|          RING BUFFER
755cc98548SSteven Rostedt  *   |page  |------------------v
765cc98548SSteven Rostedt  *   +------+        +---+   +---+   +---+
775cc98548SSteven Rostedt  *                   |   |-->|   |-->|   |
785cc98548SSteven Rostedt  *                   +---+   +---+   +---+
795cc98548SSteven Rostedt  *                     ^               |
805cc98548SSteven Rostedt  *                     |               |
815cc98548SSteven Rostedt  *                     +---------------+
825cc98548SSteven Rostedt  *
835cc98548SSteven Rostedt  *
845cc98548SSteven Rostedt  *   +------+
855cc98548SSteven Rostedt  *   |reader|          RING BUFFER
865cc98548SSteven Rostedt  *   |page  |------------------v
875cc98548SSteven Rostedt  *   +------+        +---+   +---+   +---+
885cc98548SSteven Rostedt  *      ^            |   |-->|   |-->|   |
895cc98548SSteven Rostedt  *      |            +---+   +---+   +---+
905cc98548SSteven Rostedt  *      |                              |
915cc98548SSteven Rostedt  *      |                              |
925cc98548SSteven Rostedt  *      +------------------------------+
935cc98548SSteven Rostedt  *
945cc98548SSteven Rostedt  *
955cc98548SSteven Rostedt  *   +------+
965cc98548SSteven Rostedt  *   |buffer|          RING BUFFER
975cc98548SSteven Rostedt  *   |page  |------------------v
985cc98548SSteven Rostedt  *   +------+        +---+   +---+   +---+
995cc98548SSteven Rostedt  *      ^            |   |   |   |-->|   |
1005cc98548SSteven Rostedt  *      |   New      +---+   +---+   +---+
1015cc98548SSteven Rostedt  *      |  Reader------^               |
1025cc98548SSteven Rostedt  *      |   page                       |
1035cc98548SSteven Rostedt  *      +------------------------------+
1045cc98548SSteven Rostedt  *
1055cc98548SSteven Rostedt  *
1065cc98548SSteven Rostedt  * After we make this swap, the reader can hand this page off to the splice
1075cc98548SSteven Rostedt  * code and be done with it. It can even allocate a new page if it needs to
1085cc98548SSteven Rostedt  * and swap that into the ring buffer.
1095cc98548SSteven Rostedt  *
1105cc98548SSteven Rostedt  * We will be using cmpxchg soon to make all this lockless.
1115cc98548SSteven Rostedt  *
1125cc98548SSteven Rostedt  */
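
/*
 * Illustrative sketch only, not part of the ring buffer: a tiny userspace
 * model of the reader-page swap described above.  The real code splices
 * list_head links under cpu_buffer->lock; the array of page pointers below
 * is an assumption made purely to keep the example self-contained.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

#define NR_DEMO_PAGES 3

struct demo_page { int id; };

static struct demo_page ring_pages[NR_DEMO_PAGES] = { {0}, {1}, {2} };
static struct demo_page spare_page = { 99 };

static struct demo_page *pages[NR_DEMO_PAGES] =
	{ &ring_pages[0], &ring_pages[1], &ring_pages[2] };
static struct demo_page *reader_page = &spare_page;
static int head;	/* index of the current head page */

/* Swap the private reader page with the current head page. */
static void demo_get_reader_page(void)
{
	struct demo_page *tmp = pages[head];

	pages[head] = reader_page;	/* old reader page rejoins the ring  */
	reader_page = tmp;		/* head page is now owned by reader  */
	head = (head + 1) % NR_DEMO_PAGES;
}

int main(void)
{
	demo_get_reader_page();
	printf("reader now owns page %d\n", reader_page->id);	/* prints 0 */
	return 0;
}
#endif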
1135cc98548SSteven Rostedt 
1145cc98548SSteven Rostedt /*
115033601a3SSteven Rostedt  * A fast way to enable or disable all ring buffers is to
116033601a3SSteven Rostedt  * call tracing_on or tracing_off. Turning off the ring buffers
117033601a3SSteven Rostedt  * prevents all ring buffers from being recorded to.
118033601a3SSteven Rostedt  * Turning this switch on makes it OK to write to the
119033601a3SSteven Rostedt  * ring buffer, if the ring buffer is enabled itself.
120033601a3SSteven Rostedt  *
121033601a3SSteven Rostedt  * There are three layers that must be on in order to write
122033601a3SSteven Rostedt  * to the ring buffer.
123033601a3SSteven Rostedt  *
124033601a3SSteven Rostedt  * 1) This global flag must be set.
125033601a3SSteven Rostedt  * 2) The ring buffer must be enabled for recording.
126033601a3SSteven Rostedt  * 3) The per cpu buffer must be enabled for recording.
127033601a3SSteven Rostedt  *
128033601a3SSteven Rostedt  * In case of an anomaly, this global flag has a bit set that
129033601a3SSteven Rostedt  * will permanently disable all ring buffers.
130033601a3SSteven Rostedt  */
131033601a3SSteven Rostedt 
132033601a3SSteven Rostedt /*
133033601a3SSteven Rostedt  * Global flag to disable all recording to ring buffers
134033601a3SSteven Rostedt  *  This has two bits: ON, DISABLED
135033601a3SSteven Rostedt  *
136033601a3SSteven Rostedt  *  ON   DISABLED
137033601a3SSteven Rostedt  * ---- ----------
138033601a3SSteven Rostedt  *   0      0        : ring buffers are off
139033601a3SSteven Rostedt  *   1      0        : ring buffers are on
140033601a3SSteven Rostedt  *   X      1        : ring buffers are permanently disabled
141033601a3SSteven Rostedt  */
142033601a3SSteven Rostedt 
143033601a3SSteven Rostedt enum {
144033601a3SSteven Rostedt 	RB_BUFFERS_ON_BIT	= 0,
145033601a3SSteven Rostedt 	RB_BUFFERS_DISABLED_BIT	= 1,
146033601a3SSteven Rostedt };
147033601a3SSteven Rostedt 
148033601a3SSteven Rostedt enum {
149033601a3SSteven Rostedt 	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
150033601a3SSteven Rostedt 	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
151033601a3SSteven Rostedt };
152033601a3SSteven Rostedt 
1535e39841cSHannes Eder static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
154a3583244SSteven Rostedt 
155474d32b6SSteven Rostedt #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
156474d32b6SSteven Rostedt 
157a3583244SSteven Rostedt /**
158a3583244SSteven Rostedt  * tracing_on - enable all tracing buffers
159a3583244SSteven Rostedt  *
160a3583244SSteven Rostedt  * This function enables all tracing buffers that may have been
161a3583244SSteven Rostedt  * disabled with tracing_off.
162a3583244SSteven Rostedt  */
163a3583244SSteven Rostedt void tracing_on(void)
164a3583244SSteven Rostedt {
165033601a3SSteven Rostedt 	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
166a3583244SSteven Rostedt }
167c4f50183SRobert Richter EXPORT_SYMBOL_GPL(tracing_on);
168a3583244SSteven Rostedt 
169a3583244SSteven Rostedt /**
170a3583244SSteven Rostedt  * tracing_off - turn off all tracing buffers
171a3583244SSteven Rostedt  *
172a3583244SSteven Rostedt  * This function stops all tracing buffers from recording data.
173a3583244SSteven Rostedt  * It does not disable any overhead the tracers themselves may
174a3583244SSteven Rostedt  * be causing. This function simply causes all recording to
175a3583244SSteven Rostedt  * the ring buffers to fail.
176a3583244SSteven Rostedt  */
177a3583244SSteven Rostedt void tracing_off(void)
178a3583244SSteven Rostedt {
179033601a3SSteven Rostedt 	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
180033601a3SSteven Rostedt }
181c4f50183SRobert Richter EXPORT_SYMBOL_GPL(tracing_off);
182033601a3SSteven Rostedt 
183033601a3SSteven Rostedt /**
184033601a3SSteven Rostedt  * tracing_off_permanent - permanently disable ring buffers
185033601a3SSteven Rostedt  *
186033601a3SSteven Rostedt  * This function, once called, will disable all ring buffers
187c3706f00SWenji Huang  * permanently.
188033601a3SSteven Rostedt  */
189033601a3SSteven Rostedt void tracing_off_permanent(void)
190033601a3SSteven Rostedt {
191033601a3SSteven Rostedt 	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
192a3583244SSteven Rostedt }
193a3583244SSteven Rostedt 
194988ae9d6SSteven Rostedt /**
195988ae9d6SSteven Rostedt  * tracing_is_on - show state of ring buffers enabled
196988ae9d6SSteven Rostedt  */
197988ae9d6SSteven Rostedt int tracing_is_on(void)
198988ae9d6SSteven Rostedt {
199988ae9d6SSteven Rostedt 	return ring_buffer_flags == RB_BUFFERS_ON;
200988ae9d6SSteven Rostedt }
201988ae9d6SSteven Rostedt EXPORT_SYMBOL_GPL(tracing_is_on);
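
/*
 * Illustrative sketch only: a standalone model of the ON/DISABLED flag word
 * used by tracing_on(), tracing_off() and tracing_off_permanent() above.
 * set_bit()/clear_bit() are replaced with plain bit operations here, which
 * is an assumption for illustration (the real helpers are atomic).
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

#define DEMO_ON		(1UL << 0)	/* RB_BUFFERS_ON_BIT */
#define DEMO_DISABLED	(1UL << 1)	/* RB_BUFFERS_DISABLED_BIT */

int main(void)
{
	unsigned long flags = DEMO_ON;		/* tracing_on()            */

	printf("on: %d\n", flags == DEMO_ON);	/* 1, recording allowed    */

	flags &= ~DEMO_ON;			/* tracing_off()           */
	printf("on: %d\n", flags == DEMO_ON);	/* 0                       */

	flags |= DEMO_ON;			/* tracing_on() again      */
	flags |= DEMO_DISABLED;			/* tracing_off_permanent() */
	/* once DISABLED is set, the ON bit no longer matters */
	printf("on: %d\n", flags == DEMO_ON);	/* 0, permanently off      */
	return 0;
}
#endif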
202988ae9d6SSteven Rostedt 
205e3d6bf0aSSteven Rostedt #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
20667d34724SAndrew Morton #define RB_ALIGNMENT		4U
2077a8e76a3SSteven Rostedt #define RB_MAX_SMALL_DATA	28
2087a8e76a3SSteven Rostedt 
2097a8e76a3SSteven Rostedt enum {
2107a8e76a3SSteven Rostedt 	RB_LEN_TIME_EXTEND = 8,
2117a8e76a3SSteven Rostedt 	RB_LEN_TIME_STAMP = 16,
2127a8e76a3SSteven Rostedt };
2137a8e76a3SSteven Rostedt 
2142d622719STom Zanussi static inline int rb_null_event(struct ring_buffer_event *event)
2152d622719STom Zanussi {
2162d622719STom Zanussi 	return event->type == RINGBUF_TYPE_PADDING && event->time_delta == 0;
2172d622719STom Zanussi }
2182d622719STom Zanussi 
2192d622719STom Zanussi static inline int rb_discarded_event(struct ring_buffer_event *event)
2202d622719STom Zanussi {
2212d622719STom Zanussi 	return event->type == RINGBUF_TYPE_PADDING && event->time_delta;
2222d622719STom Zanussi }
2232d622719STom Zanussi 
2242d622719STom Zanussi static void rb_event_set_padding(struct ring_buffer_event *event)
2252d622719STom Zanussi {
2262d622719STom Zanussi 	event->type = RINGBUF_TYPE_PADDING;
2272d622719STom Zanussi 	event->time_delta = 0;
2282d622719STom Zanussi }
2292d622719STom Zanussi 
2302d622719STom Zanussi static unsigned
2312d622719STom Zanussi rb_event_data_length(struct ring_buffer_event *event)
2322d622719STom Zanussi {
2332d622719STom Zanussi 	unsigned length;
2342d622719STom Zanussi 
2352d622719STom Zanussi 	if (event->len)
2362d622719STom Zanussi 		length = event->len * RB_ALIGNMENT;
2372d622719STom Zanussi 	else
2382d622719STom Zanussi 		length = event->array[0];
2392d622719STom Zanussi 	return length + RB_EVNT_HDR_SIZE;
2402d622719STom Zanussi }
2412d622719STom Zanussi 
2427a8e76a3SSteven Rostedt /* inline for ring buffer fast paths */
24334a148bfSAndrew Morton static unsigned
2447a8e76a3SSteven Rostedt rb_event_length(struct ring_buffer_event *event)
2457a8e76a3SSteven Rostedt {
2467a8e76a3SSteven Rostedt 	switch (event->type) {
2477a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
2482d622719STom Zanussi 		if (rb_null_event(event))
2497a8e76a3SSteven Rostedt 			/* undefined */
2507a8e76a3SSteven Rostedt 			return -1;
2512d622719STom Zanussi 		return rb_event_data_length(event);
2527a8e76a3SSteven Rostedt 
2537a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
2547a8e76a3SSteven Rostedt 		return RB_LEN_TIME_EXTEND;
2557a8e76a3SSteven Rostedt 
2567a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
2577a8e76a3SSteven Rostedt 		return RB_LEN_TIME_STAMP;
2587a8e76a3SSteven Rostedt 
2597a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
2602d622719STom Zanussi 		return rb_event_data_length(event);
2617a8e76a3SSteven Rostedt 	default:
2627a8e76a3SSteven Rostedt 		BUG();
2637a8e76a3SSteven Rostedt 	}
2647a8e76a3SSteven Rostedt 	/* not hit */
2657a8e76a3SSteven Rostedt 	return 0;
2667a8e76a3SSteven Rostedt }
2677a8e76a3SSteven Rostedt 
2687a8e76a3SSteven Rostedt /**
2697a8e76a3SSteven Rostedt  * ring_buffer_event_length - return the length of the event
2707a8e76a3SSteven Rostedt  * @event: the event to get the length of
2717a8e76a3SSteven Rostedt  */
2727a8e76a3SSteven Rostedt unsigned ring_buffer_event_length(struct ring_buffer_event *event)
2737a8e76a3SSteven Rostedt {
274465634adSRobert Richter 	unsigned length = rb_event_length(event);
275465634adSRobert Richter 	if (event->type != RINGBUF_TYPE_DATA)
276465634adSRobert Richter 		return length;
277465634adSRobert Richter 	length -= RB_EVNT_HDR_SIZE;
278465634adSRobert Richter 	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
279465634adSRobert Richter                 length -= sizeof(event->array[0]);
280465634adSRobert Richter 	return length;
2817a8e76a3SSteven Rostedt }
282c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_event_length);
2837a8e76a3SSteven Rostedt 
2847a8e76a3SSteven Rostedt /* inline for ring buffer fast paths */
28534a148bfSAndrew Morton static void *
2867a8e76a3SSteven Rostedt rb_event_data(struct ring_buffer_event *event)
2877a8e76a3SSteven Rostedt {
2887a8e76a3SSteven Rostedt 	BUG_ON(event->type != RINGBUF_TYPE_DATA);
2897a8e76a3SSteven Rostedt 	/* If length is in len field, then array[0] has the data */
2907a8e76a3SSteven Rostedt 	if (event->len)
2917a8e76a3SSteven Rostedt 		return (void *)&event->array[0];
2927a8e76a3SSteven Rostedt 	/* Otherwise length is in array[0] and array[1] has the data */
2937a8e76a3SSteven Rostedt 	return (void *)&event->array[1];
2947a8e76a3SSteven Rostedt }
2957a8e76a3SSteven Rostedt 
2967a8e76a3SSteven Rostedt /**
2977a8e76a3SSteven Rostedt  * ring_buffer_event_data - return the data of the event
2987a8e76a3SSteven Rostedt  * @event: the event to get the data from
2997a8e76a3SSteven Rostedt  */
3007a8e76a3SSteven Rostedt void *ring_buffer_event_data(struct ring_buffer_event *event)
3017a8e76a3SSteven Rostedt {
3027a8e76a3SSteven Rostedt 	return rb_event_data(event);
3037a8e76a3SSteven Rostedt }
304c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_event_data);
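
/*
 * Illustrative sketch only: how the event length encoding above plays out.
 * The bit widths mirror ring_buffer_print_entry_header(); the exact field
 * layout of the real struct in <linux/ring_buffer.h> is assumed here.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

struct demo_event {
	unsigned	type:2, len:3, time_delta:27;
	unsigned	array[2];
};

int main(void)
{
	/* small payload: length is len * RB_ALIGNMENT, data starts at array[0] */
	struct demo_event small = { .len = 5 };			/* 20 bytes */

	/* large payload (> RB_MAX_SMALL_DATA): len is 0, array[0] holds the
	 * byte count and the data starts at array[1] */
	struct demo_event big = { .len = 0, .array = { 60 } };

	printf("small payload: %d bytes at array[0]\n", small.len * 4);
	printf("big payload  : %u bytes at array[1]\n", big.array[0]);
	return 0;
}
#endif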
3057a8e76a3SSteven Rostedt 
3067a8e76a3SSteven Rostedt #define for_each_buffer_cpu(buffer, cpu)		\
3079e01c1b7SRusty Russell 	for_each_cpu(cpu, buffer->cpumask)
3087a8e76a3SSteven Rostedt 
3097a8e76a3SSteven Rostedt #define TS_SHIFT	27
3107a8e76a3SSteven Rostedt #define TS_MASK		((1ULL << TS_SHIFT) - 1)
3117a8e76a3SSteven Rostedt #define TS_DELTA_TEST	(~TS_MASK)
3127a8e76a3SSteven Rostedt 
313abc9b56dSSteven Rostedt struct buffer_data_page {
3147a8e76a3SSteven Rostedt 	u64		 time_stamp;	/* page time stamp */
315c3706f00SWenji Huang 	local_t		 commit;	/* write committed index */
316abc9b56dSSteven Rostedt 	unsigned char	 data[];	/* data of buffer page */
317abc9b56dSSteven Rostedt };
318abc9b56dSSteven Rostedt 
319abc9b56dSSteven Rostedt struct buffer_page {
320abc9b56dSSteven Rostedt 	local_t		 write;		/* index for next write */
3216f807acdSSteven Rostedt 	unsigned	 read;		/* index for next read */
3227a8e76a3SSteven Rostedt 	struct list_head list;		/* list of free pages */
323abc9b56dSSteven Rostedt 	struct buffer_data_page *page;	/* Actual data page */
3247a8e76a3SSteven Rostedt };
3257a8e76a3SSteven Rostedt 
326044fa782SSteven Rostedt static void rb_init_page(struct buffer_data_page *bpage)
327abc9b56dSSteven Rostedt {
328044fa782SSteven Rostedt 	local_set(&bpage->commit, 0);
329abc9b56dSSteven Rostedt }
330abc9b56dSSteven Rostedt 
331474d32b6SSteven Rostedt /**
332474d32b6SSteven Rostedt  * ring_buffer_page_len - the size of data on the page.
333474d32b6SSteven Rostedt  * @page: The page to read
334474d32b6SSteven Rostedt  *
335474d32b6SSteven Rostedt  * Returns the amount of data on the page, including buffer page header.
336474d32b6SSteven Rostedt  */
337ef7a4a16SSteven Rostedt size_t ring_buffer_page_len(void *page)
338ef7a4a16SSteven Rostedt {
339474d32b6SSteven Rostedt 	return local_read(&((struct buffer_data_page *)page)->commit)
340474d32b6SSteven Rostedt 		+ BUF_PAGE_HDR_SIZE;
341ef7a4a16SSteven Rostedt }
342ef7a4a16SSteven Rostedt 
3437a8e76a3SSteven Rostedt /*
344ed56829cSSteven Rostedt  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
345ed56829cSSteven Rostedt  * this issue out.
346ed56829cSSteven Rostedt  */
34734a148bfSAndrew Morton static void free_buffer_page(struct buffer_page *bpage)
348ed56829cSSteven Rostedt {
3496ae2a076SSteven Rostedt 	free_page((unsigned long)bpage->page);
350e4c2ce82SSteven Rostedt 	kfree(bpage);
351ed56829cSSteven Rostedt }
352ed56829cSSteven Rostedt 
353ed56829cSSteven Rostedt /*
3547a8e76a3SSteven Rostedt  * We need to fit the time_stamp delta into 27 bits.
3557a8e76a3SSteven Rostedt  */
3567a8e76a3SSteven Rostedt static inline int test_time_stamp(u64 delta)
3577a8e76a3SSteven Rostedt {
3587a8e76a3SSteven Rostedt 	if (delta & TS_DELTA_TEST)
3597a8e76a3SSteven Rostedt 		return 1;
3607a8e76a3SSteven Rostedt 	return 0;
3617a8e76a3SSteven Rostedt }
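
/*
 * Illustrative sketch only: the 27-bit delta limit checked by
 * test_time_stamp(), assuming a nanosecond clock such as trace_clock_local.
 * Deltas that overflow are written as RINGBUF_TYPE_TIME_EXTEND events.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

int main(void)
{
	unsigned long long mask = (1ULL << 27) - 1;	/* TS_MASK */

	printf("largest delta: %llu ns (~134 ms)\n", mask);
	/* same test as test_time_stamp(): any bit above the mask overflows */
	printf("100 ms delta overflows: %d\n", !!(100000000ULL & ~mask));	/* 0 */
	printf("200 ms delta overflows: %d\n", !!(200000000ULL & ~mask));	/* 1 */
	return 0;
}
#endif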
3627a8e76a3SSteven Rostedt 
363474d32b6SSteven Rostedt #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
3647a8e76a3SSteven Rostedt 
365d1b182a8SSteven Rostedt int ring_buffer_print_page_header(struct trace_seq *s)
366d1b182a8SSteven Rostedt {
367d1b182a8SSteven Rostedt 	struct buffer_data_page field;
368d1b182a8SSteven Rostedt 	int ret;
369d1b182a8SSteven Rostedt 
370d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
371d1b182a8SSteven Rostedt 			       "offset:0;\tsize:%u;\n",
372d1b182a8SSteven Rostedt 			       (unsigned int)sizeof(field.time_stamp));
373d1b182a8SSteven Rostedt 
374d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
375d1b182a8SSteven Rostedt 			       "offset:%u;\tsize:%u;\n",
376d1b182a8SSteven Rostedt 			       (unsigned int)offsetof(typeof(field), commit),
377d1b182a8SSteven Rostedt 			       (unsigned int)sizeof(field.commit));
378d1b182a8SSteven Rostedt 
379d1b182a8SSteven Rostedt 	ret = trace_seq_printf(s, "\tfield: char data;\t"
380d1b182a8SSteven Rostedt 			       "offset:%u;\tsize:%u;\n",
381d1b182a8SSteven Rostedt 			       (unsigned int)offsetof(typeof(field), data),
382d1b182a8SSteven Rostedt 			       (unsigned int)BUF_PAGE_SIZE);
383d1b182a8SSteven Rostedt 
384d1b182a8SSteven Rostedt 	return ret;
385d1b182a8SSteven Rostedt }
386d1b182a8SSteven Rostedt 
3877a8e76a3SSteven Rostedt /*
3887a8e76a3SSteven Rostedt  * head_page == tail_page && head == tail then buffer is empty.
3897a8e76a3SSteven Rostedt  */
3907a8e76a3SSteven Rostedt struct ring_buffer_per_cpu {
3917a8e76a3SSteven Rostedt 	int				cpu;
3927a8e76a3SSteven Rostedt 	struct ring_buffer		*buffer;
393f83c9d0fSSteven Rostedt 	spinlock_t			reader_lock; /* serialize readers */
3943e03fb7fSSteven Rostedt 	raw_spinlock_t			lock;
3957a8e76a3SSteven Rostedt 	struct lock_class_key		lock_key;
3967a8e76a3SSteven Rostedt 	struct list_head		pages;
3976f807acdSSteven Rostedt 	struct buffer_page		*head_page;	/* read from head */
3986f807acdSSteven Rostedt 	struct buffer_page		*tail_page;	/* write to tail */
399c3706f00SWenji Huang 	struct buffer_page		*commit_page;	/* committed pages */
400d769041fSSteven Rostedt 	struct buffer_page		*reader_page;
4017a8e76a3SSteven Rostedt 	unsigned long			overrun;
4027a8e76a3SSteven Rostedt 	unsigned long			entries;
4037a8e76a3SSteven Rostedt 	u64				write_stamp;
4047a8e76a3SSteven Rostedt 	u64				read_stamp;
4057a8e76a3SSteven Rostedt 	atomic_t			record_disabled;
4067a8e76a3SSteven Rostedt };
4077a8e76a3SSteven Rostedt 
4087a8e76a3SSteven Rostedt struct ring_buffer {
4097a8e76a3SSteven Rostedt 	unsigned			pages;
4107a8e76a3SSteven Rostedt 	unsigned			flags;
4117a8e76a3SSteven Rostedt 	int				cpus;
4127a8e76a3SSteven Rostedt 	atomic_t			record_disabled;
41300f62f61SArnaldo Carvalho de Melo 	cpumask_var_t			cpumask;
4147a8e76a3SSteven Rostedt 
4157a8e76a3SSteven Rostedt 	struct mutex			mutex;
4167a8e76a3SSteven Rostedt 
4177a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu	**buffers;
418554f786eSSteven Rostedt 
41959222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
420554f786eSSteven Rostedt 	struct notifier_block		cpu_notify;
421554f786eSSteven Rostedt #endif
42237886f6aSSteven Rostedt 	u64				(*clock)(void);
4237a8e76a3SSteven Rostedt };
4247a8e76a3SSteven Rostedt 
4257a8e76a3SSteven Rostedt struct ring_buffer_iter {
4267a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu	*cpu_buffer;
4277a8e76a3SSteven Rostedt 	unsigned long			head;
4287a8e76a3SSteven Rostedt 	struct buffer_page		*head_page;
4297a8e76a3SSteven Rostedt 	u64				read_stamp;
4307a8e76a3SSteven Rostedt };
4317a8e76a3SSteven Rostedt 
432f536aafcSSteven Rostedt /* buffer may be either ring_buffer or ring_buffer_per_cpu */
4337a8e76a3SSteven Rostedt #define RB_WARN_ON(buffer, cond)				\
4343e89c7bbSSteven Rostedt 	({							\
4353e89c7bbSSteven Rostedt 		int _____ret = unlikely(cond);			\
4363e89c7bbSSteven Rostedt 		if (_____ret) {					\
437bf41a158SSteven Rostedt 			atomic_inc(&buffer->record_disabled);	\
438bf41a158SSteven Rostedt 			WARN_ON(1);				\
439bf41a158SSteven Rostedt 		}						\
4403e89c7bbSSteven Rostedt 		_____ret;					\
4413e89c7bbSSteven Rostedt 	})
442f536aafcSSteven Rostedt 
44337886f6aSSteven Rostedt /* Up this if you want to test the TIME_EXTENTS and normalization */
44437886f6aSSteven Rostedt #define DEBUG_SHIFT 0
44537886f6aSSteven Rostedt 
44637886f6aSSteven Rostedt u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
44737886f6aSSteven Rostedt {
44837886f6aSSteven Rostedt 	u64 time;
44937886f6aSSteven Rostedt 
45037886f6aSSteven Rostedt 	preempt_disable_notrace();
45137886f6aSSteven Rostedt 	/* shift to debug/test normalization and TIME_EXTENTS */
45237886f6aSSteven Rostedt 	time = buffer->clock() << DEBUG_SHIFT;
45337886f6aSSteven Rostedt 	preempt_enable_no_resched_notrace();
45437886f6aSSteven Rostedt 
45537886f6aSSteven Rostedt 	return time;
45637886f6aSSteven Rostedt }
45737886f6aSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
45837886f6aSSteven Rostedt 
45937886f6aSSteven Rostedt void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
46037886f6aSSteven Rostedt 				      int cpu, u64 *ts)
46137886f6aSSteven Rostedt {
46237886f6aSSteven Rostedt 	/* Just stupid testing the normalize function and deltas */
46337886f6aSSteven Rostedt 	*ts >>= DEBUG_SHIFT;
46437886f6aSSteven Rostedt }
46537886f6aSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
46637886f6aSSteven Rostedt 
4677a8e76a3SSteven Rostedt /**
4687a8e76a3SSteven Rostedt  * check_pages - integrity check of buffer pages
4697a8e76a3SSteven Rostedt  * @cpu_buffer: CPU buffer with pages to test
4707a8e76a3SSteven Rostedt  *
471c3706f00SWenji Huang  * As a safety measure we check to make sure the data pages have not
4727a8e76a3SSteven Rostedt  * been corrupted.
4737a8e76a3SSteven Rostedt  */
4747a8e76a3SSteven Rostedt static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
4757a8e76a3SSteven Rostedt {
4767a8e76a3SSteven Rostedt 	struct list_head *head = &cpu_buffer->pages;
477044fa782SSteven Rostedt 	struct buffer_page *bpage, *tmp;
4787a8e76a3SSteven Rostedt 
4793e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
4803e89c7bbSSteven Rostedt 		return -1;
4813e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
4823e89c7bbSSteven Rostedt 		return -1;
4837a8e76a3SSteven Rostedt 
484044fa782SSteven Rostedt 	list_for_each_entry_safe(bpage, tmp, head, list) {
4853e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer,
486044fa782SSteven Rostedt 			       bpage->list.next->prev != &bpage->list))
4873e89c7bbSSteven Rostedt 			return -1;
4883e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer,
489044fa782SSteven Rostedt 			       bpage->list.prev->next != &bpage->list))
4903e89c7bbSSteven Rostedt 			return -1;
4917a8e76a3SSteven Rostedt 	}
4927a8e76a3SSteven Rostedt 
4937a8e76a3SSteven Rostedt 	return 0;
4947a8e76a3SSteven Rostedt }
4957a8e76a3SSteven Rostedt 
4967a8e76a3SSteven Rostedt static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
4977a8e76a3SSteven Rostedt 			     unsigned nr_pages)
4987a8e76a3SSteven Rostedt {
4997a8e76a3SSteven Rostedt 	struct list_head *head = &cpu_buffer->pages;
500044fa782SSteven Rostedt 	struct buffer_page *bpage, *tmp;
5017a8e76a3SSteven Rostedt 	unsigned long addr;
5027a8e76a3SSteven Rostedt 	LIST_HEAD(pages);
5037a8e76a3SSteven Rostedt 	unsigned i;
5047a8e76a3SSteven Rostedt 
5057a8e76a3SSteven Rostedt 	for (i = 0; i < nr_pages; i++) {
506044fa782SSteven Rostedt 		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
507aa1e0e3bSSteven Rostedt 				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
508044fa782SSteven Rostedt 		if (!bpage)
509e4c2ce82SSteven Rostedt 			goto free_pages;
510044fa782SSteven Rostedt 		list_add(&bpage->list, &pages);
511e4c2ce82SSteven Rostedt 
5127a8e76a3SSteven Rostedt 		addr = __get_free_page(GFP_KERNEL);
5137a8e76a3SSteven Rostedt 		if (!addr)
5147a8e76a3SSteven Rostedt 			goto free_pages;
515044fa782SSteven Rostedt 		bpage->page = (void *)addr;
516044fa782SSteven Rostedt 		rb_init_page(bpage->page);
5177a8e76a3SSteven Rostedt 	}
5187a8e76a3SSteven Rostedt 
5197a8e76a3SSteven Rostedt 	list_splice(&pages, head);
5207a8e76a3SSteven Rostedt 
5217a8e76a3SSteven Rostedt 	rb_check_pages(cpu_buffer);
5227a8e76a3SSteven Rostedt 
5237a8e76a3SSteven Rostedt 	return 0;
5247a8e76a3SSteven Rostedt 
5257a8e76a3SSteven Rostedt  free_pages:
526044fa782SSteven Rostedt 	list_for_each_entry_safe(bpage, tmp, &pages, list) {
527044fa782SSteven Rostedt 		list_del_init(&bpage->list);
528044fa782SSteven Rostedt 		free_buffer_page(bpage);
5297a8e76a3SSteven Rostedt 	}
5307a8e76a3SSteven Rostedt 	return -ENOMEM;
5317a8e76a3SSteven Rostedt }
5327a8e76a3SSteven Rostedt 
5337a8e76a3SSteven Rostedt static struct ring_buffer_per_cpu *
5347a8e76a3SSteven Rostedt rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
5357a8e76a3SSteven Rostedt {
5367a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
537044fa782SSteven Rostedt 	struct buffer_page *bpage;
538d769041fSSteven Rostedt 	unsigned long addr;
5397a8e76a3SSteven Rostedt 	int ret;
5407a8e76a3SSteven Rostedt 
5417a8e76a3SSteven Rostedt 	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
5427a8e76a3SSteven Rostedt 				  GFP_KERNEL, cpu_to_node(cpu));
5437a8e76a3SSteven Rostedt 	if (!cpu_buffer)
5447a8e76a3SSteven Rostedt 		return NULL;
5457a8e76a3SSteven Rostedt 
5467a8e76a3SSteven Rostedt 	cpu_buffer->cpu = cpu;
5477a8e76a3SSteven Rostedt 	cpu_buffer->buffer = buffer;
548f83c9d0fSSteven Rostedt 	spin_lock_init(&cpu_buffer->reader_lock);
5493e03fb7fSSteven Rostedt 	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
5507a8e76a3SSteven Rostedt 	INIT_LIST_HEAD(&cpu_buffer->pages);
5517a8e76a3SSteven Rostedt 
552044fa782SSteven Rostedt 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
553e4c2ce82SSteven Rostedt 			    GFP_KERNEL, cpu_to_node(cpu));
554044fa782SSteven Rostedt 	if (!bpage)
555e4c2ce82SSteven Rostedt 		goto fail_free_buffer;
556e4c2ce82SSteven Rostedt 
557044fa782SSteven Rostedt 	cpu_buffer->reader_page = bpage;
558d769041fSSteven Rostedt 	addr = __get_free_page(GFP_KERNEL);
559d769041fSSteven Rostedt 	if (!addr)
560e4c2ce82SSteven Rostedt 		goto fail_free_reader;
561044fa782SSteven Rostedt 	bpage->page = (void *)addr;
562044fa782SSteven Rostedt 	rb_init_page(bpage->page);
563e4c2ce82SSteven Rostedt 
564d769041fSSteven Rostedt 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
565d769041fSSteven Rostedt 
5667a8e76a3SSteven Rostedt 	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
5677a8e76a3SSteven Rostedt 	if (ret < 0)
568d769041fSSteven Rostedt 		goto fail_free_reader;
5697a8e76a3SSteven Rostedt 
5707a8e76a3SSteven Rostedt 	cpu_buffer->head_page
5717a8e76a3SSteven Rostedt 		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
572bf41a158SSteven Rostedt 	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
5737a8e76a3SSteven Rostedt 
5747a8e76a3SSteven Rostedt 	return cpu_buffer;
5757a8e76a3SSteven Rostedt 
576d769041fSSteven Rostedt  fail_free_reader:
577d769041fSSteven Rostedt 	free_buffer_page(cpu_buffer->reader_page);
578d769041fSSteven Rostedt 
5797a8e76a3SSteven Rostedt  fail_free_buffer:
5807a8e76a3SSteven Rostedt 	kfree(cpu_buffer);
5817a8e76a3SSteven Rostedt 	return NULL;
5827a8e76a3SSteven Rostedt }
5837a8e76a3SSteven Rostedt 
5847a8e76a3SSteven Rostedt static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
5857a8e76a3SSteven Rostedt {
5867a8e76a3SSteven Rostedt 	struct list_head *head = &cpu_buffer->pages;
587044fa782SSteven Rostedt 	struct buffer_page *bpage, *tmp;
5887a8e76a3SSteven Rostedt 
589d769041fSSteven Rostedt 	free_buffer_page(cpu_buffer->reader_page);
590d769041fSSteven Rostedt 
591044fa782SSteven Rostedt 	list_for_each_entry_safe(bpage, tmp, head, list) {
592044fa782SSteven Rostedt 		list_del_init(&bpage->list);
593044fa782SSteven Rostedt 		free_buffer_page(bpage);
5947a8e76a3SSteven Rostedt 	}
5957a8e76a3SSteven Rostedt 	kfree(cpu_buffer);
5967a8e76a3SSteven Rostedt }
5977a8e76a3SSteven Rostedt 
598a7b13743SSteven Rostedt /*
599a7b13743SSteven Rostedt  * Causes compile errors if the struct buffer_page gets bigger
600a7b13743SSteven Rostedt  * than the struct page.
601a7b13743SSteven Rostedt  */
602a7b13743SSteven Rostedt extern int ring_buffer_page_too_big(void);
603a7b13743SSteven Rostedt 
60459222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
60509c9e84dSFrederic Weisbecker static int rb_cpu_notify(struct notifier_block *self,
606554f786eSSteven Rostedt 			 unsigned long action, void *hcpu);
607554f786eSSteven Rostedt #endif
608554f786eSSteven Rostedt 
6097a8e76a3SSteven Rostedt /**
6107a8e76a3SSteven Rostedt  * ring_buffer_alloc - allocate a new ring_buffer
61168814b58SRobert Richter  * @size: the size in bytes per cpu that is needed.
6127a8e76a3SSteven Rostedt  * @flags: attributes to set for the ring buffer.
6137a8e76a3SSteven Rostedt  *
6147a8e76a3SSteven Rostedt  * Currently the only flag that is available is the RB_FL_OVERWRITE
6157a8e76a3SSteven Rostedt  * flag. This flag means that the buffer will overwrite old data
6167a8e76a3SSteven Rostedt  * when the buffer wraps. If this flag is not set, the buffer will
6177a8e76a3SSteven Rostedt  * drop data when the tail hits the head.
6187a8e76a3SSteven Rostedt  */
6197a8e76a3SSteven Rostedt struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
6207a8e76a3SSteven Rostedt {
6217a8e76a3SSteven Rostedt 	struct ring_buffer *buffer;
6227a8e76a3SSteven Rostedt 	int bsize;
6237a8e76a3SSteven Rostedt 	int cpu;
6247a8e76a3SSteven Rostedt 
625a7b13743SSteven Rostedt 	/* Paranoid! Optimizes out when all is well */
626a7b13743SSteven Rostedt 	if (sizeof(struct buffer_page) > sizeof(struct page))
627a7b13743SSteven Rostedt 		ring_buffer_page_too_big();
628a7b13743SSteven Rostedt 
629a7b13743SSteven Rostedt 
6307a8e76a3SSteven Rostedt 	/* keep it in its own cache line */
6317a8e76a3SSteven Rostedt 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
6327a8e76a3SSteven Rostedt 			 GFP_KERNEL);
6337a8e76a3SSteven Rostedt 	if (!buffer)
6347a8e76a3SSteven Rostedt 		return NULL;
6357a8e76a3SSteven Rostedt 
6369e01c1b7SRusty Russell 	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
6379e01c1b7SRusty Russell 		goto fail_free_buffer;
6389e01c1b7SRusty Russell 
6397a8e76a3SSteven Rostedt 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
6407a8e76a3SSteven Rostedt 	buffer->flags = flags;
64137886f6aSSteven Rostedt 	buffer->clock = trace_clock_local;
6427a8e76a3SSteven Rostedt 
6437a8e76a3SSteven Rostedt 	/* need at least two pages */
6447a8e76a3SSteven Rostedt 	if (buffer->pages == 1)
6457a8e76a3SSteven Rostedt 		buffer->pages++;
6467a8e76a3SSteven Rostedt 
6473bf832ceSFrederic Weisbecker 	/*
6483bf832ceSFrederic Weisbecker 	 * In case of non-hotplug cpu, if the ring-buffer is allocated
6493bf832ceSFrederic Weisbecker 	 * in early initcall, it will not be notified of secondary cpus.
6503bf832ceSFrederic Weisbecker 	 * In that case, we need to allocate for all possible cpus.
6513bf832ceSFrederic Weisbecker 	 */
6523bf832ceSFrederic Weisbecker #ifdef CONFIG_HOTPLUG_CPU
653554f786eSSteven Rostedt 	get_online_cpus();
654554f786eSSteven Rostedt 	cpumask_copy(buffer->cpumask, cpu_online_mask);
6553bf832ceSFrederic Weisbecker #else
6563bf832ceSFrederic Weisbecker 	cpumask_copy(buffer->cpumask, cpu_possible_mask);
6573bf832ceSFrederic Weisbecker #endif
6587a8e76a3SSteven Rostedt 	buffer->cpus = nr_cpu_ids;
6597a8e76a3SSteven Rostedt 
6607a8e76a3SSteven Rostedt 	bsize = sizeof(void *) * nr_cpu_ids;
6617a8e76a3SSteven Rostedt 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
6627a8e76a3SSteven Rostedt 				  GFP_KERNEL);
6637a8e76a3SSteven Rostedt 	if (!buffer->buffers)
6649e01c1b7SRusty Russell 		goto fail_free_cpumask;
6657a8e76a3SSteven Rostedt 
6667a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
6677a8e76a3SSteven Rostedt 		buffer->buffers[cpu] =
6687a8e76a3SSteven Rostedt 			rb_allocate_cpu_buffer(buffer, cpu);
6697a8e76a3SSteven Rostedt 		if (!buffer->buffers[cpu])
6707a8e76a3SSteven Rostedt 			goto fail_free_buffers;
6717a8e76a3SSteven Rostedt 	}
6727a8e76a3SSteven Rostedt 
67359222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
674554f786eSSteven Rostedt 	buffer->cpu_notify.notifier_call = rb_cpu_notify;
675554f786eSSteven Rostedt 	buffer->cpu_notify.priority = 0;
676554f786eSSteven Rostedt 	register_cpu_notifier(&buffer->cpu_notify);
677554f786eSSteven Rostedt #endif
678554f786eSSteven Rostedt 
679554f786eSSteven Rostedt 	put_online_cpus();
6807a8e76a3SSteven Rostedt 	mutex_init(&buffer->mutex);
6817a8e76a3SSteven Rostedt 
6827a8e76a3SSteven Rostedt 	return buffer;
6837a8e76a3SSteven Rostedt 
6847a8e76a3SSteven Rostedt  fail_free_buffers:
6857a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
6867a8e76a3SSteven Rostedt 		if (buffer->buffers[cpu])
6877a8e76a3SSteven Rostedt 			rb_free_cpu_buffer(buffer->buffers[cpu]);
6887a8e76a3SSteven Rostedt 	}
6897a8e76a3SSteven Rostedt 	kfree(buffer->buffers);
6907a8e76a3SSteven Rostedt 
6919e01c1b7SRusty Russell  fail_free_cpumask:
6929e01c1b7SRusty Russell 	free_cpumask_var(buffer->cpumask);
693554f786eSSteven Rostedt 	put_online_cpus();
6949e01c1b7SRusty Russell 
6957a8e76a3SSteven Rostedt  fail_free_buffer:
6967a8e76a3SSteven Rostedt 	kfree(buffer);
6977a8e76a3SSteven Rostedt 	return NULL;
6987a8e76a3SSteven Rostedt }
699c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_alloc);
7007a8e76a3SSteven Rostedt 
7017a8e76a3SSteven Rostedt /**
7027a8e76a3SSteven Rostedt  * ring_buffer_free - free a ring buffer.
7037a8e76a3SSteven Rostedt  * @buffer: the buffer to free.
7047a8e76a3SSteven Rostedt  */
7057a8e76a3SSteven Rostedt void
7067a8e76a3SSteven Rostedt ring_buffer_free(struct ring_buffer *buffer)
7077a8e76a3SSteven Rostedt {
7087a8e76a3SSteven Rostedt 	int cpu;
7097a8e76a3SSteven Rostedt 
710554f786eSSteven Rostedt 	get_online_cpus();
711554f786eSSteven Rostedt 
71259222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
713554f786eSSteven Rostedt 	unregister_cpu_notifier(&buffer->cpu_notify);
714554f786eSSteven Rostedt #endif
715554f786eSSteven Rostedt 
7167a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu)
7177a8e76a3SSteven Rostedt 		rb_free_cpu_buffer(buffer->buffers[cpu]);
7187a8e76a3SSteven Rostedt 
719554f786eSSteven Rostedt 	put_online_cpus();
720554f786eSSteven Rostedt 
7219e01c1b7SRusty Russell 	free_cpumask_var(buffer->cpumask);
7229e01c1b7SRusty Russell 
7237a8e76a3SSteven Rostedt 	kfree(buffer);
7247a8e76a3SSteven Rostedt }
725c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_free);
7267a8e76a3SSteven Rostedt 
72737886f6aSSteven Rostedt void ring_buffer_set_clock(struct ring_buffer *buffer,
72837886f6aSSteven Rostedt 			   u64 (*clock)(void))
72937886f6aSSteven Rostedt {
73037886f6aSSteven Rostedt 	buffer->clock = clock;
73137886f6aSSteven Rostedt }
73237886f6aSSteven Rostedt 
7337a8e76a3SSteven Rostedt static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
7347a8e76a3SSteven Rostedt 
7357a8e76a3SSteven Rostedt static void
7367a8e76a3SSteven Rostedt rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
7377a8e76a3SSteven Rostedt {
738044fa782SSteven Rostedt 	struct buffer_page *bpage;
7397a8e76a3SSteven Rostedt 	struct list_head *p;
7407a8e76a3SSteven Rostedt 	unsigned i;
7417a8e76a3SSteven Rostedt 
7427a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
7437a8e76a3SSteven Rostedt 	synchronize_sched();
7447a8e76a3SSteven Rostedt 
7457a8e76a3SSteven Rostedt 	for (i = 0; i < nr_pages; i++) {
7463e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
7473e89c7bbSSteven Rostedt 			return;
7487a8e76a3SSteven Rostedt 		p = cpu_buffer->pages.next;
749044fa782SSteven Rostedt 		bpage = list_entry(p, struct buffer_page, list);
750044fa782SSteven Rostedt 		list_del_init(&bpage->list);
751044fa782SSteven Rostedt 		free_buffer_page(bpage);
7527a8e76a3SSteven Rostedt 	}
7533e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
7543e89c7bbSSteven Rostedt 		return;
7557a8e76a3SSteven Rostedt 
7567a8e76a3SSteven Rostedt 	rb_reset_cpu(cpu_buffer);
7577a8e76a3SSteven Rostedt 
7587a8e76a3SSteven Rostedt 	rb_check_pages(cpu_buffer);
7597a8e76a3SSteven Rostedt 
7607a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
7617a8e76a3SSteven Rostedt 
7627a8e76a3SSteven Rostedt }
7637a8e76a3SSteven Rostedt 
7647a8e76a3SSteven Rostedt static void
7657a8e76a3SSteven Rostedt rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
7667a8e76a3SSteven Rostedt 		struct list_head *pages, unsigned nr_pages)
7677a8e76a3SSteven Rostedt {
768044fa782SSteven Rostedt 	struct buffer_page *bpage;
7697a8e76a3SSteven Rostedt 	struct list_head *p;
7707a8e76a3SSteven Rostedt 	unsigned i;
7717a8e76a3SSteven Rostedt 
7727a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
7737a8e76a3SSteven Rostedt 	synchronize_sched();
7747a8e76a3SSteven Rostedt 
7757a8e76a3SSteven Rostedt 	for (i = 0; i < nr_pages; i++) {
7763e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
7773e89c7bbSSteven Rostedt 			return;
7787a8e76a3SSteven Rostedt 		p = pages->next;
779044fa782SSteven Rostedt 		bpage = list_entry(p, struct buffer_page, list);
780044fa782SSteven Rostedt 		list_del_init(&bpage->list);
781044fa782SSteven Rostedt 		list_add_tail(&bpage->list, &cpu_buffer->pages);
7827a8e76a3SSteven Rostedt 	}
7837a8e76a3SSteven Rostedt 	rb_reset_cpu(cpu_buffer);
7847a8e76a3SSteven Rostedt 
7857a8e76a3SSteven Rostedt 	rb_check_pages(cpu_buffer);
7867a8e76a3SSteven Rostedt 
7877a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
7887a8e76a3SSteven Rostedt }
7897a8e76a3SSteven Rostedt 
7907a8e76a3SSteven Rostedt /**
7917a8e76a3SSteven Rostedt  * ring_buffer_resize - resize the ring buffer
7927a8e76a3SSteven Rostedt  * @buffer: the buffer to resize.
7937a8e76a3SSteven Rostedt  * @size: the new size.
7947a8e76a3SSteven Rostedt  *
7957a8e76a3SSteven Rostedt  * The tracer is responsible for making sure that the buffer is
7967a8e76a3SSteven Rostedt  * not being used while changing the size.
7977a8e76a3SSteven Rostedt  * Note: We may be able to change the above requirement by using
7987a8e76a3SSteven Rostedt  *  RCU synchronizations.
7997a8e76a3SSteven Rostedt  *
8007a8e76a3SSteven Rostedt  * Minimum size is 2 * BUF_PAGE_SIZE.
8017a8e76a3SSteven Rostedt  *
8027a8e76a3SSteven Rostedt  * Returns -1 on failure.
8037a8e76a3SSteven Rostedt  */
8047a8e76a3SSteven Rostedt int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
8057a8e76a3SSteven Rostedt {
8067a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
8077a8e76a3SSteven Rostedt 	unsigned nr_pages, rm_pages, new_pages;
808044fa782SSteven Rostedt 	struct buffer_page *bpage, *tmp;
8097a8e76a3SSteven Rostedt 	unsigned long buffer_size;
8107a8e76a3SSteven Rostedt 	unsigned long addr;
8117a8e76a3SSteven Rostedt 	LIST_HEAD(pages);
8127a8e76a3SSteven Rostedt 	int i, cpu;
8137a8e76a3SSteven Rostedt 
814ee51a1deSIngo Molnar 	/*
815ee51a1deSIngo Molnar 	 * Always succeed at resizing a non-existent buffer:
816ee51a1deSIngo Molnar 	 */
817ee51a1deSIngo Molnar 	if (!buffer)
818ee51a1deSIngo Molnar 		return size;
819ee51a1deSIngo Molnar 
8207a8e76a3SSteven Rostedt 	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
8217a8e76a3SSteven Rostedt 	size *= BUF_PAGE_SIZE;
8227a8e76a3SSteven Rostedt 	buffer_size = buffer->pages * BUF_PAGE_SIZE;
8237a8e76a3SSteven Rostedt 
8247a8e76a3SSteven Rostedt 	/* we need a minimum of two pages */
8257a8e76a3SSteven Rostedt 	if (size < BUF_PAGE_SIZE * 2)
8267a8e76a3SSteven Rostedt 		size = BUF_PAGE_SIZE * 2;
8277a8e76a3SSteven Rostedt 
8287a8e76a3SSteven Rostedt 	if (size == buffer_size)
8297a8e76a3SSteven Rostedt 		return size;
8307a8e76a3SSteven Rostedt 
8317a8e76a3SSteven Rostedt 	mutex_lock(&buffer->mutex);
832554f786eSSteven Rostedt 	get_online_cpus();
8337a8e76a3SSteven Rostedt 
8347a8e76a3SSteven Rostedt 	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
8357a8e76a3SSteven Rostedt 
8367a8e76a3SSteven Rostedt 	if (size < buffer_size) {
8377a8e76a3SSteven Rostedt 
8387a8e76a3SSteven Rostedt 		/* easy case, just free pages */
839554f786eSSteven Rostedt 		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
840554f786eSSteven Rostedt 			goto out_fail;
8417a8e76a3SSteven Rostedt 
8427a8e76a3SSteven Rostedt 		rm_pages = buffer->pages - nr_pages;
8437a8e76a3SSteven Rostedt 
8447a8e76a3SSteven Rostedt 		for_each_buffer_cpu(buffer, cpu) {
8457a8e76a3SSteven Rostedt 			cpu_buffer = buffer->buffers[cpu];
8467a8e76a3SSteven Rostedt 			rb_remove_pages(cpu_buffer, rm_pages);
8477a8e76a3SSteven Rostedt 		}
8487a8e76a3SSteven Rostedt 		goto out;
8497a8e76a3SSteven Rostedt 	}
8507a8e76a3SSteven Rostedt 
8517a8e76a3SSteven Rostedt 	/*
8527a8e76a3SSteven Rostedt 	 * This is a bit more difficult. We only want to add pages
8537a8e76a3SSteven Rostedt 	 * when we can allocate enough for all CPUs. We do this
8547a8e76a3SSteven Rostedt 	 * by allocating all the pages and storing them on a local
8557a8e76a3SSteven Rostedt 	 * link list. If we succeed in our allocation, then we
8567a8e76a3SSteven Rostedt 	 * add these pages to the cpu_buffers. Otherwise we just free
8577a8e76a3SSteven Rostedt 	 * them all and return -ENOMEM;
8587a8e76a3SSteven Rostedt 	 */
859554f786eSSteven Rostedt 	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
860554f786eSSteven Rostedt 		goto out_fail;
861f536aafcSSteven Rostedt 
8627a8e76a3SSteven Rostedt 	new_pages = nr_pages - buffer->pages;
8637a8e76a3SSteven Rostedt 
8647a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
8657a8e76a3SSteven Rostedt 		for (i = 0; i < new_pages; i++) {
866044fa782SSteven Rostedt 			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
867e4c2ce82SSteven Rostedt 						  cache_line_size()),
868e4c2ce82SSteven Rostedt 					    GFP_KERNEL, cpu_to_node(cpu));
869044fa782SSteven Rostedt 			if (!bpage)
870e4c2ce82SSteven Rostedt 				goto free_pages;
871044fa782SSteven Rostedt 			list_add(&bpage->list, &pages);
8727a8e76a3SSteven Rostedt 			addr = __get_free_page(GFP_KERNEL);
8737a8e76a3SSteven Rostedt 			if (!addr)
8747a8e76a3SSteven Rostedt 				goto free_pages;
875044fa782SSteven Rostedt 			bpage->page = (void *)addr;
876044fa782SSteven Rostedt 			rb_init_page(bpage->page);
8777a8e76a3SSteven Rostedt 		}
8787a8e76a3SSteven Rostedt 	}
8797a8e76a3SSteven Rostedt 
8807a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
8817a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
8827a8e76a3SSteven Rostedt 		rb_insert_pages(cpu_buffer, &pages, new_pages);
8837a8e76a3SSteven Rostedt 	}
8847a8e76a3SSteven Rostedt 
885554f786eSSteven Rostedt 	if (RB_WARN_ON(buffer, !list_empty(&pages)))
886554f786eSSteven Rostedt 		goto out_fail;
8877a8e76a3SSteven Rostedt 
8887a8e76a3SSteven Rostedt  out:
8897a8e76a3SSteven Rostedt 	buffer->pages = nr_pages;
890554f786eSSteven Rostedt 	put_online_cpus();
8917a8e76a3SSteven Rostedt 	mutex_unlock(&buffer->mutex);
8927a8e76a3SSteven Rostedt 
8937a8e76a3SSteven Rostedt 	return size;
8947a8e76a3SSteven Rostedt 
8957a8e76a3SSteven Rostedt  free_pages:
896044fa782SSteven Rostedt 	list_for_each_entry_safe(bpage, tmp, &pages, list) {
897044fa782SSteven Rostedt 		list_del_init(&bpage->list);
898044fa782SSteven Rostedt 		free_buffer_page(bpage);
8997a8e76a3SSteven Rostedt 	}
900554f786eSSteven Rostedt 	put_online_cpus();
901641d2f63SVegard Nossum 	mutex_unlock(&buffer->mutex);
9027a8e76a3SSteven Rostedt 	return -ENOMEM;
903554f786eSSteven Rostedt 
904554f786eSSteven Rostedt 	/*
905554f786eSSteven Rostedt 	 * Something went totally wrong, and we are too paranoid
906554f786eSSteven Rostedt 	 * to even clean up the mess.
907554f786eSSteven Rostedt 	 */
908554f786eSSteven Rostedt  out_fail:
909554f786eSSteven Rostedt 	put_online_cpus();
910554f786eSSteven Rostedt 	mutex_unlock(&buffer->mutex);
911554f786eSSteven Rostedt 	return -1;
9127a8e76a3SSteven Rostedt }
913c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_resize);
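
/*
 * Illustrative usage sketch only: allocating, resizing and freeing a buffer
 * with the API above.  The caller context, names and sizes are assumptions;
 * error handling follows the return conventions documented above.
 */
#if 0	/* illustrative usage sketch, not compiled */
static struct ring_buffer *demo_buffer;

static int __init demo_ring_buffer_init(void)
{
	/* one overwriting buffer, roughly 64 KB per cpu (rounded up to pages) */
	demo_buffer = ring_buffer_alloc(64 * 1024, RB_FL_OVERWRITE);
	if (!demo_buffer)
		return -ENOMEM;

	/* grow it later; ring_buffer_resize() returns the new size in bytes
	 * or a negative value on failure */
	if (ring_buffer_resize(demo_buffer, 128 * 1024) < 0) {
		ring_buffer_free(demo_buffer);
		demo_buffer = NULL;
		return -ENOMEM;
	}
	return 0;
}
#endif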
9147a8e76a3SSteven Rostedt 
9158789a9e7SSteven Rostedt static inline void *
916044fa782SSteven Rostedt __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
9178789a9e7SSteven Rostedt {
918044fa782SSteven Rostedt 	return bpage->data + index;
9198789a9e7SSteven Rostedt }
9208789a9e7SSteven Rostedt 
921044fa782SSteven Rostedt static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
9227a8e76a3SSteven Rostedt {
923044fa782SSteven Rostedt 	return bpage->page->data + index;
9247a8e76a3SSteven Rostedt }
9257a8e76a3SSteven Rostedt 
9267a8e76a3SSteven Rostedt static inline struct ring_buffer_event *
927d769041fSSteven Rostedt rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
9287a8e76a3SSteven Rostedt {
9296f807acdSSteven Rostedt 	return __rb_page_index(cpu_buffer->reader_page,
9306f807acdSSteven Rostedt 			       cpu_buffer->reader_page->read);
9316f807acdSSteven Rostedt }
9326f807acdSSteven Rostedt 
9336f807acdSSteven Rostedt static inline struct ring_buffer_event *
9346f807acdSSteven Rostedt rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
9356f807acdSSteven Rostedt {
9366f807acdSSteven Rostedt 	return __rb_page_index(cpu_buffer->head_page,
9376f807acdSSteven Rostedt 			       cpu_buffer->head_page->read);
9387a8e76a3SSteven Rostedt }
9397a8e76a3SSteven Rostedt 
9407a8e76a3SSteven Rostedt static inline struct ring_buffer_event *
9417a8e76a3SSteven Rostedt rb_iter_head_event(struct ring_buffer_iter *iter)
9427a8e76a3SSteven Rostedt {
9436f807acdSSteven Rostedt 	return __rb_page_index(iter->head_page, iter->head);
9447a8e76a3SSteven Rostedt }
9457a8e76a3SSteven Rostedt 
946bf41a158SSteven Rostedt static inline unsigned rb_page_write(struct buffer_page *bpage)
947bf41a158SSteven Rostedt {
948bf41a158SSteven Rostedt 	return local_read(&bpage->write);
949bf41a158SSteven Rostedt }
950bf41a158SSteven Rostedt 
951bf41a158SSteven Rostedt static inline unsigned rb_page_commit(struct buffer_page *bpage)
952bf41a158SSteven Rostedt {
953abc9b56dSSteven Rostedt 	return local_read(&bpage->page->commit);
954bf41a158SSteven Rostedt }
955bf41a158SSteven Rostedt 
956bf41a158SSteven Rostedt /* Size is determined by what has been committed */
957bf41a158SSteven Rostedt static inline unsigned rb_page_size(struct buffer_page *bpage)
958bf41a158SSteven Rostedt {
959bf41a158SSteven Rostedt 	return rb_page_commit(bpage);
960bf41a158SSteven Rostedt }
961bf41a158SSteven Rostedt 
962bf41a158SSteven Rostedt static inline unsigned
963bf41a158SSteven Rostedt rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
964bf41a158SSteven Rostedt {
965bf41a158SSteven Rostedt 	return rb_page_commit(cpu_buffer->commit_page);
966bf41a158SSteven Rostedt }
967bf41a158SSteven Rostedt 
968bf41a158SSteven Rostedt static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
969bf41a158SSteven Rostedt {
970bf41a158SSteven Rostedt 	return rb_page_commit(cpu_buffer->head_page);
971bf41a158SSteven Rostedt }
972bf41a158SSteven Rostedt 
9737a8e76a3SSteven Rostedt /*
9747a8e76a3SSteven Rostedt  * When the tail hits the head and the buffer is in overwrite mode,
9757a8e76a3SSteven Rostedt  * the head jumps to the next page and all content on the previous
9767a8e76a3SSteven Rostedt  * page is discarded. But before doing so, we update the overrun
9777a8e76a3SSteven Rostedt  * variable of the buffer.
9787a8e76a3SSteven Rostedt  */
9797a8e76a3SSteven Rostedt static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
9807a8e76a3SSteven Rostedt {
9817a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
9827a8e76a3SSteven Rostedt 	unsigned long head;
9837a8e76a3SSteven Rostedt 
9847a8e76a3SSteven Rostedt 	for (head = 0; head < rb_head_size(cpu_buffer);
9857a8e76a3SSteven Rostedt 	     head += rb_event_length(event)) {
9867a8e76a3SSteven Rostedt 
9876f807acdSSteven Rostedt 		event = __rb_page_index(cpu_buffer->head_page, head);
9883e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
9893e89c7bbSSteven Rostedt 			return;
9907a8e76a3SSteven Rostedt 		/* Only count data entries */
9917a8e76a3SSteven Rostedt 		if (event->type != RINGBUF_TYPE_DATA)
9927a8e76a3SSteven Rostedt 			continue;
9937a8e76a3SSteven Rostedt 		cpu_buffer->overrun++;
9947a8e76a3SSteven Rostedt 		cpu_buffer->entries--;
9957a8e76a3SSteven Rostedt 	}
9967a8e76a3SSteven Rostedt }
9977a8e76a3SSteven Rostedt 
9987a8e76a3SSteven Rostedt static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
999044fa782SSteven Rostedt 			       struct buffer_page **bpage)
10007a8e76a3SSteven Rostedt {
1001044fa782SSteven Rostedt 	struct list_head *p = (*bpage)->list.next;
10027a8e76a3SSteven Rostedt 
10037a8e76a3SSteven Rostedt 	if (p == &cpu_buffer->pages)
10047a8e76a3SSteven Rostedt 		p = p->next;
10057a8e76a3SSteven Rostedt 
1006044fa782SSteven Rostedt 	*bpage = list_entry(p, struct buffer_page, list);
10077a8e76a3SSteven Rostedt }
10087a8e76a3SSteven Rostedt 
1009bf41a158SSteven Rostedt static inline unsigned
1010bf41a158SSteven Rostedt rb_event_index(struct ring_buffer_event *event)
10117a8e76a3SSteven Rostedt {
1012bf41a158SSteven Rostedt 	unsigned long addr = (unsigned long)event;
1013bf41a158SSteven Rostedt 
1014bf41a158SSteven Rostedt 	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
10157a8e76a3SSteven Rostedt }
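
/*
 * Illustrative sketch only: worked numbers for the index calculation above.
 * A 4K page and a 16-byte buffer_data_page header are assumptions made just
 * to get concrete values; the arithmetic matches rb_event_index().
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;			/* assumed PAGE_SIZE          */
	unsigned long hdr = 16;				/* assumed BUF_PAGE_HDR_SIZE  */
	unsigned long buf_page_size = page_size - hdr;	/* BUF_PAGE_SIZE              */
	unsigned long addr = 0x10000 + hdr + 40;	/* event 40 bytes into the data area */

	/* offset within the page minus the page header, as in rb_event_index() */
	printf("index = %lu\n",
	       (addr & (page_size - 1)) - (page_size - buf_page_size));	/* 40 */
	return 0;
}
#endif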
10167a8e76a3SSteven Rostedt 
101734a148bfSAndrew Morton static int
1018bf41a158SSteven Rostedt rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1019bf41a158SSteven Rostedt 	     struct ring_buffer_event *event)
10207a8e76a3SSteven Rostedt {
1021bf41a158SSteven Rostedt 	unsigned long addr = (unsigned long)event;
1022bf41a158SSteven Rostedt 	unsigned long index;
1023bf41a158SSteven Rostedt 
1024bf41a158SSteven Rostedt 	index = rb_event_index(event);
1025bf41a158SSteven Rostedt 	addr &= PAGE_MASK;
1026bf41a158SSteven Rostedt 
1027bf41a158SSteven Rostedt 	return cpu_buffer->commit_page->page == (void *)addr &&
1028bf41a158SSteven Rostedt 		rb_commit_index(cpu_buffer) == index;
1029bf41a158SSteven Rostedt }
1030bf41a158SSteven Rostedt 
103134a148bfSAndrew Morton static void
1032bf41a158SSteven Rostedt rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
1033bf41a158SSteven Rostedt 		    struct ring_buffer_event *event)
1034bf41a158SSteven Rostedt {
1035bf41a158SSteven Rostedt 	unsigned long addr = (unsigned long)event;
1036bf41a158SSteven Rostedt 	unsigned long index;
1037bf41a158SSteven Rostedt 
1038bf41a158SSteven Rostedt 	index = rb_event_index(event);
1039bf41a158SSteven Rostedt 	addr &= PAGE_MASK;
1040bf41a158SSteven Rostedt 
1041bf41a158SSteven Rostedt 	while (cpu_buffer->commit_page->page != (void *)addr) {
10423e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer,
10433e89c7bbSSteven Rostedt 			  cpu_buffer->commit_page == cpu_buffer->tail_page))
10443e89c7bbSSteven Rostedt 			return;
1045abc9b56dSSteven Rostedt 		cpu_buffer->commit_page->page->commit =
1046bf41a158SSteven Rostedt 			cpu_buffer->commit_page->write;
1047bf41a158SSteven Rostedt 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1048abc9b56dSSteven Rostedt 		cpu_buffer->write_stamp =
1049abc9b56dSSteven Rostedt 			cpu_buffer->commit_page->page->time_stamp;
1050bf41a158SSteven Rostedt 	}
1051bf41a158SSteven Rostedt 
1052bf41a158SSteven Rostedt 	/* Now set the commit to the event's index */
1053abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->commit_page->page->commit, index);
1054bf41a158SSteven Rostedt }
1055bf41a158SSteven Rostedt 
105634a148bfSAndrew Morton static void
1057bf41a158SSteven Rostedt rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1058bf41a158SSteven Rostedt {
1059bf41a158SSteven Rostedt 	/*
1060bf41a158SSteven Rostedt 	 * We only race with interrupts and NMIs on this CPU.
1061bf41a158SSteven Rostedt 	 * If we own the commit event, then we can commit
1062bf41a158SSteven Rostedt 	 * all others that interrupted us, since the interruptions
1063bf41a158SSteven Rostedt 	 * are in stack format (they finish before they come
1064bf41a158SSteven Rostedt 	 * back to us). This allows us to do a simple loop to
1065bf41a158SSteven Rostedt 	 * assign the commit to the tail.
1066bf41a158SSteven Rostedt 	 */
1067a8ccf1d6SSteven Rostedt  again:
1068bf41a158SSteven Rostedt 	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1069abc9b56dSSteven Rostedt 		cpu_buffer->commit_page->page->commit =
1070bf41a158SSteven Rostedt 			cpu_buffer->commit_page->write;
1071bf41a158SSteven Rostedt 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1072abc9b56dSSteven Rostedt 		cpu_buffer->write_stamp =
1073abc9b56dSSteven Rostedt 			cpu_buffer->commit_page->page->time_stamp;
1074bf41a158SSteven Rostedt 		/* add barrier to keep gcc from optimizing too much */
1075bf41a158SSteven Rostedt 		barrier();
1076bf41a158SSteven Rostedt 	}
1077bf41a158SSteven Rostedt 	while (rb_commit_index(cpu_buffer) !=
1078bf41a158SSteven Rostedt 	       rb_page_write(cpu_buffer->commit_page)) {
1079abc9b56dSSteven Rostedt 		cpu_buffer->commit_page->page->commit =
1080bf41a158SSteven Rostedt 			cpu_buffer->commit_page->write;
1081bf41a158SSteven Rostedt 		barrier();
1082bf41a158SSteven Rostedt 	}
1083a8ccf1d6SSteven Rostedt 
1084a8ccf1d6SSteven Rostedt 	/* again, keep gcc from optimizing */
1085a8ccf1d6SSteven Rostedt 	barrier();
1086a8ccf1d6SSteven Rostedt 
1087a8ccf1d6SSteven Rostedt 	/*
1088a8ccf1d6SSteven Rostedt 	 * If an interrupt came in just after the first while loop
1089a8ccf1d6SSteven Rostedt 	 * and pushed the tail page forward, we will be left with
1090a8ccf1d6SSteven Rostedt 	 * a dangling commit that will never go forward.
1091a8ccf1d6SSteven Rostedt 	 */
1092a8ccf1d6SSteven Rostedt 	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1093a8ccf1d6SSteven Rostedt 		goto again;
10947a8e76a3SSteven Rostedt }
10957a8e76a3SSteven Rostedt 
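/*
 * Editor's illustration (not part of the original source): because
 * interrupts nest like a stack, any writer that interrupted us has
 * already finished by the time the loops above run.  For example:
 *
 *	writer A reserves space on page X (A owns the commit)
 *	  -> IRQ writer B reserves, crosses into page X+1, and returns
 *	A commits: the first loop advances commit_page from X to X+1,
 *	the second loop moves the commit index up to B's write index.
 *
 * The final re-check (goto again) handles an interrupt arriving
 * between the loops and pushing the tail page forward yet again.
 */
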
1096d769041fSSteven Rostedt static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
10977a8e76a3SSteven Rostedt {
1098abc9b56dSSteven Rostedt 	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
10996f807acdSSteven Rostedt 	cpu_buffer->reader_page->read = 0;
1100d769041fSSteven Rostedt }
1101d769041fSSteven Rostedt 
110234a148bfSAndrew Morton static void rb_inc_iter(struct ring_buffer_iter *iter)
1103d769041fSSteven Rostedt {
1104d769041fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1105d769041fSSteven Rostedt 
1106d769041fSSteven Rostedt 	/*
1107d769041fSSteven Rostedt 	 * The iterator could be on the reader page (it starts there).
1108d769041fSSteven Rostedt 	 * But the head could have moved, since the reader was
1109d769041fSSteven Rostedt 	 * found. Check for this case and assign the iterator
1110d769041fSSteven Rostedt 	 * to the head page instead of next.
1111d769041fSSteven Rostedt 	 */
1112d769041fSSteven Rostedt 	if (iter->head_page == cpu_buffer->reader_page)
1113d769041fSSteven Rostedt 		iter->head_page = cpu_buffer->head_page;
1114d769041fSSteven Rostedt 	else
1115d769041fSSteven Rostedt 		rb_inc_page(cpu_buffer, &iter->head_page);
1116d769041fSSteven Rostedt 
1117abc9b56dSSteven Rostedt 	iter->read_stamp = iter->head_page->page->time_stamp;
11187a8e76a3SSteven Rostedt 	iter->head = 0;
11197a8e76a3SSteven Rostedt }
11207a8e76a3SSteven Rostedt 
11217a8e76a3SSteven Rostedt /**
11227a8e76a3SSteven Rostedt  * rb_update_event - update event type and data
11237a8e76a3SSteven Rostedt  * @event: the event to update
11247a8e76a3SSteven Rostedt  * @type: the type of event
11257a8e76a3SSteven Rostedt  * @length: the size of the event field in the ring buffer
11267a8e76a3SSteven Rostedt  *
11277a8e76a3SSteven Rostedt  * Update the type and data fields of the event. The length
11287a8e76a3SSteven Rostedt  * is the actual size that is written to the ring buffer,
11297a8e76a3SSteven Rostedt  * and with this, we can determine what to place into the
11307a8e76a3SSteven Rostedt  * data field.
11317a8e76a3SSteven Rostedt  */
113234a148bfSAndrew Morton static void
11337a8e76a3SSteven Rostedt rb_update_event(struct ring_buffer_event *event,
11347a8e76a3SSteven Rostedt 			 unsigned type, unsigned length)
11357a8e76a3SSteven Rostedt {
11367a8e76a3SSteven Rostedt 	event->type = type;
11377a8e76a3SSteven Rostedt 
11387a8e76a3SSteven Rostedt 	switch (type) {
11397a8e76a3SSteven Rostedt 
11407a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
11417a8e76a3SSteven Rostedt 		break;
11427a8e76a3SSteven Rostedt 
11437a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
114467d34724SAndrew Morton 		event->len = DIV_ROUND_UP(RB_LEN_TIME_EXTEND, RB_ALIGNMENT);
11457a8e76a3SSteven Rostedt 		break;
11467a8e76a3SSteven Rostedt 
11477a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
114867d34724SAndrew Morton 		event->len = DIV_ROUND_UP(RB_LEN_TIME_STAMP, RB_ALIGNMENT);
11497a8e76a3SSteven Rostedt 		break;
11507a8e76a3SSteven Rostedt 
11517a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
11527a8e76a3SSteven Rostedt 		length -= RB_EVNT_HDR_SIZE;
11537a8e76a3SSteven Rostedt 		if (length > RB_MAX_SMALL_DATA) {
11547a8e76a3SSteven Rostedt 			event->len = 0;
11557a8e76a3SSteven Rostedt 			event->array[0] = length;
11567a8e76a3SSteven Rostedt 		} else
115767d34724SAndrew Morton 			event->len = DIV_ROUND_UP(length, RB_ALIGNMENT);
11587a8e76a3SSteven Rostedt 		break;
11597a8e76a3SSteven Rostedt 	default:
11607a8e76a3SSteven Rostedt 		BUG();
11617a8e76a3SSteven Rostedt 	}
11627a8e76a3SSteven Rostedt }
11637a8e76a3SSteven Rostedt 
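/*
 * Editor's note (illustrative, not part of the original source): for
 * a DATA event the 3-bit len field counts RB_ALIGNMENT-sized chunks
 * of payload, while a payload larger than RB_MAX_SMALL_DATA is
 * flagged with len == 0 and its byte count stored in array[0].
 * A sketch of the reader-side decoding:
 *
 *	if (event->len)
 *		size = event->len * RB_ALIGNMENT;
 *	else
 *		size = event->array[0];
 *
 * rb_event_length(), earlier in this file, performs the real
 * decoding, including the event header.
 */
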
116434a148bfSAndrew Morton static unsigned rb_calculate_event_length(unsigned length)
11657a8e76a3SSteven Rostedt {
11667a8e76a3SSteven Rostedt 	struct ring_buffer_event event; /* Used only for sizeof array */
11677a8e76a3SSteven Rostedt 
11687a8e76a3SSteven Rostedt 	/* zero length can cause confusion */
11697a8e76a3SSteven Rostedt 	if (!length)
11707a8e76a3SSteven Rostedt 		length = 1;
11717a8e76a3SSteven Rostedt 
11727a8e76a3SSteven Rostedt 	if (length > RB_MAX_SMALL_DATA)
11737a8e76a3SSteven Rostedt 		length += sizeof(event.array[0]);
11747a8e76a3SSteven Rostedt 
11757a8e76a3SSteven Rostedt 	length += RB_EVNT_HDR_SIZE;
11767a8e76a3SSteven Rostedt 	length = ALIGN(length, RB_ALIGNMENT);
11777a8e76a3SSteven Rostedt 
11787a8e76a3SSteven Rostedt 	return length;
11797a8e76a3SSteven Rostedt }
11807a8e76a3SSteven Rostedt 
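/*
 * Editor's worked example (assumes a 4-byte event header and
 * RB_ALIGNMENT == 4, as implied by the 2+3+27 bit layout documented
 * at the top of this file), for a 10-byte payload that fits the
 * small-data encoding:
 *
 *	rb_calculate_event_length(10)
 *		-> 10 + RB_EVNT_HDR_SIZE = 14
 *		-> ALIGN(14, RB_ALIGNMENT) = 16 bytes used on the page
 *
 * A payload larger than RB_MAX_SMALL_DATA additionally pays
 * sizeof(event.array[0]) for the explicit length word.
 */
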
11817a8e76a3SSteven Rostedt static struct ring_buffer_event *
11827a8e76a3SSteven Rostedt __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
11837a8e76a3SSteven Rostedt 		  unsigned type, unsigned long length, u64 *ts)
11847a8e76a3SSteven Rostedt {
118598db8df7SSteven Rostedt 	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
1186bf41a158SSteven Rostedt 	unsigned long tail, write;
11877a8e76a3SSteven Rostedt 	struct ring_buffer *buffer = cpu_buffer->buffer;
11887a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
1189bf41a158SSteven Rostedt 	unsigned long flags;
119078d904b4SSteven Rostedt 	bool lock_taken = false;
11917a8e76a3SSteven Rostedt 
119298db8df7SSteven Rostedt 	commit_page = cpu_buffer->commit_page;
119398db8df7SSteven Rostedt 	/* we just need to protect against interrupts */
119498db8df7SSteven Rostedt 	barrier();
11957a8e76a3SSteven Rostedt 	tail_page = cpu_buffer->tail_page;
1196bf41a158SSteven Rostedt 	write = local_add_return(length, &tail_page->write);
1197bf41a158SSteven Rostedt 	tail = write - length;
11987a8e76a3SSteven Rostedt 
1199bf41a158SSteven Rostedt 	/* See if we shot past the end of this buffer page */
1200bf41a158SSteven Rostedt 	if (write > BUF_PAGE_SIZE) {
12017a8e76a3SSteven Rostedt 		struct buffer_page *next_page = tail_page;
12027a8e76a3SSteven Rostedt 
12033e03fb7fSSteven Rostedt 		local_irq_save(flags);
120478d904b4SSteven Rostedt 		/*
1205a81bd80aSSteven Rostedt 		 * Since the write to the buffer is still not
1206a81bd80aSSteven Rostedt 		 * fully lockless, we must be careful with NMIs.
1207a81bd80aSSteven Rostedt 		 * The locks in the writers are taken when a write
1208a81bd80aSSteven Rostedt 		 * crosses to a new page. The locks protect against
1209a81bd80aSSteven Rostedt 		 * races with the readers (this will soon be fixed
1210a81bd80aSSteven Rostedt 		 * with a lockless solution).
1211a81bd80aSSteven Rostedt 		 *
1212a81bd80aSSteven Rostedt 		 * Because we cannot protect against NMIs, and we
1213a81bd80aSSteven Rostedt 		 * want to keep traces reentrant, we need to manage
1214a81bd80aSSteven Rostedt 		 * what happens when we are in an NMI.
1215a81bd80aSSteven Rostedt 		 *
121678d904b4SSteven Rostedt 		 * NMIs can happen after we take the lock.
121778d904b4SSteven Rostedt 		 * If we are in an NMI, only take the lock
121878d904b4SSteven Rostedt 		 * if it is not already taken. Otherwise
121978d904b4SSteven Rostedt 		 * simply fail.
122078d904b4SSteven Rostedt 		 */
1221a81bd80aSSteven Rostedt 		if (unlikely(in_nmi())) {
122278d904b4SSteven Rostedt 			if (!__raw_spin_trylock(&cpu_buffer->lock))
122345141d46SSteven Rostedt 				goto out_reset;
122478d904b4SSteven Rostedt 		} else
12253e03fb7fSSteven Rostedt 			__raw_spin_lock(&cpu_buffer->lock);
1226bf41a158SSteven Rostedt 
122778d904b4SSteven Rostedt 		lock_taken = true;
122878d904b4SSteven Rostedt 
12297a8e76a3SSteven Rostedt 		rb_inc_page(cpu_buffer, &next_page);
12307a8e76a3SSteven Rostedt 
1231d769041fSSteven Rostedt 		head_page = cpu_buffer->head_page;
1232d769041fSSteven Rostedt 		reader_page = cpu_buffer->reader_page;
1233d769041fSSteven Rostedt 
1234d769041fSSteven Rostedt 		/* we grabbed the lock before incrementing */
12353e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
123645141d46SSteven Rostedt 			goto out_reset;
1237bf41a158SSteven Rostedt 
1238bf41a158SSteven Rostedt 		/*
1239bf41a158SSteven Rostedt 		 * If for some reason, we had an interrupt storm that made
1240bf41a158SSteven Rostedt 		 * it all the way around the buffer, bail, and warn
1241bf41a158SSteven Rostedt 		 * about it.
1242bf41a158SSteven Rostedt 		 */
124398db8df7SSteven Rostedt 		if (unlikely(next_page == commit_page)) {
1244bf41a158SSteven Rostedt 			WARN_ON_ONCE(1);
124545141d46SSteven Rostedt 			goto out_reset;
1246bf41a158SSteven Rostedt 		}
1247d769041fSSteven Rostedt 
12487a8e76a3SSteven Rostedt 		if (next_page == head_page) {
12496f3b3440SLai Jiangshan 			if (!(buffer->flags & RB_FL_OVERWRITE))
125045141d46SSteven Rostedt 				goto out_reset;
12517a8e76a3SSteven Rostedt 
1252bf41a158SSteven Rostedt 			/* tail_page has not moved yet? */
1253bf41a158SSteven Rostedt 			if (tail_page == cpu_buffer->tail_page) {
12547a8e76a3SSteven Rostedt 				/* count overflows */
12557a8e76a3SSteven Rostedt 				rb_update_overflow(cpu_buffer);
12567a8e76a3SSteven Rostedt 
12577a8e76a3SSteven Rostedt 				rb_inc_page(cpu_buffer, &head_page);
12587a8e76a3SSteven Rostedt 				cpu_buffer->head_page = head_page;
1259bf41a158SSteven Rostedt 				cpu_buffer->head_page->read = 0;
1260bf41a158SSteven Rostedt 			}
12617a8e76a3SSteven Rostedt 		}
12627a8e76a3SSteven Rostedt 
1263bf41a158SSteven Rostedt 		/*
1264bf41a158SSteven Rostedt 		 * If the tail page is still the same as what we think
1265bf41a158SSteven Rostedt 		 * it is, then it is up to us to update the tail
1266bf41a158SSteven Rostedt 		 * pointer.
1267bf41a158SSteven Rostedt 		 */
1268bf41a158SSteven Rostedt 		if (tail_page == cpu_buffer->tail_page) {
1269bf41a158SSteven Rostedt 			local_set(&next_page->write, 0);
1270abc9b56dSSteven Rostedt 			local_set(&next_page->page->commit, 0);
1271bf41a158SSteven Rostedt 			cpu_buffer->tail_page = next_page;
1272bf41a158SSteven Rostedt 
1273bf41a158SSteven Rostedt 			/* reread the time stamp */
127437886f6aSSteven Rostedt 			*ts = ring_buffer_time_stamp(buffer, cpu_buffer->cpu);
1275abc9b56dSSteven Rostedt 			cpu_buffer->tail_page->page->time_stamp = *ts;
1276bf41a158SSteven Rostedt 		}
1277bf41a158SSteven Rostedt 
1278bf41a158SSteven Rostedt 		/*
1279bf41a158SSteven Rostedt 		 * The actual tail page has moved forward.
1280bf41a158SSteven Rostedt 		 */
1281bf41a158SSteven Rostedt 		if (tail < BUF_PAGE_SIZE) {
1282bf41a158SSteven Rostedt 			/* Mark the rest of the page with padding */
12836f807acdSSteven Rostedt 			event = __rb_page_index(tail_page, tail);
12842d622719STom Zanussi 			rb_event_set_padding(event);
12857a8e76a3SSteven Rostedt 		}
12867a8e76a3SSteven Rostedt 
1287bf41a158SSteven Rostedt 		if (tail <= BUF_PAGE_SIZE)
1288bf41a158SSteven Rostedt 			/* Set the write back to the previous setting */
1289bf41a158SSteven Rostedt 			local_set(&tail_page->write, tail);
1290bf41a158SSteven Rostedt 
1291bf41a158SSteven Rostedt 		/*
1292bf41a158SSteven Rostedt 		 * If the entry that failed to fit was the commit entry,
1293bf41a158SSteven Rostedt 		 * move the commit pointer forward to match as well.
1294bf41a158SSteven Rostedt 		 */
1295bf41a158SSteven Rostedt 		if (tail_page == cpu_buffer->commit_page &&
1296bf41a158SSteven Rostedt 		    tail == rb_commit_index(cpu_buffer)) {
1297bf41a158SSteven Rostedt 			rb_set_commit_to_write(cpu_buffer);
12987a8e76a3SSteven Rostedt 		}
12997a8e76a3SSteven Rostedt 
13003e03fb7fSSteven Rostedt 		__raw_spin_unlock(&cpu_buffer->lock);
13013e03fb7fSSteven Rostedt 		local_irq_restore(flags);
1302bf41a158SSteven Rostedt 
1303bf41a158SSteven Rostedt 		/* fail and let the caller try again */
1304bf41a158SSteven Rostedt 		return ERR_PTR(-EAGAIN);
1305bf41a158SSteven Rostedt 	}
1306bf41a158SSteven Rostedt 
1307bf41a158SSteven Rostedt 	/* We reserved something on the buffer */
1308bf41a158SSteven Rostedt 
13093e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
13103e89c7bbSSteven Rostedt 		return NULL;
13117a8e76a3SSteven Rostedt 
13126f807acdSSteven Rostedt 	event = __rb_page_index(tail_page, tail);
13137a8e76a3SSteven Rostedt 	rb_update_event(event, type, length);
13147a8e76a3SSteven Rostedt 
1315bf41a158SSteven Rostedt 	/*
1316bf41a158SSteven Rostedt 	 * If this is a commit and the tail is zero, then update
1317bf41a158SSteven Rostedt 	 * this page's time stamp.
1318bf41a158SSteven Rostedt 	 */
1319bf41a158SSteven Rostedt 	if (!tail && rb_is_commit(cpu_buffer, event))
1320abc9b56dSSteven Rostedt 		cpu_buffer->commit_page->page->time_stamp = *ts;
1321bf41a158SSteven Rostedt 
13227a8e76a3SSteven Rostedt 	return event;
1323bf41a158SSteven Rostedt 
132445141d46SSteven Rostedt  out_reset:
13256f3b3440SLai Jiangshan 	/* reset write */
13266f3b3440SLai Jiangshan 	if (tail <= BUF_PAGE_SIZE)
13276f3b3440SLai Jiangshan 		local_set(&tail_page->write, tail);
13286f3b3440SLai Jiangshan 
132978d904b4SSteven Rostedt 	if (likely(lock_taken))
13303e03fb7fSSteven Rostedt 		__raw_spin_unlock(&cpu_buffer->lock);
13313e03fb7fSSteven Rostedt 	local_irq_restore(flags);
1332bf41a158SSteven Rostedt 	return NULL;
13337a8e76a3SSteven Rostedt }
13347a8e76a3SSteven Rostedt 
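/*
 * Editor's note on the reservation arithmetic above (illustrative
 * numbers): local_add_return() hands each nested writer a disjoint
 * [tail, write) range on the page.  Suppose tail_page->write was
 * BUF_PAGE_SIZE - 8 and we reserved 32 bytes:
 *
 *	write = BUF_PAGE_SIZE + 24	(> BUF_PAGE_SIZE: page overrun)
 *	tail  = BUF_PAGE_SIZE - 8	(< BUF_PAGE_SIZE: the last 8
 *					 bytes are filled with padding)
 *
 * The writer then advances to the next page and returns
 * ERR_PTR(-EAGAIN) so the caller retries the reservation there.
 */
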
13357a8e76a3SSteven Rostedt static int
13367a8e76a3SSteven Rostedt rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
13377a8e76a3SSteven Rostedt 		  u64 *ts, u64 *delta)
13387a8e76a3SSteven Rostedt {
13397a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
13407a8e76a3SSteven Rostedt 	static int once;
1341bf41a158SSteven Rostedt 	int ret;
13427a8e76a3SSteven Rostedt 
13437a8e76a3SSteven Rostedt 	if (unlikely(*delta > (1ULL << 59) && !once++)) {
13447a8e76a3SSteven Rostedt 		printk(KERN_WARNING "Delta way too big! %llu"
13457a8e76a3SSteven Rostedt 		       " ts=%llu write stamp = %llu\n",
1346e2862c94SStephen Rothwell 		       (unsigned long long)*delta,
1347e2862c94SStephen Rothwell 		       (unsigned long long)*ts,
1348e2862c94SStephen Rothwell 		       (unsigned long long)cpu_buffer->write_stamp);
13497a8e76a3SSteven Rostedt 		WARN_ON(1);
13507a8e76a3SSteven Rostedt 	}
13517a8e76a3SSteven Rostedt 
13527a8e76a3SSteven Rostedt 	/*
13537a8e76a3SSteven Rostedt 	 * The delta is too big, we need to add a
13547a8e76a3SSteven Rostedt 	 * new timestamp.
13557a8e76a3SSteven Rostedt 	 */
13567a8e76a3SSteven Rostedt 	event = __rb_reserve_next(cpu_buffer,
13577a8e76a3SSteven Rostedt 				  RINGBUF_TYPE_TIME_EXTEND,
13587a8e76a3SSteven Rostedt 				  RB_LEN_TIME_EXTEND,
13597a8e76a3SSteven Rostedt 				  ts);
13607a8e76a3SSteven Rostedt 	if (!event)
1361bf41a158SSteven Rostedt 		return -EBUSY;
13627a8e76a3SSteven Rostedt 
1363bf41a158SSteven Rostedt 	if (PTR_ERR(event) == -EAGAIN)
1364bf41a158SSteven Rostedt 		return -EAGAIN;
1365bf41a158SSteven Rostedt 
1366bf41a158SSteven Rostedt 	/* Only a committed time event can update the write stamp */
1367bf41a158SSteven Rostedt 	if (rb_is_commit(cpu_buffer, event)) {
1368bf41a158SSteven Rostedt 		/*
1369bf41a158SSteven Rostedt 		 * If this is the first on the page, then we need to
1370bf41a158SSteven Rostedt 		 * update the page itself, and just put in a zero.
1371bf41a158SSteven Rostedt 		 */
1372bf41a158SSteven Rostedt 		if (rb_event_index(event)) {
13737a8e76a3SSteven Rostedt 			event->time_delta = *delta & TS_MASK;
13747a8e76a3SSteven Rostedt 			event->array[0] = *delta >> TS_SHIFT;
1375bf41a158SSteven Rostedt 		} else {
1376abc9b56dSSteven Rostedt 			cpu_buffer->commit_page->page->time_stamp = *ts;
1377bf41a158SSteven Rostedt 			event->time_delta = 0;
1378bf41a158SSteven Rostedt 			event->array[0] = 0;
1379bf41a158SSteven Rostedt 		}
13807a8e76a3SSteven Rostedt 		cpu_buffer->write_stamp = *ts;
1381bf41a158SSteven Rostedt 		/* let the caller know this was the commit */
1382bf41a158SSteven Rostedt 		ret = 1;
1383bf41a158SSteven Rostedt 	} else {
1384bf41a158SSteven Rostedt 		/* Darn, this is just wasted space */
1385bf41a158SSteven Rostedt 		event->time_delta = 0;
1386bf41a158SSteven Rostedt 		event->array[0] = 0;
1387bf41a158SSteven Rostedt 		ret = 0;
13887a8e76a3SSteven Rostedt 	}
13897a8e76a3SSteven Rostedt 
1390bf41a158SSteven Rostedt 	*delta = 0;
1391bf41a158SSteven Rostedt 
1392bf41a158SSteven Rostedt 	return ret;
13937a8e76a3SSteven Rostedt }
13947a8e76a3SSteven Rostedt 
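/*
 * Editor's worked example (assumes TS_SHIFT == 27 and
 * TS_MASK == (1 << 27) - 1, matching the 27-bit time_delta field
 * documented at the top of this file).  A delta of 0x123456789 is
 * stored in a time-extend event as
 *
 *	event->time_delta = 0x123456789 & TS_MASK   = 0x3456789
 *	event->array[0]   = 0x123456789 >> TS_SHIFT = 0x24
 *
 * and rb_update_read_stamp() below reassembles it as
 * (array[0] << TS_SHIFT) + time_delta = 0x123456789.
 */
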
13957a8e76a3SSteven Rostedt static struct ring_buffer_event *
13967a8e76a3SSteven Rostedt rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
13977a8e76a3SSteven Rostedt 		      unsigned type, unsigned long length)
13987a8e76a3SSteven Rostedt {
13997a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
14007a8e76a3SSteven Rostedt 	u64 ts, delta;
1401bf41a158SSteven Rostedt 	int commit = 0;
1402818e3dd3SSteven Rostedt 	int nr_loops = 0;
14037a8e76a3SSteven Rostedt 
1404bf41a158SSteven Rostedt  again:
1405818e3dd3SSteven Rostedt 	/*
1406818e3dd3SSteven Rostedt 	 * We allow for interrupts to reenter here and do a trace.
1407818e3dd3SSteven Rostedt 	 * If one does, it will cause this original code to loop
1408818e3dd3SSteven Rostedt 	 * back here. Even with heavy interrupts happening, this
1409818e3dd3SSteven Rostedt 	 * should only happen a few times in a row. If this happens
1410818e3dd3SSteven Rostedt 	 * 1000 times in a row, there must be either an interrupt
1411818e3dd3SSteven Rostedt 	 * storm or we have something buggy.
1412818e3dd3SSteven Rostedt 	 * Bail!
1413818e3dd3SSteven Rostedt 	 */
14143e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1415818e3dd3SSteven Rostedt 		return NULL;
1416818e3dd3SSteven Rostedt 
141737886f6aSSteven Rostedt 	ts = ring_buffer_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
14187a8e76a3SSteven Rostedt 
1419bf41a158SSteven Rostedt 	/*
1420bf41a158SSteven Rostedt 	 * Only the first commit can update the timestamp.
1421bf41a158SSteven Rostedt 	 * Yes there is a race here. If an interrupt comes in
1422bf41a158SSteven Rostedt 	 * just after the conditional and it traces too, then it
1423bf41a158SSteven Rostedt 	 * will also check the deltas. More than one timestamp may
1424bf41a158SSteven Rostedt 	 * also be made. But only the entry that did the actual
1425bf41a158SSteven Rostedt 	 * commit will be something other than zero.
1426bf41a158SSteven Rostedt 	 */
1427bf41a158SSteven Rostedt 	if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1428bf41a158SSteven Rostedt 	    rb_page_write(cpu_buffer->tail_page) ==
1429bf41a158SSteven Rostedt 	    rb_commit_index(cpu_buffer)) {
1430bf41a158SSteven Rostedt 
14317a8e76a3SSteven Rostedt 		delta = ts - cpu_buffer->write_stamp;
14327a8e76a3SSteven Rostedt 
1433bf41a158SSteven Rostedt 		/* make sure this delta is calculated here */
1434bf41a158SSteven Rostedt 		barrier();
14357a8e76a3SSteven Rostedt 
1436bf41a158SSteven Rostedt 		/* Did the write stamp get updated already? */
1437bf41a158SSteven Rostedt 		if (unlikely(ts < cpu_buffer->write_stamp))
14384143c5cbSSteven Rostedt 			delta = 0;
1439bf41a158SSteven Rostedt 
1440bf41a158SSteven Rostedt 		if (test_time_stamp(delta)) {
1441bf41a158SSteven Rostedt 
1442bf41a158SSteven Rostedt 			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1443bf41a158SSteven Rostedt 
1444bf41a158SSteven Rostedt 			if (commit == -EBUSY)
14457a8e76a3SSteven Rostedt 				return NULL;
1446bf41a158SSteven Rostedt 
1447bf41a158SSteven Rostedt 			if (commit == -EAGAIN)
1448bf41a158SSteven Rostedt 				goto again;
1449bf41a158SSteven Rostedt 
1450bf41a158SSteven Rostedt 			RB_WARN_ON(cpu_buffer, commit < 0);
14517a8e76a3SSteven Rostedt 		}
1452bf41a158SSteven Rostedt 	} else
1453bf41a158SSteven Rostedt 		/* Non commits have zero deltas */
14547a8e76a3SSteven Rostedt 		delta = 0;
14557a8e76a3SSteven Rostedt 
14567a8e76a3SSteven Rostedt 	event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1457bf41a158SSteven Rostedt 	if (PTR_ERR(event) == -EAGAIN)
1458bf41a158SSteven Rostedt 		goto again;
14597a8e76a3SSteven Rostedt 
1460bf41a158SSteven Rostedt 	if (!event) {
1461bf41a158SSteven Rostedt 		if (unlikely(commit))
1462bf41a158SSteven Rostedt 			/*
1463bf41a158SSteven Rostedt 			 * Ouch! We needed a timestamp and it was committed. But
1464bf41a158SSteven Rostedt 			 * we didn't get our event reserved.
1465bf41a158SSteven Rostedt 			 */
1466bf41a158SSteven Rostedt 			rb_set_commit_to_write(cpu_buffer);
1467bf41a158SSteven Rostedt 		return NULL;
1468bf41a158SSteven Rostedt 	}
1469bf41a158SSteven Rostedt 
1470bf41a158SSteven Rostedt 	/*
1471bf41a158SSteven Rostedt 	 * If the timestamp was committed, make the commit our entry
1472bf41a158SSteven Rostedt 	 * now so that we will update it when needed.
1473bf41a158SSteven Rostedt 	 */
1474bf41a158SSteven Rostedt 	if (commit)
1475bf41a158SSteven Rostedt 		rb_set_commit_event(cpu_buffer, event);
1476bf41a158SSteven Rostedt 	else if (!rb_is_commit(cpu_buffer, event))
14777a8e76a3SSteven Rostedt 		delta = 0;
14787a8e76a3SSteven Rostedt 
14797a8e76a3SSteven Rostedt 	event->time_delta = delta;
14807a8e76a3SSteven Rostedt 
14817a8e76a3SSteven Rostedt 	return event;
14827a8e76a3SSteven Rostedt }
14837a8e76a3SSteven Rostedt 
1484*aa18efb2SSteven Rostedt #define TRACE_RECURSIVE_DEPTH 16
1485261842b7SSteven Rostedt 
1486261842b7SSteven Rostedt static int trace_recursive_lock(void)
1487261842b7SSteven Rostedt {
1488*aa18efb2SSteven Rostedt 	current->trace_recursion++;
1489261842b7SSteven Rostedt 
1490*aa18efb2SSteven Rostedt 	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
1491*aa18efb2SSteven Rostedt 		return 0;
1492261842b7SSteven Rostedt 
1493261842b7SSteven Rostedt 	/* Disable all tracing before we do anything else */
1494261842b7SSteven Rostedt 	tracing_off_permanent();
1495e057a5e5SFrederic Weisbecker 
1496*aa18efb2SSteven Rostedt 	printk_once(KERN_WARNING "Tracing recursion: depth[%d]:"
1497e057a5e5SFrederic Weisbecker 		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
1498*aa18efb2SSteven Rostedt 		    current->trace_recursion,
1499e057a5e5SFrederic Weisbecker 		    hardirq_count() >> HARDIRQ_SHIFT,
1500e057a5e5SFrederic Weisbecker 		    softirq_count() >> SOFTIRQ_SHIFT,
1501e057a5e5SFrederic Weisbecker 		    in_nmi());
1502e057a5e5SFrederic Weisbecker 
1503261842b7SSteven Rostedt 	WARN_ON_ONCE(1);
1504261842b7SSteven Rostedt 	return -1;
1505261842b7SSteven Rostedt }
1506261842b7SSteven Rostedt 
1507261842b7SSteven Rostedt static void trace_recursive_unlock(void)
1508261842b7SSteven Rostedt {
1509*aa18efb2SSteven Rostedt 	WARN_ON_ONCE(!current->trace_recursion);
1510261842b7SSteven Rostedt 
1511*aa18efb2SSteven Rostedt 	current->trace_recursion--;
1512261842b7SSteven Rostedt }
1513261842b7SSteven Rostedt 
1514bf41a158SSteven Rostedt static DEFINE_PER_CPU(int, rb_need_resched);
1515bf41a158SSteven Rostedt 
15167a8e76a3SSteven Rostedt /**
15177a8e76a3SSteven Rostedt  * ring_buffer_lock_reserve - reserve a part of the buffer
15187a8e76a3SSteven Rostedt  * @buffer: the ring buffer to reserve from
15197a8e76a3SSteven Rostedt  * @length: the length of the data to reserve (excluding event header)
15207a8e76a3SSteven Rostedt  *
15217a8e76a3SSteven Rostedt  * Returns a reserved event on the ring buffer to copy data directly into.
15227a8e76a3SSteven Rostedt  * The user of this interface will need to get the body to write into
15237a8e76a3SSteven Rostedt  * and can use the ring_buffer_event_data() interface.
15247a8e76a3SSteven Rostedt  *
15257a8e76a3SSteven Rostedt  * The length is the length of the data needed, not the event length
15267a8e76a3SSteven Rostedt  * which also includes the event header.
15277a8e76a3SSteven Rostedt  *
15287a8e76a3SSteven Rostedt  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
15297a8e76a3SSteven Rostedt  * If NULL is returned, then nothing has been allocated or locked.
15307a8e76a3SSteven Rostedt  */
15317a8e76a3SSteven Rostedt struct ring_buffer_event *
15320a987751SArnaldo Carvalho de Melo ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
15337a8e76a3SSteven Rostedt {
15347a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
15357a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
1536bf41a158SSteven Rostedt 	int cpu, resched;
15377a8e76a3SSteven Rostedt 
1538033601a3SSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
1539a3583244SSteven Rostedt 		return NULL;
1540a3583244SSteven Rostedt 
15417a8e76a3SSteven Rostedt 	if (atomic_read(&buffer->record_disabled))
15427a8e76a3SSteven Rostedt 		return NULL;
15437a8e76a3SSteven Rostedt 
1544bf41a158SSteven Rostedt 	/* If we are tracing schedule, we don't want to recurse */
1545182e9f5fSSteven Rostedt 	resched = ftrace_preempt_disable();
1546bf41a158SSteven Rostedt 
1547261842b7SSteven Rostedt 	if (trace_recursive_lock())
1548261842b7SSteven Rostedt 		goto out_nocheck;
1549261842b7SSteven Rostedt 
15507a8e76a3SSteven Rostedt 	cpu = raw_smp_processor_id();
15517a8e76a3SSteven Rostedt 
15529e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1553d769041fSSteven Rostedt 		goto out;
15547a8e76a3SSteven Rostedt 
15557a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
15567a8e76a3SSteven Rostedt 
15577a8e76a3SSteven Rostedt 	if (atomic_read(&cpu_buffer->record_disabled))
1558d769041fSSteven Rostedt 		goto out;
15597a8e76a3SSteven Rostedt 
15607a8e76a3SSteven Rostedt 	length = rb_calculate_event_length(length);
15617a8e76a3SSteven Rostedt 	if (length > BUF_PAGE_SIZE)
1562bf41a158SSteven Rostedt 		goto out;
15637a8e76a3SSteven Rostedt 
15647a8e76a3SSteven Rostedt 	event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
15657a8e76a3SSteven Rostedt 	if (!event)
1566d769041fSSteven Rostedt 		goto out;
15677a8e76a3SSteven Rostedt 
1568bf41a158SSteven Rostedt 	/*
1569bf41a158SSteven Rostedt 	 * Need to store resched state on this cpu.
1570bf41a158SSteven Rostedt 	 * Only the first needs to.
1571bf41a158SSteven Rostedt 	 */
1572bf41a158SSteven Rostedt 
1573bf41a158SSteven Rostedt 	if (preempt_count() == 1)
1574bf41a158SSteven Rostedt 		per_cpu(rb_need_resched, cpu) = resched;
1575bf41a158SSteven Rostedt 
15767a8e76a3SSteven Rostedt 	return event;
15777a8e76a3SSteven Rostedt 
1578d769041fSSteven Rostedt  out:
1579261842b7SSteven Rostedt 	trace_recursive_unlock();
1580261842b7SSteven Rostedt 
1581261842b7SSteven Rostedt  out_nocheck:
1582182e9f5fSSteven Rostedt 	ftrace_preempt_enable(resched);
15837a8e76a3SSteven Rostedt 	return NULL;
15847a8e76a3SSteven Rostedt }
1585c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
15867a8e76a3SSteven Rostedt 
15877a8e76a3SSteven Rostedt static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
15887a8e76a3SSteven Rostedt 		      struct ring_buffer_event *event)
15897a8e76a3SSteven Rostedt {
15907a8e76a3SSteven Rostedt 	cpu_buffer->entries++;
1591bf41a158SSteven Rostedt 
1592bf41a158SSteven Rostedt 	/* Only process further if we own the commit */
1593bf41a158SSteven Rostedt 	if (!rb_is_commit(cpu_buffer, event))
1594bf41a158SSteven Rostedt 		return;
1595bf41a158SSteven Rostedt 
1596bf41a158SSteven Rostedt 	cpu_buffer->write_stamp += event->time_delta;
1597bf41a158SSteven Rostedt 
1598bf41a158SSteven Rostedt 	rb_set_commit_to_write(cpu_buffer);
15997a8e76a3SSteven Rostedt }
16007a8e76a3SSteven Rostedt 
16017a8e76a3SSteven Rostedt /**
16027a8e76a3SSteven Rostedt  * ring_buffer_unlock_commit - commit a reserved event
16037a8e76a3SSteven Rostedt  * @buffer: The buffer to commit to
16047a8e76a3SSteven Rostedt  * @event: The event pointer to commit.
16057a8e76a3SSteven Rostedt  *
16067a8e76a3SSteven Rostedt  * This commits the data to the ring buffer, and releases any locks held.
16077a8e76a3SSteven Rostedt  *
16087a8e76a3SSteven Rostedt  * Must be paired with ring_buffer_lock_reserve.
16097a8e76a3SSteven Rostedt  */
16107a8e76a3SSteven Rostedt int ring_buffer_unlock_commit(struct ring_buffer *buffer,
16110a987751SArnaldo Carvalho de Melo 			      struct ring_buffer_event *event)
16127a8e76a3SSteven Rostedt {
16137a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
16147a8e76a3SSteven Rostedt 	int cpu = raw_smp_processor_id();
16157a8e76a3SSteven Rostedt 
16167a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
16177a8e76a3SSteven Rostedt 
16187a8e76a3SSteven Rostedt 	rb_commit(cpu_buffer, event);
16197a8e76a3SSteven Rostedt 
1620261842b7SSteven Rostedt 	trace_recursive_unlock();
1621261842b7SSteven Rostedt 
1622bf41a158SSteven Rostedt 	/*
1623bf41a158SSteven Rostedt 	 * Only the last preempt count needs to restore preemption.
1624bf41a158SSteven Rostedt 	 */
1625182e9f5fSSteven Rostedt 	if (preempt_count() == 1)
1626182e9f5fSSteven Rostedt 		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1627bf41a158SSteven Rostedt 	else
1628bf41a158SSteven Rostedt 		preempt_enable_no_resched_notrace();
16297a8e76a3SSteven Rostedt 
16307a8e76a3SSteven Rostedt 	return 0;
16317a8e76a3SSteven Rostedt }
1632c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
16337a8e76a3SSteven Rostedt 
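/*
 * Editor's sketch of the intended reserve/commit pairing; "struct
 * my_entry" and its field are illustrative placeholders only:
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event);
 */
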
1634f3b9aae1SFrederic Weisbecker static inline void rb_event_discard(struct ring_buffer_event *event)
1635f3b9aae1SFrederic Weisbecker {
1636f3b9aae1SFrederic Weisbecker 	event->type = RINGBUF_TYPE_PADDING;
1637f3b9aae1SFrederic Weisbecker 	/* time delta must be non zero */
1638f3b9aae1SFrederic Weisbecker 	if (!event->time_delta)
1639f3b9aae1SFrederic Weisbecker 		event->time_delta = 1;
1640f3b9aae1SFrederic Weisbecker }
1641f3b9aae1SFrederic Weisbecker 
16427a8e76a3SSteven Rostedt /**
1643fa1b47ddSSteven Rostedt  * ring_buffer_event_discard - discard any event in the ring buffer
1644fa1b47ddSSteven Rostedt  * @event: the event to discard
1645fa1b47ddSSteven Rostedt  *
1646fa1b47ddSSteven Rostedt  * Sometimes an event that is in the ring buffer needs to be ignored.
1647fa1b47ddSSteven Rostedt  * This function lets the user discard an event in the ring buffer
1648fa1b47ddSSteven Rostedt  * and then that event will not be read later.
1649fa1b47ddSSteven Rostedt  *
1650fa1b47ddSSteven Rostedt  * Note, it is up to the user to be careful with this, and protect
1651fa1b47ddSSteven Rostedt  * against races. If the user discards an event that has been consumed
1652fa1b47ddSSteven Rostedt  * it is possible that it could corrupt the ring buffer.
1653fa1b47ddSSteven Rostedt  */
1654fa1b47ddSSteven Rostedt void ring_buffer_event_discard(struct ring_buffer_event *event)
1655fa1b47ddSSteven Rostedt {
1656f3b9aae1SFrederic Weisbecker 	rb_event_discard(event);
1657fa1b47ddSSteven Rostedt }
1658fa1b47ddSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
1659fa1b47ddSSteven Rostedt 
1660fa1b47ddSSteven Rostedt /**
1661fa1b47ddSSteven Rostedt  * ring_buffer_commit_discard - discard an event that has not been committed
1662fa1b47ddSSteven Rostedt  * @buffer: the ring buffer
1663fa1b47ddSSteven Rostedt  * @event: non committed event to discard
1664fa1b47ddSSteven Rostedt  *
1665fa1b47ddSSteven Rostedt  * This is similar to ring_buffer_event_discard but must only be
1666fa1b47ddSSteven Rostedt  * performed on an event that has not been committed yet. The difference
1667fa1b47ddSSteven Rostedt  * is that this will also try to free the event from the ring buffer
1668fa1b47ddSSteven Rostedt  * if another event has not been added behind it.
1669fa1b47ddSSteven Rostedt  *
1670fa1b47ddSSteven Rostedt  * If another event has been added behind it, it will set the event
1671fa1b47ddSSteven Rostedt  * up as discarded, and perform the commit.
1672fa1b47ddSSteven Rostedt  *
1673fa1b47ddSSteven Rostedt  * If this function is called, do not call ring_buffer_unlock_commit on
1674fa1b47ddSSteven Rostedt  * the event.
1675fa1b47ddSSteven Rostedt  */
1676fa1b47ddSSteven Rostedt void ring_buffer_discard_commit(struct ring_buffer *buffer,
1677fa1b47ddSSteven Rostedt 				struct ring_buffer_event *event)
1678fa1b47ddSSteven Rostedt {
1679fa1b47ddSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
1680fa1b47ddSSteven Rostedt 	unsigned long new_index, old_index;
1681fa1b47ddSSteven Rostedt 	struct buffer_page *bpage;
1682fa1b47ddSSteven Rostedt 	unsigned long index;
1683fa1b47ddSSteven Rostedt 	unsigned long addr;
1684fa1b47ddSSteven Rostedt 	int cpu;
1685fa1b47ddSSteven Rostedt 
1686fa1b47ddSSteven Rostedt 	/* The event is discarded regardless */
1687f3b9aae1SFrederic Weisbecker 	rb_event_discard(event);
1688fa1b47ddSSteven Rostedt 
1689fa1b47ddSSteven Rostedt 	/*
1690fa1b47ddSSteven Rostedt 	 * This must only be called if the event has not been
1691fa1b47ddSSteven Rostedt 	 * committed yet. Thus we can assume that preemption
1692fa1b47ddSSteven Rostedt 	 * is still disabled.
1693fa1b47ddSSteven Rostedt 	 */
1694fa1b47ddSSteven Rostedt 	RB_WARN_ON(buffer, !preempt_count());
1695fa1b47ddSSteven Rostedt 
1696fa1b47ddSSteven Rostedt 	cpu = smp_processor_id();
1697fa1b47ddSSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
1698fa1b47ddSSteven Rostedt 
1699fa1b47ddSSteven Rostedt 	new_index = rb_event_index(event);
1700fa1b47ddSSteven Rostedt 	old_index = new_index + rb_event_length(event);
1701fa1b47ddSSteven Rostedt 	addr = (unsigned long)event;
1702fa1b47ddSSteven Rostedt 	addr &= PAGE_MASK;
1703fa1b47ddSSteven Rostedt 
1704fa1b47ddSSteven Rostedt 	bpage = cpu_buffer->tail_page;
1705fa1b47ddSSteven Rostedt 
1706fa1b47ddSSteven Rostedt 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
1707fa1b47ddSSteven Rostedt 		/*
1708fa1b47ddSSteven Rostedt 		 * This is on the tail page. It is possible that
1709fa1b47ddSSteven Rostedt 		 * a write could come in and move the tail page
1710fa1b47ddSSteven Rostedt 		 * and write to the next page. That is fine
1711fa1b47ddSSteven Rostedt 		 * because we just shorten what is on this page.
1712fa1b47ddSSteven Rostedt 		 */
1713fa1b47ddSSteven Rostedt 		index = local_cmpxchg(&bpage->write, old_index, new_index);
1714fa1b47ddSSteven Rostedt 		if (index == old_index)
1715fa1b47ddSSteven Rostedt 			goto out;
1716fa1b47ddSSteven Rostedt 	}
1717fa1b47ddSSteven Rostedt 
1718fa1b47ddSSteven Rostedt 	/*
1719fa1b47ddSSteven Rostedt 	 * The commit is still visible by the reader, so we
1720fa1b47ddSSteven Rostedt 	 * must increment entries.
1721fa1b47ddSSteven Rostedt 	 */
1722fa1b47ddSSteven Rostedt 	cpu_buffer->entries++;
1723fa1b47ddSSteven Rostedt  out:
1724fa1b47ddSSteven Rostedt 	/*
1725fa1b47ddSSteven Rostedt 	 * If a write came in and pushed the tail page
1726fa1b47ddSSteven Rostedt 	 * we still need to update the commit pointer
1727fa1b47ddSSteven Rostedt 	 * if we were the commit.
1728fa1b47ddSSteven Rostedt 	 */
1729fa1b47ddSSteven Rostedt 	if (rb_is_commit(cpu_buffer, event))
1730fa1b47ddSSteven Rostedt 		rb_set_commit_to_write(cpu_buffer);
1731fa1b47ddSSteven Rostedt 
1732f3b9aae1SFrederic Weisbecker 	trace_recursive_unlock();
1733f3b9aae1SFrederic Weisbecker 
1734fa1b47ddSSteven Rostedt 	/*
1735fa1b47ddSSteven Rostedt 	 * Only the last preempt count needs to restore preemption.
1736fa1b47ddSSteven Rostedt 	 */
1737fa1b47ddSSteven Rostedt 	if (preempt_count() == 1)
1738fa1b47ddSSteven Rostedt 		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1739fa1b47ddSSteven Rostedt 	else
1740fa1b47ddSSteven Rostedt 		preempt_enable_no_resched_notrace();
1741fa1b47ddSSteven Rostedt 
1742fa1b47ddSSteven Rostedt }
1743fa1b47ddSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
1744fa1b47ddSSteven Rostedt 
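/*
 * Editor's sketch of discarding a reserved event instead of
 * committing it; fill_entry() is an illustrative placeholder only:
 *
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_lock_reserve(buffer, size);
 *	if (!event)
 *		return;
 *	if (!fill_entry(ring_buffer_event_data(event)))
 *		ring_buffer_discard_commit(buffer, event);
 *	else
 *		ring_buffer_unlock_commit(buffer, event);
 */
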
1745fa1b47ddSSteven Rostedt /**
17467a8e76a3SSteven Rostedt  * ring_buffer_write - write data to the buffer without reserving
17477a8e76a3SSteven Rostedt  * @buffer: The ring buffer to write to.
17487a8e76a3SSteven Rostedt  * @length: The length of the data being written (excluding the event header)
17497a8e76a3SSteven Rostedt  * @data: The data to write to the buffer.
17507a8e76a3SSteven Rostedt  *
17517a8e76a3SSteven Rostedt  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
17527a8e76a3SSteven Rostedt  * one function. If you already have the data to write to the buffer, it
17537a8e76a3SSteven Rostedt  * may be easier to simply call this function.
17547a8e76a3SSteven Rostedt  *
17557a8e76a3SSteven Rostedt  * Note, like ring_buffer_lock_reserve, the length is the length of the data
17567a8e76a3SSteven Rostedt  * and not the length of the event which would hold the header.
17577a8e76a3SSteven Rostedt  */
17587a8e76a3SSteven Rostedt int ring_buffer_write(struct ring_buffer *buffer,
17597a8e76a3SSteven Rostedt 			unsigned long length,
17607a8e76a3SSteven Rostedt 			void *data)
17617a8e76a3SSteven Rostedt {
17627a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
17637a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
1764bf41a158SSteven Rostedt 	unsigned long event_length;
17657a8e76a3SSteven Rostedt 	void *body;
17667a8e76a3SSteven Rostedt 	int ret = -EBUSY;
1767bf41a158SSteven Rostedt 	int cpu, resched;
17687a8e76a3SSteven Rostedt 
1769033601a3SSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
1770a3583244SSteven Rostedt 		return -EBUSY;
1771a3583244SSteven Rostedt 
17727a8e76a3SSteven Rostedt 	if (atomic_read(&buffer->record_disabled))
17737a8e76a3SSteven Rostedt 		return -EBUSY;
17747a8e76a3SSteven Rostedt 
1775182e9f5fSSteven Rostedt 	resched = ftrace_preempt_disable();
1776bf41a158SSteven Rostedt 
17777a8e76a3SSteven Rostedt 	cpu = raw_smp_processor_id();
17787a8e76a3SSteven Rostedt 
17799e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1780d769041fSSteven Rostedt 		goto out;
17817a8e76a3SSteven Rostedt 
17827a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
17837a8e76a3SSteven Rostedt 
17847a8e76a3SSteven Rostedt 	if (atomic_read(&cpu_buffer->record_disabled))
17857a8e76a3SSteven Rostedt 		goto out;
17867a8e76a3SSteven Rostedt 
17877a8e76a3SSteven Rostedt 	event_length = rb_calculate_event_length(length);
17887a8e76a3SSteven Rostedt 	event = rb_reserve_next_event(cpu_buffer,
17897a8e76a3SSteven Rostedt 				      RINGBUF_TYPE_DATA, event_length);
17907a8e76a3SSteven Rostedt 	if (!event)
17917a8e76a3SSteven Rostedt 		goto out;
17927a8e76a3SSteven Rostedt 
17937a8e76a3SSteven Rostedt 	body = rb_event_data(event);
17947a8e76a3SSteven Rostedt 
17957a8e76a3SSteven Rostedt 	memcpy(body, data, length);
17967a8e76a3SSteven Rostedt 
17977a8e76a3SSteven Rostedt 	rb_commit(cpu_buffer, event);
17987a8e76a3SSteven Rostedt 
17997a8e76a3SSteven Rostedt 	ret = 0;
18007a8e76a3SSteven Rostedt  out:
1801182e9f5fSSteven Rostedt 	ftrace_preempt_enable(resched);
18027a8e76a3SSteven Rostedt 
18037a8e76a3SSteven Rostedt 	return ret;
18047a8e76a3SSteven Rostedt }
1805c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_write);
18067a8e76a3SSteven Rostedt 
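/*
 * Editor's sketch; "struct my_record" is an illustrative placeholder
 * only.  Writing an already-prepared record in one call instead of
 * the reserve/commit pair:
 *
 *	struct my_record rec = { .value = 42 };
 *	int ret;
 *
 *	ret = ring_buffer_write(buffer, sizeof(rec), &rec);
 *	if (ret)
 *		return ret;	(-EBUSY when recording is disabled)
 */
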
180734a148bfSAndrew Morton static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1808bf41a158SSteven Rostedt {
1809bf41a158SSteven Rostedt 	struct buffer_page *reader = cpu_buffer->reader_page;
1810bf41a158SSteven Rostedt 	struct buffer_page *head = cpu_buffer->head_page;
1811bf41a158SSteven Rostedt 	struct buffer_page *commit = cpu_buffer->commit_page;
1812bf41a158SSteven Rostedt 
1813bf41a158SSteven Rostedt 	return reader->read == rb_page_commit(reader) &&
1814bf41a158SSteven Rostedt 		(commit == reader ||
1815bf41a158SSteven Rostedt 		 (commit == head &&
1816bf41a158SSteven Rostedt 		  head->read == rb_page_commit(commit)));
1817bf41a158SSteven Rostedt }
1818bf41a158SSteven Rostedt 
18197a8e76a3SSteven Rostedt /**
18207a8e76a3SSteven Rostedt  * ring_buffer_record_disable - stop all writes into the buffer
18217a8e76a3SSteven Rostedt  * @buffer: The ring buffer to stop writes to.
18227a8e76a3SSteven Rostedt  *
18237a8e76a3SSteven Rostedt  * This prevents all writes to the buffer. Any attempt to write
18247a8e76a3SSteven Rostedt  * to the buffer after this will fail and return NULL.
18257a8e76a3SSteven Rostedt  *
18267a8e76a3SSteven Rostedt  * The caller should call synchronize_sched() after this.
18277a8e76a3SSteven Rostedt  */
18287a8e76a3SSteven Rostedt void ring_buffer_record_disable(struct ring_buffer *buffer)
18297a8e76a3SSteven Rostedt {
18307a8e76a3SSteven Rostedt 	atomic_inc(&buffer->record_disabled);
18317a8e76a3SSteven Rostedt }
1832c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
18337a8e76a3SSteven Rostedt 
18347a8e76a3SSteven Rostedt /**
18357a8e76a3SSteven Rostedt  * ring_buffer_record_enable - enable writes to the buffer
18367a8e76a3SSteven Rostedt  * @buffer: The ring buffer to enable writes
18377a8e76a3SSteven Rostedt  *
18387a8e76a3SSteven Rostedt  * Note, multiple disables will need the same number of enables
18397a8e76a3SSteven Rostedt  * to truly enable the writing (much like preempt_disable).
18407a8e76a3SSteven Rostedt  */
18417a8e76a3SSteven Rostedt void ring_buffer_record_enable(struct ring_buffer *buffer)
18427a8e76a3SSteven Rostedt {
18437a8e76a3SSteven Rostedt 	atomic_dec(&buffer->record_disabled);
18447a8e76a3SSteven Rostedt }
1845c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
18467a8e76a3SSteven Rostedt 
18477a8e76a3SSteven Rostedt /**
18487a8e76a3SSteven Rostedt  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
18497a8e76a3SSteven Rostedt  * @buffer: The ring buffer to stop writes to.
18507a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to stop
18517a8e76a3SSteven Rostedt  *
18527a8e76a3SSteven Rostedt  * This prevents all writes to the buffer. Any attempt to write
18537a8e76a3SSteven Rostedt  * to the buffer after this will fail and return NULL.
18547a8e76a3SSteven Rostedt  *
18557a8e76a3SSteven Rostedt  * The caller should call synchronize_sched() after this.
18567a8e76a3SSteven Rostedt  */
18577a8e76a3SSteven Rostedt void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
18587a8e76a3SSteven Rostedt {
18597a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
18607a8e76a3SSteven Rostedt 
18619e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
18628aabee57SSteven Rostedt 		return;
18637a8e76a3SSteven Rostedt 
18647a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
18657a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
18667a8e76a3SSteven Rostedt }
1867c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
18687a8e76a3SSteven Rostedt 
18697a8e76a3SSteven Rostedt /**
18707a8e76a3SSteven Rostedt  * ring_buffer_record_enable_cpu - enable writes to the buffer
18717a8e76a3SSteven Rostedt  * @buffer: The ring buffer to enable writes
18727a8e76a3SSteven Rostedt  * @cpu: The CPU to enable.
18737a8e76a3SSteven Rostedt  *
18747a8e76a3SSteven Rostedt  * Note, multiple disables will need the same number of enables
18757a8e76a3SSteven Rostedt  * to truly enable the writing (much like preempt_disable).
18767a8e76a3SSteven Rostedt  */
18777a8e76a3SSteven Rostedt void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
18787a8e76a3SSteven Rostedt {
18797a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
18807a8e76a3SSteven Rostedt 
18819e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
18828aabee57SSteven Rostedt 		return;
18837a8e76a3SSteven Rostedt 
18847a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
18857a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
18867a8e76a3SSteven Rostedt }
1887c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
18887a8e76a3SSteven Rostedt 
18897a8e76a3SSteven Rostedt /**
18907a8e76a3SSteven Rostedt  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
18917a8e76a3SSteven Rostedt  * @buffer: The ring buffer
18927a8e76a3SSteven Rostedt  * @cpu: The per CPU buffer to get the entries from.
18937a8e76a3SSteven Rostedt  */
18947a8e76a3SSteven Rostedt unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
18957a8e76a3SSteven Rostedt {
18967a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
18978aabee57SSteven Rostedt 	unsigned long ret;
18987a8e76a3SSteven Rostedt 
18999e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
19008aabee57SSteven Rostedt 		return 0;
19017a8e76a3SSteven Rostedt 
19027a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
1903554f786eSSteven Rostedt 	ret = cpu_buffer->entries;
1904554f786eSSteven Rostedt 
1905554f786eSSteven Rostedt 	return ret;
19067a8e76a3SSteven Rostedt }
1907c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
19087a8e76a3SSteven Rostedt 
19097a8e76a3SSteven Rostedt /**
19107a8e76a3SSteven Rostedt  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
19117a8e76a3SSteven Rostedt  * @buffer: The ring buffer
19127a8e76a3SSteven Rostedt  * @cpu: The per CPU buffer to get the number of overruns from
19137a8e76a3SSteven Rostedt  */
19147a8e76a3SSteven Rostedt unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
19157a8e76a3SSteven Rostedt {
19167a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
19178aabee57SSteven Rostedt 	unsigned long ret;
19187a8e76a3SSteven Rostedt 
19199e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
19208aabee57SSteven Rostedt 		return 0;
19217a8e76a3SSteven Rostedt 
19227a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
1923554f786eSSteven Rostedt 	ret = cpu_buffer->overrun;
1924554f786eSSteven Rostedt 
1925554f786eSSteven Rostedt 	return ret;
19267a8e76a3SSteven Rostedt }
1927c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
19287a8e76a3SSteven Rostedt 
19297a8e76a3SSteven Rostedt /**
19307a8e76a3SSteven Rostedt  * ring_buffer_entries - get the number of entries in a buffer
19317a8e76a3SSteven Rostedt  * @buffer: The ring buffer
19327a8e76a3SSteven Rostedt  *
19337a8e76a3SSteven Rostedt  * Returns the total number of entries in the ring buffer
19347a8e76a3SSteven Rostedt  * (all CPU entries)
19357a8e76a3SSteven Rostedt  */
19367a8e76a3SSteven Rostedt unsigned long ring_buffer_entries(struct ring_buffer *buffer)
19377a8e76a3SSteven Rostedt {
19387a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
19397a8e76a3SSteven Rostedt 	unsigned long entries = 0;
19407a8e76a3SSteven Rostedt 	int cpu;
19417a8e76a3SSteven Rostedt 
19427a8e76a3SSteven Rostedt 	/* if you care about this being correct, lock the buffer */
19437a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
19447a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
19457a8e76a3SSteven Rostedt 		entries += cpu_buffer->entries;
19467a8e76a3SSteven Rostedt 	}
19477a8e76a3SSteven Rostedt 
19487a8e76a3SSteven Rostedt 	return entries;
19497a8e76a3SSteven Rostedt }
1950c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries);
19517a8e76a3SSteven Rostedt 
19527a8e76a3SSteven Rostedt /**
19537a8e76a3SSteven Rostedt  * ring_buffer_overruns - get the number of overruns in the buffer
19547a8e76a3SSteven Rostedt  * @buffer: The ring buffer
19557a8e76a3SSteven Rostedt  *
19567a8e76a3SSteven Rostedt  * Returns the total number of overruns in the ring buffer
19577a8e76a3SSteven Rostedt  * (all CPU entries)
19587a8e76a3SSteven Rostedt  */
19597a8e76a3SSteven Rostedt unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
19607a8e76a3SSteven Rostedt {
19617a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
19627a8e76a3SSteven Rostedt 	unsigned long overruns = 0;
19637a8e76a3SSteven Rostedt 	int cpu;
19647a8e76a3SSteven Rostedt 
19657a8e76a3SSteven Rostedt 	/* if you care about this being correct, lock the buffer */
19667a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
19677a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
19687a8e76a3SSteven Rostedt 		overruns += cpu_buffer->overrun;
19697a8e76a3SSteven Rostedt 	}
19707a8e76a3SSteven Rostedt 
19717a8e76a3SSteven Rostedt 	return overruns;
19727a8e76a3SSteven Rostedt }
1973c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overruns);
19747a8e76a3SSteven Rostedt 
1975642edba5SSteven Rostedt static void rb_iter_reset(struct ring_buffer_iter *iter)
19767a8e76a3SSteven Rostedt {
19777a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
19787a8e76a3SSteven Rostedt 
1979d769041fSSteven Rostedt 	/* Iterator usage is expected to have record disabled */
1980d769041fSSteven Rostedt 	if (list_empty(&cpu_buffer->reader_page->list)) {
19817a8e76a3SSteven Rostedt 		iter->head_page = cpu_buffer->head_page;
19826f807acdSSteven Rostedt 		iter->head = cpu_buffer->head_page->read;
1983d769041fSSteven Rostedt 	} else {
1984d769041fSSteven Rostedt 		iter->head_page = cpu_buffer->reader_page;
19856f807acdSSteven Rostedt 		iter->head = cpu_buffer->reader_page->read;
1986d769041fSSteven Rostedt 	}
1987d769041fSSteven Rostedt 	if (iter->head)
1988d769041fSSteven Rostedt 		iter->read_stamp = cpu_buffer->read_stamp;
1989d769041fSSteven Rostedt 	else
1990abc9b56dSSteven Rostedt 		iter->read_stamp = iter->head_page->page->time_stamp;
1991642edba5SSteven Rostedt }
1992f83c9d0fSSteven Rostedt 
1993642edba5SSteven Rostedt /**
1994642edba5SSteven Rostedt  * ring_buffer_iter_reset - reset an iterator
1995642edba5SSteven Rostedt  * @iter: The iterator to reset
1996642edba5SSteven Rostedt  *
1997642edba5SSteven Rostedt  * Resets the iterator, so that it will start from the beginning
1998642edba5SSteven Rostedt  * again.
1999642edba5SSteven Rostedt  */
2000642edba5SSteven Rostedt void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2001642edba5SSteven Rostedt {
2002554f786eSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
2003642edba5SSteven Rostedt 	unsigned long flags;
2004642edba5SSteven Rostedt 
2005554f786eSSteven Rostedt 	if (!iter)
2006554f786eSSteven Rostedt 		return;
2007554f786eSSteven Rostedt 
2008554f786eSSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
2009554f786eSSteven Rostedt 
2010642edba5SSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2011642edba5SSteven Rostedt 	rb_iter_reset(iter);
2012f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
20137a8e76a3SSteven Rostedt }
2014c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
20157a8e76a3SSteven Rostedt 
20167a8e76a3SSteven Rostedt /**
20177a8e76a3SSteven Rostedt  * ring_buffer_iter_empty - check if an iterator has no more to read
20187a8e76a3SSteven Rostedt  * @iter: The iterator to check
20197a8e76a3SSteven Rostedt  */
20207a8e76a3SSteven Rostedt int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
20217a8e76a3SSteven Rostedt {
20227a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
20237a8e76a3SSteven Rostedt 
20247a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
20257a8e76a3SSteven Rostedt 
2026bf41a158SSteven Rostedt 	return iter->head_page == cpu_buffer->commit_page &&
2027bf41a158SSteven Rostedt 		iter->head == rb_commit_index(cpu_buffer);
20287a8e76a3SSteven Rostedt }
2029c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
20307a8e76a3SSteven Rostedt 
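/*
 * Editor's sketch of typical iterator usage.  ring_buffer_read_start(),
 * ring_buffer_read() and ring_buffer_read_finish() are assumed to be
 * the companion API from linux/ring_buffer.h; process() is an
 * illustrative placeholder only:
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while (!ring_buffer_iter_empty(iter)) {
 *		event = ring_buffer_read(iter, &ts);
 *		process(event);
 *	}
 *	ring_buffer_read_finish(iter);
 */
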
20317a8e76a3SSteven Rostedt static void
20327a8e76a3SSteven Rostedt rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
20337a8e76a3SSteven Rostedt 		     struct ring_buffer_event *event)
20347a8e76a3SSteven Rostedt {
20357a8e76a3SSteven Rostedt 	u64 delta;
20367a8e76a3SSteven Rostedt 
20377a8e76a3SSteven Rostedt 	switch (event->type) {
20387a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
20397a8e76a3SSteven Rostedt 		return;
20407a8e76a3SSteven Rostedt 
20417a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
20427a8e76a3SSteven Rostedt 		delta = event->array[0];
20437a8e76a3SSteven Rostedt 		delta <<= TS_SHIFT;
20447a8e76a3SSteven Rostedt 		delta += event->time_delta;
20457a8e76a3SSteven Rostedt 		cpu_buffer->read_stamp += delta;
20467a8e76a3SSteven Rostedt 		return;
20477a8e76a3SSteven Rostedt 
20487a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
20497a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
20507a8e76a3SSteven Rostedt 		return;
20517a8e76a3SSteven Rostedt 
20527a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
20537a8e76a3SSteven Rostedt 		cpu_buffer->read_stamp += event->time_delta;
20547a8e76a3SSteven Rostedt 		return;
20557a8e76a3SSteven Rostedt 
20567a8e76a3SSteven Rostedt 	default:
20577a8e76a3SSteven Rostedt 		BUG();
20587a8e76a3SSteven Rostedt 	}
20597a8e76a3SSteven Rostedt 	return;
20607a8e76a3SSteven Rostedt }
20617a8e76a3SSteven Rostedt 
20627a8e76a3SSteven Rostedt static void
20637a8e76a3SSteven Rostedt rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
20647a8e76a3SSteven Rostedt 			  struct ring_buffer_event *event)
20657a8e76a3SSteven Rostedt {
20667a8e76a3SSteven Rostedt 	u64 delta;
20677a8e76a3SSteven Rostedt 
20687a8e76a3SSteven Rostedt 	switch (event->type) {
20697a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
20707a8e76a3SSteven Rostedt 		return;
20717a8e76a3SSteven Rostedt 
20727a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
20737a8e76a3SSteven Rostedt 		delta = event->array[0];
20747a8e76a3SSteven Rostedt 		delta <<= TS_SHIFT;
20757a8e76a3SSteven Rostedt 		delta += event->time_delta;
20767a8e76a3SSteven Rostedt 		iter->read_stamp += delta;
20777a8e76a3SSteven Rostedt 		return;
20787a8e76a3SSteven Rostedt 
20797a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
20807a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
20817a8e76a3SSteven Rostedt 		return;
20827a8e76a3SSteven Rostedt 
20837a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
20847a8e76a3SSteven Rostedt 		iter->read_stamp += event->time_delta;
20857a8e76a3SSteven Rostedt 		return;
20867a8e76a3SSteven Rostedt 
20877a8e76a3SSteven Rostedt 	default:
20887a8e76a3SSteven Rostedt 		BUG();
20897a8e76a3SSteven Rostedt 	}
20907a8e76a3SSteven Rostedt 	return;
20917a8e76a3SSteven Rostedt }
20927a8e76a3SSteven Rostedt 
2093d769041fSSteven Rostedt static struct buffer_page *
2094d769041fSSteven Rostedt rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
20957a8e76a3SSteven Rostedt {
2096d769041fSSteven Rostedt 	struct buffer_page *reader = NULL;
2097d769041fSSteven Rostedt 	unsigned long flags;
2098818e3dd3SSteven Rostedt 	int nr_loops = 0;
2099d769041fSSteven Rostedt 
21003e03fb7fSSteven Rostedt 	local_irq_save(flags);
21013e03fb7fSSteven Rostedt 	__raw_spin_lock(&cpu_buffer->lock);
2102d769041fSSteven Rostedt 
2103d769041fSSteven Rostedt  again:
2104818e3dd3SSteven Rostedt 	/*
2105818e3dd3SSteven Rostedt 	 * This should normally only loop twice. But because the
2106818e3dd3SSteven Rostedt 	 * start of the reader inserts an empty page, it causes
2107818e3dd3SSteven Rostedt 	 * a case where we will loop three times. There should be no
2108818e3dd3SSteven Rostedt 	 * reason to loop four times (that I know of).
2109818e3dd3SSteven Rostedt 	 */
21103e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
2111818e3dd3SSteven Rostedt 		reader = NULL;
2112818e3dd3SSteven Rostedt 		goto out;
2113818e3dd3SSteven Rostedt 	}
2114818e3dd3SSteven Rostedt 
2115d769041fSSteven Rostedt 	reader = cpu_buffer->reader_page;
2116d769041fSSteven Rostedt 
2117d769041fSSteven Rostedt 	/* If there's more to read, return this page */
2118bf41a158SSteven Rostedt 	if (cpu_buffer->reader_page->read < rb_page_size(reader))
2119d769041fSSteven Rostedt 		goto out;
2120d769041fSSteven Rostedt 
2121d769041fSSteven Rostedt 	/* Never should we have an index greater than the size */
21223e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer,
21233e89c7bbSSteven Rostedt 		       cpu_buffer->reader_page->read > rb_page_size(reader)))
21243e89c7bbSSteven Rostedt 		goto out;
2125d769041fSSteven Rostedt 
2126d769041fSSteven Rostedt 	/* check if we caught up to the tail */
2127d769041fSSteven Rostedt 	reader = NULL;
2128bf41a158SSteven Rostedt 	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
2129d769041fSSteven Rostedt 		goto out;
21307a8e76a3SSteven Rostedt 
21317a8e76a3SSteven Rostedt 	/*
2132d769041fSSteven Rostedt 	 * Splice the empty reader page into the list around the head.
2133d769041fSSteven Rostedt 	 * Reset the reader page to size zero.
21347a8e76a3SSteven Rostedt 	 */
2135d769041fSSteven Rostedt 
2136d769041fSSteven Rostedt 	reader = cpu_buffer->head_page;
2137d769041fSSteven Rostedt 	cpu_buffer->reader_page->list.next = reader->list.next;
2138d769041fSSteven Rostedt 	cpu_buffer->reader_page->list.prev = reader->list.prev;
2139bf41a158SSteven Rostedt 
2140bf41a158SSteven Rostedt 	local_set(&cpu_buffer->reader_page->write, 0);
2141abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->reader_page->page->commit, 0);
2142d769041fSSteven Rostedt 
2143d769041fSSteven Rostedt 	/* Make the reader page now replace the head */
2144d769041fSSteven Rostedt 	reader->list.prev->next = &cpu_buffer->reader_page->list;
2145d769041fSSteven Rostedt 	reader->list.next->prev = &cpu_buffer->reader_page->list;
2146d769041fSSteven Rostedt 
2147d769041fSSteven Rostedt 	/*
2148d769041fSSteven Rostedt 	 * If the tail is on the reader, then we must set the head
2149d769041fSSteven Rostedt 	 * to the inserted page, otherwise we set it one before.
2150d769041fSSteven Rostedt 	 */
2151d769041fSSteven Rostedt 	cpu_buffer->head_page = cpu_buffer->reader_page;
2152d769041fSSteven Rostedt 
2153bf41a158SSteven Rostedt 	if (cpu_buffer->commit_page != reader)
21547a8e76a3SSteven Rostedt 		rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2155d769041fSSteven Rostedt 
2156d769041fSSteven Rostedt 	/* Finally update the reader page to the new head */
2157d769041fSSteven Rostedt 	cpu_buffer->reader_page = reader;
2158d769041fSSteven Rostedt 	rb_reset_reader_page(cpu_buffer);
2159d769041fSSteven Rostedt 
2160d769041fSSteven Rostedt 	goto again;
2161d769041fSSteven Rostedt 
2162d769041fSSteven Rostedt  out:
21633e03fb7fSSteven Rostedt 	__raw_spin_unlock(&cpu_buffer->lock);
21643e03fb7fSSteven Rostedt 	local_irq_restore(flags);
2165d769041fSSteven Rostedt 
2166d769041fSSteven Rostedt 	return reader;
21677a8e76a3SSteven Rostedt }
21687a8e76a3SSteven Rostedt 
2169d769041fSSteven Rostedt static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2170d769041fSSteven Rostedt {
2171d769041fSSteven Rostedt 	struct ring_buffer_event *event;
2172d769041fSSteven Rostedt 	struct buffer_page *reader;
2173d769041fSSteven Rostedt 	unsigned length;
2174d769041fSSteven Rostedt 
2175d769041fSSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
2176d769041fSSteven Rostedt 
2177d769041fSSteven Rostedt 	/* This function should not be called when buffer is empty */
21783e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, !reader))
21793e89c7bbSSteven Rostedt 		return;
2180d769041fSSteven Rostedt 
2181d769041fSSteven Rostedt 	event = rb_reader_event(cpu_buffer);
21827a8e76a3SSteven Rostedt 
21832d622719STom Zanussi 	if (event->type == RINGBUF_TYPE_DATA || rb_discarded_event(event))
21847a8e76a3SSteven Rostedt 		cpu_buffer->entries--;
21857a8e76a3SSteven Rostedt 
21867a8e76a3SSteven Rostedt 	rb_update_read_stamp(cpu_buffer, event);
21877a8e76a3SSteven Rostedt 
2188d769041fSSteven Rostedt 	length = rb_event_length(event);
21896f807acdSSteven Rostedt 	cpu_buffer->reader_page->read += length;
21907a8e76a3SSteven Rostedt }
21917a8e76a3SSteven Rostedt 
21927a8e76a3SSteven Rostedt static void rb_advance_iter(struct ring_buffer_iter *iter)
21937a8e76a3SSteven Rostedt {
21947a8e76a3SSteven Rostedt 	struct ring_buffer *buffer;
21957a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
21967a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
21977a8e76a3SSteven Rostedt 	unsigned length;
21987a8e76a3SSteven Rostedt 
21997a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
22007a8e76a3SSteven Rostedt 	buffer = cpu_buffer->buffer;
22017a8e76a3SSteven Rostedt 
22027a8e76a3SSteven Rostedt 	/*
22037a8e76a3SSteven Rostedt 	 * Check if we are at the end of the buffer.
22047a8e76a3SSteven Rostedt 	 */
2205bf41a158SSteven Rostedt 	if (iter->head >= rb_page_size(iter->head_page)) {
22063e89c7bbSSteven Rostedt 		if (RB_WARN_ON(buffer,
22073e89c7bbSSteven Rostedt 			       iter->head_page == cpu_buffer->commit_page))
22083e89c7bbSSteven Rostedt 			return;
2209d769041fSSteven Rostedt 		rb_inc_iter(iter);
22107a8e76a3SSteven Rostedt 		return;
22117a8e76a3SSteven Rostedt 	}
22127a8e76a3SSteven Rostedt 
22137a8e76a3SSteven Rostedt 	event = rb_iter_head_event(iter);
22147a8e76a3SSteven Rostedt 
22157a8e76a3SSteven Rostedt 	length = rb_event_length(event);
22167a8e76a3SSteven Rostedt 
22177a8e76a3SSteven Rostedt 	/*
22187a8e76a3SSteven Rostedt 	 * This should not be called to advance the header if we are
22197a8e76a3SSteven Rostedt 	 * at the tail of the buffer.
22207a8e76a3SSteven Rostedt 	 */
22213e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer,
2222f536aafcSSteven Rostedt 		       (iter->head_page == cpu_buffer->commit_page) &&
22233e89c7bbSSteven Rostedt 		       (iter->head + length > rb_commit_index(cpu_buffer))))
22243e89c7bbSSteven Rostedt 		return;
22257a8e76a3SSteven Rostedt 
22267a8e76a3SSteven Rostedt 	rb_update_iter_read_stamp(iter, event);
22277a8e76a3SSteven Rostedt 
22287a8e76a3SSteven Rostedt 	iter->head += length;
22297a8e76a3SSteven Rostedt 
22307a8e76a3SSteven Rostedt 	/* check for end of page padding */
2231bf41a158SSteven Rostedt 	if ((iter->head >= rb_page_size(iter->head_page)) &&
2232bf41a158SSteven Rostedt 	    (iter->head_page != cpu_buffer->commit_page))
22337a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
22347a8e76a3SSteven Rostedt }
22357a8e76a3SSteven Rostedt 
2236f83c9d0fSSteven Rostedt static struct ring_buffer_event *
2237f83c9d0fSSteven Rostedt rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
22387a8e76a3SSteven Rostedt {
22397a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
22407a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
2241d769041fSSteven Rostedt 	struct buffer_page *reader;
2242818e3dd3SSteven Rostedt 	int nr_loops = 0;
22437a8e76a3SSteven Rostedt 
22447a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
22457a8e76a3SSteven Rostedt 
22467a8e76a3SSteven Rostedt  again:
2247818e3dd3SSteven Rostedt 	/*
2248818e3dd3SSteven Rostedt 	 * We repeat when a timestamp is encountered. It is possible
2249818e3dd3SSteven Rostedt 	 * to get multiple timestamps from an interrupt entering just
2250818e3dd3SSteven Rostedt 	 * as one timestamp is about to be written. The max times
2251818e3dd3SSteven Rostedt 	 * that this can happen is the number of nested interrupts we
2252818e3dd3SSteven Rostedt 	 * can have.  Nesting interrupts 10 deep is clearly
2253818e3dd3SSteven Rostedt 	 * an anomaly.
2254818e3dd3SSteven Rostedt 	 */
22553e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
2256818e3dd3SSteven Rostedt 		return NULL;
2257818e3dd3SSteven Rostedt 
2258d769041fSSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
2259d769041fSSteven Rostedt 	if (!reader)
22607a8e76a3SSteven Rostedt 		return NULL;
22617a8e76a3SSteven Rostedt 
2262d769041fSSteven Rostedt 	event = rb_reader_event(cpu_buffer);
22637a8e76a3SSteven Rostedt 
22647a8e76a3SSteven Rostedt 	switch (event->type) {
22657a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
22662d622719STom Zanussi 		if (rb_null_event(event))
2267bf41a158SSteven Rostedt 			RB_WARN_ON(cpu_buffer, 1);
22682d622719STom Zanussi 		/*
22692d622719STom Zanussi 		 * Because the writer could be discarding every
22702d622719STom Zanussi 		 * event it creates (which would probably be bad)
22712d622719STom Zanussi 		 * if we were to go back to "again" then we may never
22722d622719STom Zanussi 		 * catch up, and will trigger the warn on, or lock
22732d622719STom Zanussi 		 * the box. Return the padding, and we will release
22742d622719STom Zanussi 		 * the current locks, and try again.
22752d622719STom Zanussi 		 */
2276d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
22772d622719STom Zanussi 		return event;
22787a8e76a3SSteven Rostedt 
22797a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
22807a8e76a3SSteven Rostedt 		/* Internal data, OK to advance */
2281d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
22827a8e76a3SSteven Rostedt 		goto again;
22837a8e76a3SSteven Rostedt 
22847a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
22857a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
2286d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
22877a8e76a3SSteven Rostedt 		goto again;
22887a8e76a3SSteven Rostedt 
22897a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
22907a8e76a3SSteven Rostedt 		if (ts) {
22917a8e76a3SSteven Rostedt 			*ts = cpu_buffer->read_stamp + event->time_delta;
229237886f6aSSteven Rostedt 			ring_buffer_normalize_time_stamp(buffer,
229337886f6aSSteven Rostedt 							 cpu_buffer->cpu, ts);
22947a8e76a3SSteven Rostedt 		}
22957a8e76a3SSteven Rostedt 		return event;
22967a8e76a3SSteven Rostedt 
22977a8e76a3SSteven Rostedt 	default:
22987a8e76a3SSteven Rostedt 		BUG();
22997a8e76a3SSteven Rostedt 	}
23007a8e76a3SSteven Rostedt 
23017a8e76a3SSteven Rostedt 	return NULL;
23027a8e76a3SSteven Rostedt }
2303c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_peek);
23047a8e76a3SSteven Rostedt 
2305f83c9d0fSSteven Rostedt static struct ring_buffer_event *
2306f83c9d0fSSteven Rostedt rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
23077a8e76a3SSteven Rostedt {
23087a8e76a3SSteven Rostedt 	struct ring_buffer *buffer;
23097a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
23107a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
2311818e3dd3SSteven Rostedt 	int nr_loops = 0;
23127a8e76a3SSteven Rostedt 
23137a8e76a3SSteven Rostedt 	if (ring_buffer_iter_empty(iter))
23147a8e76a3SSteven Rostedt 		return NULL;
23157a8e76a3SSteven Rostedt 
23167a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
23177a8e76a3SSteven Rostedt 	buffer = cpu_buffer->buffer;
23187a8e76a3SSteven Rostedt 
23197a8e76a3SSteven Rostedt  again:
2320818e3dd3SSteven Rostedt 	/*
2321818e3dd3SSteven Rostedt 	 * We repeat when a timestamp is encountered. It is possible
2322818e3dd3SSteven Rostedt 	 * to get multiple timestamps from an interrupt entering just
2323818e3dd3SSteven Rostedt 	 * as one timestamp is about to be written. The max times
2324818e3dd3SSteven Rostedt 	 * that this can happen is the number of nested interrupts we
2325818e3dd3SSteven Rostedt 	 * can have. Nesting interrupts 10 deep is clearly
2326818e3dd3SSteven Rostedt 	 * an anomaly.
2327818e3dd3SSteven Rostedt 	 */
23283e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
2329818e3dd3SSteven Rostedt 		return NULL;
2330818e3dd3SSteven Rostedt 
23317a8e76a3SSteven Rostedt 	if (rb_per_cpu_empty(cpu_buffer))
23327a8e76a3SSteven Rostedt 		return NULL;
23337a8e76a3SSteven Rostedt 
23347a8e76a3SSteven Rostedt 	event = rb_iter_head_event(iter);
23357a8e76a3SSteven Rostedt 
23367a8e76a3SSteven Rostedt 	switch (event->type) {
23377a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
23382d622719STom Zanussi 		if (rb_null_event(event)) {
2339d769041fSSteven Rostedt 			rb_inc_iter(iter);
23407a8e76a3SSteven Rostedt 			goto again;
23412d622719STom Zanussi 		}
23422d622719STom Zanussi 		rb_advance_iter(iter);
23432d622719STom Zanussi 		return event;
23447a8e76a3SSteven Rostedt 
23457a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
23467a8e76a3SSteven Rostedt 		/* Internal data, OK to advance */
23477a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
23487a8e76a3SSteven Rostedt 		goto again;
23497a8e76a3SSteven Rostedt 
23507a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
23517a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
23527a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
23537a8e76a3SSteven Rostedt 		goto again;
23547a8e76a3SSteven Rostedt 
23557a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
23567a8e76a3SSteven Rostedt 		if (ts) {
23577a8e76a3SSteven Rostedt 			*ts = iter->read_stamp + event->time_delta;
235837886f6aSSteven Rostedt 			ring_buffer_normalize_time_stamp(buffer,
235937886f6aSSteven Rostedt 							 cpu_buffer->cpu, ts);
23607a8e76a3SSteven Rostedt 		}
23617a8e76a3SSteven Rostedt 		return event;
23627a8e76a3SSteven Rostedt 
23637a8e76a3SSteven Rostedt 	default:
23647a8e76a3SSteven Rostedt 		BUG();
23657a8e76a3SSteven Rostedt 	}
23667a8e76a3SSteven Rostedt 
23677a8e76a3SSteven Rostedt 	return NULL;
23687a8e76a3SSteven Rostedt }
2369c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
23707a8e76a3SSteven Rostedt 
23717a8e76a3SSteven Rostedt /**
2372f83c9d0fSSteven Rostedt  * ring_buffer_peek - peek at the next event to be read
2373f83c9d0fSSteven Rostedt  * @buffer: The ring buffer to read
2374f83c9d0fSSteven Rostedt  * @cpu: The cpu to peek at
2375f83c9d0fSSteven Rostedt  * @ts: The timestamp counter of this event.
2376f83c9d0fSSteven Rostedt  *
2377f83c9d0fSSteven Rostedt  * This will return the event that will be read next, but does
2378f83c9d0fSSteven Rostedt  * not consume the data.
2379f83c9d0fSSteven Rostedt  */
2380f83c9d0fSSteven Rostedt struct ring_buffer_event *
2381f83c9d0fSSteven Rostedt ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2382f83c9d0fSSteven Rostedt {
2383f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
23848aabee57SSteven Rostedt 	struct ring_buffer_event *event;
2385f83c9d0fSSteven Rostedt 	unsigned long flags;
2386f83c9d0fSSteven Rostedt 
2387554f786eSSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
23888aabee57SSteven Rostedt 		return NULL;
2389554f786eSSteven Rostedt 
23902d622719STom Zanussi  again:
2391f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2392f83c9d0fSSteven Rostedt 	event = rb_buffer_peek(buffer, cpu, ts);
2393f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2394f83c9d0fSSteven Rostedt 
23952d622719STom Zanussi 	if (event && event->type == RINGBUF_TYPE_PADDING) {
23962d622719STom Zanussi 		cpu_relax();
23972d622719STom Zanussi 		goto again;
23982d622719STom Zanussi 	}
23992d622719STom Zanussi 
2400f83c9d0fSSteven Rostedt 	return event;
2401f83c9d0fSSteven Rostedt }
2402f83c9d0fSSteven Rostedt 
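/*
 * Illustrative sketch, not part of the original file: how a caller
 * might use ring_buffer_peek() to look at the next event on a CPU
 * without consuming it. The function name and the pr_info() output
 * are hypothetical.
 */
static void example_peek_next_event(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	event = ring_buffer_peek(buffer, cpu, &ts);
	if (event)
		pr_info("next event: len=%u ts=%llu\n",
			ring_buffer_event_length(event),
			(unsigned long long)ts);
}
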
2403f83c9d0fSSteven Rostedt /**
2404f83c9d0fSSteven Rostedt  * ring_buffer_iter_peek - peek at the next event to be read
2405f83c9d0fSSteven Rostedt  * @iter: The ring buffer iterator
2406f83c9d0fSSteven Rostedt  * @ts: The timestamp counter of this event.
2407f83c9d0fSSteven Rostedt  *
2408f83c9d0fSSteven Rostedt  * This will return the event that will be read next, but does
2409f83c9d0fSSteven Rostedt  * not increment the iterator.
2410f83c9d0fSSteven Rostedt  */
2411f83c9d0fSSteven Rostedt struct ring_buffer_event *
2412f83c9d0fSSteven Rostedt ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2413f83c9d0fSSteven Rostedt {
2414f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2415f83c9d0fSSteven Rostedt 	struct ring_buffer_event *event;
2416f83c9d0fSSteven Rostedt 	unsigned long flags;
2417f83c9d0fSSteven Rostedt 
24182d622719STom Zanussi  again:
2419f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2420f83c9d0fSSteven Rostedt 	event = rb_iter_peek(iter, ts);
2421f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2422f83c9d0fSSteven Rostedt 
24232d622719STom Zanussi 	if (event && event->type == RINGBUF_TYPE_PADDING) {
24242d622719STom Zanussi 		cpu_relax();
24252d622719STom Zanussi 		goto again;
24262d622719STom Zanussi 	}
24272d622719STom Zanussi 
2428f83c9d0fSSteven Rostedt 	return event;
2429f83c9d0fSSteven Rostedt }
2430f83c9d0fSSteven Rostedt 
2431f83c9d0fSSteven Rostedt /**
24327a8e76a3SSteven Rostedt  * ring_buffer_consume - return an event and consume it
24337a8e76a3SSteven Rostedt  * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: variable to store the event's timestamp in (may be NULL)
24347a8e76a3SSteven Rostedt  *
24357a8e76a3SSteven Rostedt  * Returns the next event in the ring buffer, and that event is consumed.
24367a8e76a3SSteven Rostedt  * Meaning that sequential reads will keep returning a different event,
24377a8e76a3SSteven Rostedt  * and eventually empty the ring buffer if the producer is slower.
24387a8e76a3SSteven Rostedt  */
24397a8e76a3SSteven Rostedt struct ring_buffer_event *
24407a8e76a3SSteven Rostedt ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
24417a8e76a3SSteven Rostedt {
2442554f786eSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
2443554f786eSSteven Rostedt 	struct ring_buffer_event *event = NULL;
2444f83c9d0fSSteven Rostedt 	unsigned long flags;
24457a8e76a3SSteven Rostedt 
24462d622719STom Zanussi  again:
2447554f786eSSteven Rostedt 	/* might be called in atomic */
2448554f786eSSteven Rostedt 	preempt_disable();
24497a8e76a3SSteven Rostedt 
2450554f786eSSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2451554f786eSSteven Rostedt 		goto out;
2452554f786eSSteven Rostedt 
2453554f786eSSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
2454f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
24557a8e76a3SSteven Rostedt 
2456f83c9d0fSSteven Rostedt 	event = rb_buffer_peek(buffer, cpu, ts);
2457f83c9d0fSSteven Rostedt 	if (!event)
2458554f786eSSteven Rostedt 		goto out_unlock;
2459f83c9d0fSSteven Rostedt 
2460d769041fSSteven Rostedt 	rb_advance_reader(cpu_buffer);
24617a8e76a3SSteven Rostedt 
2462554f786eSSteven Rostedt  out_unlock:
2463f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2464f83c9d0fSSteven Rostedt 
2465554f786eSSteven Rostedt  out:
2466554f786eSSteven Rostedt 	preempt_enable();
2467554f786eSSteven Rostedt 
24682d622719STom Zanussi 	if (event && event->type == RINGBUF_TYPE_PADDING) {
24692d622719STom Zanussi 		cpu_relax();
24702d622719STom Zanussi 		goto again;
24712d622719STom Zanussi 	}
24722d622719STom Zanussi 
24737a8e76a3SSteven Rostedt 	return event;
24747a8e76a3SSteven Rostedt }
2475c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_consume);
24767a8e76a3SSteven Rostedt 
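/*
 * Illustrative sketch, not part of the original file: draining one
 * per-CPU buffer with the consuming read interface. The helper name
 * and the process_event() callback are hypothetical.
 */
static void example_drain_cpu(struct ring_buffer *buffer, int cpu,
			      void (*process_event)(void *data, unsigned len))
{
	struct ring_buffer_event *event;
	u64 ts;

	/* Each successful call removes the returned event from the buffer. */
	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
		process_event(ring_buffer_event_data(event),
			      ring_buffer_event_length(event));
}
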
24777a8e76a3SSteven Rostedt /**
24787a8e76a3SSteven Rostedt  * ring_buffer_read_start - start a non consuming read of the buffer
24797a8e76a3SSteven Rostedt  * @buffer: The ring buffer to read from
24807a8e76a3SSteven Rostedt  * @cpu: The cpu buffer to iterate over
24817a8e76a3SSteven Rostedt  *
24827a8e76a3SSteven Rostedt  * This starts up an iteration through the buffer. It also disables
24837a8e76a3SSteven Rostedt  * the recording to the buffer until the reading is finished.
24847a8e76a3SSteven Rostedt  * This prevents the reading from being corrupted. This is not
24857a8e76a3SSteven Rostedt  * a consuming read, so a producer is not expected.
24867a8e76a3SSteven Rostedt  *
24877a8e76a3SSteven Rostedt  * Must be paired with ring_buffer_read_finish.
24887a8e76a3SSteven Rostedt  */
24897a8e76a3SSteven Rostedt struct ring_buffer_iter *
24907a8e76a3SSteven Rostedt ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
24917a8e76a3SSteven Rostedt {
24927a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
24938aabee57SSteven Rostedt 	struct ring_buffer_iter *iter;
2494d769041fSSteven Rostedt 	unsigned long flags;
24957a8e76a3SSteven Rostedt 
24969e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
24978aabee57SSteven Rostedt 		return NULL;
24987a8e76a3SSteven Rostedt 
24997a8e76a3SSteven Rostedt 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
25007a8e76a3SSteven Rostedt 	if (!iter)
25018aabee57SSteven Rostedt 		return NULL;
25027a8e76a3SSteven Rostedt 
25037a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
25047a8e76a3SSteven Rostedt 
25057a8e76a3SSteven Rostedt 	iter->cpu_buffer = cpu_buffer;
25067a8e76a3SSteven Rostedt 
25077a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
25087a8e76a3SSteven Rostedt 	synchronize_sched();
25097a8e76a3SSteven Rostedt 
2510f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
25113e03fb7fSSteven Rostedt 	__raw_spin_lock(&cpu_buffer->lock);
2512642edba5SSteven Rostedt 	rb_iter_reset(iter);
25133e03fb7fSSteven Rostedt 	__raw_spin_unlock(&cpu_buffer->lock);
2514f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
25157a8e76a3SSteven Rostedt 
25167a8e76a3SSteven Rostedt 	return iter;
25177a8e76a3SSteven Rostedt }
2518c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_start);
25197a8e76a3SSteven Rostedt 
25207a8e76a3SSteven Rostedt /**
25217a8e76a3SSteven Rostedt  * ring_buffer_read_finish - finish reading the iterator of the buffer
25227a8e76a3SSteven Rostedt  * @iter: The iterator retrieved by ring_buffer_read_start
25237a8e76a3SSteven Rostedt  *
25247a8e76a3SSteven Rostedt  * This re-enables the recording to the buffer, and frees the
25257a8e76a3SSteven Rostedt  * iterator.
25267a8e76a3SSteven Rostedt  */
25277a8e76a3SSteven Rostedt void
25287a8e76a3SSteven Rostedt ring_buffer_read_finish(struct ring_buffer_iter *iter)
25297a8e76a3SSteven Rostedt {
25307a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
25317a8e76a3SSteven Rostedt 
25327a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
25337a8e76a3SSteven Rostedt 	kfree(iter);
25347a8e76a3SSteven Rostedt }
2535c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
25367a8e76a3SSteven Rostedt 
25377a8e76a3SSteven Rostedt /**
25387a8e76a3SSteven Rostedt  * ring_buffer_read - read the next item in the ring buffer by the iterator
25397a8e76a3SSteven Rostedt  * @iter: The ring buffer iterator
25407a8e76a3SSteven Rostedt  * @ts: The time stamp of the event read.
25417a8e76a3SSteven Rostedt  *
25427a8e76a3SSteven Rostedt  * This reads the next event in the ring buffer and increments the iterator.
25437a8e76a3SSteven Rostedt  */
25447a8e76a3SSteven Rostedt struct ring_buffer_event *
25457a8e76a3SSteven Rostedt ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
25467a8e76a3SSteven Rostedt {
25477a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
2548f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2549f83c9d0fSSteven Rostedt 	unsigned long flags;
25507a8e76a3SSteven Rostedt 
25512d622719STom Zanussi  again:
2552f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2553f83c9d0fSSteven Rostedt 	event = rb_iter_peek(iter, ts);
25547a8e76a3SSteven Rostedt 	if (!event)
2555f83c9d0fSSteven Rostedt 		goto out;
25567a8e76a3SSteven Rostedt 
25577a8e76a3SSteven Rostedt 	rb_advance_iter(iter);
2558f83c9d0fSSteven Rostedt  out:
2559f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
25607a8e76a3SSteven Rostedt 
25612d622719STom Zanussi 	if (event && event->type == RINGBUF_TYPE_PADDING) {
25622d622719STom Zanussi 		cpu_relax();
25632d622719STom Zanussi 		goto again;
25642d622719STom Zanussi 	}
25652d622719STom Zanussi 
25667a8e76a3SSteven Rostedt 	return event;
25677a8e76a3SSteven Rostedt }
2568c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read);
25697a8e76a3SSteven Rostedt 
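/*
 * Illustrative sketch, not part of the original file: a non-consuming
 * walk over one per-CPU buffer with the iterator interface. Recording
 * is disabled by ring_buffer_read_start() and re-enabled by
 * ring_buffer_read_finish(). The helper name is hypothetical.
 */
static void example_iterate_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	/* ring_buffer_read() returns the next event and advances the iterator. */
	while ((event = ring_buffer_read(iter, &ts)))
		pr_info("event: len=%u ts=%llu\n",
			ring_buffer_event_length(event),
			(unsigned long long)ts);

	ring_buffer_read_finish(iter);
}
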
25707a8e76a3SSteven Rostedt /**
25717a8e76a3SSteven Rostedt  * ring_buffer_size - return the size of the ring buffer (in bytes)
25727a8e76a3SSteven Rostedt  * @buffer: The ring buffer.
25737a8e76a3SSteven Rostedt  */
25747a8e76a3SSteven Rostedt unsigned long ring_buffer_size(struct ring_buffer *buffer)
25757a8e76a3SSteven Rostedt {
25767a8e76a3SSteven Rostedt 	return BUF_PAGE_SIZE * buffer->pages;
25777a8e76a3SSteven Rostedt }
2578c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_size);
25797a8e76a3SSteven Rostedt 
25807a8e76a3SSteven Rostedt static void
25817a8e76a3SSteven Rostedt rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
25827a8e76a3SSteven Rostedt {
25837a8e76a3SSteven Rostedt 	cpu_buffer->head_page
25847a8e76a3SSteven Rostedt 		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
2585bf41a158SSteven Rostedt 	local_set(&cpu_buffer->head_page->write, 0);
2586abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->head_page->page->commit, 0);
25877a8e76a3SSteven Rostedt 
25886f807acdSSteven Rostedt 	cpu_buffer->head_page->read = 0;
2589bf41a158SSteven Rostedt 
2590bf41a158SSteven Rostedt 	cpu_buffer->tail_page = cpu_buffer->head_page;
2591bf41a158SSteven Rostedt 	cpu_buffer->commit_page = cpu_buffer->head_page;
2592bf41a158SSteven Rostedt 
2593bf41a158SSteven Rostedt 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2594bf41a158SSteven Rostedt 	local_set(&cpu_buffer->reader_page->write, 0);
2595abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->reader_page->page->commit, 0);
25966f807acdSSteven Rostedt 	cpu_buffer->reader_page->read = 0;
2597d769041fSSteven Rostedt 
25987a8e76a3SSteven Rostedt 	cpu_buffer->overrun = 0;
25997a8e76a3SSteven Rostedt 	cpu_buffer->entries = 0;
260069507c06SSteven Rostedt 
260169507c06SSteven Rostedt 	cpu_buffer->write_stamp = 0;
260269507c06SSteven Rostedt 	cpu_buffer->read_stamp = 0;
26037a8e76a3SSteven Rostedt }
26047a8e76a3SSteven Rostedt 
26057a8e76a3SSteven Rostedt /**
26067a8e76a3SSteven Rostedt  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
26077a8e76a3SSteven Rostedt  * @buffer: The ring buffer to reset a per cpu buffer of
26087a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to be reset
26097a8e76a3SSteven Rostedt  */
26107a8e76a3SSteven Rostedt void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
26117a8e76a3SSteven Rostedt {
26127a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
26137a8e76a3SSteven Rostedt 	unsigned long flags;
26147a8e76a3SSteven Rostedt 
26159e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
26168aabee57SSteven Rostedt 		return;
26177a8e76a3SSteven Rostedt 
2618f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2619f83c9d0fSSteven Rostedt 
26203e03fb7fSSteven Rostedt 	__raw_spin_lock(&cpu_buffer->lock);
26217a8e76a3SSteven Rostedt 
26227a8e76a3SSteven Rostedt 	rb_reset_cpu(cpu_buffer);
26237a8e76a3SSteven Rostedt 
26243e03fb7fSSteven Rostedt 	__raw_spin_unlock(&cpu_buffer->lock);
2625f83c9d0fSSteven Rostedt 
2626f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
26277a8e76a3SSteven Rostedt }
2628c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
26297a8e76a3SSteven Rostedt 
26307a8e76a3SSteven Rostedt /**
26317a8e76a3SSteven Rostedt  * ring_buffer_reset - reset a ring buffer
26327a8e76a3SSteven Rostedt  * @buffer: The ring buffer to reset all cpu buffers
26337a8e76a3SSteven Rostedt  */
26347a8e76a3SSteven Rostedt void ring_buffer_reset(struct ring_buffer *buffer)
26357a8e76a3SSteven Rostedt {
26367a8e76a3SSteven Rostedt 	int cpu;
26377a8e76a3SSteven Rostedt 
26387a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu)
2639d769041fSSteven Rostedt 		ring_buffer_reset_cpu(buffer, cpu);
26407a8e76a3SSteven Rostedt }
2641c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset);
26427a8e76a3SSteven Rostedt 
26437a8e76a3SSteven Rostedt /**
26447a8e76a3SSteven Rostedt  * ring_buffer_empty - is the ring buffer empty?
26457a8e76a3SSteven Rostedt  * @buffer: The ring buffer to test
26467a8e76a3SSteven Rostedt  */
26477a8e76a3SSteven Rostedt int ring_buffer_empty(struct ring_buffer *buffer)
26487a8e76a3SSteven Rostedt {
26497a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
26507a8e76a3SSteven Rostedt 	int cpu;
26517a8e76a3SSteven Rostedt 
26527a8e76a3SSteven Rostedt 	/* yes this is racy, but if you don't like the race, lock the buffer */
26537a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
26547a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
26557a8e76a3SSteven Rostedt 		if (!rb_per_cpu_empty(cpu_buffer))
26567a8e76a3SSteven Rostedt 			return 0;
26577a8e76a3SSteven Rostedt 	}
2658554f786eSSteven Rostedt 
26597a8e76a3SSteven Rostedt 	return 1;
26607a8e76a3SSteven Rostedt }
2661c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty);
26627a8e76a3SSteven Rostedt 
26637a8e76a3SSteven Rostedt /**
26647a8e76a3SSteven Rostedt  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
26657a8e76a3SSteven Rostedt  * @buffer: The ring buffer
26667a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to test
26677a8e76a3SSteven Rostedt  */
26687a8e76a3SSteven Rostedt int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
26697a8e76a3SSteven Rostedt {
26707a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
26718aabee57SSteven Rostedt 	int ret;
26727a8e76a3SSteven Rostedt 
26739e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
26748aabee57SSteven Rostedt 		return 1;
26757a8e76a3SSteven Rostedt 
26767a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
2677554f786eSSteven Rostedt 	ret = rb_per_cpu_empty(cpu_buffer);
2678554f786eSSteven Rostedt 
2680554f786eSSteven Rostedt 	return ret;
26817a8e76a3SSteven Rostedt }
2682c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
26837a8e76a3SSteven Rostedt 
26847a8e76a3SSteven Rostedt /**
26857a8e76a3SSteven Rostedt  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
26867a8e76a3SSteven Rostedt  * @buffer_a: One buffer to swap with
26877a8e76a3SSteven Rostedt  * @buffer_b: The other buffer to swap with
26887a8e76a3SSteven Rostedt  *
26897a8e76a3SSteven Rostedt  * This function is useful for tracers that want to take a "snapshot"
26907a8e76a3SSteven Rostedt  * of a CPU buffer and have another backup buffer lying around.
26917a8e76a3SSteven Rostedt  * It is expected that the tracer handles the cpu buffer not being
26927a8e76a3SSteven Rostedt  * used at the moment.
26937a8e76a3SSteven Rostedt  */
26947a8e76a3SSteven Rostedt int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
26957a8e76a3SSteven Rostedt 			 struct ring_buffer *buffer_b, int cpu)
26967a8e76a3SSteven Rostedt {
26977a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer_a;
26987a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer_b;
2699554f786eSSteven Rostedt 	int ret = -EINVAL;
2700554f786eSSteven Rostedt 
27019e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
27029e01c1b7SRusty Russell 	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
2703554f786eSSteven Rostedt 		goto out;
27047a8e76a3SSteven Rostedt 
27057a8e76a3SSteven Rostedt 	/* At least make sure the two buffers are somewhat the same */
27066d102bc6SLai Jiangshan 	if (buffer_a->pages != buffer_b->pages)
2707554f786eSSteven Rostedt 		goto out;
2708554f786eSSteven Rostedt 
2709554f786eSSteven Rostedt 	ret = -EAGAIN;
27107a8e76a3SSteven Rostedt 
271197b17efeSSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
2712554f786eSSteven Rostedt 		goto out;
271397b17efeSSteven Rostedt 
271497b17efeSSteven Rostedt 	if (atomic_read(&buffer_a->record_disabled))
2715554f786eSSteven Rostedt 		goto out;
271697b17efeSSteven Rostedt 
271797b17efeSSteven Rostedt 	if (atomic_read(&buffer_b->record_disabled))
2718554f786eSSteven Rostedt 		goto out;
271997b17efeSSteven Rostedt 
27207a8e76a3SSteven Rostedt 	cpu_buffer_a = buffer_a->buffers[cpu];
27217a8e76a3SSteven Rostedt 	cpu_buffer_b = buffer_b->buffers[cpu];
27227a8e76a3SSteven Rostedt 
272397b17efeSSteven Rostedt 	if (atomic_read(&cpu_buffer_a->record_disabled))
2724554f786eSSteven Rostedt 		goto out;
272597b17efeSSteven Rostedt 
272697b17efeSSteven Rostedt 	if (atomic_read(&cpu_buffer_b->record_disabled))
2727554f786eSSteven Rostedt 		goto out;
272897b17efeSSteven Rostedt 
27297a8e76a3SSteven Rostedt 	/*
27307a8e76a3SSteven Rostedt 	 * We can't do a synchronize_sched here because this
27317a8e76a3SSteven Rostedt 	 * function can be called in atomic context.
27327a8e76a3SSteven Rostedt 	 * Normally this will be called from the same CPU as cpu.
27337a8e76a3SSteven Rostedt 	 * If not it's up to the caller to protect this.
27347a8e76a3SSteven Rostedt 	 */
27357a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer_a->record_disabled);
27367a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer_b->record_disabled);
27377a8e76a3SSteven Rostedt 
27387a8e76a3SSteven Rostedt 	buffer_a->buffers[cpu] = cpu_buffer_b;
27397a8e76a3SSteven Rostedt 	buffer_b->buffers[cpu] = cpu_buffer_a;
27407a8e76a3SSteven Rostedt 
27417a8e76a3SSteven Rostedt 	cpu_buffer_b->buffer = buffer_a;
27427a8e76a3SSteven Rostedt 	cpu_buffer_a->buffer = buffer_b;
27437a8e76a3SSteven Rostedt 
27447a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer_a->record_disabled);
27457a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer_b->record_disabled);
27467a8e76a3SSteven Rostedt 
2747554f786eSSteven Rostedt 	ret = 0;
2748554f786eSSteven Rostedt out:
2749554f786eSSteven Rostedt 	return ret;
27507a8e76a3SSteven Rostedt }
2751c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
27527a8e76a3SSteven Rostedt 
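/*
 * Illustrative sketch, not part of the original file: the "snapshot"
 * pattern described above, swapping a live CPU buffer with a spare
 * one so the old contents can be read at leisure. Both ring buffers
 * and the helper name are hypothetical; the caller must guarantee the
 * spare buffer is not otherwise in use.
 */
static int example_snapshot_cpu(struct ring_buffer *live,
				struct ring_buffer *spare, int cpu)
{
	int ret;

	ret = ring_buffer_swap_cpu(live, spare, cpu);
	if (ret)
		return ret;

	/* The data recorded so far on @cpu now lives in @spare. */
	return 0;
}
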
27538789a9e7SSteven Rostedt static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
2754667d2412SLai Jiangshan 			      struct buffer_data_page *bpage,
2755667d2412SLai Jiangshan 			      unsigned int offset)
27568789a9e7SSteven Rostedt {
27578789a9e7SSteven Rostedt 	struct ring_buffer_event *event;
27588789a9e7SSteven Rostedt 	unsigned long head;
27598789a9e7SSteven Rostedt 
27608789a9e7SSteven Rostedt 	__raw_spin_lock(&cpu_buffer->lock);
2761667d2412SLai Jiangshan 	for (head = offset; head < local_read(&bpage->commit);
27628789a9e7SSteven Rostedt 	     head += rb_event_length(event)) {
27638789a9e7SSteven Rostedt 
2764044fa782SSteven Rostedt 		event = __rb_data_page_index(bpage, head);
27658789a9e7SSteven Rostedt 		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
27668789a9e7SSteven Rostedt 			return;
27678789a9e7SSteven Rostedt 		/* Only count data entries */
27688789a9e7SSteven Rostedt 		if (event->type != RINGBUF_TYPE_DATA)
27698789a9e7SSteven Rostedt 			continue;
27708789a9e7SSteven Rostedt 		cpu_buffer->entries--;
27718789a9e7SSteven Rostedt 	}
27728789a9e7SSteven Rostedt 	__raw_spin_unlock(&cpu_buffer->lock);
27738789a9e7SSteven Rostedt }
27748789a9e7SSteven Rostedt 
27758789a9e7SSteven Rostedt /**
27768789a9e7SSteven Rostedt  * ring_buffer_alloc_read_page - allocate a page to read from buffer
27778789a9e7SSteven Rostedt  * @buffer: the buffer to allocate for.
27788789a9e7SSteven Rostedt  *
27798789a9e7SSteven Rostedt  * This function is used in conjunction with ring_buffer_read_page.
27808789a9e7SSteven Rostedt  * When reading a full page from the ring buffer, these functions
27818789a9e7SSteven Rostedt  * can be used to speed up the process. The calling function should
27828789a9e7SSteven Rostedt  * allocate a few pages first with this function. Then when it
27838789a9e7SSteven Rostedt  * needs to get pages from the ring buffer, it passes the result
27848789a9e7SSteven Rostedt  * of this function into ring_buffer_read_page, which will swap
27858789a9e7SSteven Rostedt  * the page that was allocated, with the read page of the buffer.
27868789a9e7SSteven Rostedt  *
27878789a9e7SSteven Rostedt  * Returns:
27888789a9e7SSteven Rostedt  *  The page allocated, or NULL on error.
27898789a9e7SSteven Rostedt  */
27908789a9e7SSteven Rostedt void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
27918789a9e7SSteven Rostedt {
2792044fa782SSteven Rostedt 	struct buffer_data_page *bpage;
2793ef7a4a16SSteven Rostedt 	unsigned long addr;
27948789a9e7SSteven Rostedt 
27958789a9e7SSteven Rostedt 	addr = __get_free_page(GFP_KERNEL);
27968789a9e7SSteven Rostedt 	if (!addr)
27978789a9e7SSteven Rostedt 		return NULL;
27988789a9e7SSteven Rostedt 
2799044fa782SSteven Rostedt 	bpage = (void *)addr;
28008789a9e7SSteven Rostedt 
2801ef7a4a16SSteven Rostedt 	rb_init_page(bpage);
2802ef7a4a16SSteven Rostedt 
2803044fa782SSteven Rostedt 	return bpage;
28048789a9e7SSteven Rostedt }
28058789a9e7SSteven Rostedt 
28068789a9e7SSteven Rostedt /**
28078789a9e7SSteven Rostedt  * ring_buffer_free_read_page - free an allocated read page
28088789a9e7SSteven Rostedt  * @buffer: the buffer the page was allocated for
28098789a9e7SSteven Rostedt  * @data: the page to free
28108789a9e7SSteven Rostedt  *
28118789a9e7SSteven Rostedt  * Free a page allocated from ring_buffer_alloc_read_page.
28128789a9e7SSteven Rostedt  */
28138789a9e7SSteven Rostedt void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
28148789a9e7SSteven Rostedt {
28158789a9e7SSteven Rostedt 	free_page((unsigned long)data);
28168789a9e7SSteven Rostedt }
28178789a9e7SSteven Rostedt 
28188789a9e7SSteven Rostedt /**
28198789a9e7SSteven Rostedt  * ring_buffer_read_page - extract a page from the ring buffer
28208789a9e7SSteven Rostedt  * @buffer: buffer to extract from
28218789a9e7SSteven Rostedt  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2822ef7a4a16SSteven Rostedt  * @len: amount to extract
28238789a9e7SSteven Rostedt  * @cpu: the cpu of the buffer to extract
28248789a9e7SSteven Rostedt  * @full: should the extraction only happen when the page is full.
28258789a9e7SSteven Rostedt  *
28268789a9e7SSteven Rostedt  * This function will pull out a page from the ring buffer and consume it.
28278789a9e7SSteven Rostedt  * @data_page must be the address of the variable that was returned
28288789a9e7SSteven Rostedt  * from ring_buffer_alloc_read_page. This is because the page might be used
28298789a9e7SSteven Rostedt  * to swap with a page in the ring buffer.
28308789a9e7SSteven Rostedt  *
28318789a9e7SSteven Rostedt  * for example:
2832b85fa01eSLai Jiangshan  *	rpage = ring_buffer_alloc_read_page(buffer);
28338789a9e7SSteven Rostedt  *	if (!rpage)
28348789a9e7SSteven Rostedt  *		return error;
2835ef7a4a16SSteven Rostedt  *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
2836667d2412SLai Jiangshan  *	if (ret >= 0)
2837667d2412SLai Jiangshan  *		process_page(rpage, ret);
28388789a9e7SSteven Rostedt  *
28398789a9e7SSteven Rostedt  * When @full is set, the function will only succeed (return >= 0) when
28408789a9e7SSteven Rostedt  * the writer is off the reader page.
28418789a9e7SSteven Rostedt  *
28428789a9e7SSteven Rostedt  * Note: it is up to the calling functions to handle sleeps and wakeups.
28438789a9e7SSteven Rostedt  *  The ring buffer can be used anywhere in the kernel and can not
28448789a9e7SSteven Rostedt  *  blindly call wake_up. The layer that uses the ring buffer must be
28458789a9e7SSteven Rostedt  *  responsible for that.
28468789a9e7SSteven Rostedt  *
28478789a9e7SSteven Rostedt  * Returns:
2848667d2412SLai Jiangshan  *  >=0 if data has been transferred, returns the offset of consumed data.
2849667d2412SLai Jiangshan  *  <0 if no data has been transferred.
28508789a9e7SSteven Rostedt  */
28518789a9e7SSteven Rostedt int ring_buffer_read_page(struct ring_buffer *buffer,
2852ef7a4a16SSteven Rostedt 			  void **data_page, size_t len, int cpu, int full)
28538789a9e7SSteven Rostedt {
28548789a9e7SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
28558789a9e7SSteven Rostedt 	struct ring_buffer_event *event;
2856044fa782SSteven Rostedt 	struct buffer_data_page *bpage;
2857ef7a4a16SSteven Rostedt 	struct buffer_page *reader;
28588789a9e7SSteven Rostedt 	unsigned long flags;
2859ef7a4a16SSteven Rostedt 	unsigned int commit;
2860667d2412SLai Jiangshan 	unsigned int read;
28614f3640f8SSteven Rostedt 	u64 save_timestamp;
2862667d2412SLai Jiangshan 	int ret = -1;
28638789a9e7SSteven Rostedt 
2864554f786eSSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2865554f786eSSteven Rostedt 		goto out;
2866554f786eSSteven Rostedt 
2867474d32b6SSteven Rostedt 	/*
2868474d32b6SSteven Rostedt 	 * If len is not big enough to hold the page header, then
2869474d32b6SSteven Rostedt 	 * we can not copy anything.
2870474d32b6SSteven Rostedt 	 */
2871474d32b6SSteven Rostedt 	if (len <= BUF_PAGE_HDR_SIZE)
2872554f786eSSteven Rostedt 		goto out;
2873474d32b6SSteven Rostedt 
2874474d32b6SSteven Rostedt 	len -= BUF_PAGE_HDR_SIZE;
2875474d32b6SSteven Rostedt 
28768789a9e7SSteven Rostedt 	if (!data_page)
2877554f786eSSteven Rostedt 		goto out;
28788789a9e7SSteven Rostedt 
2879044fa782SSteven Rostedt 	bpage = *data_page;
2880044fa782SSteven Rostedt 	if (!bpage)
2881554f786eSSteven Rostedt 		goto out;
28828789a9e7SSteven Rostedt 
28838789a9e7SSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
28848789a9e7SSteven Rostedt 
2885ef7a4a16SSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
2886ef7a4a16SSteven Rostedt 	if (!reader)
2887554f786eSSteven Rostedt 		goto out_unlock;
28888789a9e7SSteven Rostedt 
2889ef7a4a16SSteven Rostedt 	event = rb_reader_event(cpu_buffer);
2890667d2412SLai Jiangshan 
2891ef7a4a16SSteven Rostedt 	read = reader->read;
2892ef7a4a16SSteven Rostedt 	commit = rb_page_commit(reader);
2893ef7a4a16SSteven Rostedt 
28948789a9e7SSteven Rostedt 	/*
2895474d32b6SSteven Rostedt 	 * If this page has been partially read or
2896474d32b6SSteven Rostedt 	 * if len is not big enough to read the rest of the page or
2897474d32b6SSteven Rostedt 	 * a writer is still on the page, then
2898474d32b6SSteven Rostedt 	 * we must copy the data from the page to the buffer.
2899474d32b6SSteven Rostedt 	 * Otherwise, we can simply swap the page with the one passed in.
29008789a9e7SSteven Rostedt 	 */
2901474d32b6SSteven Rostedt 	if (read || (len < (commit - read)) ||
2902ef7a4a16SSteven Rostedt 	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
2903667d2412SLai Jiangshan 		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
2904474d32b6SSteven Rostedt 		unsigned int rpos = read;
2905474d32b6SSteven Rostedt 		unsigned int pos = 0;
2906ef7a4a16SSteven Rostedt 		unsigned int size;
29078789a9e7SSteven Rostedt 
29088789a9e7SSteven Rostedt 		if (full)
2909554f786eSSteven Rostedt 			goto out_unlock;
29108789a9e7SSteven Rostedt 
2911ef7a4a16SSteven Rostedt 		if (len > (commit - read))
2912ef7a4a16SSteven Rostedt 			len = (commit - read);
2913ef7a4a16SSteven Rostedt 
2914ef7a4a16SSteven Rostedt 		size = rb_event_length(event);
2915ef7a4a16SSteven Rostedt 
2916ef7a4a16SSteven Rostedt 		if (len < size)
2917554f786eSSteven Rostedt 			goto out_unlock;
2918ef7a4a16SSteven Rostedt 
29194f3640f8SSteven Rostedt 		/* save the current timestamp, since the user will need it */
29204f3640f8SSteven Rostedt 		save_timestamp = cpu_buffer->read_stamp;
29214f3640f8SSteven Rostedt 
2922ef7a4a16SSteven Rostedt 		/* Need to copy one event at a time */
2923ef7a4a16SSteven Rostedt 		do {
2924474d32b6SSteven Rostedt 			memcpy(bpage->data + pos, rpage->data + rpos, size);
2925ef7a4a16SSteven Rostedt 
2926ef7a4a16SSteven Rostedt 			len -= size;
2927ef7a4a16SSteven Rostedt 
2928ef7a4a16SSteven Rostedt 			rb_advance_reader(cpu_buffer);
2929474d32b6SSteven Rostedt 			rpos = reader->read;
2930474d32b6SSteven Rostedt 			pos += size;
2931ef7a4a16SSteven Rostedt 
2932ef7a4a16SSteven Rostedt 			event = rb_reader_event(cpu_buffer);
2933ef7a4a16SSteven Rostedt 			size = rb_event_length(event);
2934ef7a4a16SSteven Rostedt 		} while (len > size);
2935667d2412SLai Jiangshan 
2936667d2412SLai Jiangshan 		/* update bpage */
2937ef7a4a16SSteven Rostedt 		local_set(&bpage->commit, pos);
29384f3640f8SSteven Rostedt 		bpage->time_stamp = save_timestamp;
2939ef7a4a16SSteven Rostedt 
2940474d32b6SSteven Rostedt 		/* we copied everything to the beginning */
2941474d32b6SSteven Rostedt 		read = 0;
29428789a9e7SSteven Rostedt 	} else {
29438789a9e7SSteven Rostedt 		/* swap the pages */
2944044fa782SSteven Rostedt 		rb_init_page(bpage);
2945ef7a4a16SSteven Rostedt 		bpage = reader->page;
2946ef7a4a16SSteven Rostedt 		reader->page = *data_page;
2947ef7a4a16SSteven Rostedt 		local_set(&reader->write, 0);
2948ef7a4a16SSteven Rostedt 		reader->read = 0;
2949044fa782SSteven Rostedt 		*data_page = bpage;
29508789a9e7SSteven Rostedt 
29518789a9e7SSteven Rostedt 		/* update the entry counter */
2952667d2412SLai Jiangshan 		rb_remove_entries(cpu_buffer, bpage, read);
2953ef7a4a16SSteven Rostedt 	}
2954ef7a4a16SSteven Rostedt 	ret = read;
2955ef7a4a16SSteven Rostedt 
2956554f786eSSteven Rostedt  out_unlock:
29578789a9e7SSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
29588789a9e7SSteven Rostedt 
2959554f786eSSteven Rostedt  out:
29608789a9e7SSteven Rostedt 	return ret;
29618789a9e7SSteven Rostedt }
29628789a9e7SSteven Rostedt 
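/*
 * Illustrative sketch, not part of the original file: the full
 * alloc/read/free cycle for pulling a page of events out of one
 * per-CPU buffer, as a splice-style reader might do. The helper name
 * and the consume_page() callback are hypothetical.
 */
static int example_read_one_page(struct ring_buffer *buffer, int cpu,
				 void (*consume_page)(void *data, int offset))
{
	void *page;
	int ret;

	page = ring_buffer_alloc_read_page(buffer);
	if (!page)
		return -ENOMEM;

	/* A negative return means no data was transferred. */
	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
	if (ret >= 0)
		consume_page(page, ret);

	ring_buffer_free_read_page(buffer, page);
	return ret;
}
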
2963a3583244SSteven Rostedt static ssize_t
2964a3583244SSteven Rostedt rb_simple_read(struct file *filp, char __user *ubuf,
2965a3583244SSteven Rostedt 	       size_t cnt, loff_t *ppos)
2966a3583244SSteven Rostedt {
29675e39841cSHannes Eder 	unsigned long *p = filp->private_data;
2968a3583244SSteven Rostedt 	char buf[64];
2969a3583244SSteven Rostedt 	int r;
2970a3583244SSteven Rostedt 
2971033601a3SSteven Rostedt 	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2972033601a3SSteven Rostedt 		r = sprintf(buf, "permanently disabled\n");
2973033601a3SSteven Rostedt 	else
2974033601a3SSteven Rostedt 		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
2975a3583244SSteven Rostedt 
2976a3583244SSteven Rostedt 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2977a3583244SSteven Rostedt }
2978a3583244SSteven Rostedt 
2979a3583244SSteven Rostedt static ssize_t
2980a3583244SSteven Rostedt rb_simple_write(struct file *filp, const char __user *ubuf,
2981a3583244SSteven Rostedt 		size_t cnt, loff_t *ppos)
2982a3583244SSteven Rostedt {
29835e39841cSHannes Eder 	unsigned long *p = filp->private_data;
2984a3583244SSteven Rostedt 	char buf[64];
29855e39841cSHannes Eder 	unsigned long val;
2986a3583244SSteven Rostedt 	int ret;
2987a3583244SSteven Rostedt 
2988a3583244SSteven Rostedt 	if (cnt >= sizeof(buf))
2989a3583244SSteven Rostedt 		return -EINVAL;
2990a3583244SSteven Rostedt 
2991a3583244SSteven Rostedt 	if (copy_from_user(&buf, ubuf, cnt))
2992a3583244SSteven Rostedt 		return -EFAULT;
2993a3583244SSteven Rostedt 
2994a3583244SSteven Rostedt 	buf[cnt] = 0;
2995a3583244SSteven Rostedt 
2996a3583244SSteven Rostedt 	ret = strict_strtoul(buf, 10, &val);
2997a3583244SSteven Rostedt 	if (ret < 0)
2998a3583244SSteven Rostedt 		return ret;
2999a3583244SSteven Rostedt 
3000033601a3SSteven Rostedt 	if (val)
3001033601a3SSteven Rostedt 		set_bit(RB_BUFFERS_ON_BIT, p);
3002033601a3SSteven Rostedt 	else
3003033601a3SSteven Rostedt 		clear_bit(RB_BUFFERS_ON_BIT, p);
3004a3583244SSteven Rostedt 
3005a3583244SSteven Rostedt 	(*ppos)++;
3006a3583244SSteven Rostedt 
3007a3583244SSteven Rostedt 	return cnt;
3008a3583244SSteven Rostedt }
3009a3583244SSteven Rostedt 
30105e2336a0SSteven Rostedt static const struct file_operations rb_simple_fops = {
3011a3583244SSteven Rostedt 	.open		= tracing_open_generic,
3012a3583244SSteven Rostedt 	.read		= rb_simple_read,
3013a3583244SSteven Rostedt 	.write		= rb_simple_write,
3014a3583244SSteven Rostedt };
3015a3583244SSteven Rostedt 
3016a3583244SSteven Rostedt 
3017a3583244SSteven Rostedt static __init int rb_init_debugfs(void)
3018a3583244SSteven Rostedt {
3019a3583244SSteven Rostedt 	struct dentry *d_tracer;
3020a3583244SSteven Rostedt 
3021a3583244SSteven Rostedt 	d_tracer = tracing_init_dentry();
3022a3583244SSteven Rostedt 
30235452af66SFrederic Weisbecker 	trace_create_file("tracing_on", 0644, d_tracer,
3024033601a3SSteven Rostedt 			    &ring_buffer_flags, &rb_simple_fops);
3025a3583244SSteven Rostedt 
3026a3583244SSteven Rostedt 	return 0;
3027a3583244SSteven Rostedt }
3028a3583244SSteven Rostedt 
3029a3583244SSteven Rostedt fs_initcall(rb_init_debugfs);
3030554f786eSSteven Rostedt 
303159222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU
303209c9e84dSFrederic Weisbecker static int rb_cpu_notify(struct notifier_block *self,
3033554f786eSSteven Rostedt 			 unsigned long action, void *hcpu)
3034554f786eSSteven Rostedt {
3035554f786eSSteven Rostedt 	struct ring_buffer *buffer =
3036554f786eSSteven Rostedt 		container_of(self, struct ring_buffer, cpu_notify);
3037554f786eSSteven Rostedt 	long cpu = (long)hcpu;
3038554f786eSSteven Rostedt 
3039554f786eSSteven Rostedt 	switch (action) {
3040554f786eSSteven Rostedt 	case CPU_UP_PREPARE:
3041554f786eSSteven Rostedt 	case CPU_UP_PREPARE_FROZEN:
3042554f786eSSteven Rostedt 		if (cpu_isset(cpu, *buffer->cpumask))
3043554f786eSSteven Rostedt 			return NOTIFY_OK;
3044554f786eSSteven Rostedt 
3045554f786eSSteven Rostedt 		buffer->buffers[cpu] =
3046554f786eSSteven Rostedt 			rb_allocate_cpu_buffer(buffer, cpu);
3047554f786eSSteven Rostedt 		if (!buffer->buffers[cpu]) {
3048554f786eSSteven Rostedt 			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
3049554f786eSSteven Rostedt 			     cpu);
3050554f786eSSteven Rostedt 			return NOTIFY_OK;
3051554f786eSSteven Rostedt 		}
3052554f786eSSteven Rostedt 		smp_wmb();
3053554f786eSSteven Rostedt 		cpu_set(cpu, *buffer->cpumask);
3054554f786eSSteven Rostedt 		break;
3055554f786eSSteven Rostedt 	case CPU_DOWN_PREPARE:
3056554f786eSSteven Rostedt 	case CPU_DOWN_PREPARE_FROZEN:
3057554f786eSSteven Rostedt 		/*
3058554f786eSSteven Rostedt 		 * Do nothing.
3059554f786eSSteven Rostedt 		 *  If we were to free the buffer, then the user would
3060554f786eSSteven Rostedt 		 *  lose any trace that was in the buffer.
3061554f786eSSteven Rostedt 		 */
3062554f786eSSteven Rostedt 		break;
3063554f786eSSteven Rostedt 	default:
3064554f786eSSteven Rostedt 		break;
3065554f786eSSteven Rostedt 	}
3066554f786eSSteven Rostedt 	return NOTIFY_OK;
3067554f786eSSteven Rostedt }
3068554f786eSSteven Rostedt #endif
3069