xref: /linux-6.15/kernel/trace/ring_buffer.c (revision b85fa01e)
17a8e76a3SSteven Rostedt /*
27a8e76a3SSteven Rostedt  * Generic ring buffer
37a8e76a3SSteven Rostedt  *
47a8e76a3SSteven Rostedt  * Copyright (C) 2008 Steven Rostedt <[email protected]>
57a8e76a3SSteven Rostedt  */
67a8e76a3SSteven Rostedt #include <linux/ring_buffer.h>
778d904b4SSteven Rostedt #include <linux/ftrace_irq.h>
87a8e76a3SSteven Rostedt #include <linux/spinlock.h>
97a8e76a3SSteven Rostedt #include <linux/debugfs.h>
107a8e76a3SSteven Rostedt #include <linux/uaccess.h>
11a81bd80aSSteven Rostedt #include <linux/hardirq.h>
127a8e76a3SSteven Rostedt #include <linux/module.h>
137a8e76a3SSteven Rostedt #include <linux/percpu.h>
147a8e76a3SSteven Rostedt #include <linux/mutex.h>
157a8e76a3SSteven Rostedt #include <linux/sched.h>	/* used for sched_clock() (for now) */
167a8e76a3SSteven Rostedt #include <linux/init.h>
177a8e76a3SSteven Rostedt #include <linux/hash.h>
187a8e76a3SSteven Rostedt #include <linux/list.h>
197a8e76a3SSteven Rostedt #include <linux/fs.h>
207a8e76a3SSteven Rostedt 
21182e9f5fSSteven Rostedt #include "trace.h"
22182e9f5fSSteven Rostedt 
23033601a3SSteven Rostedt /*
24033601a3SSteven Rostedt  * A fast way to enable or disable all ring buffers is to
25033601a3SSteven Rostedt  * call tracing_on or tracing_off. Turning off the ring buffers
26033601a3SSteven Rostedt  * prevents all ring buffers from being recorded to.
27033601a3SSteven Rostedt  * Turning this switch on makes it OK to write to the
28033601a3SSteven Rostedt  * ring buffer, if the ring buffer is enabled itself.
29033601a3SSteven Rostedt  *
30033601a3SSteven Rostedt  * There are three layers that must be on in order to write
31033601a3SSteven Rostedt  * to the ring buffer.
32033601a3SSteven Rostedt  *
33033601a3SSteven Rostedt  * 1) This global flag must be set.
34033601a3SSteven Rostedt  * 2) The ring buffer must be enabled for recording.
35033601a3SSteven Rostedt  * 3) The per cpu buffer must be enabled for recording.
36033601a3SSteven Rostedt  *
37033601a3SSteven Rostedt  * In case of an anomaly, this global flag has a bit set that
38033601a3SSteven Rostedt  * will permanently disable all ring buffers.
39033601a3SSteven Rostedt  */
40033601a3SSteven Rostedt 
41033601a3SSteven Rostedt /*
42033601a3SSteven Rostedt  * Global flag to disable all recording to ring buffers
43033601a3SSteven Rostedt  *  This has two bits: ON, DISABLED
44033601a3SSteven Rostedt  *
45033601a3SSteven Rostedt  *  ON   DISABLED
46033601a3SSteven Rostedt  * ---- ----------
47033601a3SSteven Rostedt  *   0      0        : ring buffers are off
48033601a3SSteven Rostedt  *   1      0        : ring buffers are on
49033601a3SSteven Rostedt  *   X      1        : ring buffers are permanently disabled
50033601a3SSteven Rostedt  */
51033601a3SSteven Rostedt 
52033601a3SSteven Rostedt enum {
53033601a3SSteven Rostedt 	RB_BUFFERS_ON_BIT	= 0,
54033601a3SSteven Rostedt 	RB_BUFFERS_DISABLED_BIT	= 1,
55033601a3SSteven Rostedt };
56033601a3SSteven Rostedt 
57033601a3SSteven Rostedt enum {
58033601a3SSteven Rostedt 	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
59033601a3SSteven Rostedt 	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
60033601a3SSteven Rostedt };
61033601a3SSteven Rostedt 
62033601a3SSteven Rostedt static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
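
/*
 * Editor's sketch (not part of the original file): because only the
 * combination "ON bit set, DISABLED bit clear" equals RB_BUFFERS_ON,
 * a write path can gate recording with a single compare, e.g.
 *
 *	if (ring_buffer_flags != RB_BUFFERS_ON)
 *		return NULL;
 *
 * Once tracing_off_permanent() sets the DISABLED bit, that compare
 * keeps failing even if tracing_on() later sets the ON bit again.
 */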
63a3583244SSteven Rostedt 
64a3583244SSteven Rostedt /**
65a3583244SSteven Rostedt  * tracing_on - enable all tracing buffers
66a3583244SSteven Rostedt  *
67a3583244SSteven Rostedt  * This function enables all tracing buffers that may have been
68a3583244SSteven Rostedt  * disabled with tracing_off.
69a3583244SSteven Rostedt  */
70a3583244SSteven Rostedt void tracing_on(void)
71a3583244SSteven Rostedt {
72033601a3SSteven Rostedt 	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
73a3583244SSteven Rostedt }
74c4f50183SRobert Richter EXPORT_SYMBOL_GPL(tracing_on);
75a3583244SSteven Rostedt 
76a3583244SSteven Rostedt /**
77a3583244SSteven Rostedt  * tracing_off - turn off all tracing buffers
78a3583244SSteven Rostedt  *
79a3583244SSteven Rostedt  * This function stops all tracing buffers from recording data.
80a3583244SSteven Rostedt  * It does not disable any overhead the tracers themselves may
81a3583244SSteven Rostedt  * be causing. This function simply causes all recording to
82a3583244SSteven Rostedt  * the ring buffers to fail.
83a3583244SSteven Rostedt  */
84a3583244SSteven Rostedt void tracing_off(void)
85a3583244SSteven Rostedt {
86033601a3SSteven Rostedt 	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
87033601a3SSteven Rostedt }
88c4f50183SRobert Richter EXPORT_SYMBOL_GPL(tracing_off);
89033601a3SSteven Rostedt 
90033601a3SSteven Rostedt /**
91033601a3SSteven Rostedt  * tracing_off_permanent - permanently disable ring buffers
92033601a3SSteven Rostedt  *
93033601a3SSteven Rostedt  * This function, once called, will disable all ring buffers
94033601a3SSteven Rostedt  * permanently.
95033601a3SSteven Rostedt  */
96033601a3SSteven Rostedt void tracing_off_permanent(void)
97033601a3SSteven Rostedt {
98033601a3SSteven Rostedt 	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
99a3583244SSteven Rostedt }
100a3583244SSteven Rostedt 
1037a8e76a3SSteven Rostedt /* Up this if you want to test the TIME_EXTENTS and normalization */
1047a8e76a3SSteven Rostedt #define DEBUG_SHIFT 0
1057a8e76a3SSteven Rostedt 
1067a8e76a3SSteven Rostedt /* FIXME!!! */
1077a8e76a3SSteven Rostedt u64 ring_buffer_time_stamp(int cpu)
1087a8e76a3SSteven Rostedt {
10947e74f2bSSteven Rostedt 	u64 time;
11047e74f2bSSteven Rostedt 
11147e74f2bSSteven Rostedt 	preempt_disable_notrace();
1127a8e76a3SSteven Rostedt 	/* shift to debug/test normalization and TIME_EXTENTS */
11347e74f2bSSteven Rostedt 	time = sched_clock() << DEBUG_SHIFT;
1142c2d7329SFrederic Weisbecker 	preempt_enable_no_resched_notrace();
11547e74f2bSSteven Rostedt 
11647e74f2bSSteven Rostedt 	return time;
1177a8e76a3SSteven Rostedt }
118c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
1197a8e76a3SSteven Rostedt 
1207a8e76a3SSteven Rostedt void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
1217a8e76a3SSteven Rostedt {
1227a8e76a3SSteven Rostedt 	/* Just stupid testing the normalize function and deltas */
1237a8e76a3SSteven Rostedt 	*ts >>= DEBUG_SHIFT;
1247a8e76a3SSteven Rostedt }
125c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
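
/*
 * Editor's note: DEBUG_SHIFT is normally 0, so the shift applied in
 * ring_buffer_time_stamp() and the shift removed here are both no-ops.
 * Setting it to, say, 10 multiplies every raw sched_clock() value by
 * 1024 on the way in and divides it back out here, which forces large
 * deltas and exercises the TIME_EXTEND handling without changing the
 * reported times.
 */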
1267a8e76a3SSteven Rostedt 
1277a8e76a3SSteven Rostedt #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
12867d34724SAndrew Morton #define RB_ALIGNMENT		4U
1297a8e76a3SSteven Rostedt #define RB_MAX_SMALL_DATA	28
1307a8e76a3SSteven Rostedt 
1317a8e76a3SSteven Rostedt enum {
1327a8e76a3SSteven Rostedt 	RB_LEN_TIME_EXTEND = 8,
1337a8e76a3SSteven Rostedt 	RB_LEN_TIME_STAMP = 16,
1347a8e76a3SSteven Rostedt };
1357a8e76a3SSteven Rostedt 
1367a8e76a3SSteven Rostedt /* inline for ring buffer fast paths */
13734a148bfSAndrew Morton static unsigned
1387a8e76a3SSteven Rostedt rb_event_length(struct ring_buffer_event *event)
1397a8e76a3SSteven Rostedt {
1407a8e76a3SSteven Rostedt 	unsigned length;
1417a8e76a3SSteven Rostedt 
1427a8e76a3SSteven Rostedt 	switch (event->type) {
1437a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
1447a8e76a3SSteven Rostedt 		/* undefined */
1457a8e76a3SSteven Rostedt 		return -1;
1467a8e76a3SSteven Rostedt 
1477a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
1487a8e76a3SSteven Rostedt 		return RB_LEN_TIME_EXTEND;
1497a8e76a3SSteven Rostedt 
1507a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
1517a8e76a3SSteven Rostedt 		return RB_LEN_TIME_STAMP;
1527a8e76a3SSteven Rostedt 
1537a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
1547a8e76a3SSteven Rostedt 		if (event->len)
15567d34724SAndrew Morton 			length = event->len * RB_ALIGNMENT;
1567a8e76a3SSteven Rostedt 		else
1577a8e76a3SSteven Rostedt 			length = event->array[0];
1587a8e76a3SSteven Rostedt 		return length + RB_EVNT_HDR_SIZE;
1597a8e76a3SSteven Rostedt 	default:
1607a8e76a3SSteven Rostedt 		BUG();
1617a8e76a3SSteven Rostedt 	}
1627a8e76a3SSteven Rostedt 	/* not hit */
1637a8e76a3SSteven Rostedt 	return 0;
1647a8e76a3SSteven Rostedt }
1657a8e76a3SSteven Rostedt 
1667a8e76a3SSteven Rostedt /**
1677a8e76a3SSteven Rostedt  * ring_buffer_event_length - return the length of the event
1687a8e76a3SSteven Rostedt  * @event: the event to get the length of
1697a8e76a3SSteven Rostedt  */
1707a8e76a3SSteven Rostedt unsigned ring_buffer_event_length(struct ring_buffer_event *event)
1717a8e76a3SSteven Rostedt {
172465634adSRobert Richter 	unsigned length = rb_event_length(event);
173465634adSRobert Richter 	if (event->type != RINGBUF_TYPE_DATA)
174465634adSRobert Richter 		return length;
175465634adSRobert Richter 	length -= RB_EVNT_HDR_SIZE;
176465634adSRobert Richter 	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
177465634adSRobert Richter 		length -= sizeof(event->array[0]);
178465634adSRobert Richter 	return length;
1797a8e76a3SSteven Rostedt }
180c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_event_length);
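
/*
 * Worked example (editor's addition, based on the encoding above): a
 * data event with an 11-byte payload is rounded up to 12 bytes, so
 * event->len = 12 / RB_ALIGNMENT = 3 and rb_event_length() returns
 * 3 * RB_ALIGNMENT + RB_EVNT_HDR_SIZE.  A payload larger than
 * RB_MAX_SMALL_DATA stores 0 in event->len and the data length in
 * event->array[0], which is why ring_buffer_event_length() subtracts
 * sizeof(event->array[0]) again for that case.
 */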
1817a8e76a3SSteven Rostedt 
1827a8e76a3SSteven Rostedt /* inline for ring buffer fast paths */
18334a148bfSAndrew Morton static void *
1847a8e76a3SSteven Rostedt rb_event_data(struct ring_buffer_event *event)
1857a8e76a3SSteven Rostedt {
1867a8e76a3SSteven Rostedt 	BUG_ON(event->type != RINGBUF_TYPE_DATA);
1877a8e76a3SSteven Rostedt 	/* If length is in len field, then array[0] has the data */
1887a8e76a3SSteven Rostedt 	if (event->len)
1897a8e76a3SSteven Rostedt 		return (void *)&event->array[0];
1907a8e76a3SSteven Rostedt 	/* Otherwise length is in array[0] and array[1] has the data */
1917a8e76a3SSteven Rostedt 	return (void *)&event->array[1];
1927a8e76a3SSteven Rostedt }
1937a8e76a3SSteven Rostedt 
1947a8e76a3SSteven Rostedt /**
1957a8e76a3SSteven Rostedt  * ring_buffer_event_data - return the data of the event
1967a8e76a3SSteven Rostedt  * @event: the event to get the data from
1977a8e76a3SSteven Rostedt  */
1987a8e76a3SSteven Rostedt void *ring_buffer_event_data(struct ring_buffer_event *event)
1997a8e76a3SSteven Rostedt {
2007a8e76a3SSteven Rostedt 	return rb_event_data(event);
2017a8e76a3SSteven Rostedt }
202c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_event_data);
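
/*
 * Usage sketch (editor's addition; struct my_payload and process() are
 * hypothetical names):
 *
 *	struct my_payload *p;
 *
 *	if (ring_buffer_event_length(event) >= sizeof(*p)) {
 *		p = ring_buffer_event_data(event);
 *		process(p);
 *	}
 *
 * The returned pointer aims at array[0] or array[1] depending on where
 * the length was stored, so callers should always go through this
 * helper rather than poke at the event layout directly.
 */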
2037a8e76a3SSteven Rostedt 
2047a8e76a3SSteven Rostedt #define for_each_buffer_cpu(buffer, cpu)		\
2059e01c1b7SRusty Russell 	for_each_cpu(cpu, buffer->cpumask)
2067a8e76a3SSteven Rostedt 
2077a8e76a3SSteven Rostedt #define TS_SHIFT	27
2087a8e76a3SSteven Rostedt #define TS_MASK		((1ULL << TS_SHIFT) - 1)
2097a8e76a3SSteven Rostedt #define TS_DELTA_TEST	(~TS_MASK)
2107a8e76a3SSteven Rostedt 
211abc9b56dSSteven Rostedt struct buffer_data_page {
2127a8e76a3SSteven Rostedt 	u64		 time_stamp;	/* page time stamp */
213bf41a158SSteven Rostedt 	local_t		 commit;	/* write committed index */
214abc9b56dSSteven Rostedt 	unsigned char	 data[];	/* data of buffer page */
215abc9b56dSSteven Rostedt };
216abc9b56dSSteven Rostedt 
217abc9b56dSSteven Rostedt struct buffer_page {
218abc9b56dSSteven Rostedt 	local_t		 write;		/* index for next write */
2196f807acdSSteven Rostedt 	unsigned	 read;		/* index for next read */
2207a8e76a3SSteven Rostedt 	struct list_head list;		/* list of free pages */
221abc9b56dSSteven Rostedt 	struct buffer_data_page *page;	/* Actual data page */
2227a8e76a3SSteven Rostedt };
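
/*
 * Editor's note (an inferred sketch of how the three indexes relate):
 * "write" is where the next writer will reserve space, "commit" is how
 * far the page contents are valid for readers, and "read" is how far
 * the current reader has consumed.  On the page being written,
 *
 *	read <= commit <= write
 *
 * holds, with commit trailing write while a writer still owns its
 * reserved region.
 */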
2237a8e76a3SSteven Rostedt 
224044fa782SSteven Rostedt static void rb_init_page(struct buffer_data_page *bpage)
225abc9b56dSSteven Rostedt {
226044fa782SSteven Rostedt 	local_set(&bpage->commit, 0);
227abc9b56dSSteven Rostedt }
228abc9b56dSSteven Rostedt 
2297a8e76a3SSteven Rostedt /*
230ed56829cSSteven Rostedt  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
231ed56829cSSteven Rostedt  * this issue out.
232ed56829cSSteven Rostedt  */
23334a148bfSAndrew Morton static void free_buffer_page(struct buffer_page *bpage)
234ed56829cSSteven Rostedt {
2356ae2a076SSteven Rostedt 	free_page((unsigned long)bpage->page);
236e4c2ce82SSteven Rostedt 	kfree(bpage);
237ed56829cSSteven Rostedt }
238ed56829cSSteven Rostedt 
239ed56829cSSteven Rostedt /*
2407a8e76a3SSteven Rostedt  * We need to fit the time_stamp delta into 27 bits.
2417a8e76a3SSteven Rostedt  */
2427a8e76a3SSteven Rostedt static inline int test_time_stamp(u64 delta)
2437a8e76a3SSteven Rostedt {
2447a8e76a3SSteven Rostedt 	if (delta & TS_DELTA_TEST)
2457a8e76a3SSteven Rostedt 		return 1;
2467a8e76a3SSteven Rostedt 	return 0;
2477a8e76a3SSteven Rostedt }
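
/*
 * Editor's example: with a nanosecond clock, 1 << TS_SHIFT is about
 * 134 ms, so test_time_stamp() reports an overflow (and the writer has
 * to emit a TIME_EXTEND event) whenever consecutive events on a CPU
 * are more than ~134 ms apart.
 */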
2487a8e76a3SSteven Rostedt 
249082605deSSteven Rostedt #define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data))
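
/*
 * Worked example (editor's addition): on a 64-bit build with 4 KiB
 * pages, the buffer_data_page header is 16 bytes (an 8-byte time_stamp
 * plus an 8-byte commit counter), so BUF_PAGE_SIZE works out to
 * 4096 - 16 = 4080 bytes of event data per page.
 */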
2507a8e76a3SSteven Rostedt 
2517a8e76a3SSteven Rostedt /*
2527a8e76a3SSteven Rostedt  * head_page == tail_page && head == tail then buffer is empty.
2537a8e76a3SSteven Rostedt  */
2547a8e76a3SSteven Rostedt struct ring_buffer_per_cpu {
2557a8e76a3SSteven Rostedt 	int				cpu;
2567a8e76a3SSteven Rostedt 	struct ring_buffer		*buffer;
257f83c9d0fSSteven Rostedt 	spinlock_t			reader_lock; /* serialize readers */
2583e03fb7fSSteven Rostedt 	raw_spinlock_t			lock;
2597a8e76a3SSteven Rostedt 	struct lock_class_key		lock_key;
2607a8e76a3SSteven Rostedt 	struct list_head		pages;
2616f807acdSSteven Rostedt 	struct buffer_page		*head_page;	/* read from head */
2626f807acdSSteven Rostedt 	struct buffer_page		*tail_page;	/* write to tail */
263bf41a158SSteven Rostedt 	struct buffer_page		*commit_page;	/* commited pages */
264d769041fSSteven Rostedt 	struct buffer_page		*reader_page;
2657a8e76a3SSteven Rostedt 	unsigned long			overrun;
2667a8e76a3SSteven Rostedt 	unsigned long			entries;
2677a8e76a3SSteven Rostedt 	u64				write_stamp;
2687a8e76a3SSteven Rostedt 	u64				read_stamp;
2697a8e76a3SSteven Rostedt 	atomic_t			record_disabled;
2707a8e76a3SSteven Rostedt };
2717a8e76a3SSteven Rostedt 
2727a8e76a3SSteven Rostedt struct ring_buffer {
2737a8e76a3SSteven Rostedt 	unsigned			pages;
2747a8e76a3SSteven Rostedt 	unsigned			flags;
2757a8e76a3SSteven Rostedt 	int				cpus;
2769e01c1b7SRusty Russell 	cpumask_var_t			cpumask;
2777a8e76a3SSteven Rostedt 	atomic_t			record_disabled;
2787a8e76a3SSteven Rostedt 
2797a8e76a3SSteven Rostedt 	struct mutex			mutex;
2807a8e76a3SSteven Rostedt 
2817a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu	**buffers;
2827a8e76a3SSteven Rostedt };
2837a8e76a3SSteven Rostedt 
2847a8e76a3SSteven Rostedt struct ring_buffer_iter {
2857a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu	*cpu_buffer;
2867a8e76a3SSteven Rostedt 	unsigned long			head;
2877a8e76a3SSteven Rostedt 	struct buffer_page		*head_page;
2887a8e76a3SSteven Rostedt 	u64				read_stamp;
2897a8e76a3SSteven Rostedt };
2907a8e76a3SSteven Rostedt 
291f536aafcSSteven Rostedt /* buffer may be either ring_buffer or ring_buffer_per_cpu */
2927a8e76a3SSteven Rostedt #define RB_WARN_ON(buffer, cond)				\
2933e89c7bbSSteven Rostedt 	({							\
2943e89c7bbSSteven Rostedt 		int _____ret = unlikely(cond);			\
2953e89c7bbSSteven Rostedt 		if (_____ret) {					\
296bf41a158SSteven Rostedt 			atomic_inc(&buffer->record_disabled);	\
297bf41a158SSteven Rostedt 			WARN_ON(1);				\
298bf41a158SSteven Rostedt 		}						\
2993e89c7bbSSteven Rostedt 		_____ret;					\
3003e89c7bbSSteven Rostedt 	})
301f536aafcSSteven Rostedt 
3027a8e76a3SSteven Rostedt /**
3037a8e76a3SSteven Rostedt  * rb_check_pages - integrity check of buffer pages
3047a8e76a3SSteven Rostedt  * @cpu_buffer: CPU buffer with pages to test
3057a8e76a3SSteven Rostedt  *
3067a8e76a3SSteven Rostedt  * As a safety measure we check to make sure the data pages have not
3077a8e76a3SSteven Rostedt  * been corrupted.
3087a8e76a3SSteven Rostedt  */
3097a8e76a3SSteven Rostedt static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
3107a8e76a3SSteven Rostedt {
3117a8e76a3SSteven Rostedt 	struct list_head *head = &cpu_buffer->pages;
312044fa782SSteven Rostedt 	struct buffer_page *bpage, *tmp;
3137a8e76a3SSteven Rostedt 
3143e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
3153e89c7bbSSteven Rostedt 		return -1;
3163e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
3173e89c7bbSSteven Rostedt 		return -1;
3187a8e76a3SSteven Rostedt 
319044fa782SSteven Rostedt 	list_for_each_entry_safe(bpage, tmp, head, list) {
3203e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer,
321044fa782SSteven Rostedt 			       bpage->list.next->prev != &bpage->list))
3223e89c7bbSSteven Rostedt 			return -1;
3233e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer,
324044fa782SSteven Rostedt 			       bpage->list.prev->next != &bpage->list))
3253e89c7bbSSteven Rostedt 			return -1;
3267a8e76a3SSteven Rostedt 	}
3277a8e76a3SSteven Rostedt 
3287a8e76a3SSteven Rostedt 	return 0;
3297a8e76a3SSteven Rostedt }
3307a8e76a3SSteven Rostedt 
3317a8e76a3SSteven Rostedt static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
3327a8e76a3SSteven Rostedt 			     unsigned nr_pages)
3337a8e76a3SSteven Rostedt {
3347a8e76a3SSteven Rostedt 	struct list_head *head = &cpu_buffer->pages;
335044fa782SSteven Rostedt 	struct buffer_page *bpage, *tmp;
3367a8e76a3SSteven Rostedt 	unsigned long addr;
3377a8e76a3SSteven Rostedt 	LIST_HEAD(pages);
3387a8e76a3SSteven Rostedt 	unsigned i;
3397a8e76a3SSteven Rostedt 
3407a8e76a3SSteven Rostedt 	for (i = 0; i < nr_pages; i++) {
341044fa782SSteven Rostedt 		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
342aa1e0e3bSSteven Rostedt 				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
343044fa782SSteven Rostedt 		if (!bpage)
344e4c2ce82SSteven Rostedt 			goto free_pages;
345044fa782SSteven Rostedt 		list_add(&bpage->list, &pages);
346e4c2ce82SSteven Rostedt 
3477a8e76a3SSteven Rostedt 		addr = __get_free_page(GFP_KERNEL);
3487a8e76a3SSteven Rostedt 		if (!addr)
3497a8e76a3SSteven Rostedt 			goto free_pages;
350044fa782SSteven Rostedt 		bpage->page = (void *)addr;
351044fa782SSteven Rostedt 		rb_init_page(bpage->page);
3527a8e76a3SSteven Rostedt 	}
3537a8e76a3SSteven Rostedt 
3547a8e76a3SSteven Rostedt 	list_splice(&pages, head);
3557a8e76a3SSteven Rostedt 
3567a8e76a3SSteven Rostedt 	rb_check_pages(cpu_buffer);
3577a8e76a3SSteven Rostedt 
3587a8e76a3SSteven Rostedt 	return 0;
3597a8e76a3SSteven Rostedt 
3607a8e76a3SSteven Rostedt  free_pages:
361044fa782SSteven Rostedt 	list_for_each_entry_safe(bpage, tmp, &pages, list) {
362044fa782SSteven Rostedt 		list_del_init(&bpage->list);
363044fa782SSteven Rostedt 		free_buffer_page(bpage);
3647a8e76a3SSteven Rostedt 	}
3657a8e76a3SSteven Rostedt 	return -ENOMEM;
3667a8e76a3SSteven Rostedt }
3677a8e76a3SSteven Rostedt 
3687a8e76a3SSteven Rostedt static struct ring_buffer_per_cpu *
3697a8e76a3SSteven Rostedt rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
3707a8e76a3SSteven Rostedt {
3717a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
372044fa782SSteven Rostedt 	struct buffer_page *bpage;
373d769041fSSteven Rostedt 	unsigned long addr;
3747a8e76a3SSteven Rostedt 	int ret;
3757a8e76a3SSteven Rostedt 
3767a8e76a3SSteven Rostedt 	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
3777a8e76a3SSteven Rostedt 				  GFP_KERNEL, cpu_to_node(cpu));
3787a8e76a3SSteven Rostedt 	if (!cpu_buffer)
3797a8e76a3SSteven Rostedt 		return NULL;
3807a8e76a3SSteven Rostedt 
3817a8e76a3SSteven Rostedt 	cpu_buffer->cpu = cpu;
3827a8e76a3SSteven Rostedt 	cpu_buffer->buffer = buffer;
383f83c9d0fSSteven Rostedt 	spin_lock_init(&cpu_buffer->reader_lock);
3843e03fb7fSSteven Rostedt 	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
3857a8e76a3SSteven Rostedt 	INIT_LIST_HEAD(&cpu_buffer->pages);
3867a8e76a3SSteven Rostedt 
387044fa782SSteven Rostedt 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
388e4c2ce82SSteven Rostedt 			    GFP_KERNEL, cpu_to_node(cpu));
389044fa782SSteven Rostedt 	if (!bpage)
390e4c2ce82SSteven Rostedt 		goto fail_free_buffer;
391e4c2ce82SSteven Rostedt 
392044fa782SSteven Rostedt 	cpu_buffer->reader_page = bpage;
393d769041fSSteven Rostedt 	addr = __get_free_page(GFP_KERNEL);
394d769041fSSteven Rostedt 	if (!addr)
395e4c2ce82SSteven Rostedt 		goto fail_free_reader;
396044fa782SSteven Rostedt 	bpage->page = (void *)addr;
397044fa782SSteven Rostedt 	rb_init_page(bpage->page);
398e4c2ce82SSteven Rostedt 
399d769041fSSteven Rostedt 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
400d769041fSSteven Rostedt 
4017a8e76a3SSteven Rostedt 	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
4027a8e76a3SSteven Rostedt 	if (ret < 0)
403d769041fSSteven Rostedt 		goto fail_free_reader;
4047a8e76a3SSteven Rostedt 
4057a8e76a3SSteven Rostedt 	cpu_buffer->head_page
4067a8e76a3SSteven Rostedt 		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
407bf41a158SSteven Rostedt 	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
4087a8e76a3SSteven Rostedt 
4097a8e76a3SSteven Rostedt 	return cpu_buffer;
4107a8e76a3SSteven Rostedt 
411d769041fSSteven Rostedt  fail_free_reader:
412d769041fSSteven Rostedt 	free_buffer_page(cpu_buffer->reader_page);
413d769041fSSteven Rostedt 
4147a8e76a3SSteven Rostedt  fail_free_buffer:
4157a8e76a3SSteven Rostedt 	kfree(cpu_buffer);
4167a8e76a3SSteven Rostedt 	return NULL;
4177a8e76a3SSteven Rostedt }
4187a8e76a3SSteven Rostedt 
4197a8e76a3SSteven Rostedt static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
4207a8e76a3SSteven Rostedt {
4217a8e76a3SSteven Rostedt 	struct list_head *head = &cpu_buffer->pages;
422044fa782SSteven Rostedt 	struct buffer_page *bpage, *tmp;
4237a8e76a3SSteven Rostedt 
424d769041fSSteven Rostedt 	list_del_init(&cpu_buffer->reader_page->list);
425d769041fSSteven Rostedt 	free_buffer_page(cpu_buffer->reader_page);
426d769041fSSteven Rostedt 
427044fa782SSteven Rostedt 	list_for_each_entry_safe(bpage, tmp, head, list) {
428044fa782SSteven Rostedt 		list_del_init(&bpage->list);
429044fa782SSteven Rostedt 		free_buffer_page(bpage);
4307a8e76a3SSteven Rostedt 	}
4317a8e76a3SSteven Rostedt 	kfree(cpu_buffer);
4327a8e76a3SSteven Rostedt }
4337a8e76a3SSteven Rostedt 
434a7b13743SSteven Rostedt /*
435a7b13743SSteven Rostedt  * Causes compile errors if the struct buffer_page gets bigger
436a7b13743SSteven Rostedt  * than the struct page.
437a7b13743SSteven Rostedt  */
438a7b13743SSteven Rostedt extern int ring_buffer_page_too_big(void);
439a7b13743SSteven Rostedt 
4407a8e76a3SSteven Rostedt /**
4417a8e76a3SSteven Rostedt  * ring_buffer_alloc - allocate a new ring_buffer
44268814b58SRobert Richter  * @size: the size in bytes per cpu that is needed.
4437a8e76a3SSteven Rostedt  * @flags: attributes to set for the ring buffer.
4447a8e76a3SSteven Rostedt  *
4457a8e76a3SSteven Rostedt  * Currently the only flag that is available is the RB_FL_OVERWRITE
4467a8e76a3SSteven Rostedt  * flag. This flag means that the buffer will overwrite old data
4477a8e76a3SSteven Rostedt  * when the buffer wraps. If this flag is not set, the buffer will
4487a8e76a3SSteven Rostedt  * drop data when the tail hits the head.
4497a8e76a3SSteven Rostedt  */
4507a8e76a3SSteven Rostedt struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
4517a8e76a3SSteven Rostedt {
4527a8e76a3SSteven Rostedt 	struct ring_buffer *buffer;
4537a8e76a3SSteven Rostedt 	int bsize;
4547a8e76a3SSteven Rostedt 	int cpu;
4557a8e76a3SSteven Rostedt 
456a7b13743SSteven Rostedt 	/* Paranoid! Optimizes out when all is well */
457a7b13743SSteven Rostedt 	if (sizeof(struct buffer_page) > sizeof(struct page))
458a7b13743SSteven Rostedt 		ring_buffer_page_too_big();
459a7b13743SSteven Rostedt 
460a7b13743SSteven Rostedt 
4617a8e76a3SSteven Rostedt 	/* keep it in its own cache line */
4627a8e76a3SSteven Rostedt 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
4637a8e76a3SSteven Rostedt 			 GFP_KERNEL);
4647a8e76a3SSteven Rostedt 	if (!buffer)
4657a8e76a3SSteven Rostedt 		return NULL;
4667a8e76a3SSteven Rostedt 
4679e01c1b7SRusty Russell 	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
4689e01c1b7SRusty Russell 		goto fail_free_buffer;
4699e01c1b7SRusty Russell 
4707a8e76a3SSteven Rostedt 	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
4717a8e76a3SSteven Rostedt 	buffer->flags = flags;
4727a8e76a3SSteven Rostedt 
4737a8e76a3SSteven Rostedt 	/* need at least two pages */
4747a8e76a3SSteven Rostedt 	if (buffer->pages == 1)
4757a8e76a3SSteven Rostedt 		buffer->pages++;
4767a8e76a3SSteven Rostedt 
4779e01c1b7SRusty Russell 	cpumask_copy(buffer->cpumask, cpu_possible_mask);
4787a8e76a3SSteven Rostedt 	buffer->cpus = nr_cpu_ids;
4797a8e76a3SSteven Rostedt 
4807a8e76a3SSteven Rostedt 	bsize = sizeof(void *) * nr_cpu_ids;
4817a8e76a3SSteven Rostedt 	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
4827a8e76a3SSteven Rostedt 				  GFP_KERNEL);
4837a8e76a3SSteven Rostedt 	if (!buffer->buffers)
4849e01c1b7SRusty Russell 		goto fail_free_cpumask;
4857a8e76a3SSteven Rostedt 
4867a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
4877a8e76a3SSteven Rostedt 		buffer->buffers[cpu] =
4887a8e76a3SSteven Rostedt 			rb_allocate_cpu_buffer(buffer, cpu);
4897a8e76a3SSteven Rostedt 		if (!buffer->buffers[cpu])
4907a8e76a3SSteven Rostedt 			goto fail_free_buffers;
4917a8e76a3SSteven Rostedt 	}
4927a8e76a3SSteven Rostedt 
4937a8e76a3SSteven Rostedt 	mutex_init(&buffer->mutex);
4947a8e76a3SSteven Rostedt 
4957a8e76a3SSteven Rostedt 	return buffer;
4967a8e76a3SSteven Rostedt 
4977a8e76a3SSteven Rostedt  fail_free_buffers:
4987a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
4997a8e76a3SSteven Rostedt 		if (buffer->buffers[cpu])
5007a8e76a3SSteven Rostedt 			rb_free_cpu_buffer(buffer->buffers[cpu]);
5017a8e76a3SSteven Rostedt 	}
5027a8e76a3SSteven Rostedt 	kfree(buffer->buffers);
5037a8e76a3SSteven Rostedt 
5049e01c1b7SRusty Russell  fail_free_cpumask:
5059e01c1b7SRusty Russell 	free_cpumask_var(buffer->cpumask);
5069e01c1b7SRusty Russell 
5077a8e76a3SSteven Rostedt  fail_free_buffer:
5087a8e76a3SSteven Rostedt 	kfree(buffer);
5097a8e76a3SSteven Rostedt 	return NULL;
5107a8e76a3SSteven Rostedt }
511c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_alloc);
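
/*
 * Usage sketch (editor's addition): allocate a buffer that keeps about
 * 64 KiB of trace data per CPU and overwrites the oldest events when
 * full.  Error handling beyond the NULL check is up to the caller.
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(64 * 1024, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 */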
5127a8e76a3SSteven Rostedt 
5137a8e76a3SSteven Rostedt /**
5147a8e76a3SSteven Rostedt  * ring_buffer_free - free a ring buffer.
5157a8e76a3SSteven Rostedt  * @buffer: the buffer to free.
5167a8e76a3SSteven Rostedt  */
5177a8e76a3SSteven Rostedt void
5187a8e76a3SSteven Rostedt ring_buffer_free(struct ring_buffer *buffer)
5197a8e76a3SSteven Rostedt {
5207a8e76a3SSteven Rostedt 	int cpu;
5217a8e76a3SSteven Rostedt 
5227a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu)
5237a8e76a3SSteven Rostedt 		rb_free_cpu_buffer(buffer->buffers[cpu]);
5247a8e76a3SSteven Rostedt 
5259e01c1b7SRusty Russell 	free_cpumask_var(buffer->cpumask);
5269e01c1b7SRusty Russell 
5277a8e76a3SSteven Rostedt 	kfree(buffer);
5287a8e76a3SSteven Rostedt }
529c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_free);
5307a8e76a3SSteven Rostedt 
5317a8e76a3SSteven Rostedt static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
5327a8e76a3SSteven Rostedt 
5337a8e76a3SSteven Rostedt static void
5347a8e76a3SSteven Rostedt rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
5357a8e76a3SSteven Rostedt {
536044fa782SSteven Rostedt 	struct buffer_page *bpage;
5377a8e76a3SSteven Rostedt 	struct list_head *p;
5387a8e76a3SSteven Rostedt 	unsigned i;
5397a8e76a3SSteven Rostedt 
5407a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
5417a8e76a3SSteven Rostedt 	synchronize_sched();
5427a8e76a3SSteven Rostedt 
5437a8e76a3SSteven Rostedt 	for (i = 0; i < nr_pages; i++) {
5443e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
5453e89c7bbSSteven Rostedt 			return;
5467a8e76a3SSteven Rostedt 		p = cpu_buffer->pages.next;
547044fa782SSteven Rostedt 		bpage = list_entry(p, struct buffer_page, list);
548044fa782SSteven Rostedt 		list_del_init(&bpage->list);
549044fa782SSteven Rostedt 		free_buffer_page(bpage);
5507a8e76a3SSteven Rostedt 	}
5513e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
5523e89c7bbSSteven Rostedt 		return;
5537a8e76a3SSteven Rostedt 
5547a8e76a3SSteven Rostedt 	rb_reset_cpu(cpu_buffer);
5557a8e76a3SSteven Rostedt 
5567a8e76a3SSteven Rostedt 	rb_check_pages(cpu_buffer);
5577a8e76a3SSteven Rostedt 
5587a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
5597a8e76a3SSteven Rostedt 
5607a8e76a3SSteven Rostedt }
5617a8e76a3SSteven Rostedt 
5627a8e76a3SSteven Rostedt static void
5637a8e76a3SSteven Rostedt rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
5647a8e76a3SSteven Rostedt 		struct list_head *pages, unsigned nr_pages)
5657a8e76a3SSteven Rostedt {
566044fa782SSteven Rostedt 	struct buffer_page *bpage;
5677a8e76a3SSteven Rostedt 	struct list_head *p;
5687a8e76a3SSteven Rostedt 	unsigned i;
5697a8e76a3SSteven Rostedt 
5707a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
5717a8e76a3SSteven Rostedt 	synchronize_sched();
5727a8e76a3SSteven Rostedt 
5737a8e76a3SSteven Rostedt 	for (i = 0; i < nr_pages; i++) {
5743e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
5753e89c7bbSSteven Rostedt 			return;
5767a8e76a3SSteven Rostedt 		p = pages->next;
577044fa782SSteven Rostedt 		bpage = list_entry(p, struct buffer_page, list);
578044fa782SSteven Rostedt 		list_del_init(&bpage->list);
579044fa782SSteven Rostedt 		list_add_tail(&bpage->list, &cpu_buffer->pages);
5807a8e76a3SSteven Rostedt 	}
5817a8e76a3SSteven Rostedt 	rb_reset_cpu(cpu_buffer);
5827a8e76a3SSteven Rostedt 
5837a8e76a3SSteven Rostedt 	rb_check_pages(cpu_buffer);
5847a8e76a3SSteven Rostedt 
5857a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
5867a8e76a3SSteven Rostedt }
5877a8e76a3SSteven Rostedt 
5887a8e76a3SSteven Rostedt /**
5897a8e76a3SSteven Rostedt  * ring_buffer_resize - resize the ring buffer
5907a8e76a3SSteven Rostedt  * @buffer: the buffer to resize.
5917a8e76a3SSteven Rostedt  * @size: the new size.
5927a8e76a3SSteven Rostedt  *
5937a8e76a3SSteven Rostedt  * The tracer is responsible for making sure that the buffer is
5947a8e76a3SSteven Rostedt  * not being used while changing the size.
5957a8e76a3SSteven Rostedt  * Note: We may be able to change the above requirement by using
5967a8e76a3SSteven Rostedt  *  RCU synchronizations.
5977a8e76a3SSteven Rostedt  *
5987a8e76a3SSteven Rostedt  * Minimum size is 2 * BUF_PAGE_SIZE.
5997a8e76a3SSteven Rostedt  *
6007a8e76a3SSteven Rostedt  * Returns -1 on failure.
6017a8e76a3SSteven Rostedt  */
6027a8e76a3SSteven Rostedt int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
6037a8e76a3SSteven Rostedt {
6047a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
6057a8e76a3SSteven Rostedt 	unsigned nr_pages, rm_pages, new_pages;
606044fa782SSteven Rostedt 	struct buffer_page *bpage, *tmp;
6077a8e76a3SSteven Rostedt 	unsigned long buffer_size;
6087a8e76a3SSteven Rostedt 	unsigned long addr;
6097a8e76a3SSteven Rostedt 	LIST_HEAD(pages);
6107a8e76a3SSteven Rostedt 	int i, cpu;
6117a8e76a3SSteven Rostedt 
612ee51a1deSIngo Molnar 	/*
613ee51a1deSIngo Molnar 	 * Always succeed at resizing a non-existent buffer:
614ee51a1deSIngo Molnar 	 */
615ee51a1deSIngo Molnar 	if (!buffer)
616ee51a1deSIngo Molnar 		return size;
617ee51a1deSIngo Molnar 
6187a8e76a3SSteven Rostedt 	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
6197a8e76a3SSteven Rostedt 	size *= BUF_PAGE_SIZE;
6207a8e76a3SSteven Rostedt 	buffer_size = buffer->pages * BUF_PAGE_SIZE;
6217a8e76a3SSteven Rostedt 
6227a8e76a3SSteven Rostedt 	/* we need a minimum of two pages */
6237a8e76a3SSteven Rostedt 	if (size < BUF_PAGE_SIZE * 2)
6247a8e76a3SSteven Rostedt 		size = BUF_PAGE_SIZE * 2;
6257a8e76a3SSteven Rostedt 
6267a8e76a3SSteven Rostedt 	if (size == buffer_size)
6277a8e76a3SSteven Rostedt 		return size;
6287a8e76a3SSteven Rostedt 
6297a8e76a3SSteven Rostedt 	mutex_lock(&buffer->mutex);
6307a8e76a3SSteven Rostedt 
6317a8e76a3SSteven Rostedt 	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
6327a8e76a3SSteven Rostedt 
6337a8e76a3SSteven Rostedt 	if (size < buffer_size) {
6347a8e76a3SSteven Rostedt 
6357a8e76a3SSteven Rostedt 		/* easy case, just free pages */
6363e89c7bbSSteven Rostedt 		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
6373e89c7bbSSteven Rostedt 			mutex_unlock(&buffer->mutex);
6383e89c7bbSSteven Rostedt 			return -1;
6393e89c7bbSSteven Rostedt 		}
6407a8e76a3SSteven Rostedt 
6417a8e76a3SSteven Rostedt 		rm_pages = buffer->pages - nr_pages;
6427a8e76a3SSteven Rostedt 
6437a8e76a3SSteven Rostedt 		for_each_buffer_cpu(buffer, cpu) {
6447a8e76a3SSteven Rostedt 			cpu_buffer = buffer->buffers[cpu];
6457a8e76a3SSteven Rostedt 			rb_remove_pages(cpu_buffer, rm_pages);
6467a8e76a3SSteven Rostedt 		}
6477a8e76a3SSteven Rostedt 		goto out;
6487a8e76a3SSteven Rostedt 	}
6497a8e76a3SSteven Rostedt 
6507a8e76a3SSteven Rostedt 	/*
6517a8e76a3SSteven Rostedt 	 * This is a bit more difficult. We only want to add pages
6527a8e76a3SSteven Rostedt 	 * when we can allocate enough for all CPUs. We do this
6537a8e76a3SSteven Rostedt 	 * by allocating all the pages and storing them on a local
6547a8e76a3SSteven Rostedt  * linked list. If we succeed in our allocation, then we
6557a8e76a3SSteven Rostedt 	 * add these pages to the cpu_buffers. Otherwise we just free
6567a8e76a3SSteven Rostedt 	 * them all and return -ENOMEM;
6577a8e76a3SSteven Rostedt 	 */
6583e89c7bbSSteven Rostedt 	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
6593e89c7bbSSteven Rostedt 		mutex_unlock(&buffer->mutex);
6603e89c7bbSSteven Rostedt 		return -1;
6613e89c7bbSSteven Rostedt 	}
662f536aafcSSteven Rostedt 
6637a8e76a3SSteven Rostedt 	new_pages = nr_pages - buffer->pages;
6647a8e76a3SSteven Rostedt 
6657a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
6667a8e76a3SSteven Rostedt 		for (i = 0; i < new_pages; i++) {
667044fa782SSteven Rostedt 			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
668e4c2ce82SSteven Rostedt 						  cache_line_size()),
669e4c2ce82SSteven Rostedt 					    GFP_KERNEL, cpu_to_node(cpu));
670044fa782SSteven Rostedt 			if (!bpage)
671e4c2ce82SSteven Rostedt 				goto free_pages;
672044fa782SSteven Rostedt 			list_add(&bpage->list, &pages);
6737a8e76a3SSteven Rostedt 			addr = __get_free_page(GFP_KERNEL);
6747a8e76a3SSteven Rostedt 			if (!addr)
6757a8e76a3SSteven Rostedt 				goto free_pages;
676044fa782SSteven Rostedt 			bpage->page = (void *)addr;
677044fa782SSteven Rostedt 			rb_init_page(bpage->page);
6787a8e76a3SSteven Rostedt 		}
6797a8e76a3SSteven Rostedt 	}
6807a8e76a3SSteven Rostedt 
6817a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
6827a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
6837a8e76a3SSteven Rostedt 		rb_insert_pages(cpu_buffer, &pages, new_pages);
6847a8e76a3SSteven Rostedt 	}
6857a8e76a3SSteven Rostedt 
6863e89c7bbSSteven Rostedt 	if (RB_WARN_ON(buffer, !list_empty(&pages))) {
6873e89c7bbSSteven Rostedt 		mutex_unlock(&buffer->mutex);
6883e89c7bbSSteven Rostedt 		return -1;
6893e89c7bbSSteven Rostedt 	}
6907a8e76a3SSteven Rostedt 
6917a8e76a3SSteven Rostedt  out:
6927a8e76a3SSteven Rostedt 	buffer->pages = nr_pages;
6937a8e76a3SSteven Rostedt 	mutex_unlock(&buffer->mutex);
6947a8e76a3SSteven Rostedt 
6957a8e76a3SSteven Rostedt 	return size;
6967a8e76a3SSteven Rostedt 
6977a8e76a3SSteven Rostedt  free_pages:
698044fa782SSteven Rostedt 	list_for_each_entry_safe(bpage, tmp, &pages, list) {
699044fa782SSteven Rostedt 		list_del_init(&bpage->list);
700044fa782SSteven Rostedt 		free_buffer_page(bpage);
7017a8e76a3SSteven Rostedt 	}
702641d2f63SVegard Nossum 	mutex_unlock(&buffer->mutex);
7037a8e76a3SSteven Rostedt 	return -ENOMEM;
7047a8e76a3SSteven Rostedt }
705c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_resize);
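
/*
 * Usage sketch (editor's addition, continuing the example above):
 * growing the buffer to roughly 1 MiB per CPU.  On success the return
 * value is the new size rounded up to whole pages; on failure it is
 * negative.
 *
 *	ret = ring_buffer_resize(rb, 1024 * 1024);
 *	if (ret < 0)
 *		pr_warn("ring buffer resize failed\n");
 */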
7067a8e76a3SSteven Rostedt 
7077a8e76a3SSteven Rostedt static inline int rb_null_event(struct ring_buffer_event *event)
7087a8e76a3SSteven Rostedt {
7097a8e76a3SSteven Rostedt 	return event->type == RINGBUF_TYPE_PADDING;
7107a8e76a3SSteven Rostedt }
7117a8e76a3SSteven Rostedt 
7128789a9e7SSteven Rostedt static inline void *
713044fa782SSteven Rostedt __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
7148789a9e7SSteven Rostedt {
715044fa782SSteven Rostedt 	return bpage->data + index;
7168789a9e7SSteven Rostedt }
7178789a9e7SSteven Rostedt 
718044fa782SSteven Rostedt static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
7197a8e76a3SSteven Rostedt {
720044fa782SSteven Rostedt 	return bpage->page->data + index;
7217a8e76a3SSteven Rostedt }
7227a8e76a3SSteven Rostedt 
7237a8e76a3SSteven Rostedt static inline struct ring_buffer_event *
724d769041fSSteven Rostedt rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
7257a8e76a3SSteven Rostedt {
7266f807acdSSteven Rostedt 	return __rb_page_index(cpu_buffer->reader_page,
7276f807acdSSteven Rostedt 			       cpu_buffer->reader_page->read);
7286f807acdSSteven Rostedt }
7296f807acdSSteven Rostedt 
7306f807acdSSteven Rostedt static inline struct ring_buffer_event *
7316f807acdSSteven Rostedt rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
7326f807acdSSteven Rostedt {
7336f807acdSSteven Rostedt 	return __rb_page_index(cpu_buffer->head_page,
7346f807acdSSteven Rostedt 			       cpu_buffer->head_page->read);
7357a8e76a3SSteven Rostedt }
7367a8e76a3SSteven Rostedt 
7377a8e76a3SSteven Rostedt static inline struct ring_buffer_event *
7387a8e76a3SSteven Rostedt rb_iter_head_event(struct ring_buffer_iter *iter)
7397a8e76a3SSteven Rostedt {
7406f807acdSSteven Rostedt 	return __rb_page_index(iter->head_page, iter->head);
7417a8e76a3SSteven Rostedt }
7427a8e76a3SSteven Rostedt 
743bf41a158SSteven Rostedt static inline unsigned rb_page_write(struct buffer_page *bpage)
744bf41a158SSteven Rostedt {
745bf41a158SSteven Rostedt 	return local_read(&bpage->write);
746bf41a158SSteven Rostedt }
747bf41a158SSteven Rostedt 
748bf41a158SSteven Rostedt static inline unsigned rb_page_commit(struct buffer_page *bpage)
749bf41a158SSteven Rostedt {
750abc9b56dSSteven Rostedt 	return local_read(&bpage->page->commit);
751bf41a158SSteven Rostedt }
752bf41a158SSteven Rostedt 
753bf41a158SSteven Rostedt /* Size is determined by what has been committed */
754bf41a158SSteven Rostedt static inline unsigned rb_page_size(struct buffer_page *bpage)
755bf41a158SSteven Rostedt {
756bf41a158SSteven Rostedt 	return rb_page_commit(bpage);
757bf41a158SSteven Rostedt }
758bf41a158SSteven Rostedt 
759bf41a158SSteven Rostedt static inline unsigned
760bf41a158SSteven Rostedt rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
761bf41a158SSteven Rostedt {
762bf41a158SSteven Rostedt 	return rb_page_commit(cpu_buffer->commit_page);
763bf41a158SSteven Rostedt }
764bf41a158SSteven Rostedt 
765bf41a158SSteven Rostedt static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
766bf41a158SSteven Rostedt {
767bf41a158SSteven Rostedt 	return rb_page_commit(cpu_buffer->head_page);
768bf41a158SSteven Rostedt }
769bf41a158SSteven Rostedt 
7707a8e76a3SSteven Rostedt /*
7717a8e76a3SSteven Rostedt  * When the tail hits the head and the buffer is in overwrite mode,
7727a8e76a3SSteven Rostedt  * the head jumps to the next page and all content on the previous
7737a8e76a3SSteven Rostedt  * page is discarded. But before doing so, we update the overrun
7747a8e76a3SSteven Rostedt  * variable of the buffer.
7757a8e76a3SSteven Rostedt  */
7767a8e76a3SSteven Rostedt static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
7777a8e76a3SSteven Rostedt {
7787a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
7797a8e76a3SSteven Rostedt 	unsigned long head;
7807a8e76a3SSteven Rostedt 
7817a8e76a3SSteven Rostedt 	for (head = 0; head < rb_head_size(cpu_buffer);
7827a8e76a3SSteven Rostedt 	     head += rb_event_length(event)) {
7837a8e76a3SSteven Rostedt 
7846f807acdSSteven Rostedt 		event = __rb_page_index(cpu_buffer->head_page, head);
7853e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
7863e89c7bbSSteven Rostedt 			return;
7877a8e76a3SSteven Rostedt 		/* Only count data entries */
7887a8e76a3SSteven Rostedt 		if (event->type != RINGBUF_TYPE_DATA)
7897a8e76a3SSteven Rostedt 			continue;
7907a8e76a3SSteven Rostedt 		cpu_buffer->overrun++;
7917a8e76a3SSteven Rostedt 		cpu_buffer->entries--;
7927a8e76a3SSteven Rostedt 	}
7937a8e76a3SSteven Rostedt }
7947a8e76a3SSteven Rostedt 
7957a8e76a3SSteven Rostedt static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
796044fa782SSteven Rostedt 			       struct buffer_page **bpage)
7977a8e76a3SSteven Rostedt {
798044fa782SSteven Rostedt 	struct list_head *p = (*bpage)->list.next;
7997a8e76a3SSteven Rostedt 
8007a8e76a3SSteven Rostedt 	if (p == &cpu_buffer->pages)
8017a8e76a3SSteven Rostedt 		p = p->next;
8027a8e76a3SSteven Rostedt 
803044fa782SSteven Rostedt 	*bpage = list_entry(p, struct buffer_page, list);
8047a8e76a3SSteven Rostedt }
8057a8e76a3SSteven Rostedt 
806bf41a158SSteven Rostedt static inline unsigned
807bf41a158SSteven Rostedt rb_event_index(struct ring_buffer_event *event)
8087a8e76a3SSteven Rostedt {
809bf41a158SSteven Rostedt 	unsigned long addr = (unsigned long)event;
810bf41a158SSteven Rostedt 
811bf41a158SSteven Rostedt 	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
8127a8e76a3SSteven Rostedt }
8137a8e76a3SSteven Rostedt 
81434a148bfSAndrew Morton static int
815bf41a158SSteven Rostedt rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
816bf41a158SSteven Rostedt 	     struct ring_buffer_event *event)
8177a8e76a3SSteven Rostedt {
818bf41a158SSteven Rostedt 	unsigned long addr = (unsigned long)event;
819bf41a158SSteven Rostedt 	unsigned long index;
820bf41a158SSteven Rostedt 
821bf41a158SSteven Rostedt 	index = rb_event_index(event);
822bf41a158SSteven Rostedt 	addr &= PAGE_MASK;
823bf41a158SSteven Rostedt 
824bf41a158SSteven Rostedt 	return cpu_buffer->commit_page->page == (void *)addr &&
825bf41a158SSteven Rostedt 		rb_commit_index(cpu_buffer) == index;
826bf41a158SSteven Rostedt }
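
/*
 * Editor's note on the arithmetic above: an event's offset within its
 * page data area is
 *
 *	index = (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE)
 *
 * i.e. the offset inside the page minus the buffer_data_page header.
 * That puts the index in the same units as the write and commit
 * counters, which is what lets rb_is_commit() compare it directly
 * against rb_commit_index().
 */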
827bf41a158SSteven Rostedt 
82834a148bfSAndrew Morton static void
829bf41a158SSteven Rostedt rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
830bf41a158SSteven Rostedt 		    struct ring_buffer_event *event)
831bf41a158SSteven Rostedt {
832bf41a158SSteven Rostedt 	unsigned long addr = (unsigned long)event;
833bf41a158SSteven Rostedt 	unsigned long index;
834bf41a158SSteven Rostedt 
835bf41a158SSteven Rostedt 	index = rb_event_index(event);
836bf41a158SSteven Rostedt 	addr &= PAGE_MASK;
837bf41a158SSteven Rostedt 
838bf41a158SSteven Rostedt 	while (cpu_buffer->commit_page->page != (void *)addr) {
8393e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer,
8403e89c7bbSSteven Rostedt 			  cpu_buffer->commit_page == cpu_buffer->tail_page))
8413e89c7bbSSteven Rostedt 			return;
842abc9b56dSSteven Rostedt 		cpu_buffer->commit_page->page->commit =
843bf41a158SSteven Rostedt 			cpu_buffer->commit_page->write;
844bf41a158SSteven Rostedt 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
845abc9b56dSSteven Rostedt 		cpu_buffer->write_stamp =
846abc9b56dSSteven Rostedt 			cpu_buffer->commit_page->page->time_stamp;
847bf41a158SSteven Rostedt 	}
848bf41a158SSteven Rostedt 
849bf41a158SSteven Rostedt 	/* Now set the commit to the event's index */
850abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->commit_page->page->commit, index);
851bf41a158SSteven Rostedt }
852bf41a158SSteven Rostedt 
85334a148bfSAndrew Morton static void
854bf41a158SSteven Rostedt rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
855bf41a158SSteven Rostedt {
856bf41a158SSteven Rostedt 	/*
857bf41a158SSteven Rostedt 	 * We only race with interrupts and NMIs on this CPU.
858bf41a158SSteven Rostedt 	 * If we own the commit event, then we can commit
859bf41a158SSteven Rostedt 	 * all others that interrupted us, since the interruptions
860bf41a158SSteven Rostedt 	 * are in stack format (they finish before they come
861bf41a158SSteven Rostedt 	 * back to us). This allows us to do a simple loop to
862bf41a158SSteven Rostedt 	 * assign the commit to the tail.
863bf41a158SSteven Rostedt 	 */
864a8ccf1d6SSteven Rostedt  again:
865bf41a158SSteven Rostedt 	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
866abc9b56dSSteven Rostedt 		cpu_buffer->commit_page->page->commit =
867bf41a158SSteven Rostedt 			cpu_buffer->commit_page->write;
868bf41a158SSteven Rostedt 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
869abc9b56dSSteven Rostedt 		cpu_buffer->write_stamp =
870abc9b56dSSteven Rostedt 			cpu_buffer->commit_page->page->time_stamp;
871bf41a158SSteven Rostedt 		/* add barrier to keep gcc from optimizing too much */
872bf41a158SSteven Rostedt 		barrier();
873bf41a158SSteven Rostedt 	}
874bf41a158SSteven Rostedt 	while (rb_commit_index(cpu_buffer) !=
875bf41a158SSteven Rostedt 	       rb_page_write(cpu_buffer->commit_page)) {
876abc9b56dSSteven Rostedt 		cpu_buffer->commit_page->page->commit =
877bf41a158SSteven Rostedt 			cpu_buffer->commit_page->write;
878bf41a158SSteven Rostedt 		barrier();
879bf41a158SSteven Rostedt 	}
880a8ccf1d6SSteven Rostedt 
881a8ccf1d6SSteven Rostedt 	/* again, keep gcc from optimizing */
882a8ccf1d6SSteven Rostedt 	barrier();
883a8ccf1d6SSteven Rostedt 
884a8ccf1d6SSteven Rostedt 	/*
885a8ccf1d6SSteven Rostedt 	 * If an interrupt came in just after the first while loop
886a8ccf1d6SSteven Rostedt 	 * and pushed the tail page forward, we will be left with
887a8ccf1d6SSteven Rostedt 	 * a dangling commit that will never go forward.
888a8ccf1d6SSteven Rostedt 	 */
889a8ccf1d6SSteven Rostedt 	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
890a8ccf1d6SSteven Rostedt 		goto again;
8917a8e76a3SSteven Rostedt }
8927a8e76a3SSteven Rostedt 
893d769041fSSteven Rostedt static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
8947a8e76a3SSteven Rostedt {
895abc9b56dSSteven Rostedt 	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
8966f807acdSSteven Rostedt 	cpu_buffer->reader_page->read = 0;
897d769041fSSteven Rostedt }
898d769041fSSteven Rostedt 
89934a148bfSAndrew Morton static void rb_inc_iter(struct ring_buffer_iter *iter)
900d769041fSSteven Rostedt {
901d769041fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
902d769041fSSteven Rostedt 
903d769041fSSteven Rostedt 	/*
904d769041fSSteven Rostedt 	 * The iterator could be on the reader page (it starts there).
905d769041fSSteven Rostedt 	 * But the head could have moved, since the reader was
906d769041fSSteven Rostedt 	 * found. Check for this case and assign the iterator
907d769041fSSteven Rostedt 	 * to the head page instead of next.
908d769041fSSteven Rostedt 	 */
909d769041fSSteven Rostedt 	if (iter->head_page == cpu_buffer->reader_page)
910d769041fSSteven Rostedt 		iter->head_page = cpu_buffer->head_page;
911d769041fSSteven Rostedt 	else
912d769041fSSteven Rostedt 		rb_inc_page(cpu_buffer, &iter->head_page);
913d769041fSSteven Rostedt 
914abc9b56dSSteven Rostedt 	iter->read_stamp = iter->head_page->page->time_stamp;
9157a8e76a3SSteven Rostedt 	iter->head = 0;
9167a8e76a3SSteven Rostedt }
9177a8e76a3SSteven Rostedt 
9187a8e76a3SSteven Rostedt /**
9197a8e76a3SSteven Rostedt  * rb_update_event - update event type and data
9207a8e76a3SSteven Rostedt  * @event: the event to update
9217a8e76a3SSteven Rostedt  * @type: the type of event
9227a8e76a3SSteven Rostedt  * @length: the size of the event field in the ring buffer
9237a8e76a3SSteven Rostedt  *
9247a8e76a3SSteven Rostedt  * Update the type and data fields of the event. The length
9257a8e76a3SSteven Rostedt  * is the actual size that is written to the ring buffer,
9267a8e76a3SSteven Rostedt  * and with this, we can determine what to place into the
9277a8e76a3SSteven Rostedt  * data field.
9287a8e76a3SSteven Rostedt  */
92934a148bfSAndrew Morton static void
9307a8e76a3SSteven Rostedt rb_update_event(struct ring_buffer_event *event,
9317a8e76a3SSteven Rostedt 			 unsigned type, unsigned length)
9327a8e76a3SSteven Rostedt {
9337a8e76a3SSteven Rostedt 	event->type = type;
9347a8e76a3SSteven Rostedt 
9357a8e76a3SSteven Rostedt 	switch (type) {
9367a8e76a3SSteven Rostedt 
9377a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
9387a8e76a3SSteven Rostedt 		break;
9397a8e76a3SSteven Rostedt 
9407a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
94167d34724SAndrew Morton 		event->len = DIV_ROUND_UP(RB_LEN_TIME_EXTEND, RB_ALIGNMENT);
9427a8e76a3SSteven Rostedt 		break;
9437a8e76a3SSteven Rostedt 
9447a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
94567d34724SAndrew Morton 		event->len = DIV_ROUND_UP(RB_LEN_TIME_STAMP, RB_ALIGNMENT);
9467a8e76a3SSteven Rostedt 		break;
9477a8e76a3SSteven Rostedt 
9487a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
9497a8e76a3SSteven Rostedt 		length -= RB_EVNT_HDR_SIZE;
9507a8e76a3SSteven Rostedt 		if (length > RB_MAX_SMALL_DATA) {
9517a8e76a3SSteven Rostedt 			event->len = 0;
9527a8e76a3SSteven Rostedt 			event->array[0] = length;
9537a8e76a3SSteven Rostedt 		} else
95467d34724SAndrew Morton 			event->len = DIV_ROUND_UP(length, RB_ALIGNMENT);
9557a8e76a3SSteven Rostedt 		break;
9567a8e76a3SSteven Rostedt 	default:
9577a8e76a3SSteven Rostedt 		BUG();
9587a8e76a3SSteven Rostedt 	}
9597a8e76a3SSteven Rostedt }
9607a8e76a3SSteven Rostedt 
96134a148bfSAndrew Morton static unsigned rb_calculate_event_length(unsigned length)
9627a8e76a3SSteven Rostedt {
9637a8e76a3SSteven Rostedt 	struct ring_buffer_event event; /* Used only for sizeof array */
9647a8e76a3SSteven Rostedt 
9657a8e76a3SSteven Rostedt 	/* zero length can cause confusions */
9667a8e76a3SSteven Rostedt 	if (!length)
9677a8e76a3SSteven Rostedt 		length = 1;
9687a8e76a3SSteven Rostedt 
9697a8e76a3SSteven Rostedt 	if (length > RB_MAX_SMALL_DATA)
9707a8e76a3SSteven Rostedt 		length += sizeof(event.array[0]);
9717a8e76a3SSteven Rostedt 
9727a8e76a3SSteven Rostedt 	length += RB_EVNT_HDR_SIZE;
9737a8e76a3SSteven Rostedt 	length = ALIGN(length, RB_ALIGNMENT);
9747a8e76a3SSteven Rostedt 
9757a8e76a3SSteven Rostedt 	return length;
9767a8e76a3SSteven Rostedt }
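
/*
 * Worked example (editor's addition) for rb_calculate_event_length():
 * a 10-byte payload becomes 10 + RB_EVNT_HDR_SIZE rounded up to a
 * multiple of RB_ALIGNMENT; a 100-byte payload exceeds
 * RB_MAX_SMALL_DATA, so an extra sizeof(event.array[0]) is reserved to
 * hold the length, giving 100 + 4 + RB_EVNT_HDR_SIZE rounded up.
 */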
9777a8e76a3SSteven Rostedt 
9787a8e76a3SSteven Rostedt static struct ring_buffer_event *
9797a8e76a3SSteven Rostedt __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
9807a8e76a3SSteven Rostedt 		  unsigned type, unsigned long length, u64 *ts)
9817a8e76a3SSteven Rostedt {
98298db8df7SSteven Rostedt 	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
983bf41a158SSteven Rostedt 	unsigned long tail, write;
9847a8e76a3SSteven Rostedt 	struct ring_buffer *buffer = cpu_buffer->buffer;
9857a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
986bf41a158SSteven Rostedt 	unsigned long flags;
98778d904b4SSteven Rostedt 	bool lock_taken = false;
9887a8e76a3SSteven Rostedt 
98998db8df7SSteven Rostedt 	commit_page = cpu_buffer->commit_page;
99098db8df7SSteven Rostedt 	/* we just need to protect against interrupts */
99198db8df7SSteven Rostedt 	barrier();
9927a8e76a3SSteven Rostedt 	tail_page = cpu_buffer->tail_page;
993bf41a158SSteven Rostedt 	write = local_add_return(length, &tail_page->write);
994bf41a158SSteven Rostedt 	tail = write - length;
9957a8e76a3SSteven Rostedt 
996bf41a158SSteven Rostedt 	/* See if we shot past the end of this buffer page */
997bf41a158SSteven Rostedt 	if (write > BUF_PAGE_SIZE) {
9987a8e76a3SSteven Rostedt 		struct buffer_page *next_page = tail_page;
9997a8e76a3SSteven Rostedt 
10003e03fb7fSSteven Rostedt 		local_irq_save(flags);
100178d904b4SSteven Rostedt 		/*
1002a81bd80aSSteven Rostedt 		 * Since the write to the buffer is still not
1003a81bd80aSSteven Rostedt 		 * fully lockless, we must be careful with NMIs.
1004a81bd80aSSteven Rostedt 		 * The locks in the writers are taken when a write
1005a81bd80aSSteven Rostedt 		 * crosses to a new page. The locks protect against
1006a81bd80aSSteven Rostedt 		 * races with the readers (this will soon be fixed
1007a81bd80aSSteven Rostedt 		 * with a lockless solution).
1008a81bd80aSSteven Rostedt 		 *
1009a81bd80aSSteven Rostedt 		 * Because we can not protect against NMIs, and we
1010a81bd80aSSteven Rostedt 		 * want to keep traces reentrant, we need to manage
1011a81bd80aSSteven Rostedt 		 * what happens when we are in an NMI.
1012a81bd80aSSteven Rostedt 		 *
101378d904b4SSteven Rostedt 		 * NMIs can happen after we take the lock.
101478d904b4SSteven Rostedt 		 * If we are in an NMI, only take the lock
101578d904b4SSteven Rostedt 		 * if it is not already taken. Otherwise
101678d904b4SSteven Rostedt 		 * simply fail.
101778d904b4SSteven Rostedt 		 */
1018a81bd80aSSteven Rostedt 		if (unlikely(in_nmi())) {
101978d904b4SSteven Rostedt 			if (!__raw_spin_trylock(&cpu_buffer->lock))
102078d904b4SSteven Rostedt 				goto out_unlock;
102178d904b4SSteven Rostedt 		} else
10223e03fb7fSSteven Rostedt 			__raw_spin_lock(&cpu_buffer->lock);
1023bf41a158SSteven Rostedt 
102478d904b4SSteven Rostedt 		lock_taken = true;
102578d904b4SSteven Rostedt 
10267a8e76a3SSteven Rostedt 		rb_inc_page(cpu_buffer, &next_page);
10277a8e76a3SSteven Rostedt 
1028d769041fSSteven Rostedt 		head_page = cpu_buffer->head_page;
1029d769041fSSteven Rostedt 		reader_page = cpu_buffer->reader_page;
1030d769041fSSteven Rostedt 
1031d769041fSSteven Rostedt 		/* we grabbed the lock before incrementing */
10323e89c7bbSSteven Rostedt 		if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
10333e89c7bbSSteven Rostedt 			goto out_unlock;
1034bf41a158SSteven Rostedt 
1035bf41a158SSteven Rostedt 		/*
1036bf41a158SSteven Rostedt 		 * If for some reason, we had an interrupt storm that made
1037bf41a158SSteven Rostedt 		 * it all the way around the buffer, bail, and warn
1038bf41a158SSteven Rostedt 		 * about it.
1039bf41a158SSteven Rostedt 		 */
104098db8df7SSteven Rostedt 		if (unlikely(next_page == commit_page)) {
1041bf41a158SSteven Rostedt 			WARN_ON_ONCE(1);
1042bf41a158SSteven Rostedt 			goto out_unlock;
1043bf41a158SSteven Rostedt 		}
1044d769041fSSteven Rostedt 
10457a8e76a3SSteven Rostedt 		if (next_page == head_page) {
10466f3b3440SLai Jiangshan 			if (!(buffer->flags & RB_FL_OVERWRITE))
1047bf41a158SSteven Rostedt 				goto out_unlock;
10487a8e76a3SSteven Rostedt 
1049bf41a158SSteven Rostedt 			/* tail_page has not moved yet? */
1050bf41a158SSteven Rostedt 			if (tail_page == cpu_buffer->tail_page) {
10517a8e76a3SSteven Rostedt 				/* count overflows */
10527a8e76a3SSteven Rostedt 				rb_update_overflow(cpu_buffer);
10537a8e76a3SSteven Rostedt 
10547a8e76a3SSteven Rostedt 				rb_inc_page(cpu_buffer, &head_page);
10557a8e76a3SSteven Rostedt 				cpu_buffer->head_page = head_page;
1056bf41a158SSteven Rostedt 				cpu_buffer->head_page->read = 0;
1057bf41a158SSteven Rostedt 			}
10587a8e76a3SSteven Rostedt 		}
10597a8e76a3SSteven Rostedt 
1060bf41a158SSteven Rostedt 		/*
1061bf41a158SSteven Rostedt 		 * If the tail page is still the same as what we think
1062bf41a158SSteven Rostedt 		 * it is, then it is up to us to update the tail
1063bf41a158SSteven Rostedt 		 * pointer.
1064bf41a158SSteven Rostedt 		 */
1065bf41a158SSteven Rostedt 		if (tail_page == cpu_buffer->tail_page) {
1066bf41a158SSteven Rostedt 			local_set(&next_page->write, 0);
1067abc9b56dSSteven Rostedt 			local_set(&next_page->page->commit, 0);
1068bf41a158SSteven Rostedt 			cpu_buffer->tail_page = next_page;
1069bf41a158SSteven Rostedt 
1070bf41a158SSteven Rostedt 			/* reread the time stamp */
1071bf41a158SSteven Rostedt 			*ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1072abc9b56dSSteven Rostedt 			cpu_buffer->tail_page->page->time_stamp = *ts;
1073bf41a158SSteven Rostedt 		}
1074bf41a158SSteven Rostedt 
1075bf41a158SSteven Rostedt 		/*
1076bf41a158SSteven Rostedt 		 * The actual tail page has moved forward.
1077bf41a158SSteven Rostedt 		 */
1078bf41a158SSteven Rostedt 		if (tail < BUF_PAGE_SIZE) {
1079bf41a158SSteven Rostedt 			/* Mark the rest of the page with padding */
10806f807acdSSteven Rostedt 			event = __rb_page_index(tail_page, tail);
10817a8e76a3SSteven Rostedt 			event->type = RINGBUF_TYPE_PADDING;
10827a8e76a3SSteven Rostedt 		}
10837a8e76a3SSteven Rostedt 
1084bf41a158SSteven Rostedt 		if (tail <= BUF_PAGE_SIZE)
1085bf41a158SSteven Rostedt 			/* Set the write back to the previous setting */
1086bf41a158SSteven Rostedt 			local_set(&tail_page->write, tail);
1087bf41a158SSteven Rostedt 
1088bf41a158SSteven Rostedt 		/*
1089bf41a158SSteven Rostedt 		 * If this was a commit entry that failed,
1090bf41a158SSteven Rostedt 		 * increment that too
1091bf41a158SSteven Rostedt 		 */
1092bf41a158SSteven Rostedt 		if (tail_page == cpu_buffer->commit_page &&
1093bf41a158SSteven Rostedt 		    tail == rb_commit_index(cpu_buffer)) {
1094bf41a158SSteven Rostedt 			rb_set_commit_to_write(cpu_buffer);
10957a8e76a3SSteven Rostedt 		}
10967a8e76a3SSteven Rostedt 
10973e03fb7fSSteven Rostedt 		__raw_spin_unlock(&cpu_buffer->lock);
10983e03fb7fSSteven Rostedt 		local_irq_restore(flags);
1099bf41a158SSteven Rostedt 
1100bf41a158SSteven Rostedt 		/* fail and let the caller try again */
1101bf41a158SSteven Rostedt 		return ERR_PTR(-EAGAIN);
1102bf41a158SSteven Rostedt 	}
1103bf41a158SSteven Rostedt 
1104bf41a158SSteven Rostedt 	/* We reserved something on the buffer */
1105bf41a158SSteven Rostedt 
11063e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
11073e89c7bbSSteven Rostedt 		return NULL;
11087a8e76a3SSteven Rostedt 
11096f807acdSSteven Rostedt 	event = __rb_page_index(tail_page, tail);
11107a8e76a3SSteven Rostedt 	rb_update_event(event, type, length);
11117a8e76a3SSteven Rostedt 
1112bf41a158SSteven Rostedt 	/*
1113bf41a158SSteven Rostedt 	 * If this is a commit and the tail is zero, then update
1114bf41a158SSteven Rostedt 	 * this page's time stamp.
1115bf41a158SSteven Rostedt 	 */
1116bf41a158SSteven Rostedt 	if (!tail && rb_is_commit(cpu_buffer, event))
1117abc9b56dSSteven Rostedt 		cpu_buffer->commit_page->page->time_stamp = *ts;
1118bf41a158SSteven Rostedt 
11197a8e76a3SSteven Rostedt 	return event;
1120bf41a158SSteven Rostedt 
1121bf41a158SSteven Rostedt  out_unlock:
11226f3b3440SLai Jiangshan 	/* reset write */
11236f3b3440SLai Jiangshan 	if (tail <= BUF_PAGE_SIZE)
11246f3b3440SLai Jiangshan 		local_set(&tail_page->write, tail);
11256f3b3440SLai Jiangshan 
112678d904b4SSteven Rostedt 	if (likely(lock_taken))
11273e03fb7fSSteven Rostedt 		__raw_spin_unlock(&cpu_buffer->lock);
11283e03fb7fSSteven Rostedt 	local_irq_restore(flags);
1129bf41a158SSteven Rostedt 	return NULL;
11307a8e76a3SSteven Rostedt }
11317a8e76a3SSteven Rostedt 
11327a8e76a3SSteven Rostedt static int
11337a8e76a3SSteven Rostedt rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
11347a8e76a3SSteven Rostedt 		  u64 *ts, u64 *delta)
11357a8e76a3SSteven Rostedt {
11367a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
11377a8e76a3SSteven Rostedt 	static int once;
1138bf41a158SSteven Rostedt 	int ret;
11397a8e76a3SSteven Rostedt 
11407a8e76a3SSteven Rostedt 	if (unlikely(*delta > (1ULL << 59) && !once++)) {
11417a8e76a3SSteven Rostedt 		printk(KERN_WARNING "Delta way too big! %llu"
11427a8e76a3SSteven Rostedt 		       " ts=%llu write stamp = %llu\n",
1143e2862c94SStephen Rothwell 		       (unsigned long long)*delta,
1144e2862c94SStephen Rothwell 		       (unsigned long long)*ts,
1145e2862c94SStephen Rothwell 		       (unsigned long long)cpu_buffer->write_stamp);
11467a8e76a3SSteven Rostedt 		WARN_ON(1);
11477a8e76a3SSteven Rostedt 	}
11487a8e76a3SSteven Rostedt 
11497a8e76a3SSteven Rostedt 	/*
11507a8e76a3SSteven Rostedt 	 * The delta is too big; we need to add a
11517a8e76a3SSteven Rostedt 	 * new timestamp.
11527a8e76a3SSteven Rostedt 	 */
11537a8e76a3SSteven Rostedt 	event = __rb_reserve_next(cpu_buffer,
11547a8e76a3SSteven Rostedt 				  RINGBUF_TYPE_TIME_EXTEND,
11557a8e76a3SSteven Rostedt 				  RB_LEN_TIME_EXTEND,
11567a8e76a3SSteven Rostedt 				  ts);
11577a8e76a3SSteven Rostedt 	if (!event)
1158bf41a158SSteven Rostedt 		return -EBUSY;
11597a8e76a3SSteven Rostedt 
1160bf41a158SSteven Rostedt 	if (PTR_ERR(event) == -EAGAIN)
1161bf41a158SSteven Rostedt 		return -EAGAIN;
1162bf41a158SSteven Rostedt 
1163bf41a158SSteven Rostedt 	/* Only a committed time event can update the write stamp */
1164bf41a158SSteven Rostedt 	if (rb_is_commit(cpu_buffer, event)) {
1165bf41a158SSteven Rostedt 		/*
1166bf41a158SSteven Rostedt 		 * If this is not the first event on the page, store the delta in
1167bf41a158SSteven Rostedt 		 * the event. Otherwise update the page timestamp and zero the event.
1168bf41a158SSteven Rostedt 		 */
1169bf41a158SSteven Rostedt 		if (rb_event_index(event)) {
11707a8e76a3SSteven Rostedt 			event->time_delta = *delta & TS_MASK;
11717a8e76a3SSteven Rostedt 			event->array[0] = *delta >> TS_SHIFT;
1172bf41a158SSteven Rostedt 		} else {
1173abc9b56dSSteven Rostedt 			cpu_buffer->commit_page->page->time_stamp = *ts;
1174bf41a158SSteven Rostedt 			event->time_delta = 0;
1175bf41a158SSteven Rostedt 			event->array[0] = 0;
1176bf41a158SSteven Rostedt 		}
11777a8e76a3SSteven Rostedt 		cpu_buffer->write_stamp = *ts;
1178bf41a158SSteven Rostedt 		/* let the caller know this was the commit */
1179bf41a158SSteven Rostedt 		ret = 1;
1180bf41a158SSteven Rostedt 	} else {
1181bf41a158SSteven Rostedt 		/* Darn, this is just wasted space */
1182bf41a158SSteven Rostedt 		event->time_delta = 0;
1183bf41a158SSteven Rostedt 		event->array[0] = 0;
1184bf41a158SSteven Rostedt 		ret = 0;
11857a8e76a3SSteven Rostedt 	}
11867a8e76a3SSteven Rostedt 
1187bf41a158SSteven Rostedt 	*delta = 0;
1188bf41a158SSteven Rostedt 
1189bf41a158SSteven Rostedt 	return ret;
11907a8e76a3SSteven Rostedt }
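
/*
 * Editorial sketch, not part of the original file: rb_add_time_stamp()
 * above splits an oversized delta across a TIME_EXTEND event, putting
 * the low TS_SHIFT bits in time_delta and the remaining bits in
 * array[0]. A reader reassembles it exactly as rb_update_read_stamp()
 * does later in this file:
 */
static inline u64 rb_example_time_extend_delta(struct ring_buffer_event *event)
{
	u64 delta = event->array[0];	/* upper bits of the delta */

	delta <<= TS_SHIFT;
	delta += event->time_delta;	/* low TS_SHIFT bits */
	return delta;
}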
11917a8e76a3SSteven Rostedt 
11927a8e76a3SSteven Rostedt static struct ring_buffer_event *
11937a8e76a3SSteven Rostedt rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
11947a8e76a3SSteven Rostedt 		      unsigned type, unsigned long length)
11957a8e76a3SSteven Rostedt {
11967a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
11977a8e76a3SSteven Rostedt 	u64 ts, delta;
1198bf41a158SSteven Rostedt 	int commit = 0;
1199818e3dd3SSteven Rostedt 	int nr_loops = 0;
12007a8e76a3SSteven Rostedt 
1201bf41a158SSteven Rostedt  again:
1202818e3dd3SSteven Rostedt 	/*
1203818e3dd3SSteven Rostedt 	 * We allow for interrupts to reenter here and do a trace.
1204818e3dd3SSteven Rostedt 	 * If one does, it will cause this original code to loop
1205818e3dd3SSteven Rostedt 	 * back here. Even with heavy interrupts happening, this
1206818e3dd3SSteven Rostedt 	 * should only happen a few times in a row. If this happens
1207818e3dd3SSteven Rostedt 	 * 1000 times in a row, there must be either an interrupt
1208818e3dd3SSteven Rostedt 	 * storm or we have something buggy.
1209818e3dd3SSteven Rostedt 	 * Bail!
1210818e3dd3SSteven Rostedt 	 */
12113e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
1212818e3dd3SSteven Rostedt 		return NULL;
1213818e3dd3SSteven Rostedt 
12147a8e76a3SSteven Rostedt 	ts = ring_buffer_time_stamp(cpu_buffer->cpu);
12157a8e76a3SSteven Rostedt 
1216bf41a158SSteven Rostedt 	/*
1217bf41a158SSteven Rostedt 	 * Only the first commit can update the timestamp.
1218bf41a158SSteven Rostedt 	 * Yes there is a race here. If an interrupt comes in
1219bf41a158SSteven Rostedt 	 * just after the conditional and it traces too, then it
1220bf41a158SSteven Rostedt 	 * will also check the deltas. More than one timestamp may
1221bf41a158SSteven Rostedt 	 * also be made. But only the entry that did the actual
1222bf41a158SSteven Rostedt 	 * commit will be something other than zero.
1223bf41a158SSteven Rostedt 	 */
1224bf41a158SSteven Rostedt 	if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1225bf41a158SSteven Rostedt 	    rb_page_write(cpu_buffer->tail_page) ==
1226bf41a158SSteven Rostedt 	    rb_commit_index(cpu_buffer)) {
1227bf41a158SSteven Rostedt 
12287a8e76a3SSteven Rostedt 		delta = ts - cpu_buffer->write_stamp;
12297a8e76a3SSteven Rostedt 
1230bf41a158SSteven Rostedt 		/* make sure this delta is calculated here */
1231bf41a158SSteven Rostedt 		barrier();
12327a8e76a3SSteven Rostedt 
1233bf41a158SSteven Rostedt 		/* Did the write stamp get updated already? */
1234bf41a158SSteven Rostedt 		if (unlikely(ts < cpu_buffer->write_stamp))
12354143c5cbSSteven Rostedt 			delta = 0;
1236bf41a158SSteven Rostedt 
1237bf41a158SSteven Rostedt 		if (test_time_stamp(delta)) {
1238bf41a158SSteven Rostedt 
1239bf41a158SSteven Rostedt 			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1240bf41a158SSteven Rostedt 
1241bf41a158SSteven Rostedt 			if (commit == -EBUSY)
12427a8e76a3SSteven Rostedt 				return NULL;
1243bf41a158SSteven Rostedt 
1244bf41a158SSteven Rostedt 			if (commit == -EAGAIN)
1245bf41a158SSteven Rostedt 				goto again;
1246bf41a158SSteven Rostedt 
1247bf41a158SSteven Rostedt 			RB_WARN_ON(cpu_buffer, commit < 0);
12487a8e76a3SSteven Rostedt 		}
1249bf41a158SSteven Rostedt 	} else
1250bf41a158SSteven Rostedt 		/* Non commits have zero deltas */
12517a8e76a3SSteven Rostedt 		delta = 0;
12527a8e76a3SSteven Rostedt 
12537a8e76a3SSteven Rostedt 	event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1254bf41a158SSteven Rostedt 	if (PTR_ERR(event) == -EAGAIN)
1255bf41a158SSteven Rostedt 		goto again;
12567a8e76a3SSteven Rostedt 
1257bf41a158SSteven Rostedt 	if (!event) {
1258bf41a158SSteven Rostedt 		if (unlikely(commit))
1259bf41a158SSteven Rostedt 			/*
1260bf41a158SSteven Rostedt 			 * Ouch! We needed a timestamp and it was committed. But
1261bf41a158SSteven Rostedt 			 * we didn't get our event reserved.
1262bf41a158SSteven Rostedt 			 */
1263bf41a158SSteven Rostedt 			rb_set_commit_to_write(cpu_buffer);
1264bf41a158SSteven Rostedt 		return NULL;
1265bf41a158SSteven Rostedt 	}
1266bf41a158SSteven Rostedt 
1267bf41a158SSteven Rostedt 	/*
1268bf41a158SSteven Rostedt 	 * If the timestamp was committed, make the commit our entry
1269bf41a158SSteven Rostedt 	 * now so that we will update it when needed.
1270bf41a158SSteven Rostedt 	 */
1271bf41a158SSteven Rostedt 	if (commit)
1272bf41a158SSteven Rostedt 		rb_set_commit_event(cpu_buffer, event);
1273bf41a158SSteven Rostedt 	else if (!rb_is_commit(cpu_buffer, event))
12747a8e76a3SSteven Rostedt 		delta = 0;
12757a8e76a3SSteven Rostedt 
12767a8e76a3SSteven Rostedt 	event->time_delta = delta;
12777a8e76a3SSteven Rostedt 
12787a8e76a3SSteven Rostedt 	return event;
12797a8e76a3SSteven Rostedt }
12807a8e76a3SSteven Rostedt 
1281bf41a158SSteven Rostedt static DEFINE_PER_CPU(int, rb_need_resched);
1282bf41a158SSteven Rostedt 
12837a8e76a3SSteven Rostedt /**
12847a8e76a3SSteven Rostedt  * ring_buffer_lock_reserve - reserve a part of the buffer
12857a8e76a3SSteven Rostedt  * @buffer: the ring buffer to reserve from
12867a8e76a3SSteven Rostedt  * @length: the length of the data to reserve (excluding event header)
12877a8e76a3SSteven Rostedt  *
12887a8e76a3SSteven Rostedt  * Returns a reserved event on the ring buffer to copy data directly into.
12897a8e76a3SSteven Rostedt  * The user of this interface can then use ring_buffer_event_data() to get
12907a8e76a3SSteven Rostedt  * a pointer to the body to write into.
12917a8e76a3SSteven Rostedt  *
12927a8e76a3SSteven Rostedt  * The length is the length of the data needed, not the event length
12937a8e76a3SSteven Rostedt  * which also includes the event header.
12947a8e76a3SSteven Rostedt  *
12957a8e76a3SSteven Rostedt  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
12967a8e76a3SSteven Rostedt  * If NULL is returned, then nothing has been allocated or locked.
12977a8e76a3SSteven Rostedt  */
12987a8e76a3SSteven Rostedt struct ring_buffer_event *
12990a987751SArnaldo Carvalho de Melo ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
13007a8e76a3SSteven Rostedt {
13017a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
13027a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
1303bf41a158SSteven Rostedt 	int cpu, resched;
13047a8e76a3SSteven Rostedt 
1305033601a3SSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
1306a3583244SSteven Rostedt 		return NULL;
1307a3583244SSteven Rostedt 
13087a8e76a3SSteven Rostedt 	if (atomic_read(&buffer->record_disabled))
13097a8e76a3SSteven Rostedt 		return NULL;
13107a8e76a3SSteven Rostedt 
1311bf41a158SSteven Rostedt 	/* If we are tracing schedule, we don't want to recurse */
1312182e9f5fSSteven Rostedt 	resched = ftrace_preempt_disable();
1313bf41a158SSteven Rostedt 
13147a8e76a3SSteven Rostedt 	cpu = raw_smp_processor_id();
13157a8e76a3SSteven Rostedt 
13169e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1317d769041fSSteven Rostedt 		goto out;
13187a8e76a3SSteven Rostedt 
13197a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
13207a8e76a3SSteven Rostedt 
13217a8e76a3SSteven Rostedt 	if (atomic_read(&cpu_buffer->record_disabled))
1322d769041fSSteven Rostedt 		goto out;
13237a8e76a3SSteven Rostedt 
13247a8e76a3SSteven Rostedt 	length = rb_calculate_event_length(length);
13257a8e76a3SSteven Rostedt 	if (length > BUF_PAGE_SIZE)
1326bf41a158SSteven Rostedt 		goto out;
13277a8e76a3SSteven Rostedt 
13287a8e76a3SSteven Rostedt 	event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
13297a8e76a3SSteven Rostedt 	if (!event)
1330d769041fSSteven Rostedt 		goto out;
13317a8e76a3SSteven Rostedt 
1332bf41a158SSteven Rostedt 	/*
1333bf41a158SSteven Rostedt 	 * Need to store the resched state on this cpu.
1334bf41a158SSteven Rostedt 	 * Only the first (outermost) reservation needs to.
1335bf41a158SSteven Rostedt 	 */
1336bf41a158SSteven Rostedt 
1337bf41a158SSteven Rostedt 	if (preempt_count() == 1)
1338bf41a158SSteven Rostedt 		per_cpu(rb_need_resched, cpu) = resched;
1339bf41a158SSteven Rostedt 
13407a8e76a3SSteven Rostedt 	return event;
13417a8e76a3SSteven Rostedt 
1342d769041fSSteven Rostedt  out:
1343182e9f5fSSteven Rostedt 	ftrace_preempt_enable(resched);
13447a8e76a3SSteven Rostedt 	return NULL;
13457a8e76a3SSteven Rostedt }
1346c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
13477a8e76a3SSteven Rostedt 
13487a8e76a3SSteven Rostedt static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
13497a8e76a3SSteven Rostedt 		      struct ring_buffer_event *event)
13507a8e76a3SSteven Rostedt {
13517a8e76a3SSteven Rostedt 	cpu_buffer->entries++;
1352bf41a158SSteven Rostedt 
1353bf41a158SSteven Rostedt 	/* Only process further if we own the commit */
1354bf41a158SSteven Rostedt 	if (!rb_is_commit(cpu_buffer, event))
1355bf41a158SSteven Rostedt 		return;
1356bf41a158SSteven Rostedt 
1357bf41a158SSteven Rostedt 	cpu_buffer->write_stamp += event->time_delta;
1358bf41a158SSteven Rostedt 
1359bf41a158SSteven Rostedt 	rb_set_commit_to_write(cpu_buffer);
13607a8e76a3SSteven Rostedt }
13617a8e76a3SSteven Rostedt 
13627a8e76a3SSteven Rostedt /**
13637a8e76a3SSteven Rostedt  * ring_buffer_unlock_commit - commit a reserved event
13647a8e76a3SSteven Rostedt  * @buffer: The buffer to commit to
13657a8e76a3SSteven Rostedt  * @event: The event pointer to commit.
13667a8e76a3SSteven Rostedt  *
13677a8e76a3SSteven Rostedt  * This commits the data to the ring buffer, and releases any locks held.
13687a8e76a3SSteven Rostedt  *
13697a8e76a3SSteven Rostedt  * Must be paired with ring_buffer_lock_reserve.
13707a8e76a3SSteven Rostedt  */
13717a8e76a3SSteven Rostedt int ring_buffer_unlock_commit(struct ring_buffer *buffer,
13720a987751SArnaldo Carvalho de Melo 			      struct ring_buffer_event *event)
13737a8e76a3SSteven Rostedt {
13747a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
13757a8e76a3SSteven Rostedt 	int cpu = raw_smp_processor_id();
13767a8e76a3SSteven Rostedt 
13777a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
13787a8e76a3SSteven Rostedt 
13797a8e76a3SSteven Rostedt 	rb_commit(cpu_buffer, event);
13807a8e76a3SSteven Rostedt 
1381bf41a158SSteven Rostedt 	/*
1382bf41a158SSteven Rostedt 	 * Only the outermost commit needs to restore preemption.
1383bf41a158SSteven Rostedt 	 */
1384182e9f5fSSteven Rostedt 	if (preempt_count() == 1)
1385182e9f5fSSteven Rostedt 		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
1386bf41a158SSteven Rostedt 	else
1387bf41a158SSteven Rostedt 		preempt_enable_no_resched_notrace();
13887a8e76a3SSteven Rostedt 
13897a8e76a3SSteven Rostedt 	return 0;
13907a8e76a3SSteven Rostedt }
1391c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
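
/*
 * Editorial usage sketch, not part of the original file: the typical
 * writer path pairs ring_buffer_lock_reserve() with
 * ring_buffer_unlock_commit(). The payload here (a single int) is made
 * up for illustration; only the ring buffer calls come from this API.
 */
static int example_reserve_and_commit(struct ring_buffer *buffer, int value)
{
	struct ring_buffer_event *event;
	int *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return -EBUSY;	/* recording disabled or no space */

	body = ring_buffer_event_data(event);
	*body = value;

	/* releases the preemption state taken by the reserve */
	return ring_buffer_unlock_commit(buffer, event);
}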
13927a8e76a3SSteven Rostedt 
13937a8e76a3SSteven Rostedt /**
13947a8e76a3SSteven Rostedt  * ring_buffer_write - write data to the buffer without reserving
13957a8e76a3SSteven Rostedt  * @buffer: The ring buffer to write to.
13967a8e76a3SSteven Rostedt  * @length: The length of the data being written (excluding the event header)
13977a8e76a3SSteven Rostedt  * @data: The data to write to the buffer.
13987a8e76a3SSteven Rostedt  *
13997a8e76a3SSteven Rostedt  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
14007a8e76a3SSteven Rostedt  * one function. If you already have the data to write to the buffer, it
14017a8e76a3SSteven Rostedt  * may be easier to simply call this function.
14027a8e76a3SSteven Rostedt  *
14037a8e76a3SSteven Rostedt  * Note, like ring_buffer_lock_reserve, the length is the length of the data
14047a8e76a3SSteven Rostedt  * and not the length of the event which would hold the header.
14057a8e76a3SSteven Rostedt  */
14067a8e76a3SSteven Rostedt int ring_buffer_write(struct ring_buffer *buffer,
14077a8e76a3SSteven Rostedt 			unsigned long length,
14087a8e76a3SSteven Rostedt 			void *data)
14097a8e76a3SSteven Rostedt {
14107a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
14117a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
1412bf41a158SSteven Rostedt 	unsigned long event_length;
14137a8e76a3SSteven Rostedt 	void *body;
14147a8e76a3SSteven Rostedt 	int ret = -EBUSY;
1415bf41a158SSteven Rostedt 	int cpu, resched;
14167a8e76a3SSteven Rostedt 
1417033601a3SSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
1418a3583244SSteven Rostedt 		return -EBUSY;
1419a3583244SSteven Rostedt 
14207a8e76a3SSteven Rostedt 	if (atomic_read(&buffer->record_disabled))
14217a8e76a3SSteven Rostedt 		return -EBUSY;
14227a8e76a3SSteven Rostedt 
1423182e9f5fSSteven Rostedt 	resched = ftrace_preempt_disable();
1424bf41a158SSteven Rostedt 
14257a8e76a3SSteven Rostedt 	cpu = raw_smp_processor_id();
14267a8e76a3SSteven Rostedt 
14279e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
1428d769041fSSteven Rostedt 		goto out;
14297a8e76a3SSteven Rostedt 
14307a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
14317a8e76a3SSteven Rostedt 
14327a8e76a3SSteven Rostedt 	if (atomic_read(&cpu_buffer->record_disabled))
14337a8e76a3SSteven Rostedt 		goto out;
14347a8e76a3SSteven Rostedt 
14357a8e76a3SSteven Rostedt 	event_length = rb_calculate_event_length(length);
14367a8e76a3SSteven Rostedt 	event = rb_reserve_next_event(cpu_buffer,
14377a8e76a3SSteven Rostedt 				      RINGBUF_TYPE_DATA, event_length);
14387a8e76a3SSteven Rostedt 	if (!event)
14397a8e76a3SSteven Rostedt 		goto out;
14407a8e76a3SSteven Rostedt 
14417a8e76a3SSteven Rostedt 	body = rb_event_data(event);
14427a8e76a3SSteven Rostedt 
14437a8e76a3SSteven Rostedt 	memcpy(body, data, length);
14447a8e76a3SSteven Rostedt 
14457a8e76a3SSteven Rostedt 	rb_commit(cpu_buffer, event);
14467a8e76a3SSteven Rostedt 
14477a8e76a3SSteven Rostedt 	ret = 0;
14487a8e76a3SSteven Rostedt  out:
1449182e9f5fSSteven Rostedt 	ftrace_preempt_enable(resched);
14507a8e76a3SSteven Rostedt 
14517a8e76a3SSteven Rostedt 	return ret;
14527a8e76a3SSteven Rostedt }
1453c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_write);
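
/*
 * Editorial sketch, not part of the original file: when the data is
 * already assembled, ring_buffer_write() replaces the reserve/commit
 * pair. The example_record structure is hypothetical.
 */
struct example_record {
	unsigned long	ip;
	unsigned long	parent_ip;
};

static int example_write_record(struct ring_buffer *buffer,
				unsigned long ip, unsigned long parent_ip)
{
	struct example_record rec = {
		.ip		= ip,
		.parent_ip	= parent_ip,
	};

	/* returns 0 on success, -EBUSY if recording is disabled */
	return ring_buffer_write(buffer, sizeof(rec), &rec);
}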
14547a8e76a3SSteven Rostedt 
145534a148bfSAndrew Morton static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1456bf41a158SSteven Rostedt {
1457bf41a158SSteven Rostedt 	struct buffer_page *reader = cpu_buffer->reader_page;
1458bf41a158SSteven Rostedt 	struct buffer_page *head = cpu_buffer->head_page;
1459bf41a158SSteven Rostedt 	struct buffer_page *commit = cpu_buffer->commit_page;
1460bf41a158SSteven Rostedt 
1461bf41a158SSteven Rostedt 	return reader->read == rb_page_commit(reader) &&
1462bf41a158SSteven Rostedt 		(commit == reader ||
1463bf41a158SSteven Rostedt 		 (commit == head &&
1464bf41a158SSteven Rostedt 		  head->read == rb_page_commit(commit)));
1465bf41a158SSteven Rostedt }
1466bf41a158SSteven Rostedt 
14677a8e76a3SSteven Rostedt /**
14687a8e76a3SSteven Rostedt  * ring_buffer_record_disable - stop all writes into the buffer
14697a8e76a3SSteven Rostedt  * @buffer: The ring buffer to stop writes to.
14707a8e76a3SSteven Rostedt  *
14717a8e76a3SSteven Rostedt  * This prevents all writes to the buffer. Any attempt to write
14727a8e76a3SSteven Rostedt  * to the buffer after this will fail and return NULL.
14737a8e76a3SSteven Rostedt  *
14747a8e76a3SSteven Rostedt  * The caller should call synchronize_sched() after this.
14757a8e76a3SSteven Rostedt  */
14767a8e76a3SSteven Rostedt void ring_buffer_record_disable(struct ring_buffer *buffer)
14777a8e76a3SSteven Rostedt {
14787a8e76a3SSteven Rostedt 	atomic_inc(&buffer->record_disabled);
14797a8e76a3SSteven Rostedt }
1480c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
14817a8e76a3SSteven Rostedt 
14827a8e76a3SSteven Rostedt /**
14837a8e76a3SSteven Rostedt  * ring_buffer_record_enable - enable writes to the buffer
14847a8e76a3SSteven Rostedt  * @buffer: The ring buffer to enable writes
14857a8e76a3SSteven Rostedt  *
14867a8e76a3SSteven Rostedt  * Note, multiple disables will need the same number of enables
14877a8e76a3SSteven Rostedt  * to truly enable the writing (much like preempt_disable).
14887a8e76a3SSteven Rostedt  */
14897a8e76a3SSteven Rostedt void ring_buffer_record_enable(struct ring_buffer *buffer)
14907a8e76a3SSteven Rostedt {
14917a8e76a3SSteven Rostedt 	atomic_dec(&buffer->record_disabled);
14927a8e76a3SSteven Rostedt }
1493c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
14947a8e76a3SSteven Rostedt 
14957a8e76a3SSteven Rostedt /**
14967a8e76a3SSteven Rostedt  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
14977a8e76a3SSteven Rostedt  * @buffer: The ring buffer to stop writes to.
14987a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to stop
14997a8e76a3SSteven Rostedt  *
15007a8e76a3SSteven Rostedt  * This prevents all writes to the buffer. Any attempt to write
15017a8e76a3SSteven Rostedt  * to the buffer after this will fail and return NULL.
15027a8e76a3SSteven Rostedt  *
15037a8e76a3SSteven Rostedt  * The caller should call synchronize_sched() after this.
15047a8e76a3SSteven Rostedt  */
15057a8e76a3SSteven Rostedt void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
15067a8e76a3SSteven Rostedt {
15077a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
15087a8e76a3SSteven Rostedt 
15099e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
15107a8e76a3SSteven Rostedt 		return;
15117a8e76a3SSteven Rostedt 
15127a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
15137a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
15147a8e76a3SSteven Rostedt }
1515c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
15167a8e76a3SSteven Rostedt 
15177a8e76a3SSteven Rostedt /**
15187a8e76a3SSteven Rostedt  * ring_buffer_record_enable_cpu - enable writes to the buffer
15197a8e76a3SSteven Rostedt  * @buffer: The ring buffer to enable writes
15207a8e76a3SSteven Rostedt  * @cpu: The CPU to enable.
15217a8e76a3SSteven Rostedt  *
15227a8e76a3SSteven Rostedt  * Note, multiple disables will need the same number of enables
15237a8e76a3SSteven Rostedt  * to truly enable the writing (much like preempt_disable).
15247a8e76a3SSteven Rostedt  */
15257a8e76a3SSteven Rostedt void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
15267a8e76a3SSteven Rostedt {
15277a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
15287a8e76a3SSteven Rostedt 
15299e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
15307a8e76a3SSteven Rostedt 		return;
15317a8e76a3SSteven Rostedt 
15327a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
15337a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
15347a8e76a3SSteven Rostedt }
1535c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
15367a8e76a3SSteven Rostedt 
15377a8e76a3SSteven Rostedt /**
15387a8e76a3SSteven Rostedt  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
15397a8e76a3SSteven Rostedt  * @buffer: The ring buffer
15407a8e76a3SSteven Rostedt  * @cpu: The per CPU buffer to get the entries from.
15417a8e76a3SSteven Rostedt  */
15427a8e76a3SSteven Rostedt unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
15437a8e76a3SSteven Rostedt {
15447a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
15457a8e76a3SSteven Rostedt 
15469e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
15477a8e76a3SSteven Rostedt 		return 0;
15487a8e76a3SSteven Rostedt 
15497a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
15507a8e76a3SSteven Rostedt 	return cpu_buffer->entries;
15517a8e76a3SSteven Rostedt }
1552c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
15537a8e76a3SSteven Rostedt 
15547a8e76a3SSteven Rostedt /**
15557a8e76a3SSteven Rostedt  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
15567a8e76a3SSteven Rostedt  * @buffer: The ring buffer
15577a8e76a3SSteven Rostedt  * @cpu: The per CPU buffer to get the number of overruns from
15587a8e76a3SSteven Rostedt  */
15597a8e76a3SSteven Rostedt unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
15607a8e76a3SSteven Rostedt {
15617a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
15627a8e76a3SSteven Rostedt 
15639e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
15647a8e76a3SSteven Rostedt 		return 0;
15657a8e76a3SSteven Rostedt 
15667a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
15677a8e76a3SSteven Rostedt 	return cpu_buffer->overrun;
15687a8e76a3SSteven Rostedt }
1569c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
15707a8e76a3SSteven Rostedt 
15717a8e76a3SSteven Rostedt /**
15727a8e76a3SSteven Rostedt  * ring_buffer_entries - get the number of entries in a buffer
15737a8e76a3SSteven Rostedt  * @buffer: The ring buffer
15747a8e76a3SSteven Rostedt  *
15757a8e76a3SSteven Rostedt  * Returns the total number of entries in the ring buffer
15767a8e76a3SSteven Rostedt  * (all CPU entries)
15777a8e76a3SSteven Rostedt  */
15787a8e76a3SSteven Rostedt unsigned long ring_buffer_entries(struct ring_buffer *buffer)
15797a8e76a3SSteven Rostedt {
15807a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
15817a8e76a3SSteven Rostedt 	unsigned long entries = 0;
15827a8e76a3SSteven Rostedt 	int cpu;
15837a8e76a3SSteven Rostedt 
15847a8e76a3SSteven Rostedt 	/* if you care about this being correct, lock the buffer */
15857a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
15867a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
15877a8e76a3SSteven Rostedt 		entries += cpu_buffer->entries;
15887a8e76a3SSteven Rostedt 	}
15897a8e76a3SSteven Rostedt 
15907a8e76a3SSteven Rostedt 	return entries;
15917a8e76a3SSteven Rostedt }
1592c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries);
15937a8e76a3SSteven Rostedt 
15947a8e76a3SSteven Rostedt /**
15957a8e76a3SSteven Rostedt  * ring_buffer_overruns - get the number of overruns in the buffer
15967a8e76a3SSteven Rostedt  * @buffer: The ring buffer
15977a8e76a3SSteven Rostedt  *
15987a8e76a3SSteven Rostedt  * Returns the total number of overruns in the ring buffer
15997a8e76a3SSteven Rostedt  * (all CPU entries)
16007a8e76a3SSteven Rostedt  */
16017a8e76a3SSteven Rostedt unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
16027a8e76a3SSteven Rostedt {
16037a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
16047a8e76a3SSteven Rostedt 	unsigned long overruns = 0;
16057a8e76a3SSteven Rostedt 	int cpu;
16067a8e76a3SSteven Rostedt 
16077a8e76a3SSteven Rostedt 	/* if you care about this being correct, lock the buffer */
16087a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
16097a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
16107a8e76a3SSteven Rostedt 		overruns += cpu_buffer->overrun;
16117a8e76a3SSteven Rostedt 	}
16127a8e76a3SSteven Rostedt 
16137a8e76a3SSteven Rostedt 	return overruns;
16147a8e76a3SSteven Rostedt }
1615c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overruns);
16167a8e76a3SSteven Rostedt 
1617642edba5SSteven Rostedt static void rb_iter_reset(struct ring_buffer_iter *iter)
16187a8e76a3SSteven Rostedt {
16197a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
16207a8e76a3SSteven Rostedt 
1621d769041fSSteven Rostedt 	/* Iterator usage is expected to have record disabled */
1622d769041fSSteven Rostedt 	if (list_empty(&cpu_buffer->reader_page->list)) {
16237a8e76a3SSteven Rostedt 		iter->head_page = cpu_buffer->head_page;
16246f807acdSSteven Rostedt 		iter->head = cpu_buffer->head_page->read;
1625d769041fSSteven Rostedt 	} else {
1626d769041fSSteven Rostedt 		iter->head_page = cpu_buffer->reader_page;
16276f807acdSSteven Rostedt 		iter->head = cpu_buffer->reader_page->read;
1628d769041fSSteven Rostedt 	}
1629d769041fSSteven Rostedt 	if (iter->head)
1630d769041fSSteven Rostedt 		iter->read_stamp = cpu_buffer->read_stamp;
1631d769041fSSteven Rostedt 	else
1632abc9b56dSSteven Rostedt 		iter->read_stamp = iter->head_page->page->time_stamp;
1633642edba5SSteven Rostedt }
1634f83c9d0fSSteven Rostedt 
1635642edba5SSteven Rostedt /**
1636642edba5SSteven Rostedt  * ring_buffer_iter_reset - reset an iterator
1637642edba5SSteven Rostedt  * @iter: The iterator to reset
1638642edba5SSteven Rostedt  *
1639642edba5SSteven Rostedt  * Resets the iterator, so that it will start from the beginning
1640642edba5SSteven Rostedt  * again.
1641642edba5SSteven Rostedt  */
1642642edba5SSteven Rostedt void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1643642edba5SSteven Rostedt {
1644642edba5SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1645642edba5SSteven Rostedt 	unsigned long flags;
1646642edba5SSteven Rostedt 
1647642edba5SSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1648642edba5SSteven Rostedt 	rb_iter_reset(iter);
1649f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
16507a8e76a3SSteven Rostedt }
1651c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
16527a8e76a3SSteven Rostedt 
16537a8e76a3SSteven Rostedt /**
16547a8e76a3SSteven Rostedt  * ring_buffer_iter_empty - check if an iterator has no more to read
16557a8e76a3SSteven Rostedt  * @iter: The iterator to check
16567a8e76a3SSteven Rostedt  */
16577a8e76a3SSteven Rostedt int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
16587a8e76a3SSteven Rostedt {
16597a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
16607a8e76a3SSteven Rostedt 
16617a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
16627a8e76a3SSteven Rostedt 
1663bf41a158SSteven Rostedt 	return iter->head_page == cpu_buffer->commit_page &&
1664bf41a158SSteven Rostedt 		iter->head == rb_commit_index(cpu_buffer);
16657a8e76a3SSteven Rostedt }
1666c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
16677a8e76a3SSteven Rostedt 
16687a8e76a3SSteven Rostedt static void
16697a8e76a3SSteven Rostedt rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
16707a8e76a3SSteven Rostedt 		     struct ring_buffer_event *event)
16717a8e76a3SSteven Rostedt {
16727a8e76a3SSteven Rostedt 	u64 delta;
16737a8e76a3SSteven Rostedt 
16747a8e76a3SSteven Rostedt 	switch (event->type) {
16757a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
16767a8e76a3SSteven Rostedt 		return;
16777a8e76a3SSteven Rostedt 
16787a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
16797a8e76a3SSteven Rostedt 		delta = event->array[0];
16807a8e76a3SSteven Rostedt 		delta <<= TS_SHIFT;
16817a8e76a3SSteven Rostedt 		delta += event->time_delta;
16827a8e76a3SSteven Rostedt 		cpu_buffer->read_stamp += delta;
16837a8e76a3SSteven Rostedt 		return;
16847a8e76a3SSteven Rostedt 
16857a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
16867a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
16877a8e76a3SSteven Rostedt 		return;
16887a8e76a3SSteven Rostedt 
16897a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
16907a8e76a3SSteven Rostedt 		cpu_buffer->read_stamp += event->time_delta;
16917a8e76a3SSteven Rostedt 		return;
16927a8e76a3SSteven Rostedt 
16937a8e76a3SSteven Rostedt 	default:
16947a8e76a3SSteven Rostedt 		BUG();
16957a8e76a3SSteven Rostedt 	}
16967a8e76a3SSteven Rostedt 	return;
16977a8e76a3SSteven Rostedt }
16987a8e76a3SSteven Rostedt 
16997a8e76a3SSteven Rostedt static void
17007a8e76a3SSteven Rostedt rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
17017a8e76a3SSteven Rostedt 			  struct ring_buffer_event *event)
17027a8e76a3SSteven Rostedt {
17037a8e76a3SSteven Rostedt 	u64 delta;
17047a8e76a3SSteven Rostedt 
17057a8e76a3SSteven Rostedt 	switch (event->type) {
17067a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
17077a8e76a3SSteven Rostedt 		return;
17087a8e76a3SSteven Rostedt 
17097a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
17107a8e76a3SSteven Rostedt 		delta = event->array[0];
17117a8e76a3SSteven Rostedt 		delta <<= TS_SHIFT;
17127a8e76a3SSteven Rostedt 		delta += event->time_delta;
17137a8e76a3SSteven Rostedt 		iter->read_stamp += delta;
17147a8e76a3SSteven Rostedt 		return;
17157a8e76a3SSteven Rostedt 
17167a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
17177a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
17187a8e76a3SSteven Rostedt 		return;
17197a8e76a3SSteven Rostedt 
17207a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
17217a8e76a3SSteven Rostedt 		iter->read_stamp += event->time_delta;
17227a8e76a3SSteven Rostedt 		return;
17237a8e76a3SSteven Rostedt 
17247a8e76a3SSteven Rostedt 	default:
17257a8e76a3SSteven Rostedt 		BUG();
17267a8e76a3SSteven Rostedt 	}
17277a8e76a3SSteven Rostedt 	return;
17287a8e76a3SSteven Rostedt }
17297a8e76a3SSteven Rostedt 
1730d769041fSSteven Rostedt static struct buffer_page *
1731d769041fSSteven Rostedt rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
17327a8e76a3SSteven Rostedt {
1733d769041fSSteven Rostedt 	struct buffer_page *reader = NULL;
1734d769041fSSteven Rostedt 	unsigned long flags;
1735818e3dd3SSteven Rostedt 	int nr_loops = 0;
1736d769041fSSteven Rostedt 
17373e03fb7fSSteven Rostedt 	local_irq_save(flags);
17383e03fb7fSSteven Rostedt 	__raw_spin_lock(&cpu_buffer->lock);
1739d769041fSSteven Rostedt 
1740d769041fSSteven Rostedt  again:
1741818e3dd3SSteven Rostedt 	/*
1742818e3dd3SSteven Rostedt 	 * This should normally only loop twice. But because the
1743818e3dd3SSteven Rostedt 	 * start of the reader inserts an empty page, it causes
1744818e3dd3SSteven Rostedt 	 * a case where we will loop three times. There should be no
1745818e3dd3SSteven Rostedt 	 * reason to loop four times (that I know of).
1746818e3dd3SSteven Rostedt 	 */
17473e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
1748818e3dd3SSteven Rostedt 		reader = NULL;
1749818e3dd3SSteven Rostedt 		goto out;
1750818e3dd3SSteven Rostedt 	}
1751818e3dd3SSteven Rostedt 
1752d769041fSSteven Rostedt 	reader = cpu_buffer->reader_page;
1753d769041fSSteven Rostedt 
1754d769041fSSteven Rostedt 	/* If there's more to read, return this page */
1755bf41a158SSteven Rostedt 	if (cpu_buffer->reader_page->read < rb_page_size(reader))
1756d769041fSSteven Rostedt 		goto out;
1757d769041fSSteven Rostedt 
1758d769041fSSteven Rostedt 	/* Never should we have an index greater than the size */
17593e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer,
17603e89c7bbSSteven Rostedt 		       cpu_buffer->reader_page->read > rb_page_size(reader)))
17613e89c7bbSSteven Rostedt 		goto out;
1762d769041fSSteven Rostedt 
1763d769041fSSteven Rostedt 	/* check if we caught up to the tail */
1764d769041fSSteven Rostedt 	reader = NULL;
1765bf41a158SSteven Rostedt 	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1766d769041fSSteven Rostedt 		goto out;
17677a8e76a3SSteven Rostedt 
17687a8e76a3SSteven Rostedt 	/*
1769d769041fSSteven Rostedt 	 * Splice the empty reader page into the list around the head.
1770d769041fSSteven Rostedt 	 * Reset the reader page to size zero.
17717a8e76a3SSteven Rostedt 	 */
1772d769041fSSteven Rostedt 
1773d769041fSSteven Rostedt 	reader = cpu_buffer->head_page;
1774d769041fSSteven Rostedt 	cpu_buffer->reader_page->list.next = reader->list.next;
1775d769041fSSteven Rostedt 	cpu_buffer->reader_page->list.prev = reader->list.prev;
1776bf41a158SSteven Rostedt 
1777bf41a158SSteven Rostedt 	local_set(&cpu_buffer->reader_page->write, 0);
1778abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->reader_page->page->commit, 0);
1779d769041fSSteven Rostedt 
1780d769041fSSteven Rostedt 	/* Make the reader page now replace the head */
1781d769041fSSteven Rostedt 	reader->list.prev->next = &cpu_buffer->reader_page->list;
1782d769041fSSteven Rostedt 	reader->list.next->prev = &cpu_buffer->reader_page->list;
1783d769041fSSteven Rostedt 
1784d769041fSSteven Rostedt 	/*
1785d769041fSSteven Rostedt 	 * If the tail is on the reader, then we must set the head
1786d769041fSSteven Rostedt 	 * to the inserted page, otherwise we set it one before.
1787d769041fSSteven Rostedt 	 */
1788d769041fSSteven Rostedt 	cpu_buffer->head_page = cpu_buffer->reader_page;
1789d769041fSSteven Rostedt 
1790bf41a158SSteven Rostedt 	if (cpu_buffer->commit_page != reader)
17917a8e76a3SSteven Rostedt 		rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1792d769041fSSteven Rostedt 
1793d769041fSSteven Rostedt 	/* Finally update the reader page to the new head */
1794d769041fSSteven Rostedt 	cpu_buffer->reader_page = reader;
1795d769041fSSteven Rostedt 	rb_reset_reader_page(cpu_buffer);
1796d769041fSSteven Rostedt 
1797d769041fSSteven Rostedt 	goto again;
1798d769041fSSteven Rostedt 
1799d769041fSSteven Rostedt  out:
18003e03fb7fSSteven Rostedt 	__raw_spin_unlock(&cpu_buffer->lock);
18013e03fb7fSSteven Rostedt 	local_irq_restore(flags);
1802d769041fSSteven Rostedt 
1803d769041fSSteven Rostedt 	return reader;
18047a8e76a3SSteven Rostedt }
18057a8e76a3SSteven Rostedt 
1806d769041fSSteven Rostedt static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1807d769041fSSteven Rostedt {
1808d769041fSSteven Rostedt 	struct ring_buffer_event *event;
1809d769041fSSteven Rostedt 	struct buffer_page *reader;
1810d769041fSSteven Rostedt 	unsigned length;
1811d769041fSSteven Rostedt 
1812d769041fSSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
1813d769041fSSteven Rostedt 
1814d769041fSSteven Rostedt 	/* This function should not be called when buffer is empty */
18153e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, !reader))
18163e89c7bbSSteven Rostedt 		return;
1817d769041fSSteven Rostedt 
1818d769041fSSteven Rostedt 	event = rb_reader_event(cpu_buffer);
18197a8e76a3SSteven Rostedt 
18207a8e76a3SSteven Rostedt 	if (event->type == RINGBUF_TYPE_DATA)
18217a8e76a3SSteven Rostedt 		cpu_buffer->entries--;
18227a8e76a3SSteven Rostedt 
18237a8e76a3SSteven Rostedt 	rb_update_read_stamp(cpu_buffer, event);
18247a8e76a3SSteven Rostedt 
1825d769041fSSteven Rostedt 	length = rb_event_length(event);
18266f807acdSSteven Rostedt 	cpu_buffer->reader_page->read += length;
18277a8e76a3SSteven Rostedt }
18287a8e76a3SSteven Rostedt 
18297a8e76a3SSteven Rostedt static void rb_advance_iter(struct ring_buffer_iter *iter)
18307a8e76a3SSteven Rostedt {
18317a8e76a3SSteven Rostedt 	struct ring_buffer *buffer;
18327a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
18337a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
18347a8e76a3SSteven Rostedt 	unsigned length;
18357a8e76a3SSteven Rostedt 
18367a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
18377a8e76a3SSteven Rostedt 	buffer = cpu_buffer->buffer;
18387a8e76a3SSteven Rostedt 
18397a8e76a3SSteven Rostedt 	/*
18407a8e76a3SSteven Rostedt 	 * Check if we are at the end of the buffer.
18417a8e76a3SSteven Rostedt 	 */
1842bf41a158SSteven Rostedt 	if (iter->head >= rb_page_size(iter->head_page)) {
18433e89c7bbSSteven Rostedt 		if (RB_WARN_ON(buffer,
18443e89c7bbSSteven Rostedt 			       iter->head_page == cpu_buffer->commit_page))
18453e89c7bbSSteven Rostedt 			return;
1846d769041fSSteven Rostedt 		rb_inc_iter(iter);
18477a8e76a3SSteven Rostedt 		return;
18487a8e76a3SSteven Rostedt 	}
18497a8e76a3SSteven Rostedt 
18507a8e76a3SSteven Rostedt 	event = rb_iter_head_event(iter);
18517a8e76a3SSteven Rostedt 
18527a8e76a3SSteven Rostedt 	length = rb_event_length(event);
18537a8e76a3SSteven Rostedt 
18547a8e76a3SSteven Rostedt 	/*
18557a8e76a3SSteven Rostedt 	 * This should not be called to advance the header if we are
18567a8e76a3SSteven Rostedt 	 * at the tail of the buffer.
18577a8e76a3SSteven Rostedt 	 */
18583e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer,
1859f536aafcSSteven Rostedt 		       (iter->head_page == cpu_buffer->commit_page) &&
18603e89c7bbSSteven Rostedt 		       (iter->head + length > rb_commit_index(cpu_buffer))))
18613e89c7bbSSteven Rostedt 		return;
18627a8e76a3SSteven Rostedt 
18637a8e76a3SSteven Rostedt 	rb_update_iter_read_stamp(iter, event);
18647a8e76a3SSteven Rostedt 
18657a8e76a3SSteven Rostedt 	iter->head += length;
18667a8e76a3SSteven Rostedt 
18677a8e76a3SSteven Rostedt 	/* check for end of page padding */
1868bf41a158SSteven Rostedt 	if ((iter->head >= rb_page_size(iter->head_page)) &&
1869bf41a158SSteven Rostedt 	    (iter->head_page != cpu_buffer->commit_page))
18707a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
18717a8e76a3SSteven Rostedt }
18727a8e76a3SSteven Rostedt 
1873f83c9d0fSSteven Rostedt static struct ring_buffer_event *
1874f83c9d0fSSteven Rostedt rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
18757a8e76a3SSteven Rostedt {
18767a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
18777a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
1878d769041fSSteven Rostedt 	struct buffer_page *reader;
1879818e3dd3SSteven Rostedt 	int nr_loops = 0;
18807a8e76a3SSteven Rostedt 
18819e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
18827a8e76a3SSteven Rostedt 		return NULL;
18837a8e76a3SSteven Rostedt 
18847a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
18857a8e76a3SSteven Rostedt 
18867a8e76a3SSteven Rostedt  again:
1887818e3dd3SSteven Rostedt 	/*
1888818e3dd3SSteven Rostedt 	 * We repeat when a timestamp is encountered. It is possible
1889818e3dd3SSteven Rostedt 	 * to get multiple timestamps from an interrupt entering just
1890818e3dd3SSteven Rostedt 	 * as one timestamp is about to be written. The maximum number of
1891818e3dd3SSteven Rostedt 	 * times this can happen is the number of nested interrupts we
1892818e3dd3SSteven Rostedt 	 * can have.  Nesting interrupts 10 deep is clearly
1893818e3dd3SSteven Rostedt 	 * an anomaly.
1894818e3dd3SSteven Rostedt 	 */
18953e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1896818e3dd3SSteven Rostedt 		return NULL;
1897818e3dd3SSteven Rostedt 
1898d769041fSSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
1899d769041fSSteven Rostedt 	if (!reader)
19007a8e76a3SSteven Rostedt 		return NULL;
19017a8e76a3SSteven Rostedt 
1902d769041fSSteven Rostedt 	event = rb_reader_event(cpu_buffer);
19037a8e76a3SSteven Rostedt 
19047a8e76a3SSteven Rostedt 	switch (event->type) {
19057a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
1906bf41a158SSteven Rostedt 		RB_WARN_ON(cpu_buffer, 1);
1907d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
1908d769041fSSteven Rostedt 		return NULL;
19097a8e76a3SSteven Rostedt 
19107a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
19117a8e76a3SSteven Rostedt 		/* Internal data, OK to advance */
1912d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
19137a8e76a3SSteven Rostedt 		goto again;
19147a8e76a3SSteven Rostedt 
19157a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
19167a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
1917d769041fSSteven Rostedt 		rb_advance_reader(cpu_buffer);
19187a8e76a3SSteven Rostedt 		goto again;
19197a8e76a3SSteven Rostedt 
19207a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
19217a8e76a3SSteven Rostedt 		if (ts) {
19227a8e76a3SSteven Rostedt 			*ts = cpu_buffer->read_stamp + event->time_delta;
19237a8e76a3SSteven Rostedt 			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
19247a8e76a3SSteven Rostedt 		}
19257a8e76a3SSteven Rostedt 		return event;
19267a8e76a3SSteven Rostedt 
19277a8e76a3SSteven Rostedt 	default:
19287a8e76a3SSteven Rostedt 		BUG();
19297a8e76a3SSteven Rostedt 	}
19307a8e76a3SSteven Rostedt 
19317a8e76a3SSteven Rostedt 	return NULL;
19327a8e76a3SSteven Rostedt }
1933c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_peek);
19347a8e76a3SSteven Rostedt 
1935f83c9d0fSSteven Rostedt static struct ring_buffer_event *
1936f83c9d0fSSteven Rostedt rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
19377a8e76a3SSteven Rostedt {
19387a8e76a3SSteven Rostedt 	struct ring_buffer *buffer;
19397a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
19407a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
1941818e3dd3SSteven Rostedt 	int nr_loops = 0;
19427a8e76a3SSteven Rostedt 
19437a8e76a3SSteven Rostedt 	if (ring_buffer_iter_empty(iter))
19447a8e76a3SSteven Rostedt 		return NULL;
19457a8e76a3SSteven Rostedt 
19467a8e76a3SSteven Rostedt 	cpu_buffer = iter->cpu_buffer;
19477a8e76a3SSteven Rostedt 	buffer = cpu_buffer->buffer;
19487a8e76a3SSteven Rostedt 
19497a8e76a3SSteven Rostedt  again:
1950818e3dd3SSteven Rostedt 	/*
1951818e3dd3SSteven Rostedt 	 * We repeat when a timestamp is encountered. It is possible
1952818e3dd3SSteven Rostedt 	 * to get multiple timestamps from an interrupt entering just
1953818e3dd3SSteven Rostedt 	 * as one timestamp is about to be written. The maximum number of
1954818e3dd3SSteven Rostedt 	 * times this can happen is the number of nested interrupts we
1955818e3dd3SSteven Rostedt 	 * can have. Nesting interrupts 10 deep is clearly
1956818e3dd3SSteven Rostedt 	 * an anomaly.
1957818e3dd3SSteven Rostedt 	 */
19583e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1959818e3dd3SSteven Rostedt 		return NULL;
1960818e3dd3SSteven Rostedt 
19617a8e76a3SSteven Rostedt 	if (rb_per_cpu_empty(cpu_buffer))
19627a8e76a3SSteven Rostedt 		return NULL;
19637a8e76a3SSteven Rostedt 
19647a8e76a3SSteven Rostedt 	event = rb_iter_head_event(iter);
19657a8e76a3SSteven Rostedt 
19667a8e76a3SSteven Rostedt 	switch (event->type) {
19677a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_PADDING:
1968d769041fSSteven Rostedt 		rb_inc_iter(iter);
19697a8e76a3SSteven Rostedt 		goto again;
19707a8e76a3SSteven Rostedt 
19717a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_EXTEND:
19727a8e76a3SSteven Rostedt 		/* Internal data, OK to advance */
19737a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
19747a8e76a3SSteven Rostedt 		goto again;
19757a8e76a3SSteven Rostedt 
19767a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_TIME_STAMP:
19777a8e76a3SSteven Rostedt 		/* FIXME: not implemented */
19787a8e76a3SSteven Rostedt 		rb_advance_iter(iter);
19797a8e76a3SSteven Rostedt 		goto again;
19807a8e76a3SSteven Rostedt 
19817a8e76a3SSteven Rostedt 	case RINGBUF_TYPE_DATA:
19827a8e76a3SSteven Rostedt 		if (ts) {
19837a8e76a3SSteven Rostedt 			*ts = iter->read_stamp + event->time_delta;
19847a8e76a3SSteven Rostedt 			ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
19857a8e76a3SSteven Rostedt 		}
19867a8e76a3SSteven Rostedt 		return event;
19877a8e76a3SSteven Rostedt 
19887a8e76a3SSteven Rostedt 	default:
19897a8e76a3SSteven Rostedt 		BUG();
19907a8e76a3SSteven Rostedt 	}
19917a8e76a3SSteven Rostedt 
19927a8e76a3SSteven Rostedt 	return NULL;
19937a8e76a3SSteven Rostedt }
1994c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
19957a8e76a3SSteven Rostedt 
19967a8e76a3SSteven Rostedt /**
1997f83c9d0fSSteven Rostedt  * ring_buffer_peek - peek at the next event to be read
1998f83c9d0fSSteven Rostedt  * @buffer: The ring buffer to read
1999f83c9d0fSSteven Rostedt  * @cpu: The cpu to peek at
2000f83c9d0fSSteven Rostedt  * @ts: The timestamp counter of this event.
2001f83c9d0fSSteven Rostedt  *
2002f83c9d0fSSteven Rostedt  * This will return the event that will be read next, but does
2003f83c9d0fSSteven Rostedt  * not consume the data.
2004f83c9d0fSSteven Rostedt  */
2005f83c9d0fSSteven Rostedt struct ring_buffer_event *
2006f83c9d0fSSteven Rostedt ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2007f83c9d0fSSteven Rostedt {
2008f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2009f83c9d0fSSteven Rostedt 	struct ring_buffer_event *event;
2010f83c9d0fSSteven Rostedt 	unsigned long flags;
2011f83c9d0fSSteven Rostedt 
2012f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2013f83c9d0fSSteven Rostedt 	event = rb_buffer_peek(buffer, cpu, ts);
2014f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2015f83c9d0fSSteven Rostedt 
2016f83c9d0fSSteven Rostedt 	return event;
2017f83c9d0fSSteven Rostedt }
2018f83c9d0fSSteven Rostedt 
2019f83c9d0fSSteven Rostedt /**
2020f83c9d0fSSteven Rostedt  * ring_buffer_iter_peek - peek at the next event to be read
2021f83c9d0fSSteven Rostedt  * @iter: The ring buffer iterator
2022f83c9d0fSSteven Rostedt  * @ts: The timestamp counter of this event.
2023f83c9d0fSSteven Rostedt  *
2024f83c9d0fSSteven Rostedt  * This will return the event that will be read next, but does
2025f83c9d0fSSteven Rostedt  * not increment the iterator.
2026f83c9d0fSSteven Rostedt  */
2027f83c9d0fSSteven Rostedt struct ring_buffer_event *
2028f83c9d0fSSteven Rostedt ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2029f83c9d0fSSteven Rostedt {
2030f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2031f83c9d0fSSteven Rostedt 	struct ring_buffer_event *event;
2032f83c9d0fSSteven Rostedt 	unsigned long flags;
2033f83c9d0fSSteven Rostedt 
2034f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2035f83c9d0fSSteven Rostedt 	event = rb_iter_peek(iter, ts);
2036f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2037f83c9d0fSSteven Rostedt 
2038f83c9d0fSSteven Rostedt 	return event;
2039f83c9d0fSSteven Rostedt }
2040f83c9d0fSSteven Rostedt 
2041f83c9d0fSSteven Rostedt /**
20427a8e76a3SSteven Rostedt  * ring_buffer_consume - return an event and consume it
20437a8e76a3SSteven Rostedt  * @buffer: The ring buffer to get the next event from
20447a8e76a3SSteven Rostedt  *
20457a8e76a3SSteven Rostedt  * Returns the next event in the ring buffer, and that event is consumed.
20467a8e76a3SSteven Rostedt  * Meaning that sequential reads will keep returning different events,
20477a8e76a3SSteven Rostedt  * and eventually empty the ring buffer if the producer is slower.
20487a8e76a3SSteven Rostedt  */
20497a8e76a3SSteven Rostedt struct ring_buffer_event *
20507a8e76a3SSteven Rostedt ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
20517a8e76a3SSteven Rostedt {
2052f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
20537a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
2054f83c9d0fSSteven Rostedt 	unsigned long flags;
20557a8e76a3SSteven Rostedt 
20569e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
20577a8e76a3SSteven Rostedt 		return NULL;
20587a8e76a3SSteven Rostedt 
2059f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
20607a8e76a3SSteven Rostedt 
2061f83c9d0fSSteven Rostedt 	event = rb_buffer_peek(buffer, cpu, ts);
2062f83c9d0fSSteven Rostedt 	if (!event)
2063f83c9d0fSSteven Rostedt 		goto out;
2064f83c9d0fSSteven Rostedt 
2065d769041fSSteven Rostedt 	rb_advance_reader(cpu_buffer);
20667a8e76a3SSteven Rostedt 
2067f83c9d0fSSteven Rostedt  out:
2068f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2069f83c9d0fSSteven Rostedt 
20707a8e76a3SSteven Rostedt 	return event;
20717a8e76a3SSteven Rostedt }
2072c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_consume);
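
/*
 * Editorial sketch, not part of the original file: draining one CPU
 * buffer with consuming reads. ring_buffer_event_data() and
 * ring_buffer_event_length() are the accessors from this API; the
 * handler callback is hypothetical.
 */
static void example_drain_cpu(struct ring_buffer *buffer, int cpu,
			      void (*handle)(void *data, unsigned len, u64 ts))
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
		handle(ring_buffer_event_data(event),
		       ring_buffer_event_length(event), ts);
}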
20737a8e76a3SSteven Rostedt 
20747a8e76a3SSteven Rostedt /**
20757a8e76a3SSteven Rostedt  * ring_buffer_read_start - start a non consuming read of the buffer
20767a8e76a3SSteven Rostedt  * @buffer: The ring buffer to read from
20777a8e76a3SSteven Rostedt  * @cpu: The cpu buffer to iterate over
20787a8e76a3SSteven Rostedt  *
20797a8e76a3SSteven Rostedt  * This starts up an iteration through the buffer. It also disables
20807a8e76a3SSteven Rostedt  * the recording to the buffer until the reading is finished.
20817a8e76a3SSteven Rostedt  * This prevents the reading from being corrupted. This is not
20827a8e76a3SSteven Rostedt  * a consuming read, so a producer is not expected.
20837a8e76a3SSteven Rostedt  *
20847a8e76a3SSteven Rostedt  * Must be paired with ring_buffer_read_finish.
20857a8e76a3SSteven Rostedt  */
20867a8e76a3SSteven Rostedt struct ring_buffer_iter *
20877a8e76a3SSteven Rostedt ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
20887a8e76a3SSteven Rostedt {
20897a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
20907a8e76a3SSteven Rostedt 	struct ring_buffer_iter *iter;
2091d769041fSSteven Rostedt 	unsigned long flags;
20927a8e76a3SSteven Rostedt 
20939e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
20947a8e76a3SSteven Rostedt 		return NULL;
20957a8e76a3SSteven Rostedt 
20967a8e76a3SSteven Rostedt 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
20977a8e76a3SSteven Rostedt 	if (!iter)
20987a8e76a3SSteven Rostedt 		return NULL;
20997a8e76a3SSteven Rostedt 
21007a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
21017a8e76a3SSteven Rostedt 
21027a8e76a3SSteven Rostedt 	iter->cpu_buffer = cpu_buffer;
21037a8e76a3SSteven Rostedt 
21047a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
21057a8e76a3SSteven Rostedt 	synchronize_sched();
21067a8e76a3SSteven Rostedt 
2107f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
21083e03fb7fSSteven Rostedt 	__raw_spin_lock(&cpu_buffer->lock);
2109642edba5SSteven Rostedt 	rb_iter_reset(iter);
21103e03fb7fSSteven Rostedt 	__raw_spin_unlock(&cpu_buffer->lock);
2111f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
21127a8e76a3SSteven Rostedt 
21137a8e76a3SSteven Rostedt 	return iter;
21147a8e76a3SSteven Rostedt }
2115c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_start);
21167a8e76a3SSteven Rostedt 
21177a8e76a3SSteven Rostedt /**
21187a8e76a3SSteven Rostedt  * ring_buffer_read_finish - finish reading the iterator of the buffer
21197a8e76a3SSteven Rostedt  * @iter: The iterator retrieved by ring_buffer_read_start
21207a8e76a3SSteven Rostedt  *
21217a8e76a3SSteven Rostedt  * This re-enables the recording to the buffer, and frees the
21227a8e76a3SSteven Rostedt  * iterator.
21237a8e76a3SSteven Rostedt  */
21247a8e76a3SSteven Rostedt void
21257a8e76a3SSteven Rostedt ring_buffer_read_finish(struct ring_buffer_iter *iter)
21267a8e76a3SSteven Rostedt {
21277a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
21287a8e76a3SSteven Rostedt 
21297a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
21307a8e76a3SSteven Rostedt 	kfree(iter);
21317a8e76a3SSteven Rostedt }
2132c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
21337a8e76a3SSteven Rostedt 
21347a8e76a3SSteven Rostedt /**
21357a8e76a3SSteven Rostedt  * ring_buffer_read - read the next item in the ring buffer by the iterator
21367a8e76a3SSteven Rostedt  * @iter: The ring buffer iterator
21377a8e76a3SSteven Rostedt  * @ts: The time stamp of the event read.
21387a8e76a3SSteven Rostedt  *
21397a8e76a3SSteven Rostedt  * This reads the next event in the ring buffer and increments the iterator.
21407a8e76a3SSteven Rostedt  */
21417a8e76a3SSteven Rostedt struct ring_buffer_event *
21427a8e76a3SSteven Rostedt ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
21437a8e76a3SSteven Rostedt {
21447a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
2145f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2146f83c9d0fSSteven Rostedt 	unsigned long flags;
21477a8e76a3SSteven Rostedt 
2148f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2149f83c9d0fSSteven Rostedt 	event = rb_iter_peek(iter, ts);
21507a8e76a3SSteven Rostedt 	if (!event)
2151f83c9d0fSSteven Rostedt 		goto out;
21527a8e76a3SSteven Rostedt 
21537a8e76a3SSteven Rostedt 	rb_advance_iter(iter);
2154f83c9d0fSSteven Rostedt  out:
2155f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
21567a8e76a3SSteven Rostedt 
21577a8e76a3SSteven Rostedt 	return event;
21587a8e76a3SSteven Rostedt }
2159c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read);
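
/*
 * Editorial sketch, not part of the original file: a non-consuming pass
 * over one CPU buffer with the iterator API. Recording on that CPU stays
 * disabled from ring_buffer_read_start() until ring_buffer_read_finish().
 */
static void example_iterate_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts)))
		;	/* inspect the event here; it is not consumed */

	ring_buffer_read_finish(iter);
}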
21607a8e76a3SSteven Rostedt 
21617a8e76a3SSteven Rostedt /**
21627a8e76a3SSteven Rostedt  * ring_buffer_size - return the size of the ring buffer (in bytes)
21637a8e76a3SSteven Rostedt  * @buffer: The ring buffer.
21647a8e76a3SSteven Rostedt  */
21657a8e76a3SSteven Rostedt unsigned long ring_buffer_size(struct ring_buffer *buffer)
21667a8e76a3SSteven Rostedt {
21677a8e76a3SSteven Rostedt 	return BUF_PAGE_SIZE * buffer->pages;
21687a8e76a3SSteven Rostedt }
2169c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_size);
21707a8e76a3SSteven Rostedt 
21717a8e76a3SSteven Rostedt static void
21727a8e76a3SSteven Rostedt rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
21737a8e76a3SSteven Rostedt {
21747a8e76a3SSteven Rostedt 	cpu_buffer->head_page
21757a8e76a3SSteven Rostedt 		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
2176bf41a158SSteven Rostedt 	local_set(&cpu_buffer->head_page->write, 0);
2177abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->head_page->page->commit, 0);
21787a8e76a3SSteven Rostedt 
21796f807acdSSteven Rostedt 	cpu_buffer->head_page->read = 0;
2180bf41a158SSteven Rostedt 
2181bf41a158SSteven Rostedt 	cpu_buffer->tail_page = cpu_buffer->head_page;
2182bf41a158SSteven Rostedt 	cpu_buffer->commit_page = cpu_buffer->head_page;
2183bf41a158SSteven Rostedt 
2184bf41a158SSteven Rostedt 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2185bf41a158SSteven Rostedt 	local_set(&cpu_buffer->reader_page->write, 0);
2186abc9b56dSSteven Rostedt 	local_set(&cpu_buffer->reader_page->page->commit, 0);
21876f807acdSSteven Rostedt 	cpu_buffer->reader_page->read = 0;
2188d769041fSSteven Rostedt 
21897a8e76a3SSteven Rostedt 	cpu_buffer->overrun = 0;
21907a8e76a3SSteven Rostedt 	cpu_buffer->entries = 0;
219169507c06SSteven Rostedt 
219269507c06SSteven Rostedt 	cpu_buffer->write_stamp = 0;
219369507c06SSteven Rostedt 	cpu_buffer->read_stamp = 0;
21947a8e76a3SSteven Rostedt }
21957a8e76a3SSteven Rostedt 
21967a8e76a3SSteven Rostedt /**
21977a8e76a3SSteven Rostedt  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
21987a8e76a3SSteven Rostedt  * @buffer: The ring buffer to reset a per cpu buffer of
21997a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to be reset
22007a8e76a3SSteven Rostedt  */
22017a8e76a3SSteven Rostedt void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
22027a8e76a3SSteven Rostedt {
22037a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
22047a8e76a3SSteven Rostedt 	unsigned long flags;
22057a8e76a3SSteven Rostedt 
22069e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
22077a8e76a3SSteven Rostedt 		return;
22087a8e76a3SSteven Rostedt 
2209f83c9d0fSSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2210f83c9d0fSSteven Rostedt 
22113e03fb7fSSteven Rostedt 	__raw_spin_lock(&cpu_buffer->lock);
22127a8e76a3SSteven Rostedt 
22137a8e76a3SSteven Rostedt 	rb_reset_cpu(cpu_buffer);
22147a8e76a3SSteven Rostedt 
22153e03fb7fSSteven Rostedt 	__raw_spin_unlock(&cpu_buffer->lock);
2216f83c9d0fSSteven Rostedt 
2217f83c9d0fSSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
22187a8e76a3SSteven Rostedt }
2219c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
22207a8e76a3SSteven Rostedt 
22217a8e76a3SSteven Rostedt /**
22227a8e76a3SSteven Rostedt  * ring_buffer_reset - reset a ring buffer
22237a8e76a3SSteven Rostedt  * @buffer: The ring buffer to reset all cpu buffers of
22247a8e76a3SSteven Rostedt  */
22257a8e76a3SSteven Rostedt void ring_buffer_reset(struct ring_buffer *buffer)
22267a8e76a3SSteven Rostedt {
22277a8e76a3SSteven Rostedt 	int cpu;
22287a8e76a3SSteven Rostedt 
22297a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu)
2230d769041fSSteven Rostedt 		ring_buffer_reset_cpu(buffer, cpu);
22317a8e76a3SSteven Rostedt }
2232c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset);
22337a8e76a3SSteven Rostedt 
22347a8e76a3SSteven Rostedt /**
22357a8e76a3SSteven Rostedt  * ring_buffer_empty - is the ring buffer empty?
22367a8e76a3SSteven Rostedt  * @buffer: The ring buffer to test
22377a8e76a3SSteven Rostedt  */
22387a8e76a3SSteven Rostedt int ring_buffer_empty(struct ring_buffer *buffer)
22397a8e76a3SSteven Rostedt {
22407a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
22417a8e76a3SSteven Rostedt 	int cpu;
22427a8e76a3SSteven Rostedt 
22437a8e76a3SSteven Rostedt 	/* yes this is racy, but if you don't like the race, lock the buffer */
22447a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
22457a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
22467a8e76a3SSteven Rostedt 		if (!rb_per_cpu_empty(cpu_buffer))
22477a8e76a3SSteven Rostedt 			return 0;
22487a8e76a3SSteven Rostedt 	}
22497a8e76a3SSteven Rostedt 	return 1;
22507a8e76a3SSteven Rostedt }
2251c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty);
22527a8e76a3SSteven Rostedt 
22537a8e76a3SSteven Rostedt /**
22547a8e76a3SSteven Rostedt  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
22557a8e76a3SSteven Rostedt  * @buffer: The ring buffer
22567a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to test
22577a8e76a3SSteven Rostedt  */
22587a8e76a3SSteven Rostedt int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
22597a8e76a3SSteven Rostedt {
22607a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
22617a8e76a3SSteven Rostedt 
22629e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
22637a8e76a3SSteven Rostedt 		return 1;
22647a8e76a3SSteven Rostedt 
22657a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
22667a8e76a3SSteven Rostedt 	return rb_per_cpu_empty(cpu_buffer);
22677a8e76a3SSteven Rostedt }
2268c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
22697a8e76a3SSteven Rostedt 
22707a8e76a3SSteven Rostedt /**
22717a8e76a3SSteven Rostedt  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
22727a8e76a3SSteven Rostedt  * @buffer_a: One buffer to swap with
22737a8e76a3SSteven Rostedt  * @buffer_b: The other buffer to swap with
22747a8e76a3SSteven Rostedt  *
22757a8e76a3SSteven Rostedt  * This function is useful for tracers that want to take a "snapshot"
22767a8e76a3SSteven Rostedt  * of a CPU buffer and have another backup buffer lying around.
22777a8e76a3SSteven Rostedt  * It is expected that the tracer handles the cpu buffer not being
22787a8e76a3SSteven Rostedt  * used at the moment.
22797a8e76a3SSteven Rostedt  */
22807a8e76a3SSteven Rostedt int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
22817a8e76a3SSteven Rostedt 			 struct ring_buffer *buffer_b, int cpu)
22827a8e76a3SSteven Rostedt {
22837a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer_a;
22847a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer_b;
22857a8e76a3SSteven Rostedt 
22869e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
22879e01c1b7SRusty Russell 	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
22887a8e76a3SSteven Rostedt 		return -EINVAL;
22897a8e76a3SSteven Rostedt 
22907a8e76a3SSteven Rostedt 	/* At least make sure the two buffers are somewhat the same */
22916d102bc6SLai Jiangshan 	if (buffer_a->pages != buffer_b->pages)
22927a8e76a3SSteven Rostedt 		return -EINVAL;
22937a8e76a3SSteven Rostedt 
229497b17efeSSteven Rostedt 	if (ring_buffer_flags != RB_BUFFERS_ON)
229597b17efeSSteven Rostedt 		return -EAGAIN;
229697b17efeSSteven Rostedt 
229797b17efeSSteven Rostedt 	if (atomic_read(&buffer_a->record_disabled))
229897b17efeSSteven Rostedt 		return -EAGAIN;
229997b17efeSSteven Rostedt 
230097b17efeSSteven Rostedt 	if (atomic_read(&buffer_b->record_disabled))
230197b17efeSSteven Rostedt 		return -EAGAIN;
230297b17efeSSteven Rostedt 
23037a8e76a3SSteven Rostedt 	cpu_buffer_a = buffer_a->buffers[cpu];
23047a8e76a3SSteven Rostedt 	cpu_buffer_b = buffer_b->buffers[cpu];
23057a8e76a3SSteven Rostedt 
230697b17efeSSteven Rostedt 	if (atomic_read(&cpu_buffer_a->record_disabled))
230797b17efeSSteven Rostedt 		return -EAGAIN;
230897b17efeSSteven Rostedt 
230997b17efeSSteven Rostedt 	if (atomic_read(&cpu_buffer_b->record_disabled))
231097b17efeSSteven Rostedt 		return -EAGAIN;
231197b17efeSSteven Rostedt 
23127a8e76a3SSteven Rostedt 	/*
23137a8e76a3SSteven Rostedt 	 * We can't do a synchronize_sched here because this
23147a8e76a3SSteven Rostedt 	 * function can be called in atomic context.
23157a8e76a3SSteven Rostedt 	 * Normally this will be called from the same CPU as cpu.
23167a8e76a3SSteven Rostedt 	 * If not it's up to the caller to protect this.
23177a8e76a3SSteven Rostedt 	 */
23187a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer_a->record_disabled);
23197a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer_b->record_disabled);
23207a8e76a3SSteven Rostedt 
23217a8e76a3SSteven Rostedt 	buffer_a->buffers[cpu] = cpu_buffer_b;
23227a8e76a3SSteven Rostedt 	buffer_b->buffers[cpu] = cpu_buffer_a;
23237a8e76a3SSteven Rostedt 
23247a8e76a3SSteven Rostedt 	cpu_buffer_b->buffer = buffer_a;
23257a8e76a3SSteven Rostedt 	cpu_buffer_a->buffer = buffer_b;
23267a8e76a3SSteven Rostedt 
23277a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer_a->record_disabled);
23287a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer_b->record_disabled);
23297a8e76a3SSteven Rostedt 
23307a8e76a3SSteven Rostedt 	return 0;
23317a8e76a3SSteven Rostedt }
2332c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
23337a8e76a3SSteven Rostedt 
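/*
 * A minimal snapshot sketch using ring_buffer_swap_cpu() (illustrative
 * only; rb_example_snapshot is not part of the ring buffer API). A tracer
 * that keeps a spare buffer with the same number of pages can capture the
 * live contents by swapping the per cpu buffers one at a time; the first
 * error (e.g. -EAGAIN while recording is disabled) is treated as fatal
 * here for simplicity.
 */
static int __maybe_unused
rb_example_snapshot(struct ring_buffer *live, struct ring_buffer *spare)
{
	int cpu;
	int ret;

	for_each_online_cpu(cpu) {
		ret = ring_buffer_swap_cpu(live, spare, cpu);
		if (ret)
			return ret;
	}

	/* the old live data can now be read from @spare at leisure */
	return 0;
}
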
23348789a9e7SSteven Rostedt static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
2335044fa782SSteven Rostedt 			      struct buffer_data_page *bpage)
23368789a9e7SSteven Rostedt {
23378789a9e7SSteven Rostedt 	struct ring_buffer_event *event;
23388789a9e7SSteven Rostedt 	unsigned long head;
23398789a9e7SSteven Rostedt 
23408789a9e7SSteven Rostedt 	__raw_spin_lock(&cpu_buffer->lock);
2341044fa782SSteven Rostedt 	for (head = 0; head < local_read(&bpage->commit);
23428789a9e7SSteven Rostedt 	     head += rb_event_length(event)) {
23438789a9e7SSteven Rostedt 
2344044fa782SSteven Rostedt 		event = __rb_data_page_index(bpage, head);
23458789a9e7SSteven Rostedt 		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
23468789a9e7SSteven Rostedt 			return;
23478789a9e7SSteven Rostedt 		/* Only count data entries */
23488789a9e7SSteven Rostedt 		if (event->type != RINGBUF_TYPE_DATA)
23498789a9e7SSteven Rostedt 			continue;
23508789a9e7SSteven Rostedt 		cpu_buffer->entries--;
23518789a9e7SSteven Rostedt 	}
23528789a9e7SSteven Rostedt 	__raw_spin_unlock(&cpu_buffer->lock);
23538789a9e7SSteven Rostedt }
23548789a9e7SSteven Rostedt 
23558789a9e7SSteven Rostedt /**
23568789a9e7SSteven Rostedt  * ring_buffer_alloc_read_page - allocate a page to read from buffer
23578789a9e7SSteven Rostedt  * @buffer: the buffer to allocate for.
23588789a9e7SSteven Rostedt  *
23598789a9e7SSteven Rostedt  * This function is used in conjunction with ring_buffer_read_page.
23608789a9e7SSteven Rostedt  * When reading a full page from the ring buffer, these functions
23618789a9e7SSteven Rostedt  * can be used to speed up the process. The calling function should
23628789a9e7SSteven Rostedt  * allocate a few pages first with this function. Then when it
23638789a9e7SSteven Rostedt  * needs to get pages from the ring buffer, it passes the result
23648789a9e7SSteven Rostedt  * of this function into ring_buffer_read_page, which will swap
23658789a9e7SSteven Rostedt  * the page that was allocated with the reader page of the buffer.
23668789a9e7SSteven Rostedt  *
23678789a9e7SSteven Rostedt  * Returns:
23688789a9e7SSteven Rostedt  *  The page allocated, or NULL on error.
23698789a9e7SSteven Rostedt  */
23708789a9e7SSteven Rostedt void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
23718789a9e7SSteven Rostedt {
23728789a9e7SSteven Rostedt 	unsigned long addr;
2373044fa782SSteven Rostedt 	struct buffer_data_page *bpage;
23748789a9e7SSteven Rostedt 
23758789a9e7SSteven Rostedt 	addr = __get_free_page(GFP_KERNEL);
23768789a9e7SSteven Rostedt 	if (!addr)
23778789a9e7SSteven Rostedt 		return NULL;
23788789a9e7SSteven Rostedt 
2379044fa782SSteven Rostedt 	bpage = (void *)addr;
23808789a9e7SSteven Rostedt 
2381044fa782SSteven Rostedt 	return bpage;
23828789a9e7SSteven Rostedt }
23838789a9e7SSteven Rostedt 
23848789a9e7SSteven Rostedt /**
23858789a9e7SSteven Rostedt  * ring_buffer_free_read_page - free an allocated read page
23868789a9e7SSteven Rostedt  * @buffer: the buffer the page was allocated for
23878789a9e7SSteven Rostedt  * @data: the page to free
23888789a9e7SSteven Rostedt  *
23898789a9e7SSteven Rostedt  * Free a page allocated from ring_buffer_alloc_read_page.
23908789a9e7SSteven Rostedt  */
23918789a9e7SSteven Rostedt void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
23928789a9e7SSteven Rostedt {
23938789a9e7SSteven Rostedt 	free_page((unsigned long)data);
23948789a9e7SSteven Rostedt }
23958789a9e7SSteven Rostedt 
23968789a9e7SSteven Rostedt /**
23978789a9e7SSteven Rostedt  * ring_buffer_read_page - extract a page from the ring buffer
23988789a9e7SSteven Rostedt  * @buffer: buffer to extract from
23998789a9e7SSteven Rostedt  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
24008789a9e7SSteven Rostedt  * @cpu: the cpu of the buffer to extract
24018789a9e7SSteven Rostedt  * @full: should the extraction only happen when the page is full.
24028789a9e7SSteven Rostedt  *
24038789a9e7SSteven Rostedt  * This function will pull out a page from the ring buffer and consume it.
24048789a9e7SSteven Rostedt  * @data_page must be the address of the variable holding the page returned
24058789a9e7SSteven Rostedt  * from ring_buffer_alloc_read_page. This is because the page might be used
24068789a9e7SSteven Rostedt  * to swap with a page in the ring buffer.
24078789a9e7SSteven Rostedt  *
24088789a9e7SSteven Rostedt  * for example:
2409*b85fa01eSLai Jiangshan  *	rpage = ring_buffer_alloc_read_page(buffer);
24108789a9e7SSteven Rostedt  *	if (!rpage)
24118789a9e7SSteven Rostedt  *		return error;
24128789a9e7SSteven Rostedt  *	ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
24138789a9e7SSteven Rostedt  *	if (ret)
24148789a9e7SSteven Rostedt  *		process_page(rpage);
24158789a9e7SSteven Rostedt  *
24168789a9e7SSteven Rostedt  * When @full is set, the function will not return true unless
24178789a9e7SSteven Rostedt  * the writer is off the reader page.
24188789a9e7SSteven Rostedt  *
24198789a9e7SSteven Rostedt  * Note: it is up to the calling functions to handle sleeps and wakeups.
24208789a9e7SSteven Rostedt  *  The ring buffer can be used anywhere in the kernel and can not
24218789a9e7SSteven Rostedt  *  blindly call wake_up. The layer that uses the ring buffer must be
24228789a9e7SSteven Rostedt  *  responsible for that.
24238789a9e7SSteven Rostedt  *
24248789a9e7SSteven Rostedt  * Returns:
24258789a9e7SSteven Rostedt  *  1 if data has been transferred
24268789a9e7SSteven Rostedt  *  0 if no data has been transferred.
24278789a9e7SSteven Rostedt  */
24288789a9e7SSteven Rostedt int ring_buffer_read_page(struct ring_buffer *buffer,
24298789a9e7SSteven Rostedt 			    void **data_page, int cpu, int full)
24308789a9e7SSteven Rostedt {
24318789a9e7SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
24328789a9e7SSteven Rostedt 	struct ring_buffer_event *event;
2433044fa782SSteven Rostedt 	struct buffer_data_page *bpage;
24348789a9e7SSteven Rostedt 	unsigned long flags;
24358789a9e7SSteven Rostedt 	int ret = 0;
24368789a9e7SSteven Rostedt 
24378789a9e7SSteven Rostedt 	if (!data_page)
24388789a9e7SSteven Rostedt 		return 0;
24398789a9e7SSteven Rostedt 
2440044fa782SSteven Rostedt 	bpage = *data_page;
2441044fa782SSteven Rostedt 	if (!bpage)
24428789a9e7SSteven Rostedt 		return 0;
24438789a9e7SSteven Rostedt 
24448789a9e7SSteven Rostedt 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
24458789a9e7SSteven Rostedt 
24468789a9e7SSteven Rostedt 	/*
24478789a9e7SSteven Rostedt 	 * rb_buffer_peek will get the next reader page if
24488789a9e7SSteven Rostedt 	 * the current reader page is empty.
24498789a9e7SSteven Rostedt 	 */
24508789a9e7SSteven Rostedt 	event = rb_buffer_peek(buffer, cpu, NULL);
24518789a9e7SSteven Rostedt 	if (!event)
24528789a9e7SSteven Rostedt 		goto out;
24538789a9e7SSteven Rostedt 
24548789a9e7SSteven Rostedt 	/* check for data */
24558789a9e7SSteven Rostedt 	if (!local_read(&cpu_buffer->reader_page->page->commit))
24568789a9e7SSteven Rostedt 		goto out;
24578789a9e7SSteven Rostedt 	/*
24588789a9e7SSteven Rostedt 	 * If the writer is already off of the reader page, then simply
24598789a9e7SSteven Rostedt 	 * switch the reader page with the given page. Otherwise
24608789a9e7SSteven Rostedt 	 * we need to copy the data from the reader page to the given page.
24618789a9e7SSteven Rostedt 	 */
24628789a9e7SSteven Rostedt 	if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
24638789a9e7SSteven Rostedt 		unsigned int read = cpu_buffer->reader_page->read;
2464*b85fa01eSLai Jiangshan 		unsigned int commit = rb_page_commit(cpu_buffer->reader_page);
24658789a9e7SSteven Rostedt 
24668789a9e7SSteven Rostedt 		if (full)
24678789a9e7SSteven Rostedt 			goto out;
24688789a9e7SSteven Rostedt 		/* The writer is still on the reader page, we must copy */
2469044fa782SSteven Rostedt 		memcpy(bpage->data,
24708789a9e7SSteven Rostedt 		       cpu_buffer->reader_page->page->data + read,
2471*b85fa01eSLai Jiangshan 		       commit - read);
24728789a9e7SSteven Rostedt 
24738789a9e7SSteven Rostedt 		/* consume what was read */
2474*b85fa01eSLai Jiangshan 		cpu_buffer->reader_page->read = commit;
24758789a9e7SSteven Rostedt 	} else {
24768789a9e7SSteven Rostedt 		/* swap the pages */
2477044fa782SSteven Rostedt 		rb_init_page(bpage);
2478044fa782SSteven Rostedt 		bpage = cpu_buffer->reader_page->page;
24798789a9e7SSteven Rostedt 		cpu_buffer->reader_page->page = *data_page;
24808789a9e7SSteven Rostedt 		cpu_buffer->reader_page->read = 0;
2481044fa782SSteven Rostedt 		*data_page = bpage;
24828789a9e7SSteven Rostedt 	}
24838789a9e7SSteven Rostedt 	ret = 1;
24848789a9e7SSteven Rostedt 
24858789a9e7SSteven Rostedt 	/* update the entry counter */
2486044fa782SSteven Rostedt 	rb_remove_entries(cpu_buffer, bpage);
24878789a9e7SSteven Rostedt  out:
24888789a9e7SSteven Rostedt 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
24898789a9e7SSteven Rostedt 
24908789a9e7SSteven Rostedt 	return ret;
24918789a9e7SSteven Rostedt }
24928789a9e7SSteven Rostedt 
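/*
 * A minimal page-read sketch tying the three helpers above together
 * (illustrative only; rb_example_read_page and its process_page callback
 * are not part of the ring buffer API). The page is allocated once,
 * filled by ring_buffer_read_page() with @full set to zero so a partial
 * reader page is still returned, and then freed.
 */
static void __maybe_unused
rb_example_read_page(struct ring_buffer *buffer, int cpu,
		     void (*process_page)(void *data))
{
	void *page;

	page = ring_buffer_alloc_read_page(buffer);
	if (!page)
		return;

	/* on success the data to process is in @page (possibly swapped) */
	if (ring_buffer_read_page(buffer, &page, cpu, 0))
		process_page(page);

	ring_buffer_free_read_page(buffer, page);
}
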
2493a3583244SSteven Rostedt static ssize_t
2494a3583244SSteven Rostedt rb_simple_read(struct file *filp, char __user *ubuf,
2495a3583244SSteven Rostedt 	       size_t cnt, loff_t *ppos)
2496a3583244SSteven Rostedt {
2497033601a3SSteven Rostedt 	long *p = filp->private_data;
2498a3583244SSteven Rostedt 	char buf[64];
2499a3583244SSteven Rostedt 	int r;
2500a3583244SSteven Rostedt 
2501033601a3SSteven Rostedt 	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2502033601a3SSteven Rostedt 		r = sprintf(buf, "permanently disabled\n");
2503033601a3SSteven Rostedt 	else
2504033601a3SSteven Rostedt 		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
2505a3583244SSteven Rostedt 
2506a3583244SSteven Rostedt 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2507a3583244SSteven Rostedt }
2508a3583244SSteven Rostedt 
2509a3583244SSteven Rostedt static ssize_t
2510a3583244SSteven Rostedt rb_simple_write(struct file *filp, const char __user *ubuf,
2511a3583244SSteven Rostedt 		size_t cnt, loff_t *ppos)
2512a3583244SSteven Rostedt {
2513033601a3SSteven Rostedt 	long *p = filp->private_data;
2514a3583244SSteven Rostedt 	char buf[64];
2515a3583244SSteven Rostedt 	long val;
2516a3583244SSteven Rostedt 	int ret;
2517a3583244SSteven Rostedt 
2518a3583244SSteven Rostedt 	if (cnt >= sizeof(buf))
2519a3583244SSteven Rostedt 		return -EINVAL;
2520a3583244SSteven Rostedt 
2521a3583244SSteven Rostedt 	if (copy_from_user(&buf, ubuf, cnt))
2522a3583244SSteven Rostedt 		return -EFAULT;
2523a3583244SSteven Rostedt 
2524a3583244SSteven Rostedt 	buf[cnt] = 0;
2525a3583244SSteven Rostedt 
2526a3583244SSteven Rostedt 	ret = strict_strtoul(buf, 10, &val);
2527a3583244SSteven Rostedt 	if (ret < 0)
2528a3583244SSteven Rostedt 		return ret;
2529a3583244SSteven Rostedt 
2530033601a3SSteven Rostedt 	if (val)
2531033601a3SSteven Rostedt 		set_bit(RB_BUFFERS_ON_BIT, p);
2532033601a3SSteven Rostedt 	else
2533033601a3SSteven Rostedt 		clear_bit(RB_BUFFERS_ON_BIT, p);
2534a3583244SSteven Rostedt 
2535a3583244SSteven Rostedt 	(*ppos)++;
2536a3583244SSteven Rostedt 
2537a3583244SSteven Rostedt 	return cnt;
2538a3583244SSteven Rostedt }
2539a3583244SSteven Rostedt 
2540a3583244SSteven Rostedt static struct file_operations rb_simple_fops = {
2541a3583244SSteven Rostedt 	.open		= tracing_open_generic,
2542a3583244SSteven Rostedt 	.read		= rb_simple_read,
2543a3583244SSteven Rostedt 	.write		= rb_simple_write,
2544a3583244SSteven Rostedt };
2545a3583244SSteven Rostedt 
2546a3583244SSteven Rostedt 
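/*
 * Usage note (illustrative): the "tracing_on" file created below flips
 * RB_BUFFERS_ON_BIT from user space, assuming debugfs is mounted at
 * /sys/kernel/debug and the tracing directory sits under it:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on
 *
 * Reading the file reports the current state, or "permanently disabled"
 * once RB_BUFFERS_DISABLED_BIT has been set.
 */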
2547a3583244SSteven Rostedt static __init int rb_init_debugfs(void)
2548a3583244SSteven Rostedt {
2549a3583244SSteven Rostedt 	struct dentry *d_tracer;
2550a3583244SSteven Rostedt 	struct dentry *entry;
2551a3583244SSteven Rostedt 
2552a3583244SSteven Rostedt 	d_tracer = tracing_init_dentry();
2553a3583244SSteven Rostedt 
2554a3583244SSteven Rostedt 	entry = debugfs_create_file("tracing_on", 0644, d_tracer,
2555033601a3SSteven Rostedt 				    &ring_buffer_flags, &rb_simple_fops);
2556a3583244SSteven Rostedt 	if (!entry)
2557a3583244SSteven Rostedt 		pr_warning("Could not create debugfs 'tracing_on' entry\n");
2558a3583244SSteven Rostedt 
2559a3583244SSteven Rostedt 	return 0;
2560a3583244SSteven Rostedt }
2561a3583244SSteven Rostedt 
2562a3583244SSteven Rostedt fs_initcall(rb_init_debugfs);
2563