// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <[email protected]>
 */
#include <linux/trace_recursion.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/sched/clock.h>
#include <linux/cacheflush.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/mm.h>

#include <asm/local64.h>
#include <asm/local.h>
#include <asm/setup.h>

#include "trace.h"

/*
 * The "absolute" timestamp in the buffer is only 59 bits.
 * If a clock has the 5 MSBs set, it needs to be saved and
 * reinserted.
 */
#define TS_MSB		(0xf8ULL << 56)
#define ABS_TS_MASK	(~TS_MSB)
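/*
 * For illustration: 0xf8ULL << 56 sets bits 59-63, so ABS_TS_MASK keeps
 * only the low 59 bits. A clock value with any of bits 59-63 set would
 * lose them when stored as an absolute timestamp, hence the save and
 * reinsert handled by rb_fix_abs_ts() further down.
 */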

static void update_pages_handler(struct work_struct *work);

#define RING_BUFFER_META_MAGIC	0xBADFEED

struct ring_buffer_meta {
	int		magic;
	int		struct_sizes;
	unsigned long	total_size;
	unsigned long	buffers_offset;
};

struct ring_buffer_cpu_meta {
	unsigned long	first_buffer;
	unsigned long	head_buffer;
	unsigned long	commit_buffer;
	__u32		subbuf_size;
	__u32		nr_subbufs;
	int		buffers[];
};

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	trace_seq_puts(s, "# compressed entry header\n");
	trace_seq_puts(s, "\ttype_len : 5 bits\n");
	trace_seq_puts(s, "\ttime_delta : 27 bits\n");
	trace_seq_puts(s, "\tarray : 32 bits\n");
	trace_seq_putc(s, '\n');
	trace_seq_printf(s, "\tpadding : type == %d\n",
			 RINGBUF_TYPE_PADDING);
	trace_seq_printf(s, "\ttime_extend : type == %d\n",
			 RINGBUF_TYPE_TIME_EXTEND);
	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
			 RINGBUF_TYPE_TIME_STAMP);
	trace_seq_printf(s, "\tdata max type_len == %d\n",
			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return !trace_seq_has_overflowed(s);
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */
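/*
 * A rough sketch of the swap described above, for illustration only
 * (the real code also manages the flag bits stored in the low bits of
 * the list pointers for the lockless scheme):
 *
 *	spare = cpu_buffer->reader_page;
 *	list_replace(&head_page->list, &spare->list);	... spare joins the ring
 *	cpu_buffer->reader_page = head_page;		... old head is now private
 */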

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 8,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

#define extended_time(event) \
	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
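/*
 * Note: RINGBUF_TYPE_TIME_EXTEND and RINGBUF_TYPE_TIME_STAMP are the two
 * largest values of the 5 bit type_len field, so the single >= test
 * above matches both kinds of extended-time events.
 */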

static inline bool rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}
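/*
 * Worked example of the length encoding: a data event with a 12 byte
 * payload stores type_len = 3 (3 * RB_ALIGNMENT == 12) and keeps the
 * data at array[0]; a payload too large for the 5 bit type_len sets
 * type_len = 0 and stores the full length in array[0] instead, with
 * the data following at array[1].
 */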

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		WARN_ON_ONCE(1);
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (extended_time(event)) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (extended_time(event))
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static __always_inline void *
rb_event_data(struct ring_buffer_event *event)
{
	if (extended_time(event))
		event = skip_time_extend(event);
	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define for_each_online_buffer_cpu(buffer, cpu)	\
	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

static u64 rb_event_time_stamp(struct ring_buffer_event *event)
{
	u64 ts;

	ts = event->array[0];
	ts <<= TS_SHIFT;
	ts += event->time_delta;

	return ts;
}
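/*
 * Layout note: extended-time events keep the low TS_SHIFT (27) bits of
 * the value in the 27 bit time_delta field and the upper 32 bits in
 * array[0], which is how the 59 bit (27 + 32) "absolute" timestamp
 * mentioned at the top of this file comes about.
 */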

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

#define RB_MISSED_MASK		(3 << 30)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
};

struct buffer_data_read_page {
	unsigned		order;	/* order of the page */
	struct buffer_data_page	*data;	/* actual data, stored in this page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	unsigned	 order;		/* order of the page */
	u32		 id:30;		/* ID for external mapping */
	u32		 range:1;	/* Mapped via a range */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are packed into the same word. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)
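/*
 * For illustration: the current write index is always extracted with
 * "val & RB_WRITE_MASK", while the bits at and above RB_WRITE_INTCNT
 * count in-progress updaters, letting an interrupted update detect
 * that it raced with a nested writer and must redo its
 * read-modify-write.
 */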

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

static void free_buffer_page(struct buffer_page *bpage)
{
	/* Range pages are not to be freed */
	if (!bpage->range)
		free_pages((unsigned long)bpage->page, bpage->order);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline bool test_time_stamp(u64 delta)
{
	return !!(delta & TS_DELTA_TEST);
}
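/*
 * In other words: a delta of 2^27 or more (about 134ms with a
 * nanosecond clock) cannot fit in the event header's 27 bit
 * time_delta field and forces the writer to emit a time extend
 * event first.
 */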

struct rb_irq_work {
	struct irq_work			work;
	wait_queue_head_t		waiters;
	wait_queue_head_t		full_waiters;
	atomic_t			seq;
	bool				waiters_pending;
	bool				full_waiters_pending;
	bool				wakeup_full;
};

/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
	u64			ts;
	u64			delta;
	u64			before;
	u64			after;
	unsigned long		length;
	struct buffer_page	*tail_page;
	int			add_timestamp;
};

/*
 * Used for the add_timestamp
 *  NONE
 *  EXTEND - wants a time extend
 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 *  FORCE - force a full time stamp.
 */
enum {
	RB_ADD_STAMP_NONE	= 0,
	RB_ADD_STAMP_EXTEND	= BIT(1),
	RB_ADD_STAMP_ABSOLUTE	= BIT(2),
	RB_ADD_STAMP_FORCE	= BIT(3)
};
/*
 * Used for which event context the event is in.
 *  TRANSITION = 0
 *  NMI     = 1
 *  IRQ     = 2
 *  SOFTIRQ = 3
 *  NORMAL  = 4
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
	RB_CTX_TRANSITION,
	RB_CTX_NMI,
	RB_CTX_IRQ,
	RB_CTX_SOFTIRQ,
	RB_CTX_NORMAL,
	RB_CTX_MAX
};

struct rb_time_struct {
	local64_t	time;
};
typedef struct rb_time_struct rb_time_t;

#define MAX_NEST	5

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	struct trace_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct buffer_data_page		*free_page;
	unsigned long			nr_pages;
	unsigned int			current_context;
	struct list_head		*pages;
	/* pages generation counter, incremented when the list changes */
	unsigned long			cnt;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	unsigned long			nest;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	local_t				pages_touched;
	local_t				pages_lost;
	local_t				pages_read;
	long				last_pages_touch;
	size_t				shortest_full;
	unsigned long			read;
	unsigned long			read_bytes;
	rb_time_t			write_stamp;
	rb_time_t			before_stamp;
	u64				event_stamp[MAX_NEST];
	u64				read_stamp;
	/* pages removed since last reset */
	unsigned long			pages_removed;

	unsigned int			mapped;
	unsigned int			user_mapped;	/* user space mapping */
	struct mutex			mapping_lock;
	unsigned long			*subbuf_ids;	/* ID to subbuf VA */
	struct trace_buffer_meta	*meta_page;
	struct ring_buffer_cpu_meta	*ring_meta;

	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	long				nr_pages_to_update;
	struct list_head		new_pages;	/* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct trace_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resizing;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

	struct hlist_node		node;
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
	bool				time_stamp_abs;

	unsigned long			range_addr_start;
	unsigned long			range_addr_end;

	struct ring_buffer_meta		*meta;

	unsigned int			subbuf_size;
	unsigned int			subbuf_order;
	unsigned int			max_data_size;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	unsigned long			next_event;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	unsigned long			cache_pages_removed;
	u64				read_stamp;
	u64				page_stamp;
	struct ring_buffer_event	*event;
	size_t				event_size;
	int				missed_events;
};

int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s)
{
	struct buffer_data_page field;

	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			 "offset:0;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)sizeof(field.time_stamp),
			 (unsigned int)is_signed_type(u64));

	trace_seq_printf(s, "\tfield: local_t commit;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 (unsigned int)sizeof(field.commit),
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: int overwrite;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 1,
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: char data;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), data),
			 (unsigned int)buffer->subbuf_size,
			 (unsigned int)is_signed_type(char));

	return !trace_seq_has_overflowed(s);
}

static inline void rb_time_read(rb_time_t *t, u64 *ret)
{
	*ret = local64_read(&t->time);
}
static void rb_time_set(rb_time_t *t, u64 val)
{
	local64_set(&t->time, val);
}

/*
 * Enable this to make sure that the event passed to
 * ring_buffer_event_time_stamp() is not committed and also
 * is on the buffer that it passed in.
 */
//#define RB_VERIFY_EVENT
#ifdef RB_VERIFY_EVENT
static struct list_head *rb_list_head(struct list_head *list);
static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
			 void *event)
{
	struct buffer_page *page = cpu_buffer->commit_page;
	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
	struct list_head *next;
	long commit, write;
	unsigned long addr = (unsigned long)event;
	bool done = false;
	int stop = 0;

	/* Make sure the event exists and is not committed yet */
	do {
		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
			done = true;
		commit = local_read(&page->page->commit);
		write = local_read(&page->write);
		if (addr >= (unsigned long)&page->page->data[commit] &&
		    addr < (unsigned long)&page->page->data[write])
			return;

		next = rb_list_head(page->list.next);
		page = list_entry(next, struct buffer_page, list);
	} while (!done);
	WARN_ON_ONCE(1);
}
#else
static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
				void *event)
{
}
#endif

/*
 * The absolute time stamp drops the 5 MSBs and some clocks may
 * require them. The rb_fix_abs_ts() will take a previous full
 * time stamp, and add the 5 MSB of that time stamp on to the
 * saved absolute time stamp. Then they are compared in case of
 * the unlikely event that the latest time stamp incremented
 * the 5 MSB.
 */
static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
{
	if (save_ts & TS_MSB) {
		abs |= save_ts & TS_MSB;
		/* Check for overflow */
		if (unlikely(abs < save_ts))
			abs += 1ULL << 59;
	}
	return abs;
}
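/*
 * Worked example: suppose save_ts has bit 59 set. After OR-ing the 5
 * MSBs of save_ts into abs, if abs is still smaller than save_ts then
 * the low 59 bits must have wrapped since save_ts was taken, so the
 * MSB portion is bumped by one window (abs += 1ULL << 59). If
 * abs >= save_ts, no wrap occurred and the value is already correct.
 */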

static inline u64 rb_time_stamp(struct trace_buffer *buffer);

/**
 * ring_buffer_event_time_stamp - return the event's current time stamp
 * @buffer: The buffer that the event is on
 * @event: the event to get the time stamp of
 *
 * Note, this must be called after @event is reserved, and before it is
 * committed to the ring buffer. And must be called from the same
 * context where the event was reserved (normal, softirq, irq, etc).
 *
 * Returns the time stamp associated with the current event.
 * If the event has an extended time stamp, then that is used as
 * the time stamp to return.
 * In the highly unlikely case that the event was nested more than
 * the max nesting, then the write_stamp of the buffer is returned,
 * otherwise the current time is returned, though really neither of
 * the last two cases should ever happen.
 */
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
	unsigned int nest;
	u64 ts;

	/* If the event includes an absolute time, then just use that */
	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
		ts = rb_event_time_stamp(event);
		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
	}

	nest = local_read(&cpu_buffer->committing);
	verify_event(cpu_buffer, event);
	if (WARN_ON_ONCE(!nest))
		goto fail;

	/* Read the current saved nesting level time stamp */
	if (likely(--nest < MAX_NEST))
		return cpu_buffer->event_stamp[nest];

	/* Shouldn't happen, warn if it does */
	WARN_ONCE(1, "nest (%d) greater than max", nest);

 fail:
	rb_time_read(&cpu_buffer->write_stamp, &ts);

	return ts;
}

/**
 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages that have content in the ring buffer.
 */
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
	size_t read;
	size_t lost;
	size_t cnt;

	read = local_read(&buffer->buffers[cpu]->pages_read);
	lost = local_read(&buffer->buffers[cpu]->pages_lost);
	cnt = local_read(&buffer->buffers[cpu]->pages_touched);

	if (WARN_ON_ONCE(cnt < lost))
		return 0;

	cnt -= lost;

	/* The reader can read an empty page, but not more than that */
	if (cnt < read) {
		WARN_ON_ONCE(read > cnt + 1);
		return 0;
	}

	return cnt - read;
}
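/*
 * In short: dirty pages = pages_touched - pages_lost - pages_read,
 * clamped at zero. A page counts as touched once a writer moves onto
 * it, lost once it is overwritten before being read, and read once
 * the reader has consumed it.
 */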

static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	size_t nr_pages;
	size_t dirty;

	nr_pages = cpu_buffer->nr_pages;
	if (!nr_pages || !full)
		return true;

	/*
	 * Add one as dirty will never equal nr_pages, as the sub-buffer
	 * that the writer is on is not counted as dirty.
	 * This is needed if "buffer_percent" is set to 100.
	 */
	dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;

	return (dirty * 100) >= (full * nr_pages);
}
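/*
 * Worked example: with nr_pages = 10 and full = 50 (wake readers at
 * 50%), full_hit() returns true once dirty * 100 >= 500, that is, once
 * 4 pages are dirty plus the writer's current page counted by the +1
 * above.
 */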

/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Called in irq_work context to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	/* For waiters waiting for the first wake up */
	(void)atomic_fetch_inc_release(&rbwork->seq);

	wake_up_all(&rbwork->waiters);
	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
		/* Only cpu_buffer sets the above flags */
		struct ring_buffer_per_cpu *cpu_buffer =
			container_of(rbwork, struct ring_buffer_per_cpu, irq_work);

		/* Called from interrupt context */
		raw_spin_lock(&cpu_buffer->reader_lock);
		rbwork->wakeup_full = false;
		rbwork->full_waiters_pending = false;

		/* Waking up all waiters, they will reset the shortest full */
		cpu_buffer->shortest_full = 0;
		raw_spin_unlock(&cpu_buffer->reader_lock);

		wake_up_all(&rbwork->full_waiters);
	}
}

/**
 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 * @buffer: The ring buffer to wake waiters on
 * @cpu: The CPU buffer to wake waiters on
 *
 * When a file that represents a ring buffer is closing, it is prudent
 * to wake up any waiters that are on it.
 */
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *rbwork;

	if (!buffer)
		return;

	if (cpu == RING_BUFFER_ALL_CPUS) {

		/* Wake up individual ones too. One level recursion */
		for_each_buffer_cpu(buffer, cpu)
			ring_buffer_wake_waiters(buffer, cpu);

		rbwork = &buffer->irq_work;
	} else {
		if (WARN_ON_ONCE(!buffer->buffers))
			return;
		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
			return;

		cpu_buffer = buffer->buffers[cpu];
		/* The CPU buffer may not have been initialized yet */
		if (!cpu_buffer)
			return;
		rbwork = &cpu_buffer->irq_work;
	}

	/* This can be called in any context */
	irq_work_queue(&rbwork->work);
}

static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	bool ret = false;

	/* Reads of all CPUs always wait for any data */
	if (cpu == RING_BUFFER_ALL_CPUS)
		return !ring_buffer_empty(buffer);

	cpu_buffer = buffer->buffers[cpu];

	if (!ring_buffer_empty_cpu(buffer, cpu)) {
		unsigned long flags;
		bool pagebusy;

		if (!full)
			return true;

		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
		pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
		ret = !pagebusy && full_hit(buffer, cpu, full);

		if (!ret && (!cpu_buffer->shortest_full ||
			     cpu_buffer->shortest_full > full)) {
			cpu_buffer->shortest_full = full;
		}
		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
	}
	return ret;
}

static inline bool
rb_wait_cond(struct rb_irq_work *rbwork, struct trace_buffer *buffer,
	     int cpu, int full, ring_buffer_cond_fn cond, void *data)
{
	if (rb_watermark_hit(buffer, cpu, full))
		return true;

	if (cond(data))
		return true;

	/*
	 * The events can happen in critical sections where
	 * checking a work queue can cause deadlocks.
	 * After adding a task to the queue, this flag is set
	 * only to notify events to try to wake up the queue
	 * using irq_work.
	 *
	 * We don't clear it even if the buffer is no longer
	 * empty. The flag only causes the next event to run
	 * irq_work to do the work queue wake up. The worst
	 * that can happen if we race with !trace_empty() is that
	 * an event will cause an irq_work to try to wake up
	 * an empty queue.
	 *
	 * There's no reason to protect this flag either, as
	 * the work queue and irq_work logic will do the necessary
	 * synchronization for the wake ups. The only thing
	 * that is necessary is that the wake up happens after
	 * a task has been queued. It's OK for spurious wake ups.
	 */
	if (full)
		rbwork->full_waiters_pending = true;
	else
		rbwork->waiters_pending = true;

	return false;
}

struct rb_wait_data {
	struct rb_irq_work	*irq_work;
	int			seq;
};

/*
 * The default wait condition for ring_buffer_wait() is simply to exit
 * the wait loop the first time it is woken up.
 */
static bool rb_wait_once(void *data)
{
	struct rb_wait_data *rdata = data;
	struct rb_irq_work *rbwork = rdata->irq_work;

	return atomic_read_acquire(&rbwork->seq) != rdata->seq;
}
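/*
 * The seq counter pairs the release increment in rb_wake_up_waiters()
 * with the acquire reads here and in ring_buffer_wait(): a waiter
 * samples seq before sleeping and rb_wait_once() returns true once a
 * wake up has bumped it, so a wake up that lands between sampling and
 * sleeping is not lost.
 */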
9367af9ded0SSteven Rostedt (Google)
9377af9ded0SSteven Rostedt (Google) /**
9387af9ded0SSteven Rostedt (Google) * ring_buffer_wait - wait for input to the ring buffer
9397af9ded0SSteven Rostedt (Google) * @buffer: buffer to wait on
9407af9ded0SSteven Rostedt (Google) * @cpu: the cpu buffer to wait on
9417af9ded0SSteven Rostedt (Google) * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
9422aa043a5SSteven Rostedt (Google) * @cond: condition function to break out of wait (NULL to run once)
9432aa043a5SSteven Rostedt (Google) * @data: the data to pass to @cond.
9447af9ded0SSteven Rostedt (Google) *
9457af9ded0SSteven Rostedt (Google) * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
9467af9ded0SSteven Rostedt (Google) * as data is added to any of the @buffer's cpu buffers. Otherwise
9477af9ded0SSteven Rostedt (Google) * it will wait for data to be added to a specific cpu buffer.
9487af9ded0SSteven Rostedt (Google) */
ring_buffer_wait(struct trace_buffer * buffer,int cpu,int full,ring_buffer_cond_fn cond,void * data)9492aa043a5SSteven Rostedt (Google) int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
9502aa043a5SSteven Rostedt (Google) ring_buffer_cond_fn cond, void *data)
9517af9ded0SSteven Rostedt (Google) {
9527af9ded0SSteven Rostedt (Google) struct ring_buffer_per_cpu *cpu_buffer;
9537af9ded0SSteven Rostedt (Google) struct wait_queue_head *waitq;
9547af9ded0SSteven Rostedt (Google) struct rb_irq_work *rbwork;
955b70f2938SSteven Rostedt (Google) struct rb_wait_data rdata;
9567af9ded0SSteven Rostedt (Google) int ret = 0;
9577af9ded0SSteven Rostedt (Google)
9587af9ded0SSteven Rostedt (Google) /*
9597af9ded0SSteven Rostedt (Google) * Depending on what the caller is waiting for, either any
9607af9ded0SSteven Rostedt (Google) * data in any cpu buffer, or a specific buffer, put the
9617af9ded0SSteven Rostedt (Google) * caller on the appropriate wait queue.
9627af9ded0SSteven Rostedt (Google) */
9637af9ded0SSteven Rostedt (Google) if (cpu == RING_BUFFER_ALL_CPUS) {
9647af9ded0SSteven Rostedt (Google) rbwork = &buffer->irq_work;
9657af9ded0SSteven Rostedt (Google) /* Full only makes sense on per cpu reads */
9667af9ded0SSteven Rostedt (Google) full = 0;
9677af9ded0SSteven Rostedt (Google) } else {
9687af9ded0SSteven Rostedt (Google) if (!cpumask_test_cpu(cpu, buffer->cpumask))
9697af9ded0SSteven Rostedt (Google) return -ENODEV;
9707af9ded0SSteven Rostedt (Google) cpu_buffer = buffer->buffers[cpu];
9717af9ded0SSteven Rostedt (Google) rbwork = &cpu_buffer->irq_work;
9727af9ded0SSteven Rostedt (Google) }
9737af9ded0SSteven Rostedt (Google)
9747af9ded0SSteven Rostedt (Google) if (full)
9757af9ded0SSteven Rostedt (Google) waitq = &rbwork->full_waiters;
9767af9ded0SSteven Rostedt (Google) else
9777af9ded0SSteven Rostedt (Google) waitq = &rbwork->waiters;
9787af9ded0SSteven Rostedt (Google)
979b70f2938SSteven Rostedt (Google) /* Set up to exit the loop as soon as the task is woken */
980b70f2938SSteven Rostedt (Google) if (!cond) {
981b70f2938SSteven Rostedt (Google) cond = rb_wait_once;
982b70f2938SSteven Rostedt (Google) rdata.irq_work = rbwork;
983b70f2938SSteven Rostedt (Google) rdata.seq = atomic_read_acquire(&rbwork->seq);
984b70f2938SSteven Rostedt (Google) data = &rdata;
985b70f2938SSteven Rostedt (Google) }
986b70f2938SSteven Rostedt (Google)
9877af9ded0SSteven Rostedt (Google) ret = wait_event_interruptible((*waitq),
9887af9ded0SSteven Rostedt (Google) rb_wait_cond(rbwork, buffer, cpu, full, cond, data));
989b3594573SSteven Rostedt (Google)
990e30f53aaSRabin Vincent return ret;
99115693458SSteven Rostedt (Red Hat) }
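/*
 * Usage sketch (illustrative, not a caller from this file): a reader that
 * just wants to block until any new data arrives can pass a NULL
 * condition, which selects rb_wait_once() above:
 *
 *	ret = ring_buffer_wait(buffer, RING_BUFFER_ALL_CPUS, 0, NULL, NULL);
 *
 * A nonzero return means a signal interrupted the wait; on zero, at least
 * one wakeup happened and the caller should try reading again (spurious
 * wake ups are possible, as noted above).
 */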
99215693458SSteven Rostedt (Red Hat)
99315693458SSteven Rostedt (Red Hat) /**
99415693458SSteven Rostedt (Red Hat) * ring_buffer_poll_wait - poll on buffer input
99515693458SSteven Rostedt (Red Hat) * @buffer: buffer to wait on
99615693458SSteven Rostedt (Red Hat) * @cpu: the cpu buffer to wait on
99715693458SSteven Rostedt (Red Hat) * @filp: the file descriptor
99815693458SSteven Rostedt (Red Hat) * @poll_table: The poll descriptor
99942fb0a1eSSteven Rostedt (Google) * @full: wait until this percentage of pages contain data, if @cpu != RING_BUFFER_ALL_CPUS
100015693458SSteven Rostedt (Red Hat) *
100115693458SSteven Rostedt (Red Hat) * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
100215693458SSteven Rostedt (Red Hat) * as data is added to any of the @buffer's cpu buffers. Otherwise
100315693458SSteven Rostedt (Red Hat) * it will wait for data to be added to a specific cpu buffer.
100415693458SSteven Rostedt (Red Hat) *
1005a9a08845SLinus Torvalds * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
100615693458SSteven Rostedt (Red Hat) * zero otherwise.
100715693458SSteven Rostedt (Red Hat) */
100813292494SSteven Rostedt (VMware) __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
100942fb0a1eSSteven Rostedt (Google) struct file *filp, poll_table *poll_table, int full)
101015693458SSteven Rostedt (Red Hat) {
101115693458SSteven Rostedt (Red Hat) struct ring_buffer_per_cpu *cpu_buffer;
101268282dd9SSteven Rostedt (Google) struct rb_irq_work *rbwork;
101315693458SSteven Rostedt (Red Hat)
101442fb0a1eSSteven Rostedt (Google) if (cpu == RING_BUFFER_ALL_CPUS) {
101568282dd9SSteven Rostedt (Google) rbwork = &buffer->irq_work;
101642fb0a1eSSteven Rostedt (Google) full = 0;
101742fb0a1eSSteven Rostedt (Google) } else {
10186721cb60SSteven Rostedt (Red Hat) if (!cpumask_test_cpu(cpu, buffer->cpumask))
101966bbea9eSVincent Donnefort return EPOLLERR;
10206721cb60SSteven Rostedt (Red Hat)
102115693458SSteven Rostedt (Red Hat) cpu_buffer = buffer->buffers[cpu];
102268282dd9SSteven Rostedt (Google) rbwork = &cpu_buffer->irq_work;
102315693458SSteven Rostedt (Red Hat) }
102415693458SSteven Rostedt (Red Hat)
102542fb0a1eSSteven Rostedt (Google) if (full) {
102668282dd9SSteven Rostedt (Google) poll_wait(filp, &rbwork->full_waiters, poll_table);
102768282dd9SSteven Rostedt (Google)
1028e36f19a6SSteven Rostedt (Google) if (rb_watermark_hit(buffer, cpu, full))
10298145f1c3SSteven Rostedt (Google) return EPOLLIN | EPOLLRDNORM;
10308145f1c3SSteven Rostedt (Google) /*
10318145f1c3SSteven Rostedt (Google) * Only allow full_waiters_pending update to be seen after
1032e36f19a6SSteven Rostedt (Google) * the shortest_full is set (in rb_watermark_hit). If the
1033e36f19a6SSteven Rostedt (Google) * writer sees the full_waiters_pending flag set, it will
1034e36f19a6SSteven Rostedt (Google) * compare the amount in the ring buffer to shortest_full.
1035e36f19a6SSteven Rostedt (Google) * If the amount in the ring buffer is greater than the
1036e36f19a6SSteven Rostedt (Google) * shortest_full percent, it will call the irq_work handler
1037e36f19a6SSteven Rostedt (Google) * to wake up this list. The irq_work handler will reset shortest_full
10388145f1c3SSteven Rostedt (Google) * back to zero. That's done under the reader_lock, but
10398145f1c3SSteven Rostedt (Google) * the below smp_mb() makes sure that the update to
10408145f1c3SSteven Rostedt (Google) * full_waiters_pending doesn't leak up into the above.
10418145f1c3SSteven Rostedt (Google) */
10428145f1c3SSteven Rostedt (Google) smp_mb();
104368282dd9SSteven Rostedt (Google) rbwork->full_waiters_pending = true;
10448145f1c3SSteven Rostedt (Google) return 0;
10458145f1c3SSteven Rostedt (Google) }
10468145f1c3SSteven Rostedt (Google)
104768282dd9SSteven Rostedt (Google) poll_wait(filp, &rbwork->waiters, poll_table);
104868282dd9SSteven Rostedt (Google) rbwork->waiters_pending = true;
104942fb0a1eSSteven Rostedt (Google)
10504ce97dbfSJosef Bacik /*
10514ce97dbfSJosef Bacik * There's a tight race between setting the waiters_pending and
10524ce97dbfSJosef Bacik * checking if the ring buffer is empty. Once the waiters_pending bit
10534ce97dbfSJosef Bacik * is set, the next event will wake the task up, but we can get stuck
10544ce97dbfSJosef Bacik * if there's only a single event in the buffer.
10554ce97dbfSJosef Bacik *
10564ce97dbfSJosef Bacik * FIXME: Ideally, we need a memory barrier on the writer side as well,
10574ce97dbfSJosef Bacik * but adding a memory barrier to all events will cause too much of a
10584ce97dbfSJosef Bacik * performance hit in the fast path. We only need a memory barrier when
10594ce97dbfSJosef Bacik * the buffer goes from empty to having content. But as this race window is
10604ce97dbfSJosef Bacik * extremely small, and it's not a problem if another event comes in, we
10614ce97dbfSJosef Bacik * will fix it later.
10624ce97dbfSJosef Bacik */
10634ce97dbfSJosef Bacik smp_mb();
106415693458SSteven Rostedt (Red Hat)
106515693458SSteven Rostedt (Red Hat) if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
106615693458SSteven Rostedt (Red Hat) (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
1067a9a08845SLinus Torvalds return EPOLLIN | EPOLLRDNORM;
106815693458SSteven Rostedt (Red Hat) return 0;
106915693458SSteven Rostedt (Red Hat) }
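/*
 * Usage sketch (illustrative; my_buffer and my_poll are hypothetical,
 * the real callers live in the tracing code): a file_operations .poll
 * handler built on this simply forwards its arguments:
 *
 *	static __poll_t my_poll(struct file *filp, poll_table *pt)
 *	{
 *		return ring_buffer_poll_wait(my_buffer, RING_BUFFER_ALL_CPUS,
 *					     filp, pt, 0);
 *	}
 */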
107015693458SSteven Rostedt (Red Hat)
1071f536aafcSSteven Rostedt /* buffer may be either ring_buffer or ring_buffer_per_cpu */
1072077c5407SSteven Rostedt #define RB_WARN_ON(b, cond) \
10733e89c7bbSSteven Rostedt ({ \
10743e89c7bbSSteven Rostedt int _____ret = unlikely(cond); \
10753e89c7bbSSteven Rostedt if (_____ret) { \
1076077c5407SSteven Rostedt if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
1077077c5407SSteven Rostedt struct ring_buffer_per_cpu *__b = \
1078077c5407SSteven Rostedt (void *)b; \
1079077c5407SSteven Rostedt atomic_inc(&__b->buffer->record_disabled); \
1080077c5407SSteven Rostedt } else \
1081077c5407SSteven Rostedt atomic_inc(&b->record_disabled); \
1082bf41a158SSteven Rostedt WARN_ON(1); \
1083bf41a158SSteven Rostedt } \
10843e89c7bbSSteven Rostedt _____ret; \
10853e89c7bbSSteven Rostedt })
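/*
 * Example use from later in this file:
 *
 *	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
 *		return NULL;
 *
 * Thanks to the __same_type() check, @b may be either a
 * struct ring_buffer_per_cpu or a struct trace_buffer; in both cases
 * recording is disabled on the owning buffer before the WARN fires.
 */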
1086f536aafcSSteven Rostedt
108737886f6aSSteven Rostedt /* Up this if you want to test the TIME_EXTENTS and normalization */
108837886f6aSSteven Rostedt #define DEBUG_SHIFT 0
108937886f6aSSteven Rostedt
109013292494SSteven Rostedt (VMware) static inline u64 rb_time_stamp(struct trace_buffer *buffer)
109188eb0125SSteven Rostedt {
1092bbeba3e5SSteven Rostedt (VMware) u64 ts;
1093bbeba3e5SSteven Rostedt (VMware)
1094bbeba3e5SSteven Rostedt (VMware) /* Skip retpolines :-( */
1095aefb2f2eSBreno Leitao if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && likely(buffer->clock == trace_clock_local))
1096bbeba3e5SSteven Rostedt (VMware) ts = trace_clock_local();
1097bbeba3e5SSteven Rostedt (VMware) else
1098bbeba3e5SSteven Rostedt (VMware) ts = buffer->clock();
1099bbeba3e5SSteven Rostedt (VMware)
110088eb0125SSteven Rostedt /* shift to debug/test normalization and TIME_EXTENTS */
1101bbeba3e5SSteven Rostedt (VMware) return ts << DEBUG_SHIFT;
110288eb0125SSteven Rostedt }
110388eb0125SSteven Rostedt
1104f3ef7202SYordan Karadzhov (VMware) u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
110537886f6aSSteven Rostedt {
110637886f6aSSteven Rostedt u64 time;
110737886f6aSSteven Rostedt
110837886f6aSSteven Rostedt preempt_disable_notrace();
11096d3f1e12SJiri Olsa time = rb_time_stamp(buffer);
1110d6097c9eSPeter Zijlstra preempt_enable_notrace();
111137886f6aSSteven Rostedt
111237886f6aSSteven Rostedt return time;
111337886f6aSSteven Rostedt }
111437886f6aSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
111537886f6aSSteven Rostedt
111613292494SSteven Rostedt (VMware) void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
111737886f6aSSteven Rostedt int cpu, u64 *ts)
111837886f6aSSteven Rostedt {
111937886f6aSSteven Rostedt /* Just for testing the normalize function and deltas */
112037886f6aSSteven Rostedt *ts >>= DEBUG_SHIFT;
112137886f6aSSteven Rostedt }
112237886f6aSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
112337886f6aSSteven Rostedt
112477ae365eSSteven Rostedt /*
112577ae365eSSteven Rostedt * Making the ring buffer lockless makes things tricky.
112677ae365eSSteven Rostedt * Writes only happen on the CPU that they are on, so they
112777ae365eSSteven Rostedt * only need to worry about interrupts. Reads, however, can
112877ae365eSSteven Rostedt * happen on any CPU.
112977ae365eSSteven Rostedt *
113077ae365eSSteven Rostedt * The reader page is always off the ring buffer, but when the
113177ae365eSSteven Rostedt * reader finishes with a page, it needs to swap its page with
113277ae365eSSteven Rostedt * a new one from the buffer. The reader needs to take from
113377ae365eSSteven Rostedt * the head (writes go to the tail). But if a writer is in overwrite
113477ae365eSSteven Rostedt * mode and wraps, it must push the head page forward.
113577ae365eSSteven Rostedt *
113677ae365eSSteven Rostedt * Here lies the problem.
113777ae365eSSteven Rostedt *
113877ae365eSSteven Rostedt * The reader must be careful to replace only the head page, and
113977ae365eSSteven Rostedt * not another one. As described at the top of the file in the
114077ae365eSSteven Rostedt * ASCII art, the reader sets its old page to point to the next
114177ae365eSSteven Rostedt * page after head. It then sets the page after head to point to
114277ae365eSSteven Rostedt * the old reader page. But if the writer moves the head page
114377ae365eSSteven Rostedt * during this operation, the reader could end up with the tail.
114477ae365eSSteven Rostedt *
114577ae365eSSteven Rostedt * We use cmpxchg to help prevent this race. We also do something
114677ae365eSSteven Rostedt * special with the page before head. We set the LSB to 1.
114777ae365eSSteven Rostedt *
114877ae365eSSteven Rostedt * When the writer must push the page forward, it will clear the
114977ae365eSSteven Rostedt * bit that points to the head page, move the head, and then set
115077ae365eSSteven Rostedt * the bit that points to the new head page.
115177ae365eSSteven Rostedt *
115277ae365eSSteven Rostedt * We also don't want an interrupt coming in and moving the head
115377ae365eSSteven Rostedt * page on another writer. Thus we use the second LSB to catch
115477ae365eSSteven Rostedt * that too. This gives:
115577ae365eSSteven Rostedt *
115677ae365eSSteven Rostedt * head->list->prev->next bit 1 bit 0
115777ae365eSSteven Rostedt * ------- -------
115877ae365eSSteven Rostedt * Normal page 0 0
115977ae365eSSteven Rostedt * Points to head page 0 1
116077ae365eSSteven Rostedt * New head page 1 0
116177ae365eSSteven Rostedt *
116277ae365eSSteven Rostedt * Note we cannot trust the prev pointer of the head page, because:
116377ae365eSSteven Rostedt *
116477ae365eSSteven Rostedt * +----+ +-----+ +-----+
116577ae365eSSteven Rostedt * | |------>| T |---X--->| N |
116677ae365eSSteven Rostedt * | |<------| | | |
116777ae365eSSteven Rostedt * +----+ +-----+ +-----+
116877ae365eSSteven Rostedt * ^ ^ |
116977ae365eSSteven Rostedt * | +-----+ | |
117077ae365eSSteven Rostedt * +----------| R |----------+ |
117177ae365eSSteven Rostedt * | |<-----------+
117277ae365eSSteven Rostedt * +-----+
117377ae365eSSteven Rostedt *
117477ae365eSSteven Rostedt * Key: ---X--> HEAD flag set in pointer
117577ae365eSSteven Rostedt * T Tail page
117677ae365eSSteven Rostedt * R Reader page
117777ae365eSSteven Rostedt * N Next page
117877ae365eSSteven Rostedt *
117977ae365eSSteven Rostedt * (see __rb_reserve_next() to see where this happens)
118077ae365eSSteven Rostedt *
118177ae365eSSteven Rostedt * What the above shows is that the reader just swapped out
118277ae365eSSteven Rostedt * the reader page with a page in the buffer, but before it
118377ae365eSSteven Rostedt * could make the new header point back to the new page added
118477ae365eSSteven Rostedt * it was preempted by a writer. The writer moved forward onto
118577ae365eSSteven Rostedt * the new page added by the reader and is about to move forward
118677ae365eSSteven Rostedt * again.
118777ae365eSSteven Rostedt *
118877ae365eSSteven Rostedt * You can see, it is legitimate for the previous pointer of
118977ae365eSSteven Rostedt * the head (or any page) not to point back to itself. But only
11906167c205SSteven Rostedt (VMware) * temporarily.
119177ae365eSSteven Rostedt */
119277ae365eSSteven Rostedt
119377ae365eSSteven Rostedt #define RB_PAGE_NORMAL 0UL
119477ae365eSSteven Rostedt #define RB_PAGE_HEAD 1UL
119577ae365eSSteven Rostedt #define RB_PAGE_UPDATE 2UL
119677ae365eSSteven Rostedt
119777ae365eSSteven Rostedt
119877ae365eSSteven Rostedt #define RB_FLAG_MASK 3UL
119977ae365eSSteven Rostedt
120077ae365eSSteven Rostedt /* PAGE_MOVED is not part of the mask */
120177ae365eSSteven Rostedt #define RB_PAGE_MOVED 4UL
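/*
 * Worked example of the flag encoding above (illustrative address):
 * if the head page's list_head sits at 0xffff888000101000, then the
 * previous page's ->next is stored as:
 *
 *	0xffff888000101001   RB_PAGE_HEAD:   the next page is the head
 *	0xffff888000101002   RB_PAGE_UPDATE: a writer is moving the head
 *
 * struct list_head is at least 4-byte aligned, so the two LSBs are
 * always free; rb_list_head() below masks them off before the pointer
 * is dereferenced.
 */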
120277ae365eSSteven Rostedt
120377ae365eSSteven Rostedt /*
120477ae365eSSteven Rostedt * rb_list_head - strip the flag bits from a list pointer
120577ae365eSSteven Rostedt */
120677ae365eSSteven Rostedt static struct list_head *rb_list_head(struct list_head *list)
120777ae365eSSteven Rostedt {
120877ae365eSSteven Rostedt unsigned long val = (unsigned long)list;
120977ae365eSSteven Rostedt
121077ae365eSSteven Rostedt return (struct list_head *)(val & ~RB_FLAG_MASK);
121177ae365eSSteven Rostedt }
121277ae365eSSteven Rostedt
121377ae365eSSteven Rostedt /*
12146d3f1e12SJiri Olsa * rb_is_head_page - test if the given page is the head page
121577ae365eSSteven Rostedt *
121677ae365eSSteven Rostedt * Because the reader may move the head_page pointer, we can
121777ae365eSSteven Rostedt * not trust what the head page is (it may be pointing to
121877ae365eSSteven Rostedt * the reader page). But if the next page is a header page,
121977ae365eSSteven Rostedt * its flags will be non-zero.
122077ae365eSSteven Rostedt */
122142b16b3fSJesper Juhl static inline int
12226689bed3SQiujun Huang rb_is_head_page(struct buffer_page *page, struct list_head *list)
122377ae365eSSteven Rostedt {
122477ae365eSSteven Rostedt unsigned long val;
122577ae365eSSteven Rostedt
122677ae365eSSteven Rostedt val = (unsigned long)list->next;
122777ae365eSSteven Rostedt
122877ae365eSSteven Rostedt if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
122977ae365eSSteven Rostedt return RB_PAGE_MOVED;
123077ae365eSSteven Rostedt
123177ae365eSSteven Rostedt return val & RB_FLAG_MASK;
123277ae365eSSteven Rostedt }
123377ae365eSSteven Rostedt
123477ae365eSSteven Rostedt /*
123577ae365eSSteven Rostedt * rb_is_reader_page
123677ae365eSSteven Rostedt *
123777ae365eSSteven Rostedt * The unique thing about the reader page is that, if the
123877ae365eSSteven Rostedt * writer is ever on it, the previous pointer never points
123977ae365eSSteven Rostedt * back to the reader page.
124077ae365eSSteven Rostedt */
124106ca3209SYaowei Bai static bool rb_is_reader_page(struct buffer_page *page)
124277ae365eSSteven Rostedt {
124377ae365eSSteven Rostedt struct list_head *list = page->list.prev;
124477ae365eSSteven Rostedt
124577ae365eSSteven Rostedt return rb_list_head(list->next) != &page->list;
124677ae365eSSteven Rostedt }
124777ae365eSSteven Rostedt
124877ae365eSSteven Rostedt /*
124977ae365eSSteven Rostedt * rb_set_list_to_head - set a list_head to be pointing to head.
125077ae365eSSteven Rostedt */
12516689bed3SQiujun Huang static void rb_set_list_to_head(struct list_head *list)
125277ae365eSSteven Rostedt {
125377ae365eSSteven Rostedt unsigned long *ptr;
125477ae365eSSteven Rostedt
125577ae365eSSteven Rostedt ptr = (unsigned long *)&list->next;
125677ae365eSSteven Rostedt *ptr |= RB_PAGE_HEAD;
125777ae365eSSteven Rostedt *ptr &= ~RB_PAGE_UPDATE;
125877ae365eSSteven Rostedt }
125977ae365eSSteven Rostedt
126077ae365eSSteven Rostedt /*
126177ae365eSSteven Rostedt * rb_head_page_activate - sets up head page
126277ae365eSSteven Rostedt */
126377ae365eSSteven Rostedt static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
126477ae365eSSteven Rostedt {
126577ae365eSSteven Rostedt struct buffer_page *head;
126677ae365eSSteven Rostedt
126777ae365eSSteven Rostedt head = cpu_buffer->head_page;
126877ae365eSSteven Rostedt if (!head)
126977ae365eSSteven Rostedt return;
127077ae365eSSteven Rostedt
127177ae365eSSteven Rostedt /*
127277ae365eSSteven Rostedt * Set the previous list pointer to have the HEAD flag.
127377ae365eSSteven Rostedt */
12746689bed3SQiujun Huang rb_set_list_to_head(head->list.prev);
1275b14d0329SSteven Rostedt (Google)
1276b14d0329SSteven Rostedt (Google) if (cpu_buffer->ring_meta) {
12774009cc31SSteven Rostedt struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
1278b14d0329SSteven Rostedt (Google) meta->head_buffer = (unsigned long)head->page;
1279b14d0329SSteven Rostedt (Google) }
128077ae365eSSteven Rostedt }
128177ae365eSSteven Rostedt
128277ae365eSSteven Rostedt static void rb_list_head_clear(struct list_head *list)
128377ae365eSSteven Rostedt {
128477ae365eSSteven Rostedt unsigned long *ptr = (unsigned long *)&list->next;
128577ae365eSSteven Rostedt
128677ae365eSSteven Rostedt *ptr &= ~RB_FLAG_MASK;
128777ae365eSSteven Rostedt }
128877ae365eSSteven Rostedt
128977ae365eSSteven Rostedt /*
12906167c205SSteven Rostedt (VMware) * rb_head_page_deactivate - clears head page ptr (for free list)
129177ae365eSSteven Rostedt */
129277ae365eSSteven Rostedt static void
129377ae365eSSteven Rostedt rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
129477ae365eSSteven Rostedt {
129577ae365eSSteven Rostedt struct list_head *hd;
129677ae365eSSteven Rostedt
129777ae365eSSteven Rostedt /* Go through the whole list and clear any pointers found. */
129877ae365eSSteven Rostedt rb_list_head_clear(cpu_buffer->pages);
129977ae365eSSteven Rostedt
130077ae365eSSteven Rostedt list_for_each(hd, cpu_buffer->pages)
130177ae365eSSteven Rostedt rb_list_head_clear(hd);
130277ae365eSSteven Rostedt }
130377ae365eSSteven Rostedt
130477ae365eSSteven Rostedt static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
130577ae365eSSteven Rostedt struct buffer_page *head,
130677ae365eSSteven Rostedt struct buffer_page *prev,
130777ae365eSSteven Rostedt int old_flag, int new_flag)
130877ae365eSSteven Rostedt {
130977ae365eSSteven Rostedt struct list_head *list;
131077ae365eSSteven Rostedt unsigned long val = (unsigned long)&head->list;
131177ae365eSSteven Rostedt unsigned long ret;
131277ae365eSSteven Rostedt
131377ae365eSSteven Rostedt list = &prev->list;
131477ae365eSSteven Rostedt
131577ae365eSSteven Rostedt val &= ~RB_FLAG_MASK;
131677ae365eSSteven Rostedt
131708a40816SSteven Rostedt ret = cmpxchg((unsigned long *)&list->next,
131877ae365eSSteven Rostedt val | old_flag, val | new_flag);
131977ae365eSSteven Rostedt
132077ae365eSSteven Rostedt /* check if the reader took the page */
132177ae365eSSteven Rostedt if ((ret & ~RB_FLAG_MASK) != val)
132277ae365eSSteven Rostedt return RB_PAGE_MOVED;
132377ae365eSSteven Rostedt
132477ae365eSSteven Rostedt return ret & RB_FLAG_MASK;
132577ae365eSSteven Rostedt }
132677ae365eSSteven Rostedt
132777ae365eSSteven Rostedt static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
132877ae365eSSteven Rostedt struct buffer_page *head,
132977ae365eSSteven Rostedt struct buffer_page *prev,
133077ae365eSSteven Rostedt int old_flag)
133177ae365eSSteven Rostedt {
133277ae365eSSteven Rostedt return rb_head_page_set(cpu_buffer, head, prev,
133377ae365eSSteven Rostedt old_flag, RB_PAGE_UPDATE);
133477ae365eSSteven Rostedt }
133577ae365eSSteven Rostedt
133677ae365eSSteven Rostedt static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
133777ae365eSSteven Rostedt struct buffer_page *head,
133877ae365eSSteven Rostedt struct buffer_page *prev,
133977ae365eSSteven Rostedt int old_flag)
134077ae365eSSteven Rostedt {
134177ae365eSSteven Rostedt return rb_head_page_set(cpu_buffer, head, prev,
134277ae365eSSteven Rostedt old_flag, RB_PAGE_HEAD);
134377ae365eSSteven Rostedt }
134477ae365eSSteven Rostedt
134577ae365eSSteven Rostedt static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
134677ae365eSSteven Rostedt struct buffer_page *head,
134777ae365eSSteven Rostedt struct buffer_page *prev,
134877ae365eSSteven Rostedt int old_flag)
134977ae365eSSteven Rostedt {
135077ae365eSSteven Rostedt return rb_head_page_set(cpu_buffer, head, prev,
135177ae365eSSteven Rostedt old_flag, RB_PAGE_NORMAL);
135277ae365eSSteven Rostedt }
135377ae365eSSteven Rostedt
13546689bed3SQiujun Huang static inline void rb_inc_page(struct buffer_page **bpage)
135577ae365eSSteven Rostedt {
135677ae365eSSteven Rostedt struct list_head *p = rb_list_head((*bpage)->list.next);
135777ae365eSSteven Rostedt
135877ae365eSSteven Rostedt *bpage = list_entry(p, struct buffer_page, list);
135977ae365eSSteven Rostedt }
136077ae365eSSteven Rostedt
136177ae365eSSteven Rostedt static struct buffer_page *
136277ae365eSSteven Rostedt rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
136377ae365eSSteven Rostedt {
136477ae365eSSteven Rostedt struct buffer_page *head;
136577ae365eSSteven Rostedt struct buffer_page *page;
136677ae365eSSteven Rostedt struct list_head *list;
136777ae365eSSteven Rostedt int i;
136877ae365eSSteven Rostedt
136977ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
137077ae365eSSteven Rostedt return NULL;
137177ae365eSSteven Rostedt
137277ae365eSSteven Rostedt /* sanity check */
137377ae365eSSteven Rostedt list = cpu_buffer->pages;
137477ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
137577ae365eSSteven Rostedt return NULL;
137677ae365eSSteven Rostedt
137777ae365eSSteven Rostedt page = head = cpu_buffer->head_page;
137877ae365eSSteven Rostedt /*
137977ae365eSSteven Rostedt * It is possible that the writer moves the head page behind
138077ae365eSSteven Rostedt * where we started, and we miss it in one loop.
138177ae365eSSteven Rostedt * A second loop should grab the head page, but we'll do
138277ae365eSSteven Rostedt * three loops just because I'm paranoid.
138377ae365eSSteven Rostedt */
138477ae365eSSteven Rostedt for (i = 0; i < 3; i++) {
138577ae365eSSteven Rostedt do {
13866689bed3SQiujun Huang if (rb_is_head_page(page, page->list.prev)) {
138777ae365eSSteven Rostedt cpu_buffer->head_page = page;
138877ae365eSSteven Rostedt return page;
138977ae365eSSteven Rostedt }
13906689bed3SQiujun Huang rb_inc_page(&page);
139177ae365eSSteven Rostedt } while (page != head);
139277ae365eSSteven Rostedt }
139377ae365eSSteven Rostedt
139477ae365eSSteven Rostedt RB_WARN_ON(cpu_buffer, 1);
139577ae365eSSteven Rostedt
139677ae365eSSteven Rostedt return NULL;
139777ae365eSSteven Rostedt }
139877ae365eSSteven Rostedt
1399bc92b956SUros Bizjak static bool rb_head_page_replace(struct buffer_page *old,
140077ae365eSSteven Rostedt struct buffer_page *new)
140177ae365eSSteven Rostedt {
140277ae365eSSteven Rostedt unsigned long *ptr = (unsigned long *)&old->list.prev->next;
140377ae365eSSteven Rostedt unsigned long val;
140477ae365eSSteven Rostedt
140577ae365eSSteven Rostedt val = *ptr & ~RB_FLAG_MASK;
140677ae365eSSteven Rostedt val |= RB_PAGE_HEAD;
140777ae365eSSteven Rostedt
140800a8478fSUros Bizjak return try_cmpxchg(ptr, &val, (unsigned long)&new->list);
140977ae365eSSteven Rostedt }
141077ae365eSSteven Rostedt
141177ae365eSSteven Rostedt /*
141277ae365eSSteven Rostedt * rb_tail_page_update - move the tail page forward
141377ae365eSSteven Rostedt */
141470004986SSteven Rostedt (Red Hat) static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
141577ae365eSSteven Rostedt struct buffer_page *tail_page,
141677ae365eSSteven Rostedt struct buffer_page *next_page)
141777ae365eSSteven Rostedt {
141877ae365eSSteven Rostedt unsigned long old_entries;
141977ae365eSSteven Rostedt unsigned long old_write;
142077ae365eSSteven Rostedt
142177ae365eSSteven Rostedt /*
142277ae365eSSteven Rostedt * The tail page now needs to be moved forward.
142377ae365eSSteven Rostedt *
142477ae365eSSteven Rostedt * We need to reset the tail page, but we must not erase
142577ae365eSSteven Rostedt * data brought in by interrupts that have already moved
142677ae365eSSteven Rostedt * the tail page and are currently writing to it.
142777ae365eSSteven Rostedt *
142877ae365eSSteven Rostedt * We add a counter to the write field to denote this.
142977ae365eSSteven Rostedt */
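/*
 * Illustrative sketch, assuming the RB_WRITE_MASK/RB_WRITE_INTCNT
 * split defined earlier in this file (low bits hold the write
 * position, upper bits count interrupting writers): if
 * next_page->write is 0x00000010 (16 bytes written), the
 * local_add_return() below makes it 0x00100010. Should an interrupt
 * move the tail page again, it becomes 0x00200010 and the
 * local_cmpxchg() further down fails, leaving the interrupt's update
 * in place.
 */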
143077ae365eSSteven Rostedt old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
143177ae365eSSteven Rostedt old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
143277ae365eSSteven Rostedt
143377ae365eSSteven Rostedt /*
143477ae365eSSteven Rostedt * Just make sure we have seen our old_write and synchronize
143577ae365eSSteven Rostedt * with any interrupts that come in.
143677ae365eSSteven Rostedt */
143777ae365eSSteven Rostedt barrier();
143877ae365eSSteven Rostedt
143977ae365eSSteven Rostedt /*
144077ae365eSSteven Rostedt * If the tail page is still the same as what we think
144177ae365eSSteven Rostedt * it is, then it is up to us to update the tail
144277ae365eSSteven Rostedt * pointer.
144377ae365eSSteven Rostedt */
14448573636eSSteven Rostedt (Red Hat) if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
144577ae365eSSteven Rostedt /* Zero the write counter */
144677ae365eSSteven Rostedt unsigned long val = old_write & ~RB_WRITE_MASK;
144777ae365eSSteven Rostedt unsigned long eval = old_entries & ~RB_WRITE_MASK;
144877ae365eSSteven Rostedt
144977ae365eSSteven Rostedt /*
145077ae365eSSteven Rostedt * This will only succeed if an interrupt did
145177ae365eSSteven Rostedt * not come in and change it, in which case we
145277ae365eSSteven Rostedt * do not want to modify it.
1453da706d8bSLai Jiangshan *
1454da706d8bSLai Jiangshan * We add (void) to let the compiler know that we do not care
1455da706d8bSLai Jiangshan * about the return value of these functions. We use the
1456da706d8bSLai Jiangshan * cmpxchg to only update if an interrupt did not already
1457da706d8bSLai Jiangshan * do it for us. If the cmpxchg fails, we don't care.
145877ae365eSSteven Rostedt */
1459da706d8bSLai Jiangshan (void)local_cmpxchg(&next_page->write, old_write, val);
1460da706d8bSLai Jiangshan (void)local_cmpxchg(&next_page->entries, old_entries, eval);
146177ae365eSSteven Rostedt
146277ae365eSSteven Rostedt /*
146377ae365eSSteven Rostedt * No need to worry about races with clearing out the commit:
146477ae365eSSteven Rostedt * it can only increment when a commit takes place. But that
146577ae365eSSteven Rostedt * only happens in the outermost nested commit.
146677ae365eSSteven Rostedt */
146777ae365eSSteven Rostedt local_set(&next_page->page->commit, 0);
146877ae365eSSteven Rostedt
1469ffe3986fSSteven Rostedt (Google) /* Either we update tail_page or an interrupt does */
1470ffe3986fSSteven Rostedt (Google) if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page))
1471ffe3986fSSteven Rostedt (Google) local_inc(&cpu_buffer->pages_touched);
147277ae365eSSteven Rostedt }
147377ae365eSSteven Rostedt }
147477ae365eSSteven Rostedt
1475b4b55dfdSUros Bizjak static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
147677ae365eSSteven Rostedt struct buffer_page *bpage)
147777ae365eSSteven Rostedt {
147877ae365eSSteven Rostedt unsigned long val = (unsigned long)bpage;
147977ae365eSSteven Rostedt
1480b4b55dfdSUros Bizjak RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
148177ae365eSSteven Rostedt }
148277ae365eSSteven Rostedt
1483b237e1f7SPetr Pavlu static bool rb_check_links(struct ring_buffer_per_cpu *cpu_buffer,
1484b237e1f7SPetr Pavlu struct list_head *list)
1485b237e1f7SPetr Pavlu {
1486b237e1f7SPetr Pavlu if (RB_WARN_ON(cpu_buffer,
1487b237e1f7SPetr Pavlu rb_list_head(rb_list_head(list->next)->prev) != list))
1488b237e1f7SPetr Pavlu return false;
1489b237e1f7SPetr Pavlu
1490b237e1f7SPetr Pavlu if (RB_WARN_ON(cpu_buffer,
1491b237e1f7SPetr Pavlu rb_list_head(rb_list_head(list->prev)->next) != list))
1492b237e1f7SPetr Pavlu return false;
1493b237e1f7SPetr Pavlu
1494b237e1f7SPetr Pavlu return true;
1495b237e1f7SPetr Pavlu }
1496b237e1f7SPetr Pavlu
149777ae365eSSteven Rostedt /**
1498d611851bSzhangwei(Jovi) * rb_check_pages - integrity check of buffer pages
14997a8e76a3SSteven Rostedt * @cpu_buffer: CPU buffer with pages to test
15007a8e76a3SSteven Rostedt *
1501c3706f00SWenji Huang * As a safety measure we check to make sure the data pages have not
15027a8e76a3SSteven Rostedt * been corrupted.
15037a8e76a3SSteven Rostedt */
1504b4b55dfdSUros Bizjak static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
15057a8e76a3SSteven Rostedt {
1506b237e1f7SPetr Pavlu struct list_head *head, *tmp;
1507b237e1f7SPetr Pavlu unsigned long buffer_cnt;
1508b237e1f7SPetr Pavlu unsigned long flags;
1509b237e1f7SPetr Pavlu int nr_loops = 0;
15107a8e76a3SSteven Rostedt
1511b237e1f7SPetr Pavlu /*
1512b237e1f7SPetr Pavlu * Walk the linked list underpinning the ring buffer and validate all
1513b237e1f7SPetr Pavlu * its next and prev links.
1514b237e1f7SPetr Pavlu *
1515b237e1f7SPetr Pavlu * The check acquires the reader_lock to avoid concurrent processing
1516b237e1f7SPetr Pavlu * with code that could be modifying the list. However, the lock cannot
1517b237e1f7SPetr Pavlu * be held for the entire duration of the walk, as this would make the
1518b237e1f7SPetr Pavlu * time when interrupts are disabled non-deterministic, dependent on the
1519b237e1f7SPetr Pavlu * ring buffer size. Therefore, the code releases and re-acquires the
1520b237e1f7SPetr Pavlu * lock after checking each page. The ring_buffer_per_cpu.cnt variable
1521b237e1f7SPetr Pavlu * is then used to detect if the list was modified while the lock was
1522b237e1f7SPetr Pavlu * not held, in which case the check needs to be restarted.
1523b237e1f7SPetr Pavlu *
1524b237e1f7SPetr Pavlu * The code attempts to perform the check at most three times before
1525b237e1f7SPetr Pavlu * giving up. This is acceptable because this is only a self-validation
1526b237e1f7SPetr Pavlu * to detect problems early on. In practice, the list modification
1527b237e1f7SPetr Pavlu * operations are fairly spread out in time, and so this check typically succeeds at
1528b237e1f7SPetr Pavlu * most on the second try.
1529b237e1f7SPetr Pavlu */
1530b237e1f7SPetr Pavlu again:
1531b237e1f7SPetr Pavlu if (++nr_loops > 3)
1532b4b55dfdSUros Bizjak return;
15338843e06fSMukesh Ojha
1534b237e1f7SPetr Pavlu raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1535b237e1f7SPetr Pavlu head = rb_list_head(cpu_buffer->pages);
1536b237e1f7SPetr Pavlu if (!rb_check_links(cpu_buffer, head))
1537b237e1f7SPetr Pavlu goto out_locked;
1538b237e1f7SPetr Pavlu buffer_cnt = cpu_buffer->cnt;
1539b237e1f7SPetr Pavlu tmp = head;
1540b237e1f7SPetr Pavlu raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
15418843e06fSMukesh Ojha
1542b237e1f7SPetr Pavlu while (true) {
1543b237e1f7SPetr Pavlu raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
15448843e06fSMukesh Ojha
1545b237e1f7SPetr Pavlu if (buffer_cnt != cpu_buffer->cnt) {
1546b237e1f7SPetr Pavlu /* The list was updated, try again. */
1547b237e1f7SPetr Pavlu raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1548b237e1f7SPetr Pavlu goto again;
15497a8e76a3SSteven Rostedt }
1550b237e1f7SPetr Pavlu
1551b237e1f7SPetr Pavlu tmp = rb_list_head(tmp->next);
1552b237e1f7SPetr Pavlu if (tmp == head)
1553b237e1f7SPetr Pavlu /* The iteration circled back, all is done. */
1554b237e1f7SPetr Pavlu goto out_locked;
1555b237e1f7SPetr Pavlu
1556b237e1f7SPetr Pavlu if (!rb_check_links(cpu_buffer, tmp))
1557b237e1f7SPetr Pavlu goto out_locked;
1558b237e1f7SPetr Pavlu
1559b237e1f7SPetr Pavlu raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1560b237e1f7SPetr Pavlu }
1561b237e1f7SPetr Pavlu
1562b237e1f7SPetr Pavlu out_locked:
1563b237e1f7SPetr Pavlu raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
15647a8e76a3SSteven Rostedt }
15657a8e76a3SSteven Rostedt
1566be68d63aSSteven Rostedt (Google) /*
1567be68d63aSSteven Rostedt (Google) * Take an address, add the meta data size as well as the array of
1568be68d63aSSteven Rostedt (Google) * subbuffer indexes, then align it to the subbuffer size.
1569be68d63aSSteven Rostedt (Google) *
1570be68d63aSSteven Rostedt (Google) * This is used to help find the next per cpu subbuffer within a mapped range.
1571be68d63aSSteven Rostedt (Google) */
1572be68d63aSSteven Rostedt (Google) static unsigned long
1573be68d63aSSteven Rostedt (Google) rb_range_align_subbuf(unsigned long addr, int subbuf_size, int nr_subbufs)
1574be68d63aSSteven Rostedt (Google) {
15754009cc31SSteven Rostedt addr += sizeof(struct ring_buffer_cpu_meta) +
1576be68d63aSSteven Rostedt (Google) sizeof(int) * nr_subbufs;
1577be68d63aSSteven Rostedt (Google) return ALIGN(addr, subbuf_size);
1578be68d63aSSteven Rostedt (Google) }
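/*
 * Worked example (illustrative numbers, 64-bit build): with 4K
 * subbuffers and nr_subbufs == 9, the header takes
 * sizeof(struct ring_buffer_cpu_meta) + 9 * sizeof(int) == 32 + 36
 * bytes, so @addr == 0x1000 becomes 0x1044, which ALIGN() then rounds
 * up to 0x2000, the start of the chunk's first subbuffer.
 */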
1579be68d63aSSteven Rostedt (Google)
1580be68d63aSSteven Rostedt (Google) /*
1581b14d0329SSteven Rostedt (Google) * Return the ring_buffer_cpu_meta for a given @cpu.
1582be68d63aSSteven Rostedt (Google) */
1583b14d0329SSteven Rostedt (Google) static void *rb_range_meta(struct trace_buffer *buffer, int nr_pages, int cpu)
1584be68d63aSSteven Rostedt (Google) {
1585be68d63aSSteven Rostedt (Google) int subbuf_size = buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
15864009cc31SSteven Rostedt struct ring_buffer_cpu_meta *meta;
15874009cc31SSteven Rostedt struct ring_buffer_meta *bmeta;
15884009cc31SSteven Rostedt unsigned long ptr;
1589be68d63aSSteven Rostedt (Google) int nr_subbufs;
1590be68d63aSSteven Rostedt (Google)
15914009cc31SSteven Rostedt bmeta = buffer->meta;
15924009cc31SSteven Rostedt if (!bmeta)
1593b14d0329SSteven Rostedt (Google) return NULL;
1594b14d0329SSteven Rostedt (Google)
15954009cc31SSteven Rostedt ptr = (unsigned long)bmeta + bmeta->buffers_offset;
15964009cc31SSteven Rostedt meta = (struct ring_buffer_cpu_meta *)ptr;
15974009cc31SSteven Rostedt
1598b14d0329SSteven Rostedt (Google) /* When nr_pages passed in is zero, the first meta has already been initialized */
1599b14d0329SSteven Rostedt (Google) if (!nr_pages) {
1600b14d0329SSteven Rostedt (Google) nr_subbufs = meta->nr_subbufs;
1601b14d0329SSteven Rostedt (Google) } else {
1602be68d63aSSteven Rostedt (Google) /* Include the reader page */
1603be68d63aSSteven Rostedt (Google) nr_subbufs = nr_pages + 1;
1604b14d0329SSteven Rostedt (Google) }
1605be68d63aSSteven Rostedt (Google)
1606be68d63aSSteven Rostedt (Google) /*
1607be68d63aSSteven Rostedt (Google) * The first chunk may not be subbuffer aligned, whereas
1608be68d63aSSteven Rostedt (Google) * the rest of the chunks are.
1609be68d63aSSteven Rostedt (Google) */
1610be68d63aSSteven Rostedt (Google) if (cpu) {
1611be68d63aSSteven Rostedt (Google) ptr = rb_range_align_subbuf(ptr, subbuf_size, nr_subbufs);
1612b14d0329SSteven Rostedt (Google) ptr += subbuf_size * nr_subbufs;
1613be68d63aSSteven Rostedt (Google)
1614be68d63aSSteven Rostedt (Google) /* We can use multiplication to find chunks for cpu > 1 */
1615be68d63aSSteven Rostedt (Google) if (cpu > 1) {
1616be68d63aSSteven Rostedt (Google) unsigned long size;
1617b14d0329SSteven Rostedt (Google) unsigned long p;
1618be68d63aSSteven Rostedt (Google)
1619b14d0329SSteven Rostedt (Google) /* Save the beginning of this CPU chunk */
1620b14d0329SSteven Rostedt (Google) p = ptr;
1621b14d0329SSteven Rostedt (Google) ptr = rb_range_align_subbuf(ptr, subbuf_size, nr_subbufs);
1622be68d63aSSteven Rostedt (Google) ptr += subbuf_size * nr_subbufs;
1623be68d63aSSteven Rostedt (Google)
1624be68d63aSSteven Rostedt (Google) /* Now all chunks after this are the same size */
1625be68d63aSSteven Rostedt (Google) size = ptr - p;
1626be68d63aSSteven Rostedt (Google) ptr += size * (cpu - 2);
1627be68d63aSSteven Rostedt (Google) }
1628be68d63aSSteven Rostedt (Google) }
1629be68d63aSSteven Rostedt (Google) return (void *)ptr;
1630be68d63aSSteven Rostedt (Google) }
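/*
 * Layout sketch of the mapped range (illustrative):
 *
 *	[ bmeta | scratch | cpu0 meta | cpu0 subbufs | cpu1 meta |
 *	  cpu1 subbufs | cpu2 meta | cpu2 subbufs | ... ]
 *
 * Only the cpu0 chunk may start unaligned (it follows the top-level
 * meta data directly); every chunk from cpu1 on starts subbuffer
 * aligned and has the same size, which is why chunks for cpu > 1 can
 * be found with a single multiplication above.
 */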
1631be68d63aSSteven Rostedt (Google)
1632b14d0329SSteven Rostedt (Google) /* Return the start of subbufs given the meta pointer */
16334009cc31SSteven Rostedt static void *rb_subbufs_from_meta(struct ring_buffer_cpu_meta *meta)
1634b14d0329SSteven Rostedt (Google) {
1635b14d0329SSteven Rostedt (Google) int subbuf_size = meta->subbuf_size;
1636b14d0329SSteven Rostedt (Google) unsigned long ptr;
1637b14d0329SSteven Rostedt (Google)
1638b14d0329SSteven Rostedt (Google) ptr = (unsigned long)meta;
1639b14d0329SSteven Rostedt (Google) ptr = rb_range_align_subbuf(ptr, subbuf_size, meta->nr_subbufs);
1640b14d0329SSteven Rostedt (Google)
1641b14d0329SSteven Rostedt (Google) return (void *)ptr;
1642b14d0329SSteven Rostedt (Google) }
1643b14d0329SSteven Rostedt (Google)
1644b14d0329SSteven Rostedt (Google) /*
1645b14d0329SSteven Rostedt (Google) * Return a specific sub-buffer for a given @cpu defined by @idx.
1646b14d0329SSteven Rostedt (Google) */
1647b14d0329SSteven Rostedt (Google) static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx)
1648b14d0329SSteven Rostedt (Google) {
16494009cc31SSteven Rostedt struct ring_buffer_cpu_meta *meta;
1650b14d0329SSteven Rostedt (Google) unsigned long ptr;
1651b14d0329SSteven Rostedt (Google) int subbuf_size;
1652b14d0329SSteven Rostedt (Google)
1653b14d0329SSteven Rostedt (Google) meta = rb_range_meta(cpu_buffer->buffer, 0, cpu_buffer->cpu);
1654b14d0329SSteven Rostedt (Google) if (!meta)
1655b14d0329SSteven Rostedt (Google) return NULL;
1656b14d0329SSteven Rostedt (Google)
1657b14d0329SSteven Rostedt (Google) if (WARN_ON_ONCE(idx >= meta->nr_subbufs))
1658b14d0329SSteven Rostedt (Google) return NULL;
1659b14d0329SSteven Rostedt (Google)
1660b14d0329SSteven Rostedt (Google) subbuf_size = meta->subbuf_size;
1661b14d0329SSteven Rostedt (Google)
1662b14d0329SSteven Rostedt (Google) /* Map this buffer to the order that's in meta->buffers[] */
1663b14d0329SSteven Rostedt (Google) idx = meta->buffers[idx];
1664b14d0329SSteven Rostedt (Google)
1665b14d0329SSteven Rostedt (Google) ptr = (unsigned long)rb_subbufs_from_meta(meta);
1666b14d0329SSteven Rostedt (Google)
1667b14d0329SSteven Rostedt (Google) ptr += subbuf_size * idx;
1668b14d0329SSteven Rostedt (Google) if (ptr + subbuf_size > cpu_buffer->buffer->range_addr_end)
1669b14d0329SSteven Rostedt (Google) return NULL;
1670b14d0329SSteven Rostedt (Google)
1671b14d0329SSteven Rostedt (Google) return (void *)ptr;
1672b14d0329SSteven Rostedt (Google) }
1673b14d0329SSteven Rostedt (Google)
1674c76883f1SSteven Rostedt (Google) /*
16754009cc31SSteven Rostedt * See if the existing memory contains a valid meta section.
16764009cc31SSteven Rostedt * If so, use that; otherwise initialize it.
16774009cc31SSteven Rostedt */
16784af0a9c5SSteven Rostedt static bool rb_meta_init(struct trace_buffer *buffer, int scratch_size)
16794009cc31SSteven Rostedt {
16804009cc31SSteven Rostedt unsigned long ptr = buffer->range_addr_start;
16814009cc31SSteven Rostedt struct ring_buffer_meta *bmeta;
16824009cc31SSteven Rostedt unsigned long total_size;
16834009cc31SSteven Rostedt int struct_sizes;
16844009cc31SSteven Rostedt
16854009cc31SSteven Rostedt bmeta = (struct ring_buffer_meta *)ptr;
16864009cc31SSteven Rostedt buffer->meta = bmeta;
16874009cc31SSteven Rostedt
16884009cc31SSteven Rostedt total_size = buffer->range_addr_end - buffer->range_addr_start;
16894009cc31SSteven Rostedt
16904009cc31SSteven Rostedt struct_sizes = sizeof(struct ring_buffer_cpu_meta);
16914009cc31SSteven Rostedt struct_sizes |= sizeof(*bmeta) << 16;
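/*
 * Illustrative values (64-bit build, assuming no structure padding):
 * the per-CPU meta is 32 bytes and the top-level meta is 24 bytes,
 * giving struct_sizes == (24 << 16) | 32 == 0x00180020. Any change
 * to either structure changes this value, invalidating a meta
 * section left over from a kernel built with a different layout.
 */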
16924009cc31SSteven Rostedt
16934009cc31SSteven Rostedt /* The first buffer starts word aligned after the meta data and scratch area */
16944009cc31SSteven Rostedt ptr += sizeof(*bmeta);
16954009cc31SSteven Rostedt ptr = ALIGN(ptr, sizeof(long));
16964af0a9c5SSteven Rostedt ptr += scratch_size;
16974009cc31SSteven Rostedt
16984009cc31SSteven Rostedt if (bmeta->magic != RING_BUFFER_META_MAGIC) {
16994009cc31SSteven Rostedt pr_info("Ring buffer boot meta mismatch of magic\n");
17004009cc31SSteven Rostedt goto init;
17014009cc31SSteven Rostedt }
17024009cc31SSteven Rostedt
17034009cc31SSteven Rostedt if (bmeta->struct_sizes != struct_sizes) {
17044009cc31SSteven Rostedt pr_info("Ring buffer boot meta mismatch of struct size\n");
17054009cc31SSteven Rostedt goto init;
17064009cc31SSteven Rostedt }
17074009cc31SSteven Rostedt
17084009cc31SSteven Rostedt if (bmeta->total_size != total_size) {
17094009cc31SSteven Rostedt pr_info("Ring buffer boot meta mismatch of total size\n");
17104009cc31SSteven Rostedt goto init;
17114009cc31SSteven Rostedt }
17124009cc31SSteven Rostedt
17134009cc31SSteven Rostedt if (bmeta->buffers_offset > bmeta->total_size) {
17144009cc31SSteven Rostedt pr_info("Ring buffer boot meta mismatch of offset outside of total size\n");
17154009cc31SSteven Rostedt goto init;
17164009cc31SSteven Rostedt }
17174009cc31SSteven Rostedt
17184009cc31SSteven Rostedt if (bmeta->buffers_offset != (void *)ptr - (void *)bmeta) {
17194009cc31SSteven Rostedt pr_info("Ring buffer boot meta mismatch of first buffer offset\n");
17204009cc31SSteven Rostedt goto init;
17214009cc31SSteven Rostedt }
17224009cc31SSteven Rostedt
17234009cc31SSteven Rostedt return true;
17244009cc31SSteven Rostedt
17254009cc31SSteven Rostedt init:
17264009cc31SSteven Rostedt bmeta->magic = RING_BUFFER_META_MAGIC;
17274009cc31SSteven Rostedt bmeta->struct_sizes = struct_sizes;
17284009cc31SSteven Rostedt bmeta->total_size = total_size;
17294009cc31SSteven Rostedt bmeta->buffers_offset = (void *)ptr - (void *)bmeta;
17304009cc31SSteven Rostedt
17314af0a9c5SSteven Rostedt /* Zero out the scratch pad */
17324af0a9c5SSteven Rostedt memset((void *)bmeta + sizeof(*bmeta), 0, bmeta->buffers_offset - sizeof(*bmeta));
17334af0a9c5SSteven Rostedt
17344009cc31SSteven Rostedt return false;
17354009cc31SSteven Rostedt }
17364009cc31SSteven Rostedt
17374009cc31SSteven Rostedt /*
1738c76883f1SSteven Rostedt (Google) * See if the existing memory contains valid ring buffer data.
1739c76883f1SSteven Rostedt (Google) * As the previous kernel must be the same as this kernel, all
1740c76883f1SSteven Rostedt (Google) * the calculations (size of buffers and number of buffers)
1741c76883f1SSteven Rostedt (Google) * must be the same.
1742c76883f1SSteven Rostedt (Google) */
17434009cc31SSteven Rostedt static bool rb_cpu_meta_valid(struct ring_buffer_cpu_meta *meta, int cpu,
1744f5b95f1fSSteven Rostedt struct trace_buffer *buffer, int nr_pages,
1745f5b95f1fSSteven Rostedt unsigned long *subbuf_mask)
1746c76883f1SSteven Rostedt (Google) {
1747c76883f1SSteven Rostedt (Google) int subbuf_size = PAGE_SIZE;
1748c76883f1SSteven Rostedt (Google) struct buffer_data_page *subbuf;
1749c76883f1SSteven Rostedt (Google) unsigned long buffers_start;
1750c76883f1SSteven Rostedt (Google) unsigned long buffers_end;
1751c76883f1SSteven Rostedt (Google) int i;
1752c76883f1SSteven Rostedt (Google)
1753f5b95f1fSSteven Rostedt if (!subbuf_mask)
1754f5b95f1fSSteven Rostedt return false;
1755f5b95f1fSSteven Rostedt
1756c76883f1SSteven Rostedt (Google) buffers_start = meta->first_buffer;
1757c76883f1SSteven Rostedt (Google) buffers_end = meta->first_buffer + (subbuf_size * meta->nr_subbufs);
1758c76883f1SSteven Rostedt (Google)
1759c76883f1SSteven Rostedt (Google) /* Are the head and commit buffers within the range of buffers? */
1760c76883f1SSteven Rostedt (Google) if (meta->head_buffer < buffers_start ||
1761c76883f1SSteven Rostedt (Google) meta->head_buffer >= buffers_end) {
1762c76883f1SSteven Rostedt (Google) pr_info("Ring buffer boot meta [%d] head buffer out of range\n", cpu);
1763c76883f1SSteven Rostedt (Google) return false;
1764c76883f1SSteven Rostedt (Google) }
1765c76883f1SSteven Rostedt (Google)
1766c76883f1SSteven Rostedt (Google) if (meta->commit_buffer < buffers_start ||
1767c76883f1SSteven Rostedt (Google) meta->commit_buffer >= buffers_end) {
1768c76883f1SSteven Rostedt (Google) pr_info("Ring buffer boot meta [%d] commit buffer out of range\n", cpu);
1769c76883f1SSteven Rostedt (Google) return false;
1770c76883f1SSteven Rostedt (Google) }
1771c76883f1SSteven Rostedt (Google)
1772c76883f1SSteven Rostedt (Google) subbuf = rb_subbufs_from_meta(meta);
1773c76883f1SSteven Rostedt (Google)
1774f5b95f1fSSteven Rostedt bitmap_clear(subbuf_mask, 0, meta->nr_subbufs);
1775f5b95f1fSSteven Rostedt
1776c76883f1SSteven Rostedt (Google) /* Do the meta buffers and the subbufs themselves have correct data? */
1777c76883f1SSteven Rostedt (Google) for (i = 0; i < meta->nr_subbufs; i++) {
1778c76883f1SSteven Rostedt (Google) if (meta->buffers[i] < 0 ||
1779c76883f1SSteven Rostedt (Google) meta->buffers[i] >= meta->nr_subbufs) {
1780c76883f1SSteven Rostedt (Google) pr_info("Ring buffer boot meta [%d] array out of range\n", cpu);
1781c76883f1SSteven Rostedt (Google) return false;
1782c76883f1SSteven Rostedt (Google) }
1783c76883f1SSteven Rostedt (Google)
1784c76883f1SSteven Rostedt (Google) if ((unsigned)local_read(&subbuf->commit) > subbuf_size) {
1785c76883f1SSteven Rostedt (Google) pr_info("Ring buffer boot meta [%d] buffer invalid commit\n", cpu);
1786c76883f1SSteven Rostedt (Google) return false;
1787c76883f1SSteven Rostedt (Google) }
1788c76883f1SSteven Rostedt (Google)
1789f5b95f1fSSteven Rostedt if (test_bit(meta->buffers[i], subbuf_mask)) {
1790f5b95f1fSSteven Rostedt pr_info("Ring buffer boot meta [%d] array has duplicates\n", cpu);
1791f5b95f1fSSteven Rostedt return false;
1792f5b95f1fSSteven Rostedt }
1793f5b95f1fSSteven Rostedt
1794f5b95f1fSSteven Rostedt set_bit(meta->buffers[i], subbuf_mask);
1795c76883f1SSteven Rostedt (Google) subbuf = (void *)subbuf + subbuf_size;
1796c76883f1SSteven Rostedt (Google) }
1797c76883f1SSteven Rostedt (Google)
1798c76883f1SSteven Rostedt (Google) return true;
1799c76883f1SSteven Rostedt (Google) }
1800c76883f1SSteven Rostedt (Google)
18014009cc31SSteven Rostedt static int rb_meta_subbuf_idx(struct ring_buffer_cpu_meta *meta, void *subbuf);
18025f3b6e83SSteven Rostedt (Google)
18035f3b6e83SSteven Rostedt (Google) static int rb_read_data_buffer(struct buffer_data_page *dpage, int tail, int cpu,
18045f3b6e83SSteven Rostedt (Google) unsigned long long *timestamp, u64 *delta_ptr)
18055f3b6e83SSteven Rostedt (Google) {
18065f3b6e83SSteven Rostedt (Google) struct ring_buffer_event *event;
18075f3b6e83SSteven Rostedt (Google) u64 ts, delta;
18085f3b6e83SSteven Rostedt (Google) int events = 0;
18095f3b6e83SSteven Rostedt (Google) int e;
18105f3b6e83SSteven Rostedt (Google)
18115f3b6e83SSteven Rostedt (Google) *delta_ptr = 0;
18125f3b6e83SSteven Rostedt (Google) *timestamp = 0;
18135f3b6e83SSteven Rostedt (Google)
18145f3b6e83SSteven Rostedt (Google) ts = dpage->time_stamp;
18155f3b6e83SSteven Rostedt (Google)
18165f3b6e83SSteven Rostedt (Google) for (e = 0; e < tail; e += rb_event_length(event)) {
18175f3b6e83SSteven Rostedt (Google)
18185f3b6e83SSteven Rostedt (Google) event = (struct ring_buffer_event *)(dpage->data + e);
18195f3b6e83SSteven Rostedt (Google)
18205f3b6e83SSteven Rostedt (Google) switch (event->type_len) {
18215f3b6e83SSteven Rostedt (Google)
18225f3b6e83SSteven Rostedt (Google) case RINGBUF_TYPE_TIME_EXTEND:
18235f3b6e83SSteven Rostedt (Google) delta = rb_event_time_stamp(event);
18245f3b6e83SSteven Rostedt (Google) ts += delta;
18255f3b6e83SSteven Rostedt (Google) break;
18265f3b6e83SSteven Rostedt (Google)
18275f3b6e83SSteven Rostedt (Google) case RINGBUF_TYPE_TIME_STAMP:
18285f3b6e83SSteven Rostedt (Google) delta = rb_event_time_stamp(event);
18295f3b6e83SSteven Rostedt (Google) delta = rb_fix_abs_ts(delta, ts);
18305f3b6e83SSteven Rostedt (Google) if (delta < ts) {
18315f3b6e83SSteven Rostedt (Google) *delta_ptr = delta;
18325f3b6e83SSteven Rostedt (Google) *timestamp = ts;
18335f3b6e83SSteven Rostedt (Google) return -1;
18345f3b6e83SSteven Rostedt (Google) }
18355f3b6e83SSteven Rostedt (Google) ts = delta;
18365f3b6e83SSteven Rostedt (Google) break;
18375f3b6e83SSteven Rostedt (Google)
18385f3b6e83SSteven Rostedt (Google) case RINGBUF_TYPE_PADDING:
18395f3b6e83SSteven Rostedt (Google) if (event->time_delta == 1)
18405f3b6e83SSteven Rostedt (Google) break;
18415f3b6e83SSteven Rostedt (Google) fallthrough;
18425f3b6e83SSteven Rostedt (Google) case RINGBUF_TYPE_DATA:
18435f3b6e83SSteven Rostedt (Google) events++;
18445f3b6e83SSteven Rostedt (Google) ts += event->time_delta;
18455f3b6e83SSteven Rostedt (Google) break;
18465f3b6e83SSteven Rostedt (Google)
18475f3b6e83SSteven Rostedt (Google) default:
18485f3b6e83SSteven Rostedt (Google) return -1;
18495f3b6e83SSteven Rostedt (Google) }
18505f3b6e83SSteven Rostedt (Google) }
18515f3b6e83SSteven Rostedt (Google) *timestamp = ts;
18525f3b6e83SSteven Rostedt (Google) return events;
18535f3b6e83SSteven Rostedt (Google) }
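/*
 * Worked example (illustrative): for a page whose time_stamp is 1000
 * holding [DATA delta=5][TIME_EXTEND delta=70000][DATA delta=3], the
 * walk above yields event timestamps 1005 and 1005 + 70000 + 3 ==
 * 71008, returns events == 2, and stores 71008 in *timestamp.
 */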
18545f3b6e83SSteven Rostedt (Google)
18555f3b6e83SSteven Rostedt (Google) static int rb_validate_buffer(struct buffer_data_page *dpage, int cpu)
18565f3b6e83SSteven Rostedt (Google) {
18575f3b6e83SSteven Rostedt (Google) unsigned long long ts;
18585f3b6e83SSteven Rostedt (Google) u64 delta;
18595f3b6e83SSteven Rostedt (Google) int tail;
18605f3b6e83SSteven Rostedt (Google)
18615f3b6e83SSteven Rostedt (Google) tail = local_read(&dpage->commit);
18625f3b6e83SSteven Rostedt (Google) return rb_read_data_buffer(dpage, tail, cpu, &ts, &delta);
18635f3b6e83SSteven Rostedt (Google) }
18645f3b6e83SSteven Rostedt (Google)
18655f3b6e83SSteven Rostedt (Google) /* If the meta data has been validated, now validate the events */
18665f3b6e83SSteven Rostedt (Google) static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
18675f3b6e83SSteven Rostedt (Google) {
18684009cc31SSteven Rostedt struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
18695f3b6e83SSteven Rostedt (Google) struct buffer_page *head_page;
18705f3b6e83SSteven Rostedt (Google) unsigned long entry_bytes = 0;
18715f3b6e83SSteven Rostedt (Google) unsigned long entries = 0;
18725f3b6e83SSteven Rostedt (Google) int ret;
18735f3b6e83SSteven Rostedt (Google) int i;
18745f3b6e83SSteven Rostedt (Google)
18755f3b6e83SSteven Rostedt (Google) if (!meta || !meta->head_buffer)
18765f3b6e83SSteven Rostedt (Google) return;
18775f3b6e83SSteven Rostedt (Google)
18785f3b6e83SSteven Rostedt (Google) /* Do the reader page first */
18795f3b6e83SSteven Rostedt (Google) ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu);
18805f3b6e83SSteven Rostedt (Google) if (ret < 0) {
18815f3b6e83SSteven Rostedt (Google) pr_info("Ring buffer reader page is invalid\n");
18825f3b6e83SSteven Rostedt (Google) goto invalid;
18835f3b6e83SSteven Rostedt (Google) }
18845f3b6e83SSteven Rostedt (Google) entries += ret;
18855f3b6e83SSteven Rostedt (Google) entry_bytes += local_read(&cpu_buffer->reader_page->page->commit);
18865f3b6e83SSteven Rostedt (Google) local_set(&cpu_buffer->reader_page->entries, ret);
18875f3b6e83SSteven Rostedt (Google)
18885f3b6e83SSteven Rostedt (Google) head_page = cpu_buffer->head_page;
18895f3b6e83SSteven Rostedt (Google)
1890*1d6c39c8SSteven Rostedt /* If the commit_buffer is the reader page, update the commit page */
1891*1d6c39c8SSteven Rostedt if (meta->commit_buffer == (unsigned long)cpu_buffer->reader_page->page) {
1892*1d6c39c8SSteven Rostedt cpu_buffer->commit_page = cpu_buffer->reader_page;
1893*1d6c39c8SSteven Rostedt /* Nothing more to do, the only page is the reader page */
18945f3b6e83SSteven Rostedt (Google) goto done;
1895*1d6c39c8SSteven Rostedt }
18965f3b6e83SSteven Rostedt (Google)
18975f3b6e83SSteven Rostedt (Google) /* Iterate until finding the commit page */
18985f3b6e83SSteven Rostedt (Google) for (i = 0; i < meta->nr_subbufs + 1; i++, rb_inc_page(&head_page)) {
18995f3b6e83SSteven Rostedt (Google)
19005f3b6e83SSteven Rostedt (Google) /* Reader page has already been done */
19015f3b6e83SSteven Rostedt (Google) if (head_page == cpu_buffer->reader_page)
19025f3b6e83SSteven Rostedt (Google) continue;
19035f3b6e83SSteven Rostedt (Google)
19045f3b6e83SSteven Rostedt (Google) ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu);
19055f3b6e83SSteven Rostedt (Google) if (ret < 0) {
19065f3b6e83SSteven Rostedt (Google) pr_info("Ring buffer meta [%d] invalid buffer page\n",
19075f3b6e83SSteven Rostedt (Google) cpu_buffer->cpu);
19085f3b6e83SSteven Rostedt (Google) goto invalid;
19095f3b6e83SSteven Rostedt (Google) }
191097937834SSteven Rostedt
191197937834SSteven Rostedt /* If the buffer has content, update pages_touched */
191297937834SSteven Rostedt if (ret)
191397937834SSteven Rostedt local_inc(&cpu_buffer->pages_touched);
191497937834SSteven Rostedt
19155f3b6e83SSteven Rostedt (Google) entries += ret;
19165f3b6e83SSteven Rostedt (Google) entry_bytes += local_read(&head_page->page->commit);
19175f3b6e83SSteven Rostedt (Google) 		local_set(&head_page->entries, ret);
19185f3b6e83SSteven Rostedt (Google)
19195f3b6e83SSteven Rostedt (Google) if (head_page == cpu_buffer->commit_page)
19205f3b6e83SSteven Rostedt (Google) break;
19215f3b6e83SSteven Rostedt (Google) }
19225f3b6e83SSteven Rostedt (Google)
19235f3b6e83SSteven Rostedt (Google) if (head_page != cpu_buffer->commit_page) {
19245f3b6e83SSteven Rostedt (Google) pr_info("Ring buffer meta [%d] commit page not found\n",
19255f3b6e83SSteven Rostedt (Google) cpu_buffer->cpu);
19265f3b6e83SSteven Rostedt (Google) goto invalid;
19275f3b6e83SSteven Rostedt (Google) }
19285f3b6e83SSteven Rostedt (Google) done:
19295f3b6e83SSteven Rostedt (Google) local_set(&cpu_buffer->entries, entries);
19305f3b6e83SSteven Rostedt (Google) local_set(&cpu_buffer->entries_bytes, entry_bytes);
19315f3b6e83SSteven Rostedt (Google)
19325f3b6e83SSteven Rostedt (Google) pr_info("Ring buffer meta [%d] is from previous boot!\n", cpu_buffer->cpu);
19335f3b6e83SSteven Rostedt (Google) return;
19345f3b6e83SSteven Rostedt (Google)
19355f3b6e83SSteven Rostedt (Google) invalid:
19365f3b6e83SSteven Rostedt (Google) /* The content of the buffers are invalid, reset the meta data */
19375f3b6e83SSteven Rostedt (Google) meta->head_buffer = 0;
19385f3b6e83SSteven Rostedt (Google) meta->commit_buffer = 0;
19395f3b6e83SSteven Rostedt (Google)
19405f3b6e83SSteven Rostedt (Google) /* Reset the reader page */
19415f3b6e83SSteven Rostedt (Google) local_set(&cpu_buffer->reader_page->entries, 0);
19425f3b6e83SSteven Rostedt (Google) local_set(&cpu_buffer->reader_page->page->commit, 0);
19435f3b6e83SSteven Rostedt (Google)
19445f3b6e83SSteven Rostedt (Google) 	/* Reset all the subbuffers */
	head_page = cpu_buffer->head_page;	/* re-read: head_page is uninitialized if the reader page failed */
19455f3b6e83SSteven Rostedt (Google) 	for (i = 0; i < meta->nr_subbufs - 1; i++, rb_inc_page(&head_page)) {
19465f3b6e83SSteven Rostedt (Google) local_set(&head_page->entries, 0);
19475f3b6e83SSteven Rostedt (Google) local_set(&head_page->page->commit, 0);
19485f3b6e83SSteven Rostedt (Google) }
19495f3b6e83SSteven Rostedt (Google) }
19505f3b6e83SSteven Rostedt (Google)
19514af0a9c5SSteven Rostedt static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages, int scratch_size)
1952bca704f6SSteven Rostedt {
19534009cc31SSteven Rostedt struct ring_buffer_cpu_meta *meta;
1954f5b95f1fSSteven Rostedt unsigned long *subbuf_mask;
1955c76883f1SSteven Rostedt (Google) unsigned long delta;
1956b14d0329SSteven Rostedt (Google) void *subbuf;
19574009cc31SSteven Rostedt bool valid = false;
1958b14d0329SSteven Rostedt (Google) int cpu;
1959b14d0329SSteven Rostedt (Google) int i;
1960b14d0329SSteven Rostedt (Google)
1961f5b95f1fSSteven Rostedt /* Create a mask to test the subbuf array */
1962f5b95f1fSSteven Rostedt subbuf_mask = bitmap_alloc(nr_pages + 1, GFP_KERNEL);
1963f5b95f1fSSteven Rostedt 	/* If subbuf_mask fails to allocate, then rb_cpu_meta_valid() will return false */
1964f5b95f1fSSteven Rostedt
19654af0a9c5SSteven Rostedt if (rb_meta_init(buffer, scratch_size))
19664009cc31SSteven Rostedt valid = true;
19674009cc31SSteven Rostedt
1968b14d0329SSteven Rostedt (Google) for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
1969c76883f1SSteven Rostedt (Google) void *next_meta;
1970c76883f1SSteven Rostedt (Google)
1971b14d0329SSteven Rostedt (Google) meta = rb_range_meta(buffer, nr_pages, cpu);
1972b14d0329SSteven Rostedt (Google)
19734009cc31SSteven Rostedt if (valid && rb_cpu_meta_valid(meta, cpu, buffer, nr_pages, subbuf_mask)) {
1974c76883f1SSteven Rostedt (Google) /* Make the mappings match the current address */
1975c76883f1SSteven Rostedt (Google) subbuf = rb_subbufs_from_meta(meta);
1976c76883f1SSteven Rostedt (Google) delta = (unsigned long)subbuf - meta->first_buffer;
1977c76883f1SSteven Rostedt (Google) meta->first_buffer += delta;
1978c76883f1SSteven Rostedt (Google) meta->head_buffer += delta;
1979c76883f1SSteven Rostedt (Google) meta->commit_buffer += delta;
1980c76883f1SSteven Rostedt (Google) continue;
1981c76883f1SSteven Rostedt (Google) }
1982c76883f1SSteven Rostedt (Google)
1983c76883f1SSteven Rostedt (Google) if (cpu < nr_cpu_ids - 1)
1984c76883f1SSteven Rostedt (Google) next_meta = rb_range_meta(buffer, nr_pages, cpu + 1);
1985c76883f1SSteven Rostedt (Google) else
1986c76883f1SSteven Rostedt (Google) next_meta = (void *)buffer->range_addr_end;
1987c76883f1SSteven Rostedt (Google)
1988c76883f1SSteven Rostedt (Google) memset(meta, 0, next_meta - (void *)meta);
1989c76883f1SSteven Rostedt (Google)
1990b14d0329SSteven Rostedt (Google) meta->nr_subbufs = nr_pages + 1;
1991b14d0329SSteven Rostedt (Google) meta->subbuf_size = PAGE_SIZE;
1992b14d0329SSteven Rostedt (Google)
1993b14d0329SSteven Rostedt (Google) subbuf = rb_subbufs_from_meta(meta);
1994b14d0329SSteven Rostedt (Google)
1995c76883f1SSteven Rostedt (Google) meta->first_buffer = (unsigned long)subbuf;
1996c76883f1SSteven Rostedt (Google)
1997b14d0329SSteven Rostedt (Google) 		/*
1998b14d0329SSteven Rostedt (Google) 		 * The buffers[] array holds the order of the sub-buffers
1999b14d0329SSteven Rostedt (Google) 		 * that follow the meta data. A sub-buffer may be swapped
2000b14d0329SSteven Rostedt (Google) 		 * out when read and inserted into a different location of
2001b14d0329SSteven Rostedt (Google) 		 * the ring buffer. Its address never changes; instead,
2002b14d0329SSteven Rostedt (Google) 		 * buffers[] maps each logical position to the index of the
2003b14d0329SSteven Rostedt (Google) 		 * sub-buffer currently holding it (see the sketch after
2004b14d0329SSteven Rostedt (Google) 		 * this function).
		 */
2005b14d0329SSteven Rostedt (Google) for (i = 0; i < meta->nr_subbufs; i++) {
2006b14d0329SSteven Rostedt (Google) meta->buffers[i] = i;
2007b14d0329SSteven Rostedt (Google) rb_init_page(subbuf);
2008b14d0329SSteven Rostedt (Google) subbuf += meta->subbuf_size;
2009b14d0329SSteven Rostedt (Google) }
2010b14d0329SSteven Rostedt (Google) }
2011f5b95f1fSSteven Rostedt bitmap_free(subbuf_mask);
2012b14d0329SSteven Rostedt (Google) }
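
/*
 * Illustrative sketch only, kept out of the build with #if 0: how a
 * logical sub-buffer position resolves through the buffers[] indirection
 * initialized above. The helper name is hypothetical; the real lookups
 * are done by rb_range_buffer() and rb_meta_subbuf_idx() in this file.
 */
#if 0
static void *example_logical_subbuf(struct ring_buffer_cpu_meta *meta, int pos)
{
	void *subbuf = rb_subbufs_from_meta(meta);

	/* buffers[pos] holds the physical slot of logical position pos */
	return subbuf + (unsigned long)meta->buffers[pos] * meta->subbuf_size;
}
#endif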
2013b14d0329SSteven Rostedt (Google)
2014950032ffSSteven Rostedt (Google) static void *rbm_start(struct seq_file *m, loff_t *pos)
2015950032ffSSteven Rostedt (Google) {
2016950032ffSSteven Rostedt (Google) struct ring_buffer_per_cpu *cpu_buffer = m->private;
20174009cc31SSteven Rostedt struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
2018950032ffSSteven Rostedt (Google) unsigned long val;
2019950032ffSSteven Rostedt (Google)
2020950032ffSSteven Rostedt (Google) if (!meta)
2021950032ffSSteven Rostedt (Google) return NULL;
2022950032ffSSteven Rostedt (Google)
2023950032ffSSteven Rostedt (Google) if (*pos > meta->nr_subbufs)
2024950032ffSSteven Rostedt (Google) return NULL;
2025950032ffSSteven Rostedt (Google)
2026950032ffSSteven Rostedt (Google) val = *pos;
2027950032ffSSteven Rostedt (Google) val++;
2028950032ffSSteven Rostedt (Google)
2029950032ffSSteven Rostedt (Google) return (void *)val;
2030950032ffSSteven Rostedt (Google) }
2031950032ffSSteven Rostedt (Google)
2032950032ffSSteven Rostedt (Google) static void *rbm_next(struct seq_file *m, void *v, loff_t *pos)
2033950032ffSSteven Rostedt (Google) {
2034950032ffSSteven Rostedt (Google) (*pos)++;
2035950032ffSSteven Rostedt (Google)
2036950032ffSSteven Rostedt (Google) return rbm_start(m, pos);
2037950032ffSSteven Rostedt (Google) }
2038950032ffSSteven Rostedt (Google)
2039950032ffSSteven Rostedt (Google) static int rbm_show(struct seq_file *m, void *v)
2040950032ffSSteven Rostedt (Google) {
2041950032ffSSteven Rostedt (Google) struct ring_buffer_per_cpu *cpu_buffer = m->private;
20424009cc31SSteven Rostedt struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
2043950032ffSSteven Rostedt (Google) unsigned long val = (unsigned long)v;
2044950032ffSSteven Rostedt (Google)
2045950032ffSSteven Rostedt (Google) if (val == 1) {
2046950032ffSSteven Rostedt (Google) seq_printf(m, "head_buffer: %d\n",
2047950032ffSSteven Rostedt (Google) rb_meta_subbuf_idx(meta, (void *)meta->head_buffer));
2048950032ffSSteven Rostedt (Google) seq_printf(m, "commit_buffer: %d\n",
2049950032ffSSteven Rostedt (Google) rb_meta_subbuf_idx(meta, (void *)meta->commit_buffer));
2050950032ffSSteven Rostedt (Google) seq_printf(m, "subbuf_size: %d\n", meta->subbuf_size);
2051950032ffSSteven Rostedt (Google) seq_printf(m, "nr_subbufs: %d\n", meta->nr_subbufs);
2052950032ffSSteven Rostedt (Google) return 0;
2053950032ffSSteven Rostedt (Google) }
2054950032ffSSteven Rostedt (Google)
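	/* A val of 2 or more prints one buffers[] slot; val - 2 is its index */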
2055950032ffSSteven Rostedt (Google) val -= 2;
2056950032ffSSteven Rostedt (Google) seq_printf(m, "buffer[%ld]: %d\n", val, meta->buffers[val]);
2057950032ffSSteven Rostedt (Google)
2058950032ffSSteven Rostedt (Google) return 0;
2059950032ffSSteven Rostedt (Google) }
2060950032ffSSteven Rostedt (Google)
2061950032ffSSteven Rostedt (Google) static void rbm_stop(struct seq_file *m, void *p)
2062950032ffSSteven Rostedt (Google) {
2063950032ffSSteven Rostedt (Google) }
2064950032ffSSteven Rostedt (Google)
2065950032ffSSteven Rostedt (Google) static const struct seq_operations rb_meta_seq_ops = {
2066950032ffSSteven Rostedt (Google) .start = rbm_start,
2067950032ffSSteven Rostedt (Google) .next = rbm_next,
2068950032ffSSteven Rostedt (Google) .show = rbm_show,
2069950032ffSSteven Rostedt (Google) .stop = rbm_stop,
2070950032ffSSteven Rostedt (Google) };
2071950032ffSSteven Rostedt (Google)
2072950032ffSSteven Rostedt (Google) int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu)
2073950032ffSSteven Rostedt (Google) {
2074950032ffSSteven Rostedt (Google) struct seq_file *m;
2075950032ffSSteven Rostedt (Google) int ret;
2076950032ffSSteven Rostedt (Google)
2077950032ffSSteven Rostedt (Google) ret = seq_open(file, &rb_meta_seq_ops);
2078950032ffSSteven Rostedt (Google) if (ret)
2079950032ffSSteven Rostedt (Google) return ret;
2080950032ffSSteven Rostedt (Google)
2081950032ffSSteven Rostedt (Google) m = file->private_data;
2082950032ffSSteven Rostedt (Google) m->private = buffer->buffers[cpu];
2083950032ffSSteven Rostedt (Google)
2084950032ffSSteven Rostedt (Google) return 0;
2085950032ffSSteven Rostedt (Google) }
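
/*
 * Usage sketch only (#if 0): how a tracefs open() callback might wire up
 * the meta seq_file. The open function, the use of i_private to carry the
 * trace_buffer, and the fixed CPU are assumptions for the example, not
 * the actual tracefs wiring.
 */
#if 0
static int example_meta_open(struct inode *inode, struct file *file)
{
	struct trace_buffer *buffer = inode->i_private;

	return ring_buffer_meta_seq_init(file, buffer, 0 /* cpu */);
}
#endif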
2086950032ffSSteven Rostedt (Google)
2087c76883f1SSteven Rostedt (Google) /* Map the buffer_pages to the previous head and commit pages */
2088c76883f1SSteven Rostedt (Google) static void rb_meta_buffer_update(struct ring_buffer_per_cpu *cpu_buffer,
2089c76883f1SSteven Rostedt (Google) struct buffer_page *bpage)
2090c76883f1SSteven Rostedt (Google) {
20914009cc31SSteven Rostedt struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
2092c76883f1SSteven Rostedt (Google)
2093c76883f1SSteven Rostedt (Google) if (meta->head_buffer == (unsigned long)bpage->page)
2094c76883f1SSteven Rostedt (Google) cpu_buffer->head_page = bpage;
2095c76883f1SSteven Rostedt (Google)
2096c76883f1SSteven Rostedt (Google) if (meta->commit_buffer == (unsigned long)bpage->page) {
2097c76883f1SSteven Rostedt (Google) cpu_buffer->commit_page = bpage;
2098c76883f1SSteven Rostedt (Google) cpu_buffer->tail_page = bpage;
2099c76883f1SSteven Rostedt (Google) }
2100c76883f1SSteven Rostedt (Google) }
2101c76883f1SSteven Rostedt (Google)
210274e2afc6SQiujun Huang static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
210374e2afc6SQiujun Huang long nr_pages, struct list_head *pages)
21047a8e76a3SSteven Rostedt {
2105be68d63aSSteven Rostedt (Google) struct trace_buffer *buffer = cpu_buffer->buffer;
21064009cc31SSteven Rostedt struct ring_buffer_cpu_meta *meta = NULL;
2107044fa782SSteven Rostedt struct buffer_page *bpage, *tmp;
2108927e56dbSSteven Rostedt (VMware) bool user_thread = current->mm != NULL;
2109927e56dbSSteven Rostedt (VMware) gfp_t mflags;
21109b94a8fbSSteven Rostedt (Red Hat) long i;
21113adc54faSSteven Rostedt
2112927e56dbSSteven Rostedt (VMware) /*
2113927e56dbSSteven Rostedt (VMware) 	 * First check that enough memory is available.
2114927e56dbSSteven Rostedt (VMware) * Note, si_mem_available() only gives us a rough estimate of available
2115927e56dbSSteven Rostedt (VMware) * memory. It may not be accurate. But we don't care, we just want
2116927e56dbSSteven Rostedt (VMware) * to prevent doing any allocation when it is obvious that it is
2117927e56dbSSteven Rostedt (VMware) * not going to succeed.
2118927e56dbSSteven Rostedt (VMware) */
21192a872fa4SSteven Rostedt (VMware) i = si_mem_available();
21202a872fa4SSteven Rostedt (VMware) if (i < nr_pages)
21212a872fa4SSteven Rostedt (VMware) return -ENOMEM;
21222a872fa4SSteven Rostedt (VMware)
2123d7ec4bfeSVaibhav Nagarnaik /*
212484861885SJoel Fernandes * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
212584861885SJoel Fernandes * gracefully without invoking oom-killer and the system is not
212684861885SJoel Fernandes * destabilized.
2127d7ec4bfeSVaibhav Nagarnaik */
2128927e56dbSSteven Rostedt (VMware) mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
2129927e56dbSSteven Rostedt (VMware)
2130927e56dbSSteven Rostedt (VMware) /*
2131927e56dbSSteven Rostedt (VMware) 	 * A user thread may allocate too much while si_mem_available()
2132927e56dbSSteven Rostedt (VMware) 	 * reports there's enough memory, even though there is not.
2133927e56dbSSteven Rostedt (VMware) * Make sure the OOM killer kills this thread. This can happen
2134927e56dbSSteven Rostedt (VMware) * even with RETRY_MAYFAIL because another task may be doing
2135927e56dbSSteven Rostedt (VMware) * an allocation after this task has taken all memory.
2136927e56dbSSteven Rostedt (VMware) * This is the task the OOM killer needs to take out during this
2137927e56dbSSteven Rostedt (VMware) * loop, even if it was triggered by an allocation somewhere else.
2138927e56dbSSteven Rostedt (VMware) */
2139927e56dbSSteven Rostedt (VMware) if (user_thread)
2140927e56dbSSteven Rostedt (VMware) set_current_oom_origin();
2141c76883f1SSteven Rostedt (Google)
2142c76883f1SSteven Rostedt (Google) if (buffer->range_addr_start)
2143c76883f1SSteven Rostedt (Google) meta = rb_range_meta(buffer, nr_pages, cpu_buffer->cpu);
2144c76883f1SSteven Rostedt (Google)
2145927e56dbSSteven Rostedt (VMware) for (i = 0; i < nr_pages; i++) {
2146927e56dbSSteven Rostedt (VMware) struct page *page;
2147927e56dbSSteven Rostedt (VMware)
2148044fa782SSteven Rostedt bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
214974e2afc6SQiujun Huang mflags, cpu_to_node(cpu_buffer->cpu));
2150044fa782SSteven Rostedt if (!bpage)
2151e4c2ce82SSteven Rostedt goto free_pages;
215277ae365eSSteven Rostedt
215374e2afc6SQiujun Huang rb_check_bpage(cpu_buffer, bpage);
215474e2afc6SQiujun Huang
2155be68d63aSSteven Rostedt (Google) 		/*
2156be68d63aSSteven Rostedt (Google) 		 * Append the pages, because mapped buffers need to keep
2157be68d63aSSteven Rostedt (Google) 		 * their order.
2158be68d63aSSteven Rostedt (Google) 		 */
2159be68d63aSSteven Rostedt (Google) list_add_tail(&bpage->list, pages);
216077ae365eSSteven Rostedt
2161c76883f1SSteven Rostedt (Google) if (meta) {
2162be68d63aSSteven Rostedt (Google) /* A range was given. Use that for the buffer page */
2163b14d0329SSteven Rostedt (Google) bpage->page = rb_range_buffer(cpu_buffer, i + 1);
2164be68d63aSSteven Rostedt (Google) if (!bpage->page)
2165be68d63aSSteven Rostedt (Google) goto free_pages;
2166c76883f1SSteven Rostedt (Google) /* If this is valid from a previous boot */
2167c76883f1SSteven Rostedt (Google) if (meta->head_buffer)
2168c76883f1SSteven Rostedt (Google) rb_meta_buffer_update(cpu_buffer, bpage);
2169be68d63aSSteven Rostedt (Google) bpage->range = 1;
2170b14d0329SSteven Rostedt (Google) bpage->id = i + 1;
2171be68d63aSSteven Rostedt (Google) } else {
21726b76323eSVincent Donnefort page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
2173c09d4167SVincent Donnefort mflags | __GFP_COMP | __GFP_ZERO,
2174f9b94daaSTzvetomir Stoyanov (VMware) cpu_buffer->buffer->subbuf_order);
21757ea59064SVaibhav Nagarnaik if (!page)
21767a8e76a3SSteven Rostedt goto free_pages;
21777ea59064SVaibhav Nagarnaik bpage->page = page_address(page);
2178b14d0329SSteven Rostedt (Google) rb_init_page(bpage->page);
2179be68d63aSSteven Rostedt (Google) }
2180f9b94daaSTzvetomir Stoyanov (VMware) bpage->order = cpu_buffer->buffer->subbuf_order;
2181927e56dbSSteven Rostedt (VMware)
2182927e56dbSSteven Rostedt (VMware) if (user_thread && fatal_signal_pending(current))
2183927e56dbSSteven Rostedt (VMware) goto free_pages;
21847a8e76a3SSteven Rostedt }
2185927e56dbSSteven Rostedt (VMware) if (user_thread)
2186927e56dbSSteven Rostedt (VMware) clear_current_oom_origin();
21877a8e76a3SSteven Rostedt
2188438ced17SVaibhav Nagarnaik return 0;
2189438ced17SVaibhav Nagarnaik
2190438ced17SVaibhav Nagarnaik free_pages:
2191438ced17SVaibhav Nagarnaik list_for_each_entry_safe(bpage, tmp, pages, list) {
2192438ced17SVaibhav Nagarnaik list_del_init(&bpage->list);
2193438ced17SVaibhav Nagarnaik free_buffer_page(bpage);
2194438ced17SVaibhav Nagarnaik }
2195927e56dbSSteven Rostedt (VMware) if (user_thread)
2196927e56dbSSteven Rostedt (VMware) clear_current_oom_origin();
2197438ced17SVaibhav Nagarnaik
2198438ced17SVaibhav Nagarnaik return -ENOMEM;
2199438ced17SVaibhav Nagarnaik }
2200438ced17SVaibhav Nagarnaik
2201438ced17SVaibhav Nagarnaik static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
22029b94a8fbSSteven Rostedt (Red Hat) unsigned long nr_pages)
2203438ced17SVaibhav Nagarnaik {
2204438ced17SVaibhav Nagarnaik LIST_HEAD(pages);
2205438ced17SVaibhav Nagarnaik
2206438ced17SVaibhav Nagarnaik WARN_ON(!nr_pages);
2207438ced17SVaibhav Nagarnaik
220874e2afc6SQiujun Huang if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
2209438ced17SVaibhav Nagarnaik return -ENOMEM;
2210438ced17SVaibhav Nagarnaik
22113adc54faSSteven Rostedt /*
22123adc54faSSteven Rostedt * The ring buffer page list is a circular list that does not
22133adc54faSSteven Rostedt * start and end with a list head. All page list items point to
22143adc54faSSteven Rostedt * other pages.
22153adc54faSSteven Rostedt */
22163adc54faSSteven Rostedt cpu_buffer->pages = pages.next;
22173adc54faSSteven Rostedt list_del(&pages);
22187a8e76a3SSteven Rostedt
2219438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages = nr_pages;
2220438ced17SVaibhav Nagarnaik
22217a8e76a3SSteven Rostedt rb_check_pages(cpu_buffer);
22227a8e76a3SSteven Rostedt
22237a8e76a3SSteven Rostedt return 0;
22247a8e76a3SSteven Rostedt }
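
/*
 * Illustrative sketch only (#if 0): the page list built above is circular
 * with no terminating list head, so a walk must be bounded by nr_pages
 * rather than by reaching a list head.
 */
#if 0
static void example_walk_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *bpage =
		list_entry(cpu_buffer->pages, struct buffer_page, list);
	long i;

	for (i = 0; i < cpu_buffer->nr_pages; i++)
		rb_inc_page(&bpage);	/* wraps around; never hits a head */
}
#endif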
22257a8e76a3SSteven Rostedt
22267a8e76a3SSteven Rostedt static struct ring_buffer_per_cpu *
222713292494SSteven Rostedt (VMware) rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
22287a8e76a3SSteven Rostedt {
22297a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer;
22304009cc31SSteven Rostedt struct ring_buffer_cpu_meta *meta;
2231044fa782SSteven Rostedt struct buffer_page *bpage;
22327ea59064SVaibhav Nagarnaik struct page *page;
22337a8e76a3SSteven Rostedt int ret;
22347a8e76a3SSteven Rostedt
22357a8e76a3SSteven Rostedt cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
22367a8e76a3SSteven Rostedt GFP_KERNEL, cpu_to_node(cpu));
22377a8e76a3SSteven Rostedt if (!cpu_buffer)
22387a8e76a3SSteven Rostedt return NULL;
22397a8e76a3SSteven Rostedt
22407a8e76a3SSteven Rostedt cpu_buffer->cpu = cpu;
22417a8e76a3SSteven Rostedt cpu_buffer->buffer = buffer;
22425389f6faSThomas Gleixner raw_spin_lock_init(&cpu_buffer->reader_lock);
22431f8a6a10SPeter Zijlstra lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
2244edc35bd7SThomas Gleixner cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
224583f40318SVaibhav Nagarnaik INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
224605fdd70dSVaibhav Nagarnaik init_completion(&cpu_buffer->update_done);
224715693458SSteven Rostedt (Red Hat) init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
2248f1dc6725SSteven Rostedt (Red Hat) init_waitqueue_head(&cpu_buffer->irq_work.waiters);
22491e0d6714SSteven Rostedt (Red Hat) init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
2250117c3920SVincent Donnefort mutex_init(&cpu_buffer->mapping_lock);
22517a8e76a3SSteven Rostedt
2252044fa782SSteven Rostedt bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
2253e4c2ce82SSteven Rostedt GFP_KERNEL, cpu_to_node(cpu));
2254044fa782SSteven Rostedt if (!bpage)
2255e4c2ce82SSteven Rostedt goto fail_free_buffer;
2256e4c2ce82SSteven Rostedt
225777ae365eSSteven Rostedt rb_check_bpage(cpu_buffer, bpage);
225877ae365eSSteven Rostedt
2259044fa782SSteven Rostedt cpu_buffer->reader_page = bpage;
2260f9b94daaSTzvetomir Stoyanov (VMware)
2261be68d63aSSteven Rostedt (Google) if (buffer->range_addr_start) {
2262b14d0329SSteven Rostedt (Google) /*
2263b14d0329SSteven Rostedt (Google) * Range mapped buffers have the same restrictions as memory
2264b14d0329SSteven Rostedt (Google) * mapped ones do.
2265b14d0329SSteven Rostedt (Google) */
2266b14d0329SSteven Rostedt (Google) cpu_buffer->mapped = 1;
2267b14d0329SSteven Rostedt (Google) cpu_buffer->ring_meta = rb_range_meta(buffer, nr_pages, cpu);
2268b14d0329SSteven Rostedt (Google) bpage->page = rb_range_buffer(cpu_buffer, 0);
2269be68d63aSSteven Rostedt (Google) if (!bpage->page)
2270be68d63aSSteven Rostedt (Google) goto fail_free_reader;
2271c76883f1SSteven Rostedt (Google) if (cpu_buffer->ring_meta->head_buffer)
2272c76883f1SSteven Rostedt (Google) rb_meta_buffer_update(cpu_buffer, bpage);
2273be68d63aSSteven Rostedt (Google) bpage->range = 1;
2274be68d63aSSteven Rostedt (Google) } else {
2275be68d63aSSteven Rostedt (Google) page = alloc_pages_node(cpu_to_node(cpu),
2276be68d63aSSteven Rostedt (Google) GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
22776b76323eSVincent Donnefort cpu_buffer->buffer->subbuf_order);
22787ea59064SVaibhav Nagarnaik if (!page)
2279e4c2ce82SSteven Rostedt goto fail_free_reader;
22807ea59064SVaibhav Nagarnaik bpage->page = page_address(page);
2281044fa782SSteven Rostedt rb_init_page(bpage->page);
2282b14d0329SSteven Rostedt (Google) }
2283e4c2ce82SSteven Rostedt
2284d769041fSSteven Rostedt INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
228544b99462SVaibhav Nagarnaik INIT_LIST_HEAD(&cpu_buffer->new_pages);
2286d769041fSSteven Rostedt
2287438ced17SVaibhav Nagarnaik ret = rb_allocate_pages(cpu_buffer, nr_pages);
22887a8e76a3SSteven Rostedt if (ret < 0)
2289d769041fSSteven Rostedt goto fail_free_reader;
22907a8e76a3SSteven Rostedt
22915f3b6e83SSteven Rostedt (Google) rb_meta_validate_events(cpu_buffer);
22925f3b6e83SSteven Rostedt (Google)
2293c76883f1SSteven Rostedt (Google) /* If the boot meta was valid then this has already been updated */
2294c76883f1SSteven Rostedt (Google) meta = cpu_buffer->ring_meta;
2295c76883f1SSteven Rostedt (Google) if (!meta || !meta->head_buffer ||
2296c76883f1SSteven Rostedt (Google) !cpu_buffer->head_page || !cpu_buffer->commit_page || !cpu_buffer->tail_page) {
2297c76883f1SSteven Rostedt (Google) if (meta && meta->head_buffer &&
2298c76883f1SSteven Rostedt (Google) (cpu_buffer->head_page || cpu_buffer->commit_page || cpu_buffer->tail_page)) {
2299c76883f1SSteven Rostedt (Google) pr_warn("Ring buffer meta buffers not all mapped\n");
2300c76883f1SSteven Rostedt (Google) if (!cpu_buffer->head_page)
2301c76883f1SSteven Rostedt (Google) pr_warn(" Missing head_page\n");
2302c76883f1SSteven Rostedt (Google) if (!cpu_buffer->commit_page)
2303c76883f1SSteven Rostedt (Google) pr_warn(" Missing commit_page\n");
2304c76883f1SSteven Rostedt (Google) if (!cpu_buffer->tail_page)
2305c76883f1SSteven Rostedt (Google) pr_warn(" Missing tail_page\n");
2306c76883f1SSteven Rostedt (Google) }
2307c76883f1SSteven Rostedt (Google)
23087a8e76a3SSteven Rostedt cpu_buffer->head_page
23093adc54faSSteven Rostedt = list_entry(cpu_buffer->pages, struct buffer_page, list);
2310bf41a158SSteven Rostedt cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
23117a8e76a3SSteven Rostedt
231277ae365eSSteven Rostedt rb_head_page_activate(cpu_buffer);
2313c76883f1SSteven Rostedt (Google)
2314c76883f1SSteven Rostedt (Google) if (cpu_buffer->ring_meta)
2315b14d0329SSteven Rostedt (Google) meta->commit_buffer = meta->head_buffer;
2316c76883f1SSteven Rostedt (Google) } else {
2317c76883f1SSteven Rostedt (Google) /* The valid meta buffer still needs to activate the head page */
2318c76883f1SSteven Rostedt (Google) rb_head_page_activate(cpu_buffer);
2319b14d0329SSteven Rostedt (Google) }
232077ae365eSSteven Rostedt
23217a8e76a3SSteven Rostedt return cpu_buffer;
23227a8e76a3SSteven Rostedt
2323d769041fSSteven Rostedt fail_free_reader:
2324d769041fSSteven Rostedt free_buffer_page(cpu_buffer->reader_page);
2325d769041fSSteven Rostedt
23267a8e76a3SSteven Rostedt fail_free_buffer:
23277a8e76a3SSteven Rostedt kfree(cpu_buffer);
23287a8e76a3SSteven Rostedt return NULL;
23297a8e76a3SSteven Rostedt }
23307a8e76a3SSteven Rostedt
23317a8e76a3SSteven Rostedt static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
23327a8e76a3SSteven Rostedt {
23333adc54faSSteven Rostedt struct list_head *head = cpu_buffer->pages;
2334044fa782SSteven Rostedt struct buffer_page *bpage, *tmp;
23357a8e76a3SSteven Rostedt
2336675751bbSJohannes Berg irq_work_sync(&cpu_buffer->irq_work.work);
2337675751bbSJohannes Berg
2338d769041fSSteven Rostedt free_buffer_page(cpu_buffer->reader_page);
2339d769041fSSteven Rostedt
234056f4ca0aSDaniil Tatianin if (head) {
234177ae365eSSteven Rostedt rb_head_page_deactivate(cpu_buffer);
234277ae365eSSteven Rostedt
2343044fa782SSteven Rostedt list_for_each_entry_safe(bpage, tmp, head, list) {
2344044fa782SSteven Rostedt list_del_init(&bpage->list);
2345044fa782SSteven Rostedt free_buffer_page(bpage);
23467a8e76a3SSteven Rostedt }
23473adc54faSSteven Rostedt bpage = list_entry(head, struct buffer_page, list);
23483adc54faSSteven Rostedt free_buffer_page(bpage);
23493adc54faSSteven Rostedt }
23503adc54faSSteven Rostedt
235117d80175SSteven Rostedt (Google) free_page((unsigned long)cpu_buffer->free_page);
235217d80175SSteven Rostedt (Google)
23537a8e76a3SSteven Rostedt kfree(cpu_buffer);
23547a8e76a3SSteven Rostedt }
23557a8e76a3SSteven Rostedt
2356be68d63aSSteven Rostedt (Google) static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
2357be68d63aSSteven Rostedt (Google) int order, unsigned long start,
2358be68d63aSSteven Rostedt (Google) unsigned long end,
23594af0a9c5SSteven Rostedt unsigned long scratch_size,
23601f8a6a10SPeter Zijlstra struct lock_class_key *key)
23617a8e76a3SSteven Rostedt {
236213292494SSteven Rostedt (VMware) struct trace_buffer *buffer;
23639b94a8fbSSteven Rostedt (Red Hat) long nr_pages;
2364be68d63aSSteven Rostedt (Google) int subbuf_size;
23657a8e76a3SSteven Rostedt int bsize;
23669b94a8fbSSteven Rostedt (Red Hat) int cpu;
2367b32614c0SSebastian Andrzej Siewior int ret;
23687a8e76a3SSteven Rostedt
23697a8e76a3SSteven Rostedt /* keep it in its own cache line */
23707a8e76a3SSteven Rostedt buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
23717a8e76a3SSteven Rostedt GFP_KERNEL);
23727a8e76a3SSteven Rostedt if (!buffer)
23737a8e76a3SSteven Rostedt return NULL;
23747a8e76a3SSteven Rostedt
2375b18cc3deSSebastian Andrzej Siewior if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
23769e01c1b7SRusty Russell goto fail_free_buffer;
23779e01c1b7SRusty Russell
2378be68d63aSSteven Rostedt (Google) buffer->subbuf_order = order;
2379be68d63aSSteven Rostedt (Google) subbuf_size = (PAGE_SIZE << order);
2380be68d63aSSteven Rostedt (Google) buffer->subbuf_size = subbuf_size - BUF_PAGE_HDR_SIZE;
2381139f8400STzvetomir Stoyanov (VMware)
2382139f8400STzvetomir Stoyanov (VMware) 	/* Max payload is buffer page size - header (8 bytes) */
2383139f8400STzvetomir Stoyanov (VMware) buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2);
2384139f8400STzvetomir Stoyanov (VMware)
23857a8e76a3SSteven Rostedt buffer->flags = flags;
238637886f6aSSteven Rostedt buffer->clock = trace_clock_local;
23871f8a6a10SPeter Zijlstra buffer->reader_lock_key = key;
23887a8e76a3SSteven Rostedt
238915693458SSteven Rostedt (Red Hat) init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
2390f1dc6725SSteven Rostedt (Red Hat) init_waitqueue_head(&buffer->irq_work.waiters);
239115693458SSteven Rostedt (Red Hat)
23927a8e76a3SSteven Rostedt buffer->cpus = nr_cpu_ids;
23937a8e76a3SSteven Rostedt
23947a8e76a3SSteven Rostedt bsize = sizeof(void *) * nr_cpu_ids;
23957a8e76a3SSteven Rostedt buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
23967a8e76a3SSteven Rostedt GFP_KERNEL);
23977a8e76a3SSteven Rostedt if (!buffer->buffers)
23989e01c1b7SRusty Russell goto fail_free_cpumask;
23997a8e76a3SSteven Rostedt
2400be68d63aSSteven Rostedt (Google) /* If start/end are specified, then that overrides size */
2401be68d63aSSteven Rostedt (Google) if (start && end) {
24024009cc31SSteven Rostedt unsigned long buffers_start;
2403be68d63aSSteven Rostedt (Google) unsigned long ptr;
2404be68d63aSSteven Rostedt (Google) int n;
2405be68d63aSSteven Rostedt (Google)
24064009cc31SSteven Rostedt /* Make sure that start is word aligned */
24074009cc31SSteven Rostedt start = ALIGN(start, sizeof(long));
24084009cc31SSteven Rostedt
24094af0a9c5SSteven Rostedt /* scratch_size needs to be aligned too */
24104af0a9c5SSteven Rostedt scratch_size = ALIGN(scratch_size, sizeof(long));
24114af0a9c5SSteven Rostedt
24124009cc31SSteven Rostedt 		/* The buffers start after the word-aligned buffer meta data */
24134009cc31SSteven Rostedt 		buffers_start = start + sizeof(struct ring_buffer_meta);
24144009cc31SSteven Rostedt buffers_start = ALIGN(buffers_start, sizeof(long));
24154af0a9c5SSteven Rostedt buffers_start += scratch_size;
24164009cc31SSteven Rostedt
24174af0a9c5SSteven Rostedt /* Calculate the size for the per CPU data */
24184009cc31SSteven Rostedt size = end - buffers_start;
2419be68d63aSSteven Rostedt (Google) size = size / nr_cpu_ids;
2420be68d63aSSteven Rostedt (Google)
2421be68d63aSSteven Rostedt (Google) 		/*
2422be68d63aSSteven Rostedt (Google) 		 * The number of sub-buffers (nr_pages) is determined by the
2423be68d63aSSteven Rostedt (Google) 		 * per CPU size calculated above, minus the per CPU meta data
2424be68d63aSSteven Rostedt (Google) 		 * size, divided by the sub-buffer size plus the integer
2425be68d63aSSteven Rostedt (Google) 		 * array index that is appended to the meta data for each
2426be68d63aSSteven Rostedt (Google) 		 * sub-buffer. (A layout sketch follows alloc_buffer() below.)
2427be68d63aSSteven Rostedt (Google) 		 */
24284009cc31SSteven Rostedt nr_pages = (size - sizeof(struct ring_buffer_cpu_meta)) /
2429be68d63aSSteven Rostedt (Google) (subbuf_size + sizeof(int));
2430be68d63aSSteven Rostedt (Google) /* Need at least two pages plus the reader page */
2431be68d63aSSteven Rostedt (Google) if (nr_pages < 3)
2432be68d63aSSteven Rostedt (Google) goto fail_free_buffers;
2433be68d63aSSteven Rostedt (Google)
2434be68d63aSSteven Rostedt (Google) again:
2435be68d63aSSteven Rostedt (Google) 		/* Make sure that the aligned per CPU data fits in the range */
24364009cc31SSteven Rostedt for (n = 0, ptr = buffers_start; n < nr_cpu_ids; n++) {
24374009cc31SSteven Rostedt ptr += sizeof(struct ring_buffer_cpu_meta) +
2438be68d63aSSteven Rostedt (Google) sizeof(int) * nr_pages;
2439be68d63aSSteven Rostedt (Google) ptr = ALIGN(ptr, subbuf_size);
2440be68d63aSSteven Rostedt (Google) ptr += subbuf_size * nr_pages;
2441be68d63aSSteven Rostedt (Google) }
2442be68d63aSSteven Rostedt (Google) if (ptr > end) {
2443be68d63aSSteven Rostedt (Google) if (nr_pages <= 3)
2444be68d63aSSteven Rostedt (Google) goto fail_free_buffers;
2445be68d63aSSteven Rostedt (Google) nr_pages--;
2446be68d63aSSteven Rostedt (Google) goto again;
2447be68d63aSSteven Rostedt (Google) }
2448be68d63aSSteven Rostedt (Google)
2449be68d63aSSteven Rostedt (Google) /* nr_pages should not count the reader page */
2450be68d63aSSteven Rostedt (Google) nr_pages--;
2451be68d63aSSteven Rostedt (Google) buffer->range_addr_start = start;
2452be68d63aSSteven Rostedt (Google) buffer->range_addr_end = end;
2453b14d0329SSteven Rostedt (Google)
24544af0a9c5SSteven Rostedt rb_range_meta_init(buffer, nr_pages, scratch_size);
2455be68d63aSSteven Rostedt (Google) } else {
2456be68d63aSSteven Rostedt (Google)
2457be68d63aSSteven Rostedt (Google) /* need at least two pages */
2458be68d63aSSteven Rostedt (Google) nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
2459be68d63aSSteven Rostedt (Google) if (nr_pages < 2)
2460be68d63aSSteven Rostedt (Google) nr_pages = 2;
2461be68d63aSSteven Rostedt (Google) }
2462be68d63aSSteven Rostedt (Google)
2463b32614c0SSebastian Andrzej Siewior cpu = raw_smp_processor_id();
2464b32614c0SSebastian Andrzej Siewior cpumask_set_cpu(cpu, buffer->cpumask);
2465b32614c0SSebastian Andrzej Siewior buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
24667a8e76a3SSteven Rostedt if (!buffer->buffers[cpu])
24677a8e76a3SSteven Rostedt goto fail_free_buffers;
24687a8e76a3SSteven Rostedt
2469b32614c0SSebastian Andrzej Siewior ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
2470b32614c0SSebastian Andrzej Siewior if (ret < 0)
2471b32614c0SSebastian Andrzej Siewior goto fail_free_buffers;
2472554f786eSSteven Rostedt
24737a8e76a3SSteven Rostedt mutex_init(&buffer->mutex);
24747a8e76a3SSteven Rostedt
24757a8e76a3SSteven Rostedt return buffer;
24767a8e76a3SSteven Rostedt
24777a8e76a3SSteven Rostedt fail_free_buffers:
24787a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) {
24797a8e76a3SSteven Rostedt if (buffer->buffers[cpu])
24807a8e76a3SSteven Rostedt rb_free_cpu_buffer(buffer->buffers[cpu]);
24817a8e76a3SSteven Rostedt }
24827a8e76a3SSteven Rostedt kfree(buffer->buffers);
24837a8e76a3SSteven Rostedt
24849e01c1b7SRusty Russell fail_free_cpumask:
24859e01c1b7SRusty Russell free_cpumask_var(buffer->cpumask);
24869e01c1b7SRusty Russell
24877a8e76a3SSteven Rostedt fail_free_buffer:
24887a8e76a3SSteven Rostedt kfree(buffer);
24897a8e76a3SSteven Rostedt return NULL;
24907a8e76a3SSteven Rostedt }
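
/*
 * Layout sketch for the range sizing above (illustrative only, #if 0).
 * This approximates one per CPU slice; the again: loop in alloc_buffer()
 * aligns the absolute pointer rather than each slice in isolation.
 */
#if 0
static unsigned long example_cpu_slice(unsigned long nr_pages, int subbuf_size)
{
	unsigned long bytes;

	/* Per CPU meta data plus the buffers[] index array */
	bytes = sizeof(struct ring_buffer_cpu_meta) + sizeof(int) * nr_pages;
	/* Sub-buffers start on a subbuf_size boundary */
	bytes = ALIGN(bytes, subbuf_size);
	/* nr_pages sub-buffers, one of which becomes the reader page */
	return bytes + (unsigned long)subbuf_size * nr_pages;
}
#endif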
2491be68d63aSSteven Rostedt (Google)
2492be68d63aSSteven Rostedt (Google) /**
2493be68d63aSSteven Rostedt (Google) * __ring_buffer_alloc - allocate a new ring_buffer
2494be68d63aSSteven Rostedt (Google) * @size: the size in bytes per cpu that is needed.
2495be68d63aSSteven Rostedt (Google) * @flags: attributes to set for the ring buffer.
2496be68d63aSSteven Rostedt (Google) * @key: ring buffer reader_lock_key.
2497be68d63aSSteven Rostedt (Google) *
2498be68d63aSSteven Rostedt (Google) * Currently the only flag that is available is the RB_FL_OVERWRITE
2499be68d63aSSteven Rostedt (Google) * flag. This flag means that the buffer will overwrite old data
2500be68d63aSSteven Rostedt (Google) * when the buffer wraps. If this flag is not set, the buffer will
2501be68d63aSSteven Rostedt (Google) * drop data when the tail hits the head.
2502be68d63aSSteven Rostedt (Google) */
2503be68d63aSSteven Rostedt (Google) struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
2504be68d63aSSteven Rostedt (Google) struct lock_class_key *key)
2505be68d63aSSteven Rostedt (Google) {
2506be68d63aSSteven Rostedt (Google) /* Default buffer page size - one system page */
25074af0a9c5SSteven Rostedt return alloc_buffer(size, flags, 0, 0, 0, 0, key);
2508be68d63aSSteven Rostedt (Google)
2509be68d63aSSteven Rostedt (Google) }
25101f8a6a10SPeter Zijlstra EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
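
/*
 * Usage sketch only (#if 0): callers normally go through the
 * ring_buffer_alloc() wrapper macro, which supplies the lock class key.
 * The 1 MiB size is an arbitrary example value.
 */
#if 0
static void example_alloc(void)
{
	struct trace_buffer *buffer;

	buffer = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
	if (buffer)
		ring_buffer_free(buffer);
}
#endif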
25117a8e76a3SSteven Rostedt
25127a8e76a3SSteven Rostedt /**
2513be68d63aSSteven Rostedt (Google) * __ring_buffer_alloc_range - allocate a new ring_buffer from existing memory
2514be68d63aSSteven Rostedt (Google) * @size: the size in bytes per cpu that is needed.
2515be68d63aSSteven Rostedt (Google) * @flags: attributes to set for the ring buffer.
25160b60a7fbSJulia Lawall * @order: sub-buffer order
2517be68d63aSSteven Rostedt (Google) * @start: start of allocated range
2518be68d63aSSteven Rostedt (Google) * @range_size: size of allocated range
25194af0a9c5SSteven Rostedt * @scratch_size: size of scratch area (for preallocated memory buffers)
2520be68d63aSSteven Rostedt (Google) * @key: ring buffer reader_lock_key.
2521be68d63aSSteven Rostedt (Google) *
2522be68d63aSSteven Rostedt (Google) * Currently the only flag that is available is the RB_FL_OVERWRITE
2523be68d63aSSteven Rostedt (Google) * flag. This flag means that the buffer will overwrite old data
2524be68d63aSSteven Rostedt (Google) * when the buffer wraps. If this flag is not set, the buffer will
2525be68d63aSSteven Rostedt (Google) * drop data when the tail hits the head.
2526be68d63aSSteven Rostedt (Google) */
2527be68d63aSSteven Rostedt (Google) struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
2528be68d63aSSteven Rostedt (Google) int order, unsigned long start,
2529be68d63aSSteven Rostedt (Google) unsigned long range_size,
25304af0a9c5SSteven Rostedt unsigned long scratch_size,
2531be68d63aSSteven Rostedt (Google) struct lock_class_key *key)
2532be68d63aSSteven Rostedt (Google) {
25334af0a9c5SSteven Rostedt return alloc_buffer(size, flags, order, start, start + range_size,
25344af0a9c5SSteven Rostedt scratch_size, key);
2535be68d63aSSteven Rostedt (Google) }
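
/*
 * Usage sketch only (#if 0): wrapping a reserved physical range, as a
 * boot-mapped buffer would. The caller-supplied start/size and the
 * PAGE_SIZE scratch area are assumptions for the example.
 */
#if 0
static struct trace_buffer *example_range_buffer(unsigned long start,
						 unsigned long size)
{
	static struct lock_class_key key;

	return __ring_buffer_alloc_range(size, RB_FL_OVERWRITE, 0 /* order */,
					 start, size, PAGE_SIZE, &key);
}
#endif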
2536be68d63aSSteven Rostedt (Google)
25374af0a9c5SSteven Rostedt void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size)
25387a1d1e4bSSteven Rostedt (Google) {
2539b6533482SSteven Rostedt struct ring_buffer_meta *meta;
2540b6533482SSteven Rostedt void *ptr;
25417a1d1e4bSSteven Rostedt (Google)
25424af0a9c5SSteven Rostedt if (!buffer || !buffer->meta)
25434af0a9c5SSteven Rostedt return NULL;
25447a1d1e4bSSteven Rostedt (Google)
2545b6533482SSteven Rostedt meta = buffer->meta;
25467a1d1e4bSSteven Rostedt (Google)
2547b6533482SSteven Rostedt ptr = (void *)ALIGN((unsigned long)meta + sizeof(*meta), sizeof(long));
2548b6533482SSteven Rostedt
2549b6533482SSteven Rostedt if (size)
2550b6533482SSteven Rostedt *size = (void *)meta + meta->buffers_offset - ptr;
2551b6533482SSteven Rostedt
2552b6533482SSteven Rostedt return ptr;
25537a1d1e4bSSteven Rostedt (Google) }
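
/*
 * Usage sketch only (#if 0): a caller stashing a value in the scratch
 * area so it survives a reboot with the buffer. What is stored there is
 * up to the user of the persistent buffer; the unsigned long is an
 * example.
 */
#if 0
static void example_scratch(struct trace_buffer *buffer, unsigned long data)
{
	unsigned int size;
	unsigned long *ptr = ring_buffer_meta_scratch(buffer, &size);

	if (ptr && size >= sizeof(*ptr))
		*ptr = data;
}
#endif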
25547a1d1e4bSSteven Rostedt (Google)
25557a1d1e4bSSteven Rostedt (Google) /**
25567a8e76a3SSteven Rostedt * ring_buffer_free - free a ring buffer.
25577a8e76a3SSteven Rostedt * @buffer: the buffer to free.
25587a8e76a3SSteven Rostedt */
25597a8e76a3SSteven Rostedt void
256013292494SSteven Rostedt (VMware) ring_buffer_free(struct trace_buffer *buffer)
25617a8e76a3SSteven Rostedt {
25627a8e76a3SSteven Rostedt int cpu;
25637a8e76a3SSteven Rostedt
2564b32614c0SSebastian Andrzej Siewior cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
2565554f786eSSteven Rostedt
2566675751bbSJohannes Berg irq_work_sync(&buffer->irq_work.work);
2567675751bbSJohannes Berg
25687a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu)
25697a8e76a3SSteven Rostedt rb_free_cpu_buffer(buffer->buffers[cpu]);
25707a8e76a3SSteven Rostedt
2571bd3f0221SEric Dumazet kfree(buffer->buffers);
25729e01c1b7SRusty Russell free_cpumask_var(buffer->cpumask);
25739e01c1b7SRusty Russell
25747a8e76a3SSteven Rostedt kfree(buffer);
25757a8e76a3SSteven Rostedt }
2576c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_free);
25777a8e76a3SSteven Rostedt
257813292494SSteven Rostedt (VMware) void ring_buffer_set_clock(struct trace_buffer *buffer,
257937886f6aSSteven Rostedt u64 (*clock)(void))
258037886f6aSSteven Rostedt {
258137886f6aSSteven Rostedt buffer->clock = clock;
258237886f6aSSteven Rostedt }
258337886f6aSSteven Rostedt
258413292494SSteven Rostedt (VMware) void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
258500b41452STom Zanussi {
258600b41452STom Zanussi buffer->time_stamp_abs = abs;
258700b41452STom Zanussi }
258800b41452STom Zanussi
258913292494SSteven Rostedt (VMware) bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
259000b41452STom Zanussi {
259100b41452STom Zanussi return buffer->time_stamp_abs;
259200b41452STom Zanussi }
259300b41452STom Zanussi
259483f40318SVaibhav Nagarnaik static inline unsigned long rb_page_entries(struct buffer_page *bpage)
25957a8e76a3SSteven Rostedt {
259683f40318SVaibhav Nagarnaik return local_read(&bpage->entries) & RB_WRITE_MASK;
259783f40318SVaibhav Nagarnaik }
259883f40318SVaibhav Nagarnaik
259983f40318SVaibhav Nagarnaik static inline unsigned long rb_page_write(struct buffer_page *bpage)
260083f40318SVaibhav Nagarnaik {
260183f40318SVaibhav Nagarnaik return local_read(&bpage->write) & RB_WRITE_MASK;
260283f40318SVaibhav Nagarnaik }
260383f40318SVaibhav Nagarnaik
2604bc92b956SUros Bizjak static bool
26059b94a8fbSSteven Rostedt (Red Hat) rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
260683f40318SVaibhav Nagarnaik {
260783f40318SVaibhav Nagarnaik struct list_head *tail_page, *to_remove, *next_page;
260883f40318SVaibhav Nagarnaik struct buffer_page *to_remove_page, *tmp_iter_page;
260983f40318SVaibhav Nagarnaik struct buffer_page *last_page, *first_page;
26109b94a8fbSSteven Rostedt (Red Hat) unsigned long nr_removed;
261183f40318SVaibhav Nagarnaik unsigned long head_bit;
261283f40318SVaibhav Nagarnaik int page_entries;
261383f40318SVaibhav Nagarnaik
261483f40318SVaibhav Nagarnaik head_bit = 0;
26157a8e76a3SSteven Rostedt
26165389f6faSThomas Gleixner raw_spin_lock_irq(&cpu_buffer->reader_lock);
261783f40318SVaibhav Nagarnaik atomic_inc(&cpu_buffer->record_disabled);
261883f40318SVaibhav Nagarnaik /*
261983f40318SVaibhav Nagarnaik * We don't race with the readers since we have acquired the reader
262083f40318SVaibhav Nagarnaik * lock. We also don't race with writers after disabling recording.
262183f40318SVaibhav Nagarnaik * This makes it easy to figure out the first and the last page to be
262283f40318SVaibhav Nagarnaik * removed from the list. We unlink all the pages in between including
262383f40318SVaibhav Nagarnaik * the first and last pages. This is done in a busy loop so that we
262483f40318SVaibhav Nagarnaik * lose the least number of traces.
262583f40318SVaibhav Nagarnaik * The pages are freed after we restart recording and unlock readers.
262683f40318SVaibhav Nagarnaik */
262783f40318SVaibhav Nagarnaik tail_page = &cpu_buffer->tail_page->list;
262877ae365eSSteven Rostedt
262983f40318SVaibhav Nagarnaik /*
263083f40318SVaibhav Nagarnaik 	 * The tail page might be on the reader page; in that case we
263183f40318SVaibhav Nagarnaik 	 * remove the next page from the ring buffer instead.
263283f40318SVaibhav Nagarnaik */
263383f40318SVaibhav Nagarnaik if (cpu_buffer->tail_page == cpu_buffer->reader_page)
263483f40318SVaibhav Nagarnaik tail_page = rb_list_head(tail_page->next);
263583f40318SVaibhav Nagarnaik to_remove = tail_page;
263683f40318SVaibhav Nagarnaik
263783f40318SVaibhav Nagarnaik /* start of pages to remove */
263883f40318SVaibhav Nagarnaik first_page = list_entry(rb_list_head(to_remove->next),
263983f40318SVaibhav Nagarnaik struct buffer_page, list);
264083f40318SVaibhav Nagarnaik
264183f40318SVaibhav Nagarnaik for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
264283f40318SVaibhav Nagarnaik to_remove = rb_list_head(to_remove)->next;
264383f40318SVaibhav Nagarnaik head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
26447a8e76a3SSteven Rostedt }
26452d093282SZheng Yejian /* Read iterators need to reset themselves when some pages removed */
26462d093282SZheng Yejian cpu_buffer->pages_removed += nr_removed;
26477a8e76a3SSteven Rostedt
264883f40318SVaibhav Nagarnaik next_page = rb_list_head(to_remove)->next;
26497a8e76a3SSteven Rostedt
265083f40318SVaibhav Nagarnaik /*
265183f40318SVaibhav Nagarnaik * Now we remove all pages between tail_page and next_page.
265283f40318SVaibhav Nagarnaik 	 * Make sure that the head_bit value is preserved for the
265383f40318SVaibhav Nagarnaik 	 * next page.
265483f40318SVaibhav Nagarnaik */
265583f40318SVaibhav Nagarnaik tail_page->next = (struct list_head *)((unsigned long)next_page |
265683f40318SVaibhav Nagarnaik head_bit);
265783f40318SVaibhav Nagarnaik next_page = rb_list_head(next_page);
265883f40318SVaibhav Nagarnaik next_page->prev = tail_page;
265983f40318SVaibhav Nagarnaik
266083f40318SVaibhav Nagarnaik /* make sure pages points to a valid page in the ring buffer */
266183f40318SVaibhav Nagarnaik cpu_buffer->pages = next_page;
2662b237e1f7SPetr Pavlu cpu_buffer->cnt++;
266383f40318SVaibhav Nagarnaik
266483f40318SVaibhav Nagarnaik /* update head page */
266583f40318SVaibhav Nagarnaik if (head_bit)
266683f40318SVaibhav Nagarnaik cpu_buffer->head_page = list_entry(next_page,
266783f40318SVaibhav Nagarnaik struct buffer_page, list);
266883f40318SVaibhav Nagarnaik
266983f40318SVaibhav Nagarnaik /* pages are removed, resume tracing and then free the pages */
267083f40318SVaibhav Nagarnaik atomic_dec(&cpu_buffer->record_disabled);
26715389f6faSThomas Gleixner raw_spin_unlock_irq(&cpu_buffer->reader_lock);
267283f40318SVaibhav Nagarnaik
267383f40318SVaibhav Nagarnaik RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
267483f40318SVaibhav Nagarnaik
267583f40318SVaibhav Nagarnaik /* last buffer page to remove */
267683f40318SVaibhav Nagarnaik last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
267783f40318SVaibhav Nagarnaik list);
267883f40318SVaibhav Nagarnaik tmp_iter_page = first_page;
267983f40318SVaibhav Nagarnaik
268083f40318SVaibhav Nagarnaik do {
268183f36555SVaibhav Nagarnaik cond_resched();
268283f36555SVaibhav Nagarnaik
268383f40318SVaibhav Nagarnaik to_remove_page = tmp_iter_page;
26846689bed3SQiujun Huang rb_inc_page(&tmp_iter_page);
268583f40318SVaibhav Nagarnaik
268683f40318SVaibhav Nagarnaik /* update the counters */
268783f40318SVaibhav Nagarnaik page_entries = rb_page_entries(to_remove_page);
268883f40318SVaibhav Nagarnaik if (page_entries) {
268983f40318SVaibhav Nagarnaik /*
269083f40318SVaibhav Nagarnaik * If something was added to this page, it was full
269183f40318SVaibhav Nagarnaik * since it is not the tail page. So we deduct the
269283f40318SVaibhav Nagarnaik * bytes consumed in ring buffer from here.
269348fdc72fSVaibhav Nagarnaik * Increment overrun to account for the lost events.
269483f40318SVaibhav Nagarnaik */
269548fdc72fSVaibhav Nagarnaik local_add(page_entries, &cpu_buffer->overrun);
269645d99ea4SZheng Yejian local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
269731029a8bSSteven Rostedt (Google) local_inc(&cpu_buffer->pages_lost);
269883f40318SVaibhav Nagarnaik }
269983f40318SVaibhav Nagarnaik
270083f40318SVaibhav Nagarnaik /*
270183f40318SVaibhav Nagarnaik * We have already removed references to this list item, just
270283f40318SVaibhav Nagarnaik * free up the buffer_page and its page
270383f40318SVaibhav Nagarnaik */
270483f40318SVaibhav Nagarnaik free_buffer_page(to_remove_page);
270583f40318SVaibhav Nagarnaik nr_removed--;
270683f40318SVaibhav Nagarnaik
270783f40318SVaibhav Nagarnaik } while (to_remove_page != last_page);
270883f40318SVaibhav Nagarnaik
270983f40318SVaibhav Nagarnaik RB_WARN_ON(cpu_buffer, nr_removed);
27105040b4b7SVaibhav Nagarnaik
27115040b4b7SVaibhav Nagarnaik return nr_removed == 0;
27127a8e76a3SSteven Rostedt }
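
/*
 * Illustrative sketch only (#if 0): the HEAD flag that rb_remove_pages()
 * collects into head_bit rides in the low bits of the ->next pointer,
 * which is why list pointers are masked with rb_list_head() before being
 * dereferenced.
 */
#if 0
static struct list_head *example_tag_head(struct list_head *next)
{
	return (struct list_head *)((unsigned long)next | RB_PAGE_HEAD);
}

static struct list_head *example_untag(struct list_head *ptr)
{
	return rb_list_head(ptr);	/* masks the flag bits back off */
}
#endif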
27137a8e76a3SSteven Rostedt
2714bc92b956SUros Bizjak static bool
27155040b4b7SVaibhav Nagarnaik rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
27167a8e76a3SSteven Rostedt {
27175040b4b7SVaibhav Nagarnaik struct list_head *pages = &cpu_buffer->new_pages;
271888ca6a71SSteven Rostedt unsigned long flags;
2719bc92b956SUros Bizjak bool success;
2720bc92b956SUros Bizjak int retries;
27217a8e76a3SSteven Rostedt
272288ca6a71SSteven Rostedt /* Can be called at early boot up, where interrupts must not been enabled */
272388ca6a71SSteven Rostedt raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
27245040b4b7SVaibhav Nagarnaik /*
27255040b4b7SVaibhav Nagarnaik * We are holding the reader lock, so the reader page won't be swapped
27265040b4b7SVaibhav Nagarnaik * in the ring buffer. Now we are racing with the writer trying to
27275040b4b7SVaibhav Nagarnaik * move head page and the tail page.
27285040b4b7SVaibhav Nagarnaik * We are going to adapt the reader page update process where:
27295040b4b7SVaibhav Nagarnaik * 1. We first splice the start and end of list of new pages between
27305040b4b7SVaibhav Nagarnaik * the head page and its previous page.
27315040b4b7SVaibhav Nagarnaik * 2. We cmpxchg the prev_page->next to point from head page to the
27325040b4b7SVaibhav Nagarnaik * start of new pages list.
27335040b4b7SVaibhav Nagarnaik * 3. Finally, we update the head->prev to the end of new list.
27345040b4b7SVaibhav Nagarnaik *
27355040b4b7SVaibhav Nagarnaik * We will try this process 10 times, to make sure that we don't keep
27365040b4b7SVaibhav Nagarnaik * spinning.
27375040b4b7SVaibhav Nagarnaik */
27385040b4b7SVaibhav Nagarnaik retries = 10;
2739bc92b956SUros Bizjak success = false;
27405040b4b7SVaibhav Nagarnaik while (retries--) {
2741bdf4fb62SUros Bizjak struct list_head *head_page, *prev_page;
27425040b4b7SVaibhav Nagarnaik struct list_head *last_page, *first_page;
27435040b4b7SVaibhav Nagarnaik struct list_head *head_page_with_bit;
2744625ed527SZheng Yejian struct buffer_page *hpage = rb_set_head_page(cpu_buffer);
274577ae365eSSteven Rostedt
2746625ed527SZheng Yejian if (!hpage)
274754f7be5bSSteven Rostedt break;
2748625ed527SZheng Yejian head_page = &hpage->list;
27495040b4b7SVaibhav Nagarnaik prev_page = head_page->prev;
27505040b4b7SVaibhav Nagarnaik
27515040b4b7SVaibhav Nagarnaik first_page = pages->next;
27525040b4b7SVaibhav Nagarnaik last_page = pages->prev;
27535040b4b7SVaibhav Nagarnaik
27545040b4b7SVaibhav Nagarnaik head_page_with_bit = (struct list_head *)
27555040b4b7SVaibhav Nagarnaik ((unsigned long)head_page | RB_PAGE_HEAD);
27565040b4b7SVaibhav Nagarnaik
27575040b4b7SVaibhav Nagarnaik last_page->next = head_page_with_bit;
27585040b4b7SVaibhav Nagarnaik first_page->prev = prev_page;
27595040b4b7SVaibhav Nagarnaik
2760bdf4fb62SUros Bizjak /* caution: head_page_with_bit gets updated on cmpxchg failure */
2761bdf4fb62SUros Bizjak if (try_cmpxchg(&prev_page->next,
2762bdf4fb62SUros Bizjak &head_page_with_bit, first_page)) {
27635040b4b7SVaibhav Nagarnaik /*
27645040b4b7SVaibhav Nagarnaik * yay, we replaced the page pointer to our new list,
27655040b4b7SVaibhav Nagarnaik * now, we just have to update to head page's prev
27665040b4b7SVaibhav Nagarnaik * pointer to point to end of list
27675040b4b7SVaibhav Nagarnaik */
27685040b4b7SVaibhav Nagarnaik head_page->prev = last_page;
2769b237e1f7SPetr Pavlu cpu_buffer->cnt++;
2770bc92b956SUros Bizjak success = true;
27715040b4b7SVaibhav Nagarnaik break;
27727a8e76a3SSteven Rostedt }
27735040b4b7SVaibhav Nagarnaik }
27747a8e76a3SSteven Rostedt
27755040b4b7SVaibhav Nagarnaik if (success)
27765040b4b7SVaibhav Nagarnaik INIT_LIST_HEAD(pages);
27775040b4b7SVaibhav Nagarnaik /*
27785040b4b7SVaibhav Nagarnaik 	 * If we weren't successful in adding new pages, warn and stop
27795040b4b7SVaibhav Nagarnaik * tracing
27805040b4b7SVaibhav Nagarnaik */
27815040b4b7SVaibhav Nagarnaik RB_WARN_ON(cpu_buffer, !success);
278288ca6a71SSteven Rostedt raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
27835040b4b7SVaibhav Nagarnaik
27845040b4b7SVaibhav Nagarnaik /* free pages if they weren't inserted */
27855040b4b7SVaibhav Nagarnaik if (!success) {
27865040b4b7SVaibhav Nagarnaik struct buffer_page *bpage, *tmp;
27875040b4b7SVaibhav Nagarnaik list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
27885040b4b7SVaibhav Nagarnaik list) {
27895040b4b7SVaibhav Nagarnaik list_del_init(&bpage->list);
27905040b4b7SVaibhav Nagarnaik free_buffer_page(bpage);
27915040b4b7SVaibhav Nagarnaik }
27925040b4b7SVaibhav Nagarnaik }
27935040b4b7SVaibhav Nagarnaik return success;
27947a8e76a3SSteven Rostedt }
27957a8e76a3SSteven Rostedt
279683f40318SVaibhav Nagarnaik static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
2797438ced17SVaibhav Nagarnaik {
2798bc92b956SUros Bizjak bool success;
279983f40318SVaibhav Nagarnaik
28005040b4b7SVaibhav Nagarnaik if (cpu_buffer->nr_pages_to_update > 0)
28015040b4b7SVaibhav Nagarnaik success = rb_insert_pages(cpu_buffer);
28025040b4b7SVaibhav Nagarnaik else
28035040b4b7SVaibhav Nagarnaik success = rb_remove_pages(cpu_buffer,
28045040b4b7SVaibhav Nagarnaik -cpu_buffer->nr_pages_to_update);
28055040b4b7SVaibhav Nagarnaik
28065040b4b7SVaibhav Nagarnaik if (success)
2807438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
280883f40318SVaibhav Nagarnaik }
280983f40318SVaibhav Nagarnaik
281083f40318SVaibhav Nagarnaik static void update_pages_handler(struct work_struct *work)
281183f40318SVaibhav Nagarnaik {
281283f40318SVaibhav Nagarnaik struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
281383f40318SVaibhav Nagarnaik struct ring_buffer_per_cpu, update_pages_work);
281483f40318SVaibhav Nagarnaik rb_update_pages(cpu_buffer);
281505fdd70dSVaibhav Nagarnaik complete(&cpu_buffer->update_done);
2816438ced17SVaibhav Nagarnaik }
2817438ced17SVaibhav Nagarnaik
28187a8e76a3SSteven Rostedt /**
28197a8e76a3SSteven Rostedt * ring_buffer_resize - resize the ring buffer
28207a8e76a3SSteven Rostedt * @buffer: the buffer to resize.
28217a8e76a3SSteven Rostedt * @size: the new size.
2822d611851bSzhangwei(Jovi) * @cpu_id: the cpu buffer to resize
28237a8e76a3SSteven Rostedt *
2824139f8400STzvetomir Stoyanov (VMware) * Minimum size is 2 * buffer->subbuf_size.
28257a8e76a3SSteven Rostedt *
282683f40318SVaibhav Nagarnaik * Returns 0 on success and < 0 on failure.
28277a8e76a3SSteven Rostedt */
282813292494SSteven Rostedt (VMware) int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
2829438ced17SVaibhav Nagarnaik int cpu_id)
28307a8e76a3SSteven Rostedt {
28317a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer;
28329b94a8fbSSteven Rostedt (Red Hat) unsigned long nr_pages;
28330a1754b2SQiujun Huang int cpu, err;
28347a8e76a3SSteven Rostedt
2835ee51a1deSIngo Molnar /*
2836ee51a1deSIngo Molnar * Always succeed at resizing a non-existent buffer:
2837ee51a1deSIngo Molnar */
2838ee51a1deSIngo Molnar if (!buffer)
28390a1754b2SQiujun Huang return 0;
2840ee51a1deSIngo Molnar
28416a31e1f1SSteven Rostedt /* Make sure the requested buffer exists */
28426a31e1f1SSteven Rostedt if (cpu_id != RING_BUFFER_ALL_CPUS &&
28436a31e1f1SSteven Rostedt !cpumask_test_cpu(cpu_id, buffer->cpumask))
28440a1754b2SQiujun Huang return 0;
28456a31e1f1SSteven Rostedt
2846139f8400STzvetomir Stoyanov (VMware) nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
28477a8e76a3SSteven Rostedt
28487a8e76a3SSteven Rostedt /* we need a minimum of two pages */
284959643d15SSteven Rostedt (Red Hat) if (nr_pages < 2)
285059643d15SSteven Rostedt (Red Hat) nr_pages = 2;
28517a8e76a3SSteven Rostedt
285207b8b10eSSteven Rostedt (VMware) /* prevent another thread from changing buffer sizes */
285307b8b10eSSteven Rostedt (VMware) mutex_lock(&buffer->mutex);
28548a96c028SChen Lin atomic_inc(&buffer->resizing);
285507b8b10eSSteven Rostedt (VMware)
285607b8b10eSSteven Rostedt (VMware) if (cpu_id == RING_BUFFER_ALL_CPUS) {
285783f40318SVaibhav Nagarnaik /*
285883f40318SVaibhav Nagarnaik * Don't succeed if resizing is disabled, as a reader might be
285983f40318SVaibhav Nagarnaik * manipulating the ring buffer and is expecting a sane state while
286083f40318SVaibhav Nagarnaik * this is true.
286183f40318SVaibhav Nagarnaik */
286207b8b10eSSteven Rostedt (VMware) for_each_buffer_cpu(buffer, cpu) {
286307b8b10eSSteven Rostedt (VMware) cpu_buffer = buffer->buffers[cpu];
286407b8b10eSSteven Rostedt (VMware) if (atomic_read(&cpu_buffer->resize_disabled)) {
286507b8b10eSSteven Rostedt (VMware) err = -EBUSY;
286607b8b10eSSteven Rostedt (VMware) goto out_err_unlock;
286707b8b10eSSteven Rostedt (VMware) }
286807b8b10eSSteven Rostedt (VMware) }
286983f40318SVaibhav Nagarnaik
2870438ced17SVaibhav Nagarnaik /* calculate the pages to update */
28717a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) {
28727a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu];
2873438ced17SVaibhav Nagarnaik
2874438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = nr_pages -
2875438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages;
2876438ced17SVaibhav Nagarnaik /*
2877438ced17SVaibhav Nagarnaik * nothing more to do for removing pages or no update
2878438ced17SVaibhav Nagarnaik */
2879438ced17SVaibhav Nagarnaik if (cpu_buffer->nr_pages_to_update <= 0)
2880438ced17SVaibhav Nagarnaik continue;
2881438ced17SVaibhav Nagarnaik /*
2882438ced17SVaibhav Nagarnaik * to add pages, make sure all new pages can be
2883438ced17SVaibhav Nagarnaik * allocated without receiving ENOMEM
2884438ced17SVaibhav Nagarnaik */
2885438ced17SVaibhav Nagarnaik INIT_LIST_HEAD(&cpu_buffer->new_pages);
288674e2afc6SQiujun Huang if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
288774e2afc6SQiujun Huang &cpu_buffer->new_pages)) {
2888438ced17SVaibhav Nagarnaik /* not enough memory for new pages */
288983f40318SVaibhav Nagarnaik err = -ENOMEM;
289083f40318SVaibhav Nagarnaik goto out_err;
289183f40318SVaibhav Nagarnaik }
2892f6bd2c92SZheng Yejian
2893f6bd2c92SZheng Yejian cond_resched();
289483f40318SVaibhav Nagarnaik }
289583f40318SVaibhav Nagarnaik
289699c37d1aSSebastian Andrzej Siewior cpus_read_lock();
289783f40318SVaibhav Nagarnaik /*
289883f40318SVaibhav Nagarnaik * Fire off all the required work handlers
289905fdd70dSVaibhav Nagarnaik * We can't schedule on offline CPUs, but it's not necessary
290083f40318SVaibhav Nagarnaik * since we can change their buffer sizes without any race.
290183f40318SVaibhav Nagarnaik */
290283f40318SVaibhav Nagarnaik for_each_buffer_cpu(buffer, cpu) {
290383f40318SVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu];
290405fdd70dSVaibhav Nagarnaik if (!cpu_buffer->nr_pages_to_update)
290583f40318SVaibhav Nagarnaik continue;
290683f40318SVaibhav Nagarnaik
2907021c5b34SCorey Minyard /* Can't run something on an offline CPU. */
2908021c5b34SCorey Minyard if (!cpu_online(cpu)) {
2909f5eb5588SSteven Rostedt (Red Hat) rb_update_pages(cpu_buffer);
2910f5eb5588SSteven Rostedt (Red Hat) cpu_buffer->nr_pages_to_update = 0;
2911f5eb5588SSteven Rostedt (Red Hat) } else {
291288ca6a71SSteven Rostedt /* Run directly if possible. */
291388ca6a71SSteven Rostedt migrate_disable();
291488ca6a71SSteven Rostedt if (cpu != smp_processor_id()) {
291588ca6a71SSteven Rostedt migrate_enable();
291605fdd70dSVaibhav Nagarnaik schedule_work_on(cpu,
291705fdd70dSVaibhav Nagarnaik &cpu_buffer->update_pages_work);
291888ca6a71SSteven Rostedt } else {
291988ca6a71SSteven Rostedt update_pages_handler(&cpu_buffer->update_pages_work);
292088ca6a71SSteven Rostedt migrate_enable();
292188ca6a71SSteven Rostedt }
2922f5eb5588SSteven Rostedt (Red Hat) }
29237a8e76a3SSteven Rostedt }
2924438ced17SVaibhav Nagarnaik
2925438ced17SVaibhav Nagarnaik /* wait for all the updates to complete */
2926438ced17SVaibhav Nagarnaik for_each_buffer_cpu(buffer, cpu) {
2927438ced17SVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu];
292805fdd70dSVaibhav Nagarnaik if (!cpu_buffer->nr_pages_to_update)
292983f40318SVaibhav Nagarnaik continue;
293083f40318SVaibhav Nagarnaik
293105fdd70dSVaibhav Nagarnaik if (cpu_online(cpu))
293205fdd70dSVaibhav Nagarnaik wait_for_completion(&cpu_buffer->update_done);
293383f40318SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = 0;
2934438ced17SVaibhav Nagarnaik }
293583f40318SVaibhav Nagarnaik
293699c37d1aSSebastian Andrzej Siewior cpus_read_unlock();
2937438ced17SVaibhav Nagarnaik } else {
2938438ced17SVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu_id];
293983f40318SVaibhav Nagarnaik
2940438ced17SVaibhav Nagarnaik if (nr_pages == cpu_buffer->nr_pages)
29417a8e76a3SSteven Rostedt goto out;
2942438ced17SVaibhav Nagarnaik
294307b8b10eSSteven Rostedt (VMware) /*
294407b8b10eSSteven Rostedt (VMware) * Don't succeed if resizing is disabled, as a reader might be
294507b8b10eSSteven Rostedt (VMware) * manipulating the ring buffer and is expecting a sane state while
294607b8b10eSSteven Rostedt (VMware) * this is true.
294707b8b10eSSteven Rostedt (VMware) */
294807b8b10eSSteven Rostedt (VMware) if (atomic_read(&cpu_buffer->resize_disabled)) {
294907b8b10eSSteven Rostedt (VMware) err = -EBUSY;
295007b8b10eSSteven Rostedt (VMware) goto out_err_unlock;
295107b8b10eSSteven Rostedt (VMware) }
295207b8b10eSSteven Rostedt (VMware)
2953438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = nr_pages -
2954438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages;
2955438ced17SVaibhav Nagarnaik
2956438ced17SVaibhav Nagarnaik INIT_LIST_HEAD(&cpu_buffer->new_pages);
2957438ced17SVaibhav Nagarnaik if (cpu_buffer->nr_pages_to_update > 0 &&
295874e2afc6SQiujun Huang __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
295974e2afc6SQiujun Huang &cpu_buffer->new_pages)) {
296083f40318SVaibhav Nagarnaik err = -ENOMEM;
296183f40318SVaibhav Nagarnaik goto out_err;
296283f40318SVaibhav Nagarnaik }
2963438ced17SVaibhav Nagarnaik
296499c37d1aSSebastian Andrzej Siewior cpus_read_lock();
296583f40318SVaibhav Nagarnaik
2966021c5b34SCorey Minyard /* Can't run something on an offline CPU. */
2967021c5b34SCorey Minyard if (!cpu_online(cpu_id))
2968f5eb5588SSteven Rostedt (Red Hat) rb_update_pages(cpu_buffer);
2969f5eb5588SSteven Rostedt (Red Hat) else {
297088ca6a71SSteven Rostedt /* Run directly if possible. */
297188ca6a71SSteven Rostedt migrate_disable();
297288ca6a71SSteven Rostedt if (cpu_id == smp_processor_id()) {
297388ca6a71SSteven Rostedt rb_update_pages(cpu_buffer);
297488ca6a71SSteven Rostedt migrate_enable();
297588ca6a71SSteven Rostedt } else {
297688ca6a71SSteven Rostedt migrate_enable();
297783f40318SVaibhav Nagarnaik schedule_work_on(cpu_id,
297883f40318SVaibhav Nagarnaik &cpu_buffer->update_pages_work);
297905fdd70dSVaibhav Nagarnaik wait_for_completion(&cpu_buffer->update_done);
2980f5eb5588SSteven Rostedt (Red Hat) }
298188ca6a71SSteven Rostedt }
298283f40318SVaibhav Nagarnaik
298383f40318SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = 0;
298499c37d1aSSebastian Andrzej Siewior cpus_read_unlock();
29857a8e76a3SSteven Rostedt }
29867a8e76a3SSteven Rostedt
29877a8e76a3SSteven Rostedt out:
2988659f451fSSteven Rostedt /*
2989659f451fSSteven Rostedt * The ring buffer resize can happen with the ring buffer
2990659f451fSSteven Rostedt * enabled, so that the update disturbs the tracing as little
2991659f451fSSteven Rostedt * as possible. But if the buffer is disabled, we do not need
2992659f451fSSteven Rostedt * to worry about that, and we can take the time to verify
2993659f451fSSteven Rostedt * that the buffer is not corrupt.
2994659f451fSSteven Rostedt */
2995659f451fSSteven Rostedt if (atomic_read(&buffer->record_disabled)) {
2996659f451fSSteven Rostedt atomic_inc(&buffer->record_disabled);
2997659f451fSSteven Rostedt /*
2998659f451fSSteven Rostedt * Even though the buffer was disabled, we must make sure
2999659f451fSSteven Rostedt * that it is truly disabled before calling rb_check_pages.
3000659f451fSSteven Rostedt * There could have been a race between checking
3001659f451fSSteven Rostedt * record_disabled and incrementing it.
3002659f451fSSteven Rostedt */
300374401729SPaul E. McKenney synchronize_rcu();
3004659f451fSSteven Rostedt for_each_buffer_cpu(buffer, cpu) {
3005659f451fSSteven Rostedt cpu_buffer = buffer->buffers[cpu];
3006659f451fSSteven Rostedt rb_check_pages(cpu_buffer);
3007659f451fSSteven Rostedt }
3008659f451fSSteven Rostedt atomic_dec(&buffer->record_disabled);
3009659f451fSSteven Rostedt }
3010659f451fSSteven Rostedt
30118a96c028SChen Lin atomic_dec(&buffer->resizing);
30127a8e76a3SSteven Rostedt mutex_unlock(&buffer->mutex);
30130a1754b2SQiujun Huang return 0;
30147a8e76a3SSteven Rostedt
301583f40318SVaibhav Nagarnaik out_err:
3016438ced17SVaibhav Nagarnaik for_each_buffer_cpu(buffer, cpu) {
3017438ced17SVaibhav Nagarnaik struct buffer_page *bpage, *tmp;
301883f40318SVaibhav Nagarnaik
3019438ced17SVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu];
3020438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = 0;
302183f40318SVaibhav Nagarnaik
3022438ced17SVaibhav Nagarnaik if (list_empty(&cpu_buffer->new_pages))
3023438ced17SVaibhav Nagarnaik continue;
302483f40318SVaibhav Nagarnaik
3025438ced17SVaibhav Nagarnaik list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
3026438ced17SVaibhav Nagarnaik list) {
3027044fa782SSteven Rostedt list_del_init(&bpage->list);
3028044fa782SSteven Rostedt free_buffer_page(bpage);
30297a8e76a3SSteven Rostedt }
3030438ced17SVaibhav Nagarnaik }
303107b8b10eSSteven Rostedt (VMware) out_err_unlock:
30328a96c028SChen Lin atomic_dec(&buffer->resizing);
3033641d2f63SVegard Nossum mutex_unlock(&buffer->mutex);
303483f40318SVaibhav Nagarnaik return err;
30357a8e76a3SSteven Rostedt }
3036c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_resize);
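/*
 * Illustrative usage sketch (editor's note, not part of this file):
 * resize every per-CPU buffer to roughly one megabyte, assuming
 * "buffer" came from a prior ring_buffer_alloc(). The size is in
 * bytes and is rounded up to whole sub-buffers, with a floor of two:
 *
 *	int err = ring_buffer_resize(buffer, 1 << 20, RING_BUFFER_ALL_CPUS);
 *	if (err < 0)
 *		pr_warn("ring buffer resize failed: %d\n", err);
 */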
30377a8e76a3SSteven Rostedt
303813292494SSteven Rostedt (VMware) void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
3039750912faSDavid Sharp {
3040750912faSDavid Sharp mutex_lock(&buffer->mutex);
3041750912faSDavid Sharp if (val)
3042750912faSDavid Sharp buffer->flags |= RB_FL_OVERWRITE;
3043750912faSDavid Sharp else
3044750912faSDavid Sharp buffer->flags &= ~RB_FL_OVERWRITE;
3045750912faSDavid Sharp mutex_unlock(&buffer->mutex);
3046750912faSDavid Sharp }
3047750912faSDavid Sharp EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
3048750912faSDavid Sharp
30492289d567SSteven Rostedt (Red Hat) static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
30507a8e76a3SSteven Rostedt {
3051044fa782SSteven Rostedt return bpage->page->data + index;
30527a8e76a3SSteven Rostedt }
30537a8e76a3SSteven Rostedt
30542289d567SSteven Rostedt (Red Hat) static __always_inline struct ring_buffer_event *
3055d769041fSSteven Rostedt rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
30567a8e76a3SSteven Rostedt {
30576f807acdSSteven Rostedt return __rb_page_index(cpu_buffer->reader_page,
30586f807acdSSteven Rostedt cpu_buffer->reader_page->read);
30596f807acdSSteven Rostedt }
30606f807acdSSteven Rostedt
3061785888c5SSteven Rostedt (VMware) static struct ring_buffer_event *
3062785888c5SSteven Rostedt (VMware) rb_iter_head_event(struct ring_buffer_iter *iter)
3063785888c5SSteven Rostedt (VMware) {
3064785888c5SSteven Rostedt (VMware) struct ring_buffer_event *event;
3065785888c5SSteven Rostedt (VMware) struct buffer_page *iter_head_page = iter->head_page;
3066785888c5SSteven Rostedt (VMware) unsigned long commit;
3067785888c5SSteven Rostedt (VMware) unsigned length;
3068785888c5SSteven Rostedt (VMware)
3069153368ceSSteven Rostedt (VMware) if (iter->head != iter->next_event)
3070153368ceSSteven Rostedt (VMware) return iter->event;
3071153368ceSSteven Rostedt (VMware)
3072785888c5SSteven Rostedt (VMware) /*
3073785888c5SSteven Rostedt (VMware) * When the writer goes across pages, it issues a cmpxchg which
3074785888c5SSteven Rostedt (VMware) * is a mb(), which will synchronize with the rmb here.
3075785888c5SSteven Rostedt (VMware) * (see rb_tail_page_update() and __rb_reserve_next())
3076785888c5SSteven Rostedt (VMware) */
3077785888c5SSteven Rostedt (VMware) commit = rb_page_commit(iter_head_page);
3078785888c5SSteven Rostedt (VMware) smp_rmb();
307995a404bdSSteven Rostedt (Google)
308095a404bdSSteven Rostedt (Google) /* An event needs to be at least 8 bytes in size */
308195a404bdSSteven Rostedt (Google) if (iter->head > commit - 8)
308295a404bdSSteven Rostedt (Google) goto reset;
308395a404bdSSteven Rostedt (Google)
3084785888c5SSteven Rostedt (VMware) event = __rb_page_index(iter_head_page, iter->head);
3085785888c5SSteven Rostedt (VMware) length = rb_event_length(event);
3086785888c5SSteven Rostedt (VMware)
3087785888c5SSteven Rostedt (VMware) /*
3088785888c5SSteven Rostedt (VMware) * READ_ONCE() doesn't work on functions and we don't want the
3089785888c5SSteven Rostedt (VMware) * compiler doing any crazy optimizations with length.
3090785888c5SSteven Rostedt (VMware) */
3091785888c5SSteven Rostedt (VMware) barrier();
3092785888c5SSteven Rostedt (VMware)
3093139f8400STzvetomir Stoyanov (VMware) if ((iter->head + length) > commit || length > iter->event_size)
3094785888c5SSteven Rostedt (VMware) /* Writer corrupted the read? */
3095785888c5SSteven Rostedt (VMware) goto reset;
3096785888c5SSteven Rostedt (VMware)
3097785888c5SSteven Rostedt (VMware) memcpy(iter->event, event, length);
3098785888c5SSteven Rostedt (VMware) /*
3099785888c5SSteven Rostedt (VMware) * If the page stamp is still the same after this rmb() then the
3100785888c5SSteven Rostedt (VMware) * event was safely copied without the writer entering the page.
3101785888c5SSteven Rostedt (VMware) */
3102785888c5SSteven Rostedt (VMware) smp_rmb();
3103785888c5SSteven Rostedt (VMware)
3104785888c5SSteven Rostedt (VMware) /* Make sure the page didn't change since we read this */
3105785888c5SSteven Rostedt (VMware) if (iter->page_stamp != iter_head_page->page->time_stamp ||
3106785888c5SSteven Rostedt (VMware) commit > rb_page_commit(iter_head_page))
3107785888c5SSteven Rostedt (VMware) goto reset;
3108785888c5SSteven Rostedt (VMware)
3109785888c5SSteven Rostedt (VMware) iter->next_event = iter->head + length;
3110785888c5SSteven Rostedt (VMware) return iter->event;
3111785888c5SSteven Rostedt (VMware) reset:
3112785888c5SSteven Rostedt (VMware) /* Reset to the beginning */
3113785888c5SSteven Rostedt (VMware) iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
3114785888c5SSteven Rostedt (VMware) iter->head = 0;
3115785888c5SSteven Rostedt (VMware) iter->next_event = 0;
3116c9b7a4a7SSteven Rostedt (VMware) iter->missed_events = 1;
3117785888c5SSteven Rostedt (VMware) return NULL;
3118785888c5SSteven Rostedt (VMware) }
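/*
 * In effect, rb_iter_head_event() performs a seqcount-style read
 * (editor's note): sample the commit, smp_rmb(), copy the event out,
 * smp_rmb(), then re-check the page time stamp and commit. If either
 * check fails the copy may have raced with the writer, so the iterator
 * resets and flags a missed event instead of returning torn data.
 */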
3119785888c5SSteven Rostedt (VMware)
312025985edcSLucas De Marchi /* Size is determined by what has been committed */
31212289d567SSteven Rostedt (Red Hat) static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
3122bf41a158SSteven Rostedt {
3123fe832be0SSteven Rostedt (Google) return rb_page_commit(bpage) & ~RB_MISSED_MASK;
3124bf41a158SSteven Rostedt }
3125bf41a158SSteven Rostedt
31262289d567SSteven Rostedt (Red Hat) static __always_inline unsigned
3127bf41a158SSteven Rostedt rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
3128bf41a158SSteven Rostedt {
3129bf41a158SSteven Rostedt return rb_page_commit(cpu_buffer->commit_page);
3130bf41a158SSteven Rostedt }
3131bf41a158SSteven Rostedt
31322289d567SSteven Rostedt (Red Hat) static __always_inline unsigned
31333cb30911SSteven Rostedt (Google) rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event)
31347a8e76a3SSteven Rostedt {
3135bf41a158SSteven Rostedt unsigned long addr = (unsigned long)event;
3136bf41a158SSteven Rostedt
31373cb30911SSteven Rostedt (Google) addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1;
31383cb30911SSteven Rostedt (Google)
31393cb30911SSteven Rostedt (Google) return addr - BUF_PAGE_HDR_SIZE;
31407a8e76a3SSteven Rostedt }
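/*
 * Worked example (illustrative addresses, editor's note): with
 * subbuf_order 0 the mask is PAGE_SIZE - 1, so an event at 0x...5038
 * on a 4K sub-buffer reduces to offset 0x38 within the page;
 * subtracting BUF_PAGE_HDR_SIZE yields the index relative to the data
 * area, which is what the write and commit counters store.
 */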
31417a8e76a3SSteven Rostedt
314234a148bfSAndrew Morton static void rb_inc_iter(struct ring_buffer_iter *iter)
3143d769041fSSteven Rostedt {
3144d769041fSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3145d769041fSSteven Rostedt
3146d769041fSSteven Rostedt /*
3147d769041fSSteven Rostedt * The iterator could be on the reader page (it starts there).
3148d769041fSSteven Rostedt * But the head could have moved, since the reader was
3149d769041fSSteven Rostedt * found. Check for this case and assign the iterator
3150d769041fSSteven Rostedt * to the head page instead of next.
3151d769041fSSteven Rostedt */
3152d769041fSSteven Rostedt if (iter->head_page == cpu_buffer->reader_page)
315377ae365eSSteven Rostedt iter->head_page = rb_set_head_page(cpu_buffer);
3154d769041fSSteven Rostedt else
31556689bed3SQiujun Huang rb_inc_page(&iter->head_page);
3156d769041fSSteven Rostedt
315728e3fc56SSteven Rostedt (VMware) iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
31587a8e76a3SSteven Rostedt iter->head = 0;
3159785888c5SSteven Rostedt (VMware) iter->next_event = 0;
31607a8e76a3SSteven Rostedt }
31617a8e76a3SSteven Rostedt
3162b14d0329SSteven Rostedt (Google) /* Return the index into the sub-buffers for a given sub-buffer */
31634009cc31SSteven Rostedt static int rb_meta_subbuf_idx(struct ring_buffer_cpu_meta *meta, void *subbuf)
3164b14d0329SSteven Rostedt (Google) {
3165b14d0329SSteven Rostedt (Google) void *subbuf_array;
3166b14d0329SSteven Rostedt (Google)
3167b14d0329SSteven Rostedt (Google) subbuf_array = (void *)meta + sizeof(int) * meta->nr_subbufs;
3168b14d0329SSteven Rostedt (Google) subbuf_array = (void *)ALIGN((unsigned long)subbuf_array, meta->subbuf_size);
3169b14d0329SSteven Rostedt (Google) return (subbuf - subbuf_array) / meta->subbuf_size;
3170b14d0329SSteven Rostedt (Google) }
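/*
 * Illustrative example (editor's note): the sub-buffers start at the
 * first address aligned to subbuf_size past the buffers[] array. With
 * a subbuf_size of 4096, a sub-buffer sitting 2 * 4096 bytes beyond
 * that aligned start maps to index 2.
 */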
3171b14d0329SSteven Rostedt (Google)
3172b14d0329SSteven Rostedt (Google) static void rb_update_meta_head(struct ring_buffer_per_cpu *cpu_buffer,
3173b14d0329SSteven Rostedt (Google) struct buffer_page *next_page)
3174b14d0329SSteven Rostedt (Google) {
31754009cc31SSteven Rostedt struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
3176b14d0329SSteven Rostedt (Google) unsigned long old_head = (unsigned long)next_page->page;
3177b14d0329SSteven Rostedt (Google) unsigned long new_head;
3178b14d0329SSteven Rostedt (Google)
3179b14d0329SSteven Rostedt (Google) rb_inc_page(&next_page);
3180b14d0329SSteven Rostedt (Google) new_head = (unsigned long)next_page->page;
3181b14d0329SSteven Rostedt (Google)
3182b14d0329SSteven Rostedt (Google) /*
3183b14d0329SSteven Rostedt (Google) * Only move it forward once, if something else came in and
3184b14d0329SSteven Rostedt (Google) * moved it forward, then we don't want to touch it.
3185b14d0329SSteven Rostedt (Google) */
3186b14d0329SSteven Rostedt (Google) (void)cmpxchg(&meta->head_buffer, old_head, new_head);
3187b14d0329SSteven Rostedt (Google) }
3188b14d0329SSteven Rostedt (Google)
3189b14d0329SSteven Rostedt (Google) static void rb_update_meta_reader(struct ring_buffer_per_cpu *cpu_buffer,
3190b14d0329SSteven Rostedt (Google) struct buffer_page *reader)
3191b14d0329SSteven Rostedt (Google) {
31924009cc31SSteven Rostedt struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
3193b14d0329SSteven Rostedt (Google) void *old_reader = cpu_buffer->reader_page->page;
3194b14d0329SSteven Rostedt (Google) void *new_reader = reader->page;
3195b14d0329SSteven Rostedt (Google) int id;
3196b14d0329SSteven Rostedt (Google)
3197b14d0329SSteven Rostedt (Google) id = reader->id;
3198b14d0329SSteven Rostedt (Google) cpu_buffer->reader_page->id = id;
3199b14d0329SSteven Rostedt (Google) reader->id = 0;
3200b14d0329SSteven Rostedt (Google)
3201b14d0329SSteven Rostedt (Google) meta->buffers[0] = rb_meta_subbuf_idx(meta, new_reader);
3202b14d0329SSteven Rostedt (Google) meta->buffers[id] = rb_meta_subbuf_idx(meta, old_reader);
3203b14d0329SSteven Rostedt (Google)
3204b14d0329SSteven Rostedt (Google) /* The head pointer is the one after the reader */
3205b14d0329SSteven Rostedt (Google) rb_update_meta_head(cpu_buffer, reader);
3206b14d0329SSteven Rostedt (Google) }
3207b14d0329SSteven Rostedt (Google)
320877ae365eSSteven Rostedt /*
320977ae365eSSteven Rostedt * rb_handle_head_page - writer hit the head page
321077ae365eSSteven Rostedt *
321177ae365eSSteven Rostedt * Returns: +1 to retry page
321277ae365eSSteven Rostedt * 0 to continue
321377ae365eSSteven Rostedt * -1 on error
321477ae365eSSteven Rostedt */
321577ae365eSSteven Rostedt static int
321677ae365eSSteven Rostedt rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
321777ae365eSSteven Rostedt struct buffer_page *tail_page,
321877ae365eSSteven Rostedt struct buffer_page *next_page)
321977ae365eSSteven Rostedt {
322077ae365eSSteven Rostedt struct buffer_page *new_head;
322177ae365eSSteven Rostedt int entries;
322277ae365eSSteven Rostedt int type;
322377ae365eSSteven Rostedt int ret;
322477ae365eSSteven Rostedt
322577ae365eSSteven Rostedt entries = rb_page_entries(next_page);
322677ae365eSSteven Rostedt
322777ae365eSSteven Rostedt /*
322877ae365eSSteven Rostedt * The hard part is here. We need to move the head
322977ae365eSSteven Rostedt * forward, and protect against both readers on
323077ae365eSSteven Rostedt * other CPUs and writers coming in via interrupts.
323177ae365eSSteven Rostedt */
323277ae365eSSteven Rostedt type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
323377ae365eSSteven Rostedt RB_PAGE_HEAD);
323477ae365eSSteven Rostedt
323577ae365eSSteven Rostedt /*
323677ae365eSSteven Rostedt * type can be one of four:
323777ae365eSSteven Rostedt * NORMAL - an interrupt already moved it for us
323877ae365eSSteven Rostedt * HEAD - we are the first to get here.
323977ae365eSSteven Rostedt * UPDATE - we are the interrupt interrupting
324077ae365eSSteven Rostedt * a current move.
324177ae365eSSteven Rostedt * MOVED - a reader on another CPU moved the next
324277ae365eSSteven Rostedt * pointer to its reader page. Give up
324377ae365eSSteven Rostedt * and try again.
324477ae365eSSteven Rostedt */
324577ae365eSSteven Rostedt
324677ae365eSSteven Rostedt switch (type) {
324777ae365eSSteven Rostedt case RB_PAGE_HEAD:
324877ae365eSSteven Rostedt /*
324977ae365eSSteven Rostedt * We changed the head to UPDATE, thus
325077ae365eSSteven Rostedt * it is our responsibility to update
325177ae365eSSteven Rostedt * the counters.
325277ae365eSSteven Rostedt */
325377ae365eSSteven Rostedt local_add(entries, &cpu_buffer->overrun);
325445d99ea4SZheng Yejian local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
325531029a8bSSteven Rostedt (Google) local_inc(&cpu_buffer->pages_lost);
325677ae365eSSteven Rostedt
3257b14d0329SSteven Rostedt (Google) if (cpu_buffer->ring_meta)
3258b14d0329SSteven Rostedt (Google) rb_update_meta_head(cpu_buffer, next_page);
325977ae365eSSteven Rostedt /*
326077ae365eSSteven Rostedt * The entries will be zeroed out when we move the
326177ae365eSSteven Rostedt * tail page.
326277ae365eSSteven Rostedt */
326377ae365eSSteven Rostedt
326477ae365eSSteven Rostedt /* still more to do */
326577ae365eSSteven Rostedt break;
326677ae365eSSteven Rostedt
326777ae365eSSteven Rostedt case RB_PAGE_UPDATE:
326877ae365eSSteven Rostedt /*
326977ae365eSSteven Rostedt * This is an interrupt that interrupted the
327077ae365eSSteven Rostedt * previous update. Still more to do.
327177ae365eSSteven Rostedt */
327277ae365eSSteven Rostedt break;
327377ae365eSSteven Rostedt case RB_PAGE_NORMAL:
327477ae365eSSteven Rostedt /*
327577ae365eSSteven Rostedt * An interrupt came in before the update
327677ae365eSSteven Rostedt * and processed this for us.
327777ae365eSSteven Rostedt * Nothing left to do.
327877ae365eSSteven Rostedt */
327977ae365eSSteven Rostedt return 1;
328077ae365eSSteven Rostedt case RB_PAGE_MOVED:
328177ae365eSSteven Rostedt /*
328277ae365eSSteven Rostedt * The reader is on another CPU and just did
328377ae365eSSteven Rostedt * a swap with our next_page.
328477ae365eSSteven Rostedt * Try again.
328577ae365eSSteven Rostedt */
328677ae365eSSteven Rostedt return 1;
328777ae365eSSteven Rostedt default:
328877ae365eSSteven Rostedt RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
328977ae365eSSteven Rostedt return -1;
329077ae365eSSteven Rostedt }
329177ae365eSSteven Rostedt
329277ae365eSSteven Rostedt /*
329377ae365eSSteven Rostedt * Now that we are here, the old head pointer is
329477ae365eSSteven Rostedt * set to UPDATE. This will keep the reader from
329577ae365eSSteven Rostedt * swapping the head page with the reader page.
329677ae365eSSteven Rostedt * The reader (on another CPU) will spin till
329777ae365eSSteven Rostedt * we are finished.
329877ae365eSSteven Rostedt *
329977ae365eSSteven Rostedt * We just need to protect against interrupts
330077ae365eSSteven Rostedt * doing the job. We will set the next pointer
330177ae365eSSteven Rostedt * to HEAD. After that, we set the old pointer
330277ae365eSSteven Rostedt * to NORMAL, but only if it was HEAD before;
330377ae365eSSteven Rostedt * otherwise we are an interrupt, and only
330477ae365eSSteven Rostedt * want the outermost commit to reset it.
330577ae365eSSteven Rostedt */
330677ae365eSSteven Rostedt new_head = next_page;
33076689bed3SQiujun Huang rb_inc_page(&new_head);
330877ae365eSSteven Rostedt
330977ae365eSSteven Rostedt ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
331077ae365eSSteven Rostedt RB_PAGE_NORMAL);
331177ae365eSSteven Rostedt
331277ae365eSSteven Rostedt /*
331377ae365eSSteven Rostedt * Valid returns are:
331477ae365eSSteven Rostedt * HEAD - an interrupt came in and already set it.
331577ae365eSSteven Rostedt * NORMAL - One of two things:
331677ae365eSSteven Rostedt * 1) We really set it.
331777ae365eSSteven Rostedt * 2) A bunch of interrupts came in and moved
331877ae365eSSteven Rostedt * the page forward again.
331977ae365eSSteven Rostedt */
332077ae365eSSteven Rostedt switch (ret) {
332177ae365eSSteven Rostedt case RB_PAGE_HEAD:
332277ae365eSSteven Rostedt case RB_PAGE_NORMAL:
332377ae365eSSteven Rostedt /* OK */
332477ae365eSSteven Rostedt break;
332577ae365eSSteven Rostedt default:
332677ae365eSSteven Rostedt RB_WARN_ON(cpu_buffer, 1);
332777ae365eSSteven Rostedt return -1;
332877ae365eSSteven Rostedt }
332977ae365eSSteven Rostedt
333077ae365eSSteven Rostedt /*
333177ae365eSSteven Rostedt * It is possible that an interrupt came in,
333277ae365eSSteven Rostedt * set the head up, then more interrupts came in
333377ae365eSSteven Rostedt * and moved it again. When we get back here,
333477ae365eSSteven Rostedt * the page would have been set to NORMAL but we
333577ae365eSSteven Rostedt * just set it back to HEAD.
333677ae365eSSteven Rostedt *
333777ae365eSSteven Rostedt * How do you detect this? Well, if that happened
333877ae365eSSteven Rostedt * the tail page would have moved.
333977ae365eSSteven Rostedt */
334077ae365eSSteven Rostedt if (ret == RB_PAGE_NORMAL) {
33418573636eSSteven Rostedt (Red Hat) struct buffer_page *buffer_tail_page;
33428573636eSSteven Rostedt (Red Hat)
33438573636eSSteven Rostedt (Red Hat) buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
334477ae365eSSteven Rostedt /*
334577ae365eSSteven Rostedt * If the tail had moved past next, then we need
334677ae365eSSteven Rostedt * to reset the pointer.
334777ae365eSSteven Rostedt */
33488573636eSSteven Rostedt (Red Hat) if (buffer_tail_page != tail_page &&
33498573636eSSteven Rostedt (Red Hat) buffer_tail_page != next_page)
335077ae365eSSteven Rostedt rb_head_page_set_normal(cpu_buffer, new_head,
335177ae365eSSteven Rostedt next_page,
335277ae365eSSteven Rostedt RB_PAGE_HEAD);
335377ae365eSSteven Rostedt }
335477ae365eSSteven Rostedt
335577ae365eSSteven Rostedt /*
335677ae365eSSteven Rostedt * If this was the outermost commit (the one that
335777ae365eSSteven Rostedt * changed the original pointer from HEAD to UPDATE),
335877ae365eSSteven Rostedt * then it is up to us to reset it to NORMAL.
335977ae365eSSteven Rostedt */
336077ae365eSSteven Rostedt if (type == RB_PAGE_HEAD) {
336177ae365eSSteven Rostedt ret = rb_head_page_set_normal(cpu_buffer, next_page,
336277ae365eSSteven Rostedt tail_page,
336377ae365eSSteven Rostedt RB_PAGE_UPDATE);
336477ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer,
336577ae365eSSteven Rostedt ret != RB_PAGE_UPDATE))
336677ae365eSSteven Rostedt return -1;
336777ae365eSSteven Rostedt }
336877ae365eSSteven Rostedt
336977ae365eSSteven Rostedt return 0;
337077ae365eSSteven Rostedt }
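/*
 * Informal summary of the state machine above (editor's sketch): the
 * head page pointer carries one of the RB_PAGE_* flags.
 *
 *	HEAD   --writer cmpxchg-->  UPDATE  (this writer owns the move)
 *	NORMAL --same writer----->  HEAD    (set on the following page)
 *	UPDATE --outermost only-->  NORMAL  (nested interrupts leave it)
 *
 * A reader swapping in its spare page leaves MOVED behind, which sends
 * the writer back to retry from the top.
 */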
337177ae365eSSteven Rostedt
3372c7b09308SSteven Rostedt static inline void
3373c7b09308SSteven Rostedt rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
3374fcc742eaSSteven Rostedt (Red Hat) unsigned long tail, struct rb_event_info *info)
3375c7b09308SSteven Rostedt {
3376139f8400STzvetomir Stoyanov (VMware) unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
3377fcc742eaSSteven Rostedt (Red Hat) struct buffer_page *tail_page = info->tail_page;
3378c7b09308SSteven Rostedt struct ring_buffer_event *event;
3379fcc742eaSSteven Rostedt (Red Hat) unsigned long length = info->length;
3380c7b09308SSteven Rostedt
3381c7b09308SSteven Rostedt /*
3382c7b09308SSteven Rostedt * Only the event that crossed the page boundary
3383c7b09308SSteven Rostedt * must fill the old tail_page with padding.
3384c7b09308SSteven Rostedt */
3385139f8400STzvetomir Stoyanov (VMware) if (tail >= bsize) {
3386b3230c8bSSteven Rostedt /*
3387b3230c8bSSteven Rostedt * If the page was filled, then we still need
3388b3230c8bSSteven Rostedt * to update the real_end. Reset it to zero
3389b3230c8bSSteven Rostedt * and the reader will ignore it.
3390b3230c8bSSteven Rostedt */
3391139f8400STzvetomir Stoyanov (VMware) if (tail == bsize)
3392b3230c8bSSteven Rostedt tail_page->real_end = 0;
3393b3230c8bSSteven Rostedt
3394c7b09308SSteven Rostedt local_sub(length, &tail_page->write);
3395c7b09308SSteven Rostedt return;
3396c7b09308SSteven Rostedt }
3397c7b09308SSteven Rostedt
3398c7b09308SSteven Rostedt event = __rb_page_index(tail_page, tail);
3399c7b09308SSteven Rostedt
3400c7b09308SSteven Rostedt /*
3401ff0ff84aSSteven Rostedt * Save the original length to the meta data.
3402ff0ff84aSSteven Rostedt * This will be used by the reader to add lost event
3403ff0ff84aSSteven Rostedt * counter.
3404ff0ff84aSSteven Rostedt */
3405ff0ff84aSSteven Rostedt tail_page->real_end = tail;
3406ff0ff84aSSteven Rostedt
3407ff0ff84aSSteven Rostedt /*
3408c7b09308SSteven Rostedt * If this event is bigger than the minimum size, then
3409c7b09308SSteven Rostedt * we need to be careful that we don't subtract the
3410c7b09308SSteven Rostedt * write counter enough to allow another writer to slip
3411c7b09308SSteven Rostedt * in on this page.
3412c7b09308SSteven Rostedt * We put in a discarded commit instead, to make sure
341345d99ea4SZheng Yejian * that this space is not used again, and this space will
341445d99ea4SZheng Yejian * not be accounted into 'entries_bytes'.
3415c7b09308SSteven Rostedt *
3416c7b09308SSteven Rostedt * If we are less than the minimum size, we don't need to
3417c7b09308SSteven Rostedt * worry about it.
3418c7b09308SSteven Rostedt */
3419139f8400STzvetomir Stoyanov (VMware) if (tail > (bsize - RB_EVNT_MIN_SIZE)) {
3420c7b09308SSteven Rostedt /* No room for any events */
3421c7b09308SSteven Rostedt
3422c7b09308SSteven Rostedt /* Mark the rest of the page with padding */
3423c7b09308SSteven Rostedt rb_event_set_padding(event);
3424c7b09308SSteven Rostedt
3425a0fcaaedSSteven Rostedt (Google) /* Make sure the padding is visible before the write update */
3426a0fcaaedSSteven Rostedt (Google) smp_wmb();
3427a0fcaaedSSteven Rostedt (Google)
3428c7b09308SSteven Rostedt /* Set the write back to the previous setting */
3429c7b09308SSteven Rostedt local_sub(length, &tail_page->write);
3430c7b09308SSteven Rostedt return;
3431c7b09308SSteven Rostedt }
3432c7b09308SSteven Rostedt
3433c7b09308SSteven Rostedt /* Put in a discarded event */
3434139f8400STzvetomir Stoyanov (VMware) event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE;
3435c7b09308SSteven Rostedt event->type_len = RINGBUF_TYPE_PADDING;
3436c7b09308SSteven Rostedt /* time delta must be non zero */
3437c7b09308SSteven Rostedt event->time_delta = 1;
3438c7b09308SSteven Rostedt
343945d99ea4SZheng Yejian /* account for padding bytes */
3440139f8400STzvetomir Stoyanov (VMware) local_add(bsize - tail, &cpu_buffer->entries_bytes);
344145d99ea4SZheng Yejian
3442a0fcaaedSSteven Rostedt (Google) /* Make sure the padding is visible before the tail_page->write update */
3443a0fcaaedSSteven Rostedt (Google) smp_wmb();
3444a0fcaaedSSteven Rostedt (Google)
3445c7b09308SSteven Rostedt /* Set write to end of buffer */
3446139f8400STzvetomir Stoyanov (VMware) length = (tail + length) - bsize;
3447c7b09308SSteven Rostedt local_sub(length, &tail_page->write);
3448c7b09308SSteven Rostedt }
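/*
 * Illustrative numbers (editor's note): with a 4096-byte data area, an
 * event of length 64 reserved at tail 4072 crosses the page boundary.
 * Since 4072 is still within bsize - RB_EVNT_MIN_SIZE, the 24 bytes
 * left on the old page become a discarded padding event
 * (array[0] = 24 - RB_EVNT_HDR_SIZE, time_delta = 1) and the write
 * counter is pulled back to the page end.
 */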
34496634ff26SSteven Rostedt
34504239c38fSSteven Rostedt (Red Hat) static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
34514239c38fSSteven Rostedt (Red Hat)
3452747e94aeSSteven Rostedt /*
3453747e94aeSSteven Rostedt * This is the slow path, force gcc not to inline it.
3454747e94aeSSteven Rostedt */
3455747e94aeSSteven Rostedt static noinline struct ring_buffer_event *
34566634ff26SSteven Rostedt rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
3457fcc742eaSSteven Rostedt (Red Hat) unsigned long tail, struct rb_event_info *info)
34587a8e76a3SSteven Rostedt {
3459fcc742eaSSteven Rostedt (Red Hat) struct buffer_page *tail_page = info->tail_page;
34605a50e33cSSteven Rostedt struct buffer_page *commit_page = cpu_buffer->commit_page;
346113292494SSteven Rostedt (VMware) struct trace_buffer *buffer = cpu_buffer->buffer;
346277ae365eSSteven Rostedt struct buffer_page *next_page;
346377ae365eSSteven Rostedt int ret;
3464aa20ae84SSteven Rostedt
3465aa20ae84SSteven Rostedt next_page = tail_page;
34667a8e76a3SSteven Rostedt
34676689bed3SQiujun Huang rb_inc_page(&next_page);
34687a8e76a3SSteven Rostedt
3469bf41a158SSteven Rostedt /*
3470bf41a158SSteven Rostedt * If for some reason, we had an interrupt storm that made
3471bf41a158SSteven Rostedt * it all the way around the buffer, bail, and warn
3472bf41a158SSteven Rostedt * about it.
3473bf41a158SSteven Rostedt */
347498db8df7SSteven Rostedt if (unlikely(next_page == commit_page)) {
347577ae365eSSteven Rostedt local_inc(&cpu_buffer->commit_overrun);
347645141d46SSteven Rostedt goto out_reset;
3477bf41a158SSteven Rostedt }
3478d769041fSSteven Rostedt
3479bf41a158SSteven Rostedt /*
348077ae365eSSteven Rostedt * This is where the fun begins!
348177ae365eSSteven Rostedt *
348277ae365eSSteven Rostedt * We are fighting against races between a reader that
348377ae365eSSteven Rostedt * could be on another CPU trying to swap its reader
348477ae365eSSteven Rostedt * page with the buffer head.
348577ae365eSSteven Rostedt *
348677ae365eSSteven Rostedt * We are also fighting against interrupts coming in and
348777ae365eSSteven Rostedt * moving the head or tail on us as well.
348877ae365eSSteven Rostedt *
348977ae365eSSteven Rostedt * If the next page is the head page then we have filled
349077ae365eSSteven Rostedt * the buffer, unless the commit page is still on the
349177ae365eSSteven Rostedt * reader page.
3492bf41a158SSteven Rostedt */
34936689bed3SQiujun Huang if (rb_is_head_page(next_page, &tail_page->list)) {
3494bf41a158SSteven Rostedt
349577ae365eSSteven Rostedt /*
349677ae365eSSteven Rostedt * If the commit is not on the reader page, then
349777ae365eSSteven Rostedt * move the header page.
349877ae365eSSteven Rostedt */
349977ae365eSSteven Rostedt if (!rb_is_reader_page(cpu_buffer->commit_page)) {
350077ae365eSSteven Rostedt /*
350177ae365eSSteven Rostedt * If we are not in overwrite mode,
350277ae365eSSteven Rostedt * this is easy, just stop here.
350377ae365eSSteven Rostedt */
3504884bfe89SSlava Pestov if (!(buffer->flags & RB_FL_OVERWRITE)) {
3505884bfe89SSlava Pestov local_inc(&cpu_buffer->dropped_events);
350677ae365eSSteven Rostedt goto out_reset;
3507884bfe89SSlava Pestov }
350877ae365eSSteven Rostedt
350977ae365eSSteven Rostedt ret = rb_handle_head_page(cpu_buffer,
351077ae365eSSteven Rostedt tail_page,
351177ae365eSSteven Rostedt next_page);
351277ae365eSSteven Rostedt if (ret < 0)
351377ae365eSSteven Rostedt goto out_reset;
351477ae365eSSteven Rostedt if (ret)
351577ae365eSSteven Rostedt goto out_again;
351677ae365eSSteven Rostedt } else {
351777ae365eSSteven Rostedt /*
351877ae365eSSteven Rostedt * We need to be careful here too. The
351977ae365eSSteven Rostedt * commit page could still be on the reader
352077ae365eSSteven Rostedt * page. We could have a small buffer, and
352177ae365eSSteven Rostedt * have filled up the buffer with events
352277ae365eSSteven Rostedt * from interrupts and such, and wrapped.
352377ae365eSSteven Rostedt *
3524c6358bacSQiujun Huang * Note, if the tail page is also on the
352577ae365eSSteven Rostedt * reader_page, we let it move out.
352677ae365eSSteven Rostedt */
352777ae365eSSteven Rostedt if (unlikely((cpu_buffer->commit_page !=
352877ae365eSSteven Rostedt cpu_buffer->tail_page) &&
352977ae365eSSteven Rostedt (cpu_buffer->commit_page ==
353077ae365eSSteven Rostedt cpu_buffer->reader_page))) {
353177ae365eSSteven Rostedt local_inc(&cpu_buffer->commit_overrun);
353277ae365eSSteven Rostedt goto out_reset;
353377ae365eSSteven Rostedt }
353477ae365eSSteven Rostedt }
3535bf41a158SSteven Rostedt }
3536bf41a158SSteven Rostedt
353770004986SSteven Rostedt (Red Hat) rb_tail_page_update(cpu_buffer, tail_page, next_page);
35387a8e76a3SSteven Rostedt
353977ae365eSSteven Rostedt out_again:
354077ae365eSSteven Rostedt
3541fcc742eaSSteven Rostedt (Red Hat) rb_reset_tail(cpu_buffer, tail, info);
3542bf41a158SSteven Rostedt
35434239c38fSSteven Rostedt (Red Hat) /* Commit what we have for now. */
35444239c38fSSteven Rostedt (Red Hat) rb_end_commit(cpu_buffer);
35454239c38fSSteven Rostedt (Red Hat) /* rb_end_commit() decs committing */
35464239c38fSSteven Rostedt (Red Hat) local_inc(&cpu_buffer->committing);
35474239c38fSSteven Rostedt (Red Hat)
3548bf41a158SSteven Rostedt /* fail and let the caller try again */
3549bf41a158SSteven Rostedt return ERR_PTR(-EAGAIN);
3550bf41a158SSteven Rostedt
355145141d46SSteven Rostedt out_reset:
35526f3b3440SLai Jiangshan /* reset write */
3553fcc742eaSSteven Rostedt (Red Hat) rb_reset_tail(cpu_buffer, tail, info);
35546f3b3440SLai Jiangshan
3555bf41a158SSteven Rostedt return NULL;
35567a8e76a3SSteven Rostedt }
35577a8e76a3SSteven Rostedt
355874e87937SSteven Rostedt (VMware) /* Slow path */
355974e87937SSteven Rostedt (VMware) static struct ring_buffer_event *
35603cb30911SSteven Rostedt (Google) rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
35613cb30911SSteven Rostedt (Google) struct ring_buffer_event *event, u64 delta, bool abs)
3562d90fd774SSteven Rostedt (Red Hat) {
3563dc4e2801STom Zanussi if (abs)
3564dc4e2801STom Zanussi event->type_len = RINGBUF_TYPE_TIME_STAMP;
3565dc4e2801STom Zanussi else
3566d90fd774SSteven Rostedt (Red Hat) event->type_len = RINGBUF_TYPE_TIME_EXTEND;
3567d90fd774SSteven Rostedt (Red Hat)
3568dc4e2801STom Zanussi /* Not the first event on the page, or not delta? */
35693cb30911SSteven Rostedt (Google) if (abs || rb_event_index(cpu_buffer, event)) {
3570d90fd774SSteven Rostedt (Red Hat) event->time_delta = delta & TS_MASK;
3571d90fd774SSteven Rostedt (Red Hat) event->array[0] = delta >> TS_SHIFT;
3572d90fd774SSteven Rostedt (Red Hat) } else {
3573d90fd774SSteven Rostedt (Red Hat) /* nope, just zero it */
3574d90fd774SSteven Rostedt (Red Hat) event->time_delta = 0;
3575d90fd774SSteven Rostedt (Red Hat) event->array[0] = 0;
3576d90fd774SSteven Rostedt (Red Hat) }
3577d90fd774SSteven Rostedt (Red Hat)
3578d90fd774SSteven Rostedt (Red Hat) return skip_time_extend(event);
3579d90fd774SSteven Rostedt (Red Hat) }
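/*
 * Encoding example (illustrative value, editor's note): a delta too
 * large for the 27-bit time_delta field, say 0x123456789a, is split
 * across the extend event as
 *
 *	event->time_delta = delta & TS_MASK;	(low 27 bits)
 *	event->array[0]   = delta >> TS_SHIFT;	(remaining high bits)
 *
 * unless this is the first event on the page and not an absolute
 * stamp, in which case both fields are zeroed.
 */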
3580d90fd774SSteven Rostedt (Red Hat)
358158fbc3c6SSteven Rostedt (VMware) #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
358258fbc3c6SSteven Rostedt (VMware) static inline bool sched_clock_stable(void)
358358fbc3c6SSteven Rostedt (VMware) {
358458fbc3c6SSteven Rostedt (VMware) return true;
358558fbc3c6SSteven Rostedt (VMware) }
358658fbc3c6SSteven Rostedt (VMware) #endif
358758fbc3c6SSteven Rostedt (VMware)
358874e87937SSteven Rostedt (VMware) static void
358958fbc3c6SSteven Rostedt (VMware) rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
359058fbc3c6SSteven Rostedt (VMware) struct rb_event_info *info)
359158fbc3c6SSteven Rostedt (VMware) {
359258fbc3c6SSteven Rostedt (VMware) u64 write_stamp;
359358fbc3c6SSteven Rostedt (VMware)
359429ce2451SSteven Rostedt (VMware) WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
359558fbc3c6SSteven Rostedt (VMware) (unsigned long long)info->delta,
359658fbc3c6SSteven Rostedt (VMware) (unsigned long long)info->ts,
359758fbc3c6SSteven Rostedt (VMware) (unsigned long long)info->before,
359858fbc3c6SSteven Rostedt (VMware) (unsigned long long)info->after,
3599c84897c0SSteven Rostedt (Google) (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}),
360058fbc3c6SSteven Rostedt (VMware) sched_clock_stable() ? "" :
360158fbc3c6SSteven Rostedt (VMware) "If you just came from a suspend/resume,\n"
360258fbc3c6SSteven Rostedt (VMware) "please switch to the trace global clock:\n"
36032455f0e1SRoss Zwisler " echo global > /sys/kernel/tracing/trace_clock\n"
360458fbc3c6SSteven Rostedt (VMware) "or add trace_clock=global to the kernel command line\n");
360558fbc3c6SSteven Rostedt (VMware) }
360658fbc3c6SSteven Rostedt (VMware)
360774e87937SSteven Rostedt (VMware) static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
360874e87937SSteven Rostedt (VMware) struct ring_buffer_event **event,
360974e87937SSteven Rostedt (VMware) struct rb_event_info *info,
361074e87937SSteven Rostedt (VMware) u64 *delta,
361174e87937SSteven Rostedt (VMware) unsigned int *length)
361274e87937SSteven Rostedt (VMware) {
361374e87937SSteven Rostedt (VMware) bool abs = info->add_timestamp &
361474e87937SSteven Rostedt (VMware) (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
361574e87937SSteven Rostedt (VMware)
361629ce2451SSteven Rostedt (VMware) if (unlikely(info->delta > (1ULL << 59))) {
36176695da58SSteven Rostedt (Google) /*
36186695da58SSteven Rostedt (Google) * Some timers can use more than 59 bits, and when a timestamp
36196695da58SSteven Rostedt (Google) * is added to the buffer, it will lose those bits.
36206695da58SSteven Rostedt (Google) */
36216695da58SSteven Rostedt (Google) if (abs && (info->ts & TS_MSB)) {
36226695da58SSteven Rostedt (Google) info->delta &= ABS_TS_MASK;
36236695da58SSteven Rostedt (Google)
362429ce2451SSteven Rostedt (VMware) /* did the clock go backwards */
36256695da58SSteven Rostedt (Google) } else if (info->before == info->after && info->before > info->ts) {
362629ce2451SSteven Rostedt (VMware) /* not interrupted */
362729ce2451SSteven Rostedt (VMware) static int once;
362829ce2451SSteven Rostedt (VMware)
362929ce2451SSteven Rostedt (VMware) /*
363029ce2451SSteven Rostedt (VMware) * This is possible with a recalibrating of the TSC.
363129ce2451SSteven Rostedt (VMware) * Do not produce a call stack, but just report it.
363229ce2451SSteven Rostedt (VMware) */
363329ce2451SSteven Rostedt (VMware) if (!once) {
363429ce2451SSteven Rostedt (VMware) once++;
363529ce2451SSteven Rostedt (VMware) pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
363629ce2451SSteven Rostedt (VMware) info->before, info->ts);
363729ce2451SSteven Rostedt (VMware) }
363829ce2451SSteven Rostedt (VMware) } else
363974e87937SSteven Rostedt (VMware) rb_check_timestamp(cpu_buffer, info);
364029ce2451SSteven Rostedt (VMware) if (!abs)
364129ce2451SSteven Rostedt (VMware) info->delta = 0;
364229ce2451SSteven Rostedt (VMware) }
36433cb30911SSteven Rostedt (Google) *event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs);
364474e87937SSteven Rostedt (VMware) *length -= RB_LEN_TIME_EXTEND;
364574e87937SSteven Rostedt (VMware) *delta = 0;
364674e87937SSteven Rostedt (VMware) }
364774e87937SSteven Rostedt (VMware)
3648d90fd774SSteven Rostedt (Red Hat) /**
3649d90fd774SSteven Rostedt (Red Hat) * rb_update_event - update event type and data
3650cfc585a4SSteven Rostedt (VMware) * @cpu_buffer: The per cpu buffer of the @event
3651d90fd774SSteven Rostedt (Red Hat) * @event: the event to update
3652cfc585a4SSteven Rostedt (VMware) * @info: The info to update the @event with (contains length and delta)
3653d90fd774SSteven Rostedt (Red Hat) *
3654cfc585a4SSteven Rostedt (VMware) * Update the type and data fields of the @event. The length
3655d90fd774SSteven Rostedt (Red Hat) * is the actual size that is written to the ring buffer,
3656d90fd774SSteven Rostedt (Red Hat) * and with this, we can determine what to place into the
3657d90fd774SSteven Rostedt (Red Hat) * data field.
3658d90fd774SSteven Rostedt (Red Hat) */
3659b7dc42fdSSteven Rostedt (Red Hat) static void
3660d90fd774SSteven Rostedt (Red Hat) rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
3661d90fd774SSteven Rostedt (Red Hat) struct ring_buffer_event *event,
3662d90fd774SSteven Rostedt (Red Hat) struct rb_event_info *info)
3663d90fd774SSteven Rostedt (Red Hat) {
3664d90fd774SSteven Rostedt (Red Hat) unsigned length = info->length;
3665d90fd774SSteven Rostedt (Red Hat) u64 delta = info->delta;
36668672e494SSteven Rostedt (VMware) unsigned int nest = local_read(&cpu_buffer->committing) - 1;
36678672e494SSteven Rostedt (VMware)
3668a948c69dSSteven Rostedt (VMware) if (!WARN_ON_ONCE(nest >= MAX_NEST))
36698672e494SSteven Rostedt (VMware) cpu_buffer->event_stamp[nest] = info->ts;
3670d90fd774SSteven Rostedt (Red Hat)
3671d90fd774SSteven Rostedt (Red Hat) /*
3672d90fd774SSteven Rostedt (Red Hat) * If we need to add a timestamp, then we
36736167c205SSteven Rostedt (VMware) * add it to the start of the reserved space.
3674d90fd774SSteven Rostedt (Red Hat) */
367574e87937SSteven Rostedt (VMware) if (unlikely(info->add_timestamp))
367674e87937SSteven Rostedt (VMware) rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
3677d90fd774SSteven Rostedt (Red Hat)
3678d90fd774SSteven Rostedt (Red Hat) event->time_delta = delta;
3679d90fd774SSteven Rostedt (Red Hat) length -= RB_EVNT_HDR_SIZE;
3680adab66b7SSteven Rostedt (VMware) if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
3681d90fd774SSteven Rostedt (Red Hat) event->type_len = 0;
3682d90fd774SSteven Rostedt (Red Hat) event->array[0] = length;
3683d90fd774SSteven Rostedt (Red Hat) } else
3684d90fd774SSteven Rostedt (Red Hat) event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
3685d90fd774SSteven Rostedt (Red Hat) }
3686d90fd774SSteven Rostedt (Red Hat)
3687d90fd774SSteven Rostedt (Red Hat) static unsigned rb_calculate_event_length(unsigned length)
3688d90fd774SSteven Rostedt (Red Hat) {
3689d90fd774SSteven Rostedt (Red Hat) struct ring_buffer_event event; /* Used only for sizeof array */
3690d90fd774SSteven Rostedt (Red Hat)
3691d90fd774SSteven Rostedt (Red Hat) /* zero length can cause confusions */
3692d90fd774SSteven Rostedt (Red Hat) if (!length)
3693d90fd774SSteven Rostedt (Red Hat) length++;
3694d90fd774SSteven Rostedt (Red Hat)
3695adab66b7SSteven Rostedt (VMware) if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
3696d90fd774SSteven Rostedt (Red Hat) length += sizeof(event.array[0]);
3697d90fd774SSteven Rostedt (Red Hat)
3698d90fd774SSteven Rostedt (Red Hat) length += RB_EVNT_HDR_SIZE;
3699adab66b7SSteven Rostedt (VMware) length = ALIGN(length, RB_ARCH_ALIGNMENT);
3700d90fd774SSteven Rostedt (Red Hat)
3701d90fd774SSteven Rostedt (Red Hat) /*
3702d90fd774SSteven Rostedt (Red Hat) * In case the time delta is larger than the 27 bits for it
3703d90fd774SSteven Rostedt (Red Hat) * in the header, we need to add a timestamp. If another
3704d90fd774SSteven Rostedt (Red Hat) * event comes in when trying to discard this one to increase
3705d90fd774SSteven Rostedt (Red Hat) * the length, then the timestamp will be added in the allocated
3706d90fd774SSteven Rostedt (Red Hat) * space of this event. If length is bigger than the size needed
3707d90fd774SSteven Rostedt (Red Hat) * for the TIME_EXTEND, then padding has to be used. The events
3708d90fd774SSteven Rostedt (Red Hat) * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
3709d90fd774SSteven Rostedt (Red Hat) * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
3710d90fd774SSteven Rostedt (Red Hat) * As length is a multiple of 4, we only need to worry if it
3711d90fd774SSteven Rostedt (Red Hat) * is 12 (RB_LEN_TIME_EXTEND + 4).
3712d90fd774SSteven Rostedt (Red Hat) */
3713d90fd774SSteven Rostedt (Red Hat) if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
3714d90fd774SSteven Rostedt (Red Hat) length += RB_ALIGNMENT;
3715d90fd774SSteven Rostedt (Red Hat)
3716d90fd774SSteven Rostedt (Red Hat) return length;
3717d90fd774SSteven Rostedt (Red Hat) }
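/*
 * Worked example (editor's note, assuming 4-byte RB_ARCH_ALIGNMENT and
 * the usual 4-byte event header): a 7-byte payload becomes 7 + 4 = 11
 * bytes, aligned up to 12, which equals RB_LEN_TIME_EXTEND +
 * RB_ALIGNMENT and is therefore bumped to 16, so a later discard can
 * still fit a TIME_EXTEND plus minimal padding in its place.
 */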
3718d90fd774SSteven Rostedt (Red Hat)
3719bc92b956SUros Bizjak static inline bool
3720a4543a2fSSteven Rostedt (Red Hat) rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
3721d90fd774SSteven Rostedt (Red Hat) struct ring_buffer_event *event)
3722d90fd774SSteven Rostedt (Red Hat) {
3723d90fd774SSteven Rostedt (Red Hat) unsigned long new_index, old_index;
3724d90fd774SSteven Rostedt (Red Hat) struct buffer_page *bpage;
3725d90fd774SSteven Rostedt (Red Hat) unsigned long addr;
3726d90fd774SSteven Rostedt (Red Hat)
37273cb30911SSteven Rostedt (Google) new_index = rb_event_index(cpu_buffer, event);
3728d90fd774SSteven Rostedt (Red Hat) old_index = new_index + rb_event_ts_length(event);
3729d90fd774SSteven Rostedt (Red Hat) addr = (unsigned long)event;
37303cb30911SSteven Rostedt (Google) addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
3731d90fd774SSteven Rostedt (Red Hat)
37328573636eSSteven Rostedt (Red Hat) bpage = READ_ONCE(cpu_buffer->tail_page);
3733d90fd774SSteven Rostedt (Red Hat)
3734083e9f65SSteven Rostedt (Google) /*
3735083e9f65SSteven Rostedt (Google) * Make sure the tail_page is still the same and
3736083e9f65SSteven Rostedt (Google) * the next write location is the end of this event
3737083e9f65SSteven Rostedt (Google) */
3738d90fd774SSteven Rostedt (Red Hat) if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
3739d90fd774SSteven Rostedt (Red Hat) unsigned long write_mask =
3740d90fd774SSteven Rostedt (Red Hat) local_read(&bpage->write) & ~RB_WRITE_MASK;
3741d90fd774SSteven Rostedt (Red Hat) unsigned long event_length = rb_event_length(event);
3742a389d86fSSteven Rostedt (VMware)
3743b2dd7975SSteven Rostedt (Google) /*
3744b2dd7975SSteven Rostedt (Google) * Make the before_stamp different from the write_stamp
3745b2dd7975SSteven Rostedt (Google) * so that the next event adds an absolute
3746b2dd7975SSteven Rostedt (Google) * value and does not rely on the saved write stamp, which
3747b2dd7975SSteven Rostedt (Google) * is now going to be bogus.
3748083e9f65SSteven Rostedt (Google) *
3749083e9f65SSteven Rostedt (Google) * By setting the before_stamp to zero, the next event
3750083e9f65SSteven Rostedt (Google) * is not going to use the write_stamp and will instead
3751083e9f65SSteven Rostedt (Google) * create an absolute timestamp. This means there's no
3752083e9f65SSteven Rostedt (Google) * reason to update the write_stamp!
3753b2dd7975SSteven Rostedt (Google) */
3754b2dd7975SSteven Rostedt (Google) rb_time_set(&cpu_buffer->before_stamp, 0);
3755b2dd7975SSteven Rostedt (Google)
3756a389d86fSSteven Rostedt (VMware) /*
3757a389d86fSSteven Rostedt (VMware) * If an event were to come in now, it would see that the
3758a389d86fSSteven Rostedt (VMware) * write_stamp and the before_stamp are different, and assume
3759a389d86fSSteven Rostedt (VMware) * that this event just added itself before updating
3760a389d86fSSteven Rostedt (VMware) * the write stamp. The interrupting event will fix the
3761083e9f65SSteven Rostedt (Google) * write stamp for us, and use an absolute timestamp.
3762a389d86fSSteven Rostedt (VMware) */
3763a389d86fSSteven Rostedt (VMware)
3764d90fd774SSteven Rostedt (Red Hat) /*
3765d90fd774SSteven Rostedt (Red Hat) * This is on the tail page. It is possible that
3766d90fd774SSteven Rostedt (Red Hat) * a write could come in and move the tail page
3767d90fd774SSteven Rostedt (Red Hat) * and write to the next page. That is fine
3768d90fd774SSteven Rostedt (Red Hat) * because we just shorten what is on this page.
3769d90fd774SSteven Rostedt (Red Hat) */
3770d90fd774SSteven Rostedt (Red Hat) old_index += write_mask;
3771d90fd774SSteven Rostedt (Red Hat) new_index += write_mask;
377200a8478fSUros Bizjak
377300a8478fSUros Bizjak /* caution: old_index gets updated on cmpxchg failure */
377400a8478fSUros Bizjak if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) {
3775d90fd774SSteven Rostedt (Red Hat) /* update counters */
3776d90fd774SSteven Rostedt (Red Hat) local_sub(event_length, &cpu_buffer->entries_bytes);
3777bc92b956SUros Bizjak return true;
3778d90fd774SSteven Rostedt (Red Hat) }
3779d90fd774SSteven Rostedt (Red Hat) }
3780d90fd774SSteven Rostedt (Red Hat)
3781d90fd774SSteven Rostedt (Red Hat) /* could not discard */
3782bc92b956SUros Bizjak return false;
3783d90fd774SSteven Rostedt (Red Hat) }
3784d90fd774SSteven Rostedt (Red Hat)
3785d90fd774SSteven Rostedt (Red Hat) static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
3786d90fd774SSteven Rostedt (Red Hat) {
3787d90fd774SSteven Rostedt (Red Hat) local_inc(&cpu_buffer->committing);
3788d90fd774SSteven Rostedt (Red Hat) local_inc(&cpu_buffer->commits);
3789d90fd774SSteven Rostedt (Red Hat) }
3790d90fd774SSteven Rostedt (Red Hat)
379138e11df1SSteven Rostedt (Red Hat) static __always_inline void
3792d90fd774SSteven Rostedt (Red Hat) rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
3793d90fd774SSteven Rostedt (Red Hat) {
3794d90fd774SSteven Rostedt (Red Hat) unsigned long max_count;
3795d90fd774SSteven Rostedt (Red Hat)
3796d90fd774SSteven Rostedt (Red Hat) /*
3797d90fd774SSteven Rostedt (Red Hat) * We only race with interrupts and NMIs on this CPU.
3798d90fd774SSteven Rostedt (Red Hat) * If we own the commit event, then we can commit
3799d90fd774SSteven Rostedt (Red Hat) * all others that interrupted us, since the interruptions
3800d90fd774SSteven Rostedt (Red Hat) * are in stack format (they finish before they come
3801d90fd774SSteven Rostedt (Red Hat) * back to us). This allows us to do a simple loop to
3802d90fd774SSteven Rostedt (Red Hat) * assign the commit to the tail.
3803d90fd774SSteven Rostedt (Red Hat) */
3804d90fd774SSteven Rostedt (Red Hat) again:
3805d90fd774SSteven Rostedt (Red Hat) max_count = cpu_buffer->nr_pages * 100;
3806d90fd774SSteven Rostedt (Red Hat)
38078573636eSSteven Rostedt (Red Hat) while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
3808d90fd774SSteven Rostedt (Red Hat) if (RB_WARN_ON(cpu_buffer, !(--max_count)))
3809d90fd774SSteven Rostedt (Red Hat) return;
3810d90fd774SSteven Rostedt (Red Hat) if (RB_WARN_ON(cpu_buffer,
3811d90fd774SSteven Rostedt (Red Hat) rb_is_reader_page(cpu_buffer->tail_page)))
3812d90fd774SSteven Rostedt (Red Hat) return;
38136455b616SZheng Yejian /*
38146455b616SZheng Yejian * No need for a memory barrier here, as the update
38156455b616SZheng Yejian * of the tail_page did it for this page.
38166455b616SZheng Yejian */
3817d90fd774SSteven Rostedt (Red Hat) local_set(&cpu_buffer->commit_page->page->commit,
3818d90fd774SSteven Rostedt (Red Hat) rb_page_write(cpu_buffer->commit_page));
38196689bed3SQiujun Huang rb_inc_page(&cpu_buffer->commit_page);
3820b14d0329SSteven Rostedt (Google) if (cpu_buffer->ring_meta) {
38214009cc31SSteven Rostedt struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
3822b14d0329SSteven Rostedt (Google) meta->commit_buffer = (unsigned long)cpu_buffer->commit_page->page;
3823b14d0329SSteven Rostedt (Google) }
3824d90fd774SSteven Rostedt (Red Hat) /* add barrier to keep gcc from optimizing too much */
3825d90fd774SSteven Rostedt (Red Hat) barrier();
3826d90fd774SSteven Rostedt (Red Hat) }
3827d90fd774SSteven Rostedt (Red Hat) while (rb_commit_index(cpu_buffer) !=
3828d90fd774SSteven Rostedt (Red Hat) rb_page_write(cpu_buffer->commit_page)) {
3829d90fd774SSteven Rostedt (Red Hat)
38306455b616SZheng Yejian /* Make sure the readers see the content of what is committed. */
38316455b616SZheng Yejian smp_wmb();
3832d90fd774SSteven Rostedt (Red Hat) local_set(&cpu_buffer->commit_page->page->commit,
3833d90fd774SSteven Rostedt (Red Hat) rb_page_write(cpu_buffer->commit_page));
3834d90fd774SSteven Rostedt (Red Hat) RB_WARN_ON(cpu_buffer,
3835d90fd774SSteven Rostedt (Red Hat) local_read(&cpu_buffer->commit_page->page->commit) &
3836d90fd774SSteven Rostedt (Red Hat) ~RB_WRITE_MASK);
3837d90fd774SSteven Rostedt (Red Hat) barrier();
3838d90fd774SSteven Rostedt (Red Hat) }
3839d90fd774SSteven Rostedt (Red Hat)
3840d90fd774SSteven Rostedt (Red Hat) /* again, keep gcc from optimizing */
3841d90fd774SSteven Rostedt (Red Hat) barrier();
3842d90fd774SSteven Rostedt (Red Hat)
3843d90fd774SSteven Rostedt (Red Hat) /*
3844d90fd774SSteven Rostedt (Red Hat) * If an interrupt came in just after the first while loop
3845d90fd774SSteven Rostedt (Red Hat) * and pushed the tail page forward, we will be left with
3846d90fd774SSteven Rostedt (Red Hat) * a dangling commit that will never go forward.
3847d90fd774SSteven Rostedt (Red Hat) * a dangling commit that will never go forward. */
38488573636eSSteven Rostedt (Red Hat) if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
3849d90fd774SSteven Rostedt (Red Hat) goto again;
3850d90fd774SSteven Rostedt (Red Hat) }
3851d90fd774SSteven Rostedt (Red Hat)
385238e11df1SSteven Rostedt (Red Hat) static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
3853d90fd774SSteven Rostedt (Red Hat) {
3854d90fd774SSteven Rostedt (Red Hat) unsigned long commits;
3855d90fd774SSteven Rostedt (Red Hat)
3856d90fd774SSteven Rostedt (Red Hat) if (RB_WARN_ON(cpu_buffer,
3857d90fd774SSteven Rostedt (Red Hat) !local_read(&cpu_buffer->committing)))
3858d90fd774SSteven Rostedt (Red Hat) return;
3859d90fd774SSteven Rostedt (Red Hat)
3860d90fd774SSteven Rostedt (Red Hat) again:
3861d90fd774SSteven Rostedt (Red Hat) commits = local_read(&cpu_buffer->commits);
3862d90fd774SSteven Rostedt (Red Hat) /* synchronize with interrupts */
3863d90fd774SSteven Rostedt (Red Hat) barrier();
3864d90fd774SSteven Rostedt (Red Hat) if (local_read(&cpu_buffer->committing) == 1)
3865d90fd774SSteven Rostedt (Red Hat) rb_set_commit_to_write(cpu_buffer);
3866d90fd774SSteven Rostedt (Red Hat)
3867d90fd774SSteven Rostedt (Red Hat) local_dec(&cpu_buffer->committing);
3868d90fd774SSteven Rostedt (Red Hat)
3869d90fd774SSteven Rostedt (Red Hat) /* synchronize with interrupts */
3870d90fd774SSteven Rostedt (Red Hat) barrier();
3871d90fd774SSteven Rostedt (Red Hat)
3872d90fd774SSteven Rostedt (Red Hat) /*
3873d90fd774SSteven Rostedt (Red Hat) * Need to account for interrupts coming in between the
3874d90fd774SSteven Rostedt (Red Hat) * updating of the commit page and the clearing of the
3875d90fd774SSteven Rostedt (Red Hat) * committing counter.
3876d90fd774SSteven Rostedt (Red Hat) * committing counter. */
3877d90fd774SSteven Rostedt (Red Hat) if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
3878d90fd774SSteven Rostedt (Red Hat) !local_read(&cpu_buffer->committing)) {
3879d90fd774SSteven Rostedt (Red Hat) local_inc(&cpu_buffer->committing);
3880d90fd774SSteven Rostedt (Red Hat) goto again;
3881d90fd774SSteven Rostedt (Red Hat) }
3882d90fd774SSteven Rostedt (Red Hat) }
3883d90fd774SSteven Rostedt (Red Hat)
3884d90fd774SSteven Rostedt (Red Hat) static inline void rb_event_discard(struct ring_buffer_event *event)
3885d90fd774SSteven Rostedt (Red Hat) {
3886dc4e2801STom Zanussi if (extended_time(event))
3887d90fd774SSteven Rostedt (Red Hat) event = skip_time_extend(event);
3888d90fd774SSteven Rostedt (Red Hat)
3889d90fd774SSteven Rostedt (Red Hat) /* array[0] holds the actual length for the discarded event */
3890d90fd774SSteven Rostedt (Red Hat) event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
3891d90fd774SSteven Rostedt (Red Hat) event->type_len = RINGBUF_TYPE_PADDING;
3892d90fd774SSteven Rostedt (Red Hat) /* time delta must be non zero */
3893d90fd774SSteven Rostedt (Red Hat) if (!event->time_delta)
3894d90fd774SSteven Rostedt (Red Hat) event->time_delta = 1;
3895d90fd774SSteven Rostedt (Red Hat) }
3896d90fd774SSteven Rostedt (Red Hat)
389704aabc32SSong Chen static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer)
3898d90fd774SSteven Rostedt (Red Hat) {
3899d90fd774SSteven Rostedt (Red Hat) local_inc(&cpu_buffer->entries);
3900d90fd774SSteven Rostedt (Red Hat) rb_end_commit(cpu_buffer);
3901d90fd774SSteven Rostedt (Red Hat) }
3902d90fd774SSteven Rostedt (Red Hat)
3903d90fd774SSteven Rostedt (Red Hat) static __always_inline void
390513292494SSteven Rostedt (VMware) rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
3905d90fd774SSteven Rostedt (Red Hat) {
3906d90fd774SSteven Rostedt (Red Hat) if (buffer->irq_work.waiters_pending) {
3907d90fd774SSteven Rostedt (Red Hat) buffer->irq_work.waiters_pending = false;
3908d90fd774SSteven Rostedt (Red Hat) /* irq_work_queue() supplies its own memory barriers */
3909d90fd774SSteven Rostedt (Red Hat) irq_work_queue(&buffer->irq_work.work);
3910d90fd774SSteven Rostedt (Red Hat) }
3911d90fd774SSteven Rostedt (Red Hat)
3912d90fd774SSteven Rostedt (Red Hat) if (cpu_buffer->irq_work.waiters_pending) {
3913d90fd774SSteven Rostedt (Red Hat) cpu_buffer->irq_work.waiters_pending = false;
3914d90fd774SSteven Rostedt (Red Hat) /* irq_work_queue() supplies its own memory barriers */
3915d90fd774SSteven Rostedt (Red Hat) irq_work_queue(&cpu_buffer->irq_work.work);
3916d90fd774SSteven Rostedt (Red Hat) }
3917d90fd774SSteven Rostedt (Red Hat)
391803329f99SSteven Rostedt (VMware) if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
391903329f99SSteven Rostedt (VMware) return;
3920d90fd774SSteven Rostedt (Red Hat)
392103329f99SSteven Rostedt (VMware) if (cpu_buffer->reader_page == cpu_buffer->commit_page)
392203329f99SSteven Rostedt (VMware) return;
392303329f99SSteven Rostedt (VMware)
392403329f99SSteven Rostedt (VMware) if (!cpu_buffer->irq_work.full_waiters_pending)
392503329f99SSteven Rostedt (VMware) return;
392603329f99SSteven Rostedt (VMware)
392703329f99SSteven Rostedt (VMware) cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
39282c2b0a78SSteven Rostedt (VMware)
392942fb0a1eSSteven Rostedt (Google) if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
39302c2b0a78SSteven Rostedt (VMware) return;
39312c2b0a78SSteven Rostedt (VMware)
3932d90fd774SSteven Rostedt (Red Hat) cpu_buffer->irq_work.wakeup_full = true;
3933d90fd774SSteven Rostedt (Red Hat) cpu_buffer->irq_work.full_waiters_pending = false;
3934d90fd774SSteven Rostedt (Red Hat) /* irq_work_queue() supplies its own memory barriers */
3935d90fd774SSteven Rostedt (Red Hat) irq_work_queue(&cpu_buffer->irq_work.work);
3936d90fd774SSteven Rostedt (Red Hat) }
3937d90fd774SSteven Rostedt (Red Hat)
393828575c61SSteven Rostedt (VMware) #ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
393928575c61SSteven Rostedt (VMware) # define do_ring_buffer_record_recursion() \
394028575c61SSteven Rostedt (VMware) do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
394128575c61SSteven Rostedt (VMware) #else
394228575c61SSteven Rostedt (VMware) # define do_ring_buffer_record_recursion() do { } while (0)
394328575c61SSteven Rostedt (VMware) #endif
394428575c61SSteven Rostedt (VMware)
3945d90fd774SSteven Rostedt (Red Hat) /*
3946d90fd774SSteven Rostedt (Red Hat) * The lock and unlock are done within a preempt disable section.
3947d90fd774SSteven Rostedt (Red Hat) * The current_context per_cpu variable can only be modified
3948d90fd774SSteven Rostedt (Red Hat) * by the current task between lock and unlock. But it can
3949a0e3a18fSSteven Rostedt (VMware) * be modified more than once via an interrupt. To pass this
3950a0e3a18fSSteven Rostedt (VMware) * information from the lock to the unlock without having to
3951a0e3a18fSSteven Rostedt (VMware) * access the 'in_interrupt()' functions again (which do show
3952a0e3a18fSSteven Rostedt (VMware) * a bit of overhead in something as critical as function tracing),
3953a0e3a18fSSteven Rostedt (VMware) * we use a bitmask trick.
3954d90fd774SSteven Rostedt (Red Hat) *
3955b02414c8SSteven Rostedt (VMware) * bit 1 = NMI context
3956b02414c8SSteven Rostedt (VMware) * bit 2 = IRQ context
3957b02414c8SSteven Rostedt (VMware) * bit 3 = SoftIRQ context
3958b02414c8SSteven Rostedt (VMware) * bit 4 = normal context.
3959d90fd774SSteven Rostedt (Red Hat) *
3960a0e3a18fSSteven Rostedt (VMware) * This works because this is the order of contexts that can
3961a0e3a18fSSteven Rostedt (VMware) * preempt other contexts. A SoftIRQ never preempts an IRQ
3962a0e3a18fSSteven Rostedt (VMware) * context.
3963a0e3a18fSSteven Rostedt (VMware) *
3964a0e3a18fSSteven Rostedt (VMware) * When the context is determined, the corresponding bit is
3965a0e3a18fSSteven Rostedt (VMware) * checked and set (if it was set, then a recursion of that context
3966a0e3a18fSSteven Rostedt (VMware) * happened).
3967a0e3a18fSSteven Rostedt (VMware) *
3968a0e3a18fSSteven Rostedt (VMware) * On unlock, we need to clear this bit. To do so, just subtract
3969a0e3a18fSSteven Rostedt (VMware) * 1 from the current_context and AND it to itself.
3970a0e3a18fSSteven Rostedt (VMware) *
3971a0e3a18fSSteven Rostedt (VMware) * (binary)
3972a0e3a18fSSteven Rostedt (VMware) * 101 - 1 = 100
3973a0e3a18fSSteven Rostedt (VMware) * 101 & 100 = 100 (clearing bit zero)
3974a0e3a18fSSteven Rostedt (VMware) *
3975a0e3a18fSSteven Rostedt (VMware) * 1010 - 1 = 1001
3976a0e3a18fSSteven Rostedt (VMware) * 1010 & 1001 = 1000 (clearing bit 1)
3977a0e3a18fSSteven Rostedt (VMware) *
3978a0e3a18fSSteven Rostedt (VMware) * The least significant bit can be cleared this way, and it
3979a0e3a18fSSteven Rostedt (VMware) * just so happens that it is the same bit corresponding to
3980a0e3a18fSSteven Rostedt (VMware) * the current context.
3981b02414c8SSteven Rostedt (VMware) *
3982b02414c8SSteven Rostedt (VMware) * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
3983b02414c8SSteven Rostedt (VMware) * is set when a recursion is detected at the current context, and if
3984b02414c8SSteven Rostedt (VMware) * the TRANSITION bit is already set, it will fail the recursion.
3985b02414c8SSteven Rostedt (VMware) * This is needed because there's a lag between the changing of
3986b02414c8SSteven Rostedt (VMware) * interrupt context and updating the preempt count. In this case,
3987b02414c8SSteven Rostedt (VMware) * a false positive will be found. To handle this, one extra recursion
3988b02414c8SSteven Rostedt (VMware) * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
3989b02414c8SSteven Rostedt (VMware) * bit is already set, then it is considered a recursion and the function
3990b02414c8SSteven Rostedt (VMware) * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
3991b02414c8SSteven Rostedt (VMware) *
3992b02414c8SSteven Rostedt (VMware) * On the trace_recursive_unlock(), the TRANSITION bit will be the first
3993b02414c8SSteven Rostedt (VMware) * to be cleared. Even if it wasn't the context that set it. That is,
3994b02414c8SSteven Rostedt (VMware) * if an interrupt comes in while NORMAL bit is set and the ring buffer
3995b02414c8SSteven Rostedt (VMware) * is called before preempt_count() is updated, since the check will
3996b02414c8SSteven Rostedt (VMware) * be on the NORMAL bit, the TRANSITION bit will then be set. If an
3997b02414c8SSteven Rostedt (VMware) * NMI then comes in, it will set the NMI bit, but when the NMI code
3998f2cc020dSIngo Molnar * does the trace_recursive_unlock() it will clear the TRANSITION bit
3999b02414c8SSteven Rostedt (VMware) * and leave the NMI bit set. But this is fine, because the interrupt
4000b02414c8SSteven Rostedt (VMware) * code that set the TRANSITION bit will then clear the NMI bit when it
4001b02414c8SSteven Rostedt (VMware) * calls trace_recursive_unlock(). If another NMI comes in, it will
4002b02414c8SSteven Rostedt (VMware) * set the TRANSITION bit and continue.
4003b02414c8SSteven Rostedt (VMware) *
4004b02414c8SSteven Rostedt (VMware) * Note: The TRANSITION bit only handles a single transition between contexts.
4005d90fd774SSteven Rostedt (Red Hat) */
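/*
 * Worked example of the bit trick above (an editorial sketch, assuming
 * nest == 0): a normal-context write (bit 4) interrupted by an IRQ
 * write (bit 2, per the table above):
 *
 *	normal lock:   current_context 00000 -> 10000
 *	IRQ lock:      current_context 10000 -> 10100
 *	IRQ unlock:    10100 & (10100 - 1) = 10100 & 10011 = 10000
 *	normal unlock: 10000 & (10000 - 1) = 10000 & 01111 = 00000
 *
 * Each unlock clears only the lowest set bit, which always belongs to
 * the innermost (most recently entered) context.
 */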
4006d90fd774SSteven Rostedt (Red Hat)
4007bc92b956SUros Bizjak static __always_inline bool
4008d90fd774SSteven Rostedt (Red Hat) trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
4009d90fd774SSteven Rostedt (Red Hat) {
4010a0e3a18fSSteven Rostedt (VMware) unsigned int val = cpu_buffer->current_context;
401191ebe8bcSSteven Rostedt (VMware) int bit = interrupt_context_level();
40129b84fadcSSteven Rostedt (VMware)
40139b84fadcSSteven Rostedt (VMware) bit = RB_CTX_NORMAL - bit;
4014a0e3a18fSSteven Rostedt (VMware)
4015b02414c8SSteven Rostedt (VMware) if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
4016b02414c8SSteven Rostedt (VMware) /*
4017b02414c8SSteven Rostedt (VMware) * It is possible that this was called by transitioning
4018b02414c8SSteven Rostedt (VMware) * between interrupt context, and preempt_count() has not
4019b02414c8SSteven Rostedt (VMware) * been updated yet. In this case, use the TRANSITION bit.
4020b02414c8SSteven Rostedt (VMware) * been updated yet. In this case, use the TRANSITION bit. */
4021b02414c8SSteven Rostedt (VMware) bit = RB_CTX_TRANSITION;
402228575c61SSteven Rostedt (VMware) if (val & (1 << (bit + cpu_buffer->nest))) {
402328575c61SSteven Rostedt (VMware) do_ring_buffer_record_recursion();
4024bc92b956SUros Bizjak return true;
4025b02414c8SSteven Rostedt (VMware) }
402628575c61SSteven Rostedt (VMware) }
4027d90fd774SSteven Rostedt (Red Hat)
40288e012066SSteven Rostedt (VMware) val |= (1 << (bit + cpu_buffer->nest));
4029a0e3a18fSSteven Rostedt (VMware) cpu_buffer->current_context = val;
4030d90fd774SSteven Rostedt (Red Hat)
4031bc92b956SUros Bizjak return false;
4032d90fd774SSteven Rostedt (Red Hat) }
4033d90fd774SSteven Rostedt (Red Hat)
4034d90fd774SSteven Rostedt (Red Hat) static __always_inline void
4035d90fd774SSteven Rostedt (Red Hat) trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
4036d90fd774SSteven Rostedt (Red Hat) {
40378e012066SSteven Rostedt (VMware) cpu_buffer->current_context &=
40388e012066SSteven Rostedt (VMware) cpu_buffer->current_context - (1 << cpu_buffer->nest);
40398e012066SSteven Rostedt (VMware) }
40408e012066SSteven Rostedt (VMware)
4041b02414c8SSteven Rostedt (VMware) /* The recursive locking above uses 5 bits */
4042b02414c8SSteven Rostedt (VMware) #define NESTED_BITS 5
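/*
 * Editorial note on NESTED_BITS (inferred from trace_recursive_lock()
 * above): with nest == 0 the five context bits occupy bits 0-4 of
 * current_context; after ring_buffer_nest_start() bumps nest by
 * NESTED_BITS, a nested reservation tracks its contexts in bits 5-9,
 * so inner and outer reservations do not trip each other's recursion
 * check.
 */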
40438e012066SSteven Rostedt (VMware)
40448e012066SSteven Rostedt (VMware) /**
40458e012066SSteven Rostedt (VMware) * ring_buffer_nest_start - Allow tracing while nested
40468e012066SSteven Rostedt (VMware) * @buffer: The ring buffer to modify
40478e012066SSteven Rostedt (VMware) *
40486167c205SSteven Rostedt (VMware) * The ring buffer has a safety mechanism to prevent recursion.
40498e012066SSteven Rostedt (VMware) * But there may be a case where a trace needs to be done while
40508e012066SSteven Rostedt (VMware) * tracing something else. In this case, calling this function
40518e012066SSteven Rostedt (VMware) * will allow another ring_buffer_lock_reserve() to nest within a
40528e012066SSteven Rostedt (VMware) * currently active ring_buffer_lock_reserve().
40538e012066SSteven Rostedt (VMware) *
40548e012066SSteven Rostedt (VMware) * Call this function before calling another ring_buffer_lock_reserve() and
40558e012066SSteven Rostedt (VMware) * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
40568e012066SSteven Rostedt (VMware) */
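/*
 * Illustrative call sequence, per the description above:
 *
 *	ring_buffer_nest_start(buffer);
 *	event = ring_buffer_lock_reserve(buffer, length);
 *	...
 *	ring_buffer_unlock_commit(buffer);
 *	ring_buffer_nest_end(buffer);
 */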
405713292494SSteven Rostedt (VMware) void ring_buffer_nest_start(struct trace_buffer *buffer)
40588e012066SSteven Rostedt (VMware) {
40598e012066SSteven Rostedt (VMware) struct ring_buffer_per_cpu *cpu_buffer;
40608e012066SSteven Rostedt (VMware) int cpu;
40618e012066SSteven Rostedt (VMware)
40628e012066SSteven Rostedt (VMware) /* Enabled by ring_buffer_nest_end() */
40638e012066SSteven Rostedt (VMware) preempt_disable_notrace();
40648e012066SSteven Rostedt (VMware) cpu = raw_smp_processor_id();
40658e012066SSteven Rostedt (VMware) cpu_buffer = buffer->buffers[cpu];
40666167c205SSteven Rostedt (VMware) /* This is the shift value for the above recursive locking */
40678e012066SSteven Rostedt (VMware) cpu_buffer->nest += NESTED_BITS;
40688e012066SSteven Rostedt (VMware) }
40698e012066SSteven Rostedt (VMware)
40708e012066SSteven Rostedt (VMware) /**
40718e012066SSteven Rostedt (VMware) * ring_buffer_nest_end - End allowing tracing while nested
40728e012066SSteven Rostedt (VMware) * @buffer: The ring buffer to modify
40738e012066SSteven Rostedt (VMware) *
40748e012066SSteven Rostedt (VMware) * Must be called after ring_buffer_nest_start() and after the
40758e012066SSteven Rostedt (VMware) * ring_buffer_unlock_commit().
40768e012066SSteven Rostedt (VMware) */
407813292494SSteven Rostedt (VMware) void ring_buffer_nest_end(struct trace_buffer *buffer)
40788e012066SSteven Rostedt (VMware) {
40798e012066SSteven Rostedt (VMware) struct ring_buffer_per_cpu *cpu_buffer;
40808e012066SSteven Rostedt (VMware) int cpu;
40818e012066SSteven Rostedt (VMware)
40828e012066SSteven Rostedt (VMware) /* disabled by ring_buffer_nest_start() */
40838e012066SSteven Rostedt (VMware) cpu = raw_smp_processor_id();
40848e012066SSteven Rostedt (VMware) cpu_buffer = buffer->buffers[cpu];
40856167c205SSteven Rostedt (VMware) /* This is the shift value for the above recursive locking */
40868e012066SSteven Rostedt (VMware) cpu_buffer->nest -= NESTED_BITS;
40878e012066SSteven Rostedt (VMware) preempt_enable_notrace();
4088d90fd774SSteven Rostedt (Red Hat) }
4089d90fd774SSteven Rostedt (Red Hat)
4090d90fd774SSteven Rostedt (Red Hat) /**
4091d90fd774SSteven Rostedt (Red Hat) * ring_buffer_unlock_commit - commit a reserved event
4092d90fd774SSteven Rostedt (Red Hat) * @buffer: The buffer to commit to
4093d90fd774SSteven Rostedt (Red Hat) *
4094d90fd774SSteven Rostedt (Red Hat) * This commits the data to the ring buffer, and releases any locks held.
4095d90fd774SSteven Rostedt (Red Hat) *
4096d90fd774SSteven Rostedt (Red Hat) * Must be paired with ring_buffer_lock_reserve.
4097d90fd774SSteven Rostedt (Red Hat) */
409804aabc32SSong Chen int ring_buffer_unlock_commit(struct trace_buffer *buffer)
4099d90fd774SSteven Rostedt (Red Hat) {
4100d90fd774SSteven Rostedt (Red Hat) struct ring_buffer_per_cpu *cpu_buffer;
4101d90fd774SSteven Rostedt (Red Hat) int cpu = raw_smp_processor_id();
4102d90fd774SSteven Rostedt (Red Hat)
4103d90fd774SSteven Rostedt (Red Hat) cpu_buffer = buffer->buffers[cpu];
4104d90fd774SSteven Rostedt (Red Hat)
410504aabc32SSong Chen rb_commit(cpu_buffer);
4106d90fd774SSteven Rostedt (Red Hat)
4107d90fd774SSteven Rostedt (Red Hat) rb_wakeups(buffer, cpu_buffer);
4108d90fd774SSteven Rostedt (Red Hat)
4109d90fd774SSteven Rostedt (Red Hat) trace_recursive_unlock(cpu_buffer);
4110d90fd774SSteven Rostedt (Red Hat)
4111d90fd774SSteven Rostedt (Red Hat) preempt_enable_notrace();
4112d90fd774SSteven Rostedt (Red Hat)
4113d90fd774SSteven Rostedt (Red Hat) return 0;
4114d90fd774SSteven Rostedt (Red Hat) }
4115d90fd774SSteven Rostedt (Red Hat) EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
4116a4543a2fSSteven Rostedt (Red Hat)
41175b7be9c7SSteven Rostedt (VMware) /* Special value to validate all deltas on a page. */
41185b7be9c7SSteven Rostedt (VMware) #define CHECK_FULL_PAGE 1L
41195b7be9c7SSteven Rostedt (VMware)
41205b7be9c7SSteven Rostedt (VMware) #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
4121d40dbb61SSteven Rostedt (Google)
4122d40dbb61SSteven Rostedt (Google) static const char *show_irq_str(int bits)
4123d40dbb61SSteven Rostedt (Google) {
4124d40dbb61SSteven Rostedt (Google) const char *type[] = {
4125d40dbb61SSteven Rostedt (Google) ".", // 0
4126d40dbb61SSteven Rostedt (Google) "s", // 1
4127d40dbb61SSteven Rostedt (Google) "h", // 2
4128d40dbb61SSteven Rostedt (Google) "Hs", // 3
4129d40dbb61SSteven Rostedt (Google) "n", // 4
4130d40dbb61SSteven Rostedt (Google) "Ns", // 5
4131d40dbb61SSteven Rostedt (Google) "Nh", // 6
4132d40dbb61SSteven Rostedt (Google) "NHs", // 7
4133d40dbb61SSteven Rostedt (Google) };
4134d40dbb61SSteven Rostedt (Google)
4135d40dbb61SSteven Rostedt (Google) return type[bits];
4136d40dbb61SSteven Rostedt (Google) }
4137d40dbb61SSteven Rostedt (Google)
4138537affeaSliujing /* Assume this is a trace event */
4139d40dbb61SSteven Rostedt (Google) static const char *show_flags(struct ring_buffer_event *event)
4140d40dbb61SSteven Rostedt (Google) {
4141d40dbb61SSteven Rostedt (Google) struct trace_entry *entry;
4142d40dbb61SSteven Rostedt (Google) int bits = 0;
4143d40dbb61SSteven Rostedt (Google)
4144d40dbb61SSteven Rostedt (Google) if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
4145d40dbb61SSteven Rostedt (Google) return "X";
4146d40dbb61SSteven Rostedt (Google)
4147d40dbb61SSteven Rostedt (Google) entry = ring_buffer_event_data(event);
4148d40dbb61SSteven Rostedt (Google)
4149d40dbb61SSteven Rostedt (Google) if (entry->flags & TRACE_FLAG_SOFTIRQ)
4150d40dbb61SSteven Rostedt (Google) bits |= 1;
4151d40dbb61SSteven Rostedt (Google)
4152d40dbb61SSteven Rostedt (Google) if (entry->flags & TRACE_FLAG_HARDIRQ)
4153d40dbb61SSteven Rostedt (Google) bits |= 2;
4154d40dbb61SSteven Rostedt (Google)
4155d40dbb61SSteven Rostedt (Google) if (entry->flags & TRACE_FLAG_NMI)
4156d40dbb61SSteven Rostedt (Google) bits |= 4;
4157d40dbb61SSteven Rostedt (Google)
4158d40dbb61SSteven Rostedt (Google) return show_irq_str(bits);
4159d40dbb61SSteven Rostedt (Google) }
4160d40dbb61SSteven Rostedt (Google)
4161d40dbb61SSteven Rostedt (Google) static const char *show_irq(struct ring_buffer_event *event)
4162d40dbb61SSteven Rostedt (Google) {
4163d40dbb61SSteven Rostedt (Google) struct trace_entry *entry;
4164d40dbb61SSteven Rostedt (Google)
4165d40dbb61SSteven Rostedt (Google) if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
4166d40dbb61SSteven Rostedt (Google) return "";
4167d40dbb61SSteven Rostedt (Google)
4168d40dbb61SSteven Rostedt (Google) entry = ring_buffer_event_data(event);
4169d40dbb61SSteven Rostedt (Google) if (entry->flags & TRACE_FLAG_IRQS_OFF)
4170d40dbb61SSteven Rostedt (Google) return "d";
4171d40dbb61SSteven Rostedt (Google) return "";
4172d40dbb61SSteven Rostedt (Google) }
4173d40dbb61SSteven Rostedt (Google)
4174d40dbb61SSteven Rostedt (Google) static const char *show_interrupt_level(void)
4175d40dbb61SSteven Rostedt (Google) {
4176d40dbb61SSteven Rostedt (Google) unsigned long pc = preempt_count();
4177d40dbb61SSteven Rostedt (Google) unsigned char level = 0;
4178d40dbb61SSteven Rostedt (Google)
4179d40dbb61SSteven Rostedt (Google) if (pc & SOFTIRQ_OFFSET)
4180d40dbb61SSteven Rostedt (Google) level |= 1;
4181d40dbb61SSteven Rostedt (Google)
4182d40dbb61SSteven Rostedt (Google) if (pc & HARDIRQ_MASK)
4183d40dbb61SSteven Rostedt (Google) level |= 2;
4184d40dbb61SSteven Rostedt (Google)
4185d40dbb61SSteven Rostedt (Google) if (pc & NMI_MASK)
4186d40dbb61SSteven Rostedt (Google) level |= 4;
4187d40dbb61SSteven Rostedt (Google)
4188d40dbb61SSteven Rostedt (Google) return show_irq_str(level);
4189d40dbb61SSteven Rostedt (Google) }
4190d40dbb61SSteven Rostedt (Google)
41915b7be9c7SSteven Rostedt (VMware) static void dump_buffer_page(struct buffer_data_page *bpage,
41925b7be9c7SSteven Rostedt (VMware) struct rb_event_info *info,
41935b7be9c7SSteven Rostedt (VMware) unsigned long tail)
41945b7be9c7SSteven Rostedt (VMware) {
41955b7be9c7SSteven Rostedt (VMware) struct ring_buffer_event *event;
41965b7be9c7SSteven Rostedt (VMware) u64 ts, delta;
41975b7be9c7SSteven Rostedt (VMware) int e;
41985b7be9c7SSteven Rostedt (VMware)
41995b7be9c7SSteven Rostedt (VMware) ts = bpage->time_stamp;
42005b7be9c7SSteven Rostedt (VMware) pr_warn(" [%lld] PAGE TIME STAMP\n", ts);
42015b7be9c7SSteven Rostedt (VMware)
42025b7be9c7SSteven Rostedt (VMware) for (e = 0; e < tail; e += rb_event_length(event)) {
42035b7be9c7SSteven Rostedt (VMware)
42045b7be9c7SSteven Rostedt (VMware) event = (struct ring_buffer_event *)(bpage->data + e);
42055b7be9c7SSteven Rostedt (VMware)
42065b7be9c7SSteven Rostedt (VMware) switch (event->type_len) {
42075b7be9c7SSteven Rostedt (VMware)
42085b7be9c7SSteven Rostedt (VMware) case RINGBUF_TYPE_TIME_EXTEND:
4209e20044f7SSteven Rostedt (VMware) delta = rb_event_time_stamp(event);
42105b7be9c7SSteven Rostedt (VMware) ts += delta;
42110b9036efSSteven Rostedt (Google) pr_warn(" 0x%x: [%lld] delta:%lld TIME EXTEND\n",
42120b9036efSSteven Rostedt (Google) e, ts, delta);
42135b7be9c7SSteven Rostedt (VMware) break;
42145b7be9c7SSteven Rostedt (VMware)
42155b7be9c7SSteven Rostedt (VMware) case RINGBUF_TYPE_TIME_STAMP:
4216e20044f7SSteven Rostedt (VMware) delta = rb_event_time_stamp(event);
42176695da58SSteven Rostedt (Google) ts = rb_fix_abs_ts(delta, ts);
42180b9036efSSteven Rostedt (Google) pr_warn(" 0x%x: [%lld] absolute:%lld TIME STAMP\n",
42190b9036efSSteven Rostedt (Google) e, ts, delta);
42205b7be9c7SSteven Rostedt (VMware) break;
42215b7be9c7SSteven Rostedt (VMware)
42225b7be9c7SSteven Rostedt (VMware) case RINGBUF_TYPE_PADDING:
42235b7be9c7SSteven Rostedt (VMware) ts += event->time_delta;
42240b9036efSSteven Rostedt (Google) pr_warn(" 0x%x: [%lld] delta:%d PADDING\n",
42250b9036efSSteven Rostedt (Google) e, ts, event->time_delta);
42265b7be9c7SSteven Rostedt (VMware) break;
42275b7be9c7SSteven Rostedt (VMware)
42285b7be9c7SSteven Rostedt (VMware) case RINGBUF_TYPE_DATA:
42295b7be9c7SSteven Rostedt (VMware) ts += event->time_delta;
4230d40dbb61SSteven Rostedt (Google) pr_warn(" 0x%x: [%lld] delta:%d %s%s\n",
4231d40dbb61SSteven Rostedt (Google) e, ts, event->time_delta,
4232d40dbb61SSteven Rostedt (Google) show_flags(event), show_irq(event));
42335b7be9c7SSteven Rostedt (VMware) break;
42345b7be9c7SSteven Rostedt (VMware)
42355b7be9c7SSteven Rostedt (VMware) default:
42365b7be9c7SSteven Rostedt (VMware) break;
42375b7be9c7SSteven Rostedt (VMware) }
42385b7be9c7SSteven Rostedt (VMware) }
42390b9036efSSteven Rostedt (Google) pr_warn("expected end:0x%lx last event actually ended at:0x%x\n", tail, e);
42405b7be9c7SSteven Rostedt (VMware) }
42415b7be9c7SSteven Rostedt (VMware)
42425b7be9c7SSteven Rostedt (VMware) static DEFINE_PER_CPU(atomic_t, checking);
42435b7be9c7SSteven Rostedt (VMware) static atomic_t ts_dump;
42445b7be9c7SSteven Rostedt (VMware)
4245f50345b4SSteven Rostedt (Google) #define buffer_warn_return(fmt, ...) \
4246f50345b4SSteven Rostedt (Google) do { \
4247f50345b4SSteven Rostedt (Google) /* If another report is happening, ignore this one */ \
4248f50345b4SSteven Rostedt (Google) if (atomic_inc_return(&ts_dump) != 1) { \
4249f50345b4SSteven Rostedt (Google) atomic_dec(&ts_dump); \
4250f50345b4SSteven Rostedt (Google) goto out; \
4251f50345b4SSteven Rostedt (Google) } \
4252f50345b4SSteven Rostedt (Google) atomic_inc(&cpu_buffer->record_disabled); \
4253f50345b4SSteven Rostedt (Google) pr_warn(fmt, ##__VA_ARGS__); \
4254f50345b4SSteven Rostedt (Google) dump_buffer_page(bpage, info, tail); \
4255f50345b4SSteven Rostedt (Google) atomic_dec(&ts_dump); \
4256f50345b4SSteven Rostedt (Google) /* There are some cases during boot up where this can happen */ \
4257f50345b4SSteven Rostedt (Google) if (WARN_ON_ONCE(system_state != SYSTEM_BOOTING)) \
4258f50345b4SSteven Rostedt (Google) /* Do not re-enable checking */ \
4259f50345b4SSteven Rostedt (Google) return; \
4260f50345b4SSteven Rostedt (Google) } while (0)
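/*
 * Editorial note (inferred from the macro body above): callers of
 * buffer_warn_return() must provide an "out:" label and have bpage,
 * info, tail and cpu_buffer in scope, as check_buffer() below does.
 */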
4261f50345b4SSteven Rostedt (Google)
42625b7be9c7SSteven Rostedt (VMware) /*
42635b7be9c7SSteven Rostedt (VMware) * Check if the current event time stamp matches the deltas on
42645b7be9c7SSteven Rostedt (VMware) * the buffer page.
42655b7be9c7SSteven Rostedt (VMware) */
42665b7be9c7SSteven Rostedt (VMware) static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
42675b7be9c7SSteven Rostedt (VMware) struct rb_event_info *info,
42685b7be9c7SSteven Rostedt (VMware) unsigned long tail)
42695b7be9c7SSteven Rostedt (VMware) {
42705b7be9c7SSteven Rostedt (VMware) struct buffer_data_page *bpage;
42715b7be9c7SSteven Rostedt (VMware) u64 ts, delta;
42725b7be9c7SSteven Rostedt (VMware) bool full = false;
42735f3b6e83SSteven Rostedt (Google) int ret;
42745b7be9c7SSteven Rostedt (VMware)
42755b7be9c7SSteven Rostedt (VMware) bpage = info->tail_page->page;
42765b7be9c7SSteven Rostedt (VMware)
42775b7be9c7SSteven Rostedt (VMware) if (tail == CHECK_FULL_PAGE) {
42785b7be9c7SSteven Rostedt (VMware) full = true;
42795b7be9c7SSteven Rostedt (VMware) tail = local_read(&bpage->commit);
42805b7be9c7SSteven Rostedt (VMware) } else if (info->add_timestamp &
42815b7be9c7SSteven Rostedt (VMware) (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
42825b7be9c7SSteven Rostedt (VMware) /* Ignore events with absolute time stamps */
42835b7be9c7SSteven Rostedt (VMware) return;
42845b7be9c7SSteven Rostedt (VMware) }
42855b7be9c7SSteven Rostedt (VMware)
42865b7be9c7SSteven Rostedt (VMware) /*
42875b7be9c7SSteven Rostedt (VMware) * Do not check the first event (skip possible extends too).
42885b7be9c7SSteven Rostedt (VMware) * Also do not check if previous events have not been committed.
42895b7be9c7SSteven Rostedt (VMware) */
42905b7be9c7SSteven Rostedt (VMware) if (tail <= 8 || tail > local_read(&bpage->commit))
42915b7be9c7SSteven Rostedt (VMware) return;
42925b7be9c7SSteven Rostedt (VMware)
42935b7be9c7SSteven Rostedt (VMware) /*
42945b7be9c7SSteven Rostedt (VMware) * If this interrupted another event's check, skip this one.
42955b7be9c7SSteven Rostedt (VMware) */
42965b7be9c7SSteven Rostedt (VMware) if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
42975b7be9c7SSteven Rostedt (VMware) goto out;
42985b7be9c7SSteven Rostedt (VMware)
42995f3b6e83SSteven Rostedt (Google) ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta);
43005f3b6e83SSteven Rostedt (Google) if (ret < 0) {
4301f50345b4SSteven Rostedt (Google) if (delta < ts) {
4302f50345b4SSteven Rostedt (Google) buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
4303f50345b4SSteven Rostedt (Google) cpu_buffer->cpu, ts, delta);
43045f3b6e83SSteven Rostedt (Google) goto out;
43055b7be9c7SSteven Rostedt (VMware) }
43065b7be9c7SSteven Rostedt (VMware) }
43075b7be9c7SSteven Rostedt (VMware) if ((full && ts > info->ts) ||
43085b7be9c7SSteven Rostedt (VMware) (!full && ts + info->delta != info->ts)) {
4309f50345b4SSteven Rostedt (Google) buffer_warn_return("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s context:%s\n",
43105b7be9c7SSteven Rostedt (VMware) cpu_buffer->cpu,
43116549de1fSSteven Rostedt (VMware) ts + info->delta, info->ts, info->delta,
43126549de1fSSteven Rostedt (VMware) info->before, info->after,
4313d40dbb61SSteven Rostedt (Google) full ? " (full)" : "", show_interrupt_level());
43145b7be9c7SSteven Rostedt (VMware) }
43155b7be9c7SSteven Rostedt (VMware) out:
43165b7be9c7SSteven Rostedt (VMware) atomic_dec(this_cpu_ptr(&checking));
43175b7be9c7SSteven Rostedt (VMware) }
43185b7be9c7SSteven Rostedt (VMware) #else
43195b7be9c7SSteven Rostedt (VMware) static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
43205b7be9c7SSteven Rostedt (VMware) struct rb_event_info *info,
43215b7be9c7SSteven Rostedt (VMware) unsigned long tail)
43225b7be9c7SSteven Rostedt (VMware) {
43235b7be9c7SSteven Rostedt (VMware) }
43245b7be9c7SSteven Rostedt (VMware) #endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
43255b7be9c7SSteven Rostedt (VMware)
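/*
 * Editorial reading aid for __rb_reserve_next() below (summarized from
 * the code, not authoritative): the labels A-F mark the points between
 * which an interrupting event can be detected:
 *
 *	A: read tail_page->write (w)
 *	B: set before_stamp to this event's timestamp
 *	C: reserve space by adding the event length to tail_page->write
 *	D: fast path - set write_stamp (nothing interrupted A..C)
 *	E: slow path - re-read write_stamp
 *	F: slow path - re-check tail_page->write against the reservation
 *
 * When before_stamp and write_stamp disagree, an interrupting event got
 * in between and this event falls back to a forced full timestamp.
 */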
43266634ff26SSteven Rostedt static struct ring_buffer_event *
43276634ff26SSteven Rostedt __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
4328fcc742eaSSteven Rostedt (Red Hat) struct rb_event_info *info)
43296634ff26SSteven Rostedt {
43306634ff26SSteven Rostedt struct ring_buffer_event *event;
4331fcc742eaSSteven Rostedt (Red Hat) struct buffer_page *tail_page;
4332a389d86fSSteven Rostedt (VMware) unsigned long tail, write, w;
433369d1b839SSteven Rostedt
43348573636eSSteven Rostedt (Red Hat) /* Don't let the compiler play games with cpu_buffer->tail_page */
43358573636eSSteven Rostedt (Red Hat) tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
4336a389d86fSSteven Rostedt (VMware)
4337a389d86fSSteven Rostedt (VMware) /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK;
4338a389d86fSSteven Rostedt (VMware) barrier();
4339c84897c0SSteven Rostedt (Google) rb_time_read(&cpu_buffer->before_stamp, &info->before);
4340c84897c0SSteven Rostedt (Google) rb_time_read(&cpu_buffer->write_stamp, &info->after);
4341a389d86fSSteven Rostedt (VMware) barrier();
4342a389d86fSSteven Rostedt (VMware) info->ts = rb_time_stamp(cpu_buffer->buffer);
4343a389d86fSSteven Rostedt (VMware)
434458fbc3c6SSteven Rostedt (VMware) if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
4345a389d86fSSteven Rostedt (VMware) info->delta = info->ts;
4346a389d86fSSteven Rostedt (VMware) } else {
4347a389d86fSSteven Rostedt (VMware) /*
434858fbc3c6SSteven Rostedt (VMware) * If interrupting an event time update, we may need an
434958fbc3c6SSteven Rostedt (VMware) * absolute timestamp.
4350a389d86fSSteven Rostedt (VMware) * Don't bother if this is the start of a new page (w == 0).
4351a389d86fSSteven Rostedt (VMware) */
4352b3ae7b67SSteven Rostedt (Google) if (!w) {
4353b3ae7b67SSteven Rostedt (Google) /* Use the sub-buffer timestamp */
4354b3ae7b67SSteven Rostedt (Google) info->delta = 0;
4355c84897c0SSteven Rostedt (Google) } else if (unlikely(info->before != info->after)) {
43567c4b4a51SSteven Rostedt (VMware) info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
4357fcc742eaSSteven Rostedt (Red Hat) info->length += RB_LEN_TIME_EXTEND;
435858fbc3c6SSteven Rostedt (VMware) } else {
435958fbc3c6SSteven Rostedt (VMware) info->delta = info->ts - info->after;
436058fbc3c6SSteven Rostedt (VMware) if (unlikely(test_time_stamp(info->delta))) {
436158fbc3c6SSteven Rostedt (VMware) info->add_timestamp |= RB_ADD_STAMP_EXTEND;
436258fbc3c6SSteven Rostedt (VMware) info->length += RB_LEN_TIME_EXTEND;
436358fbc3c6SSteven Rostedt (VMware) }
436458fbc3c6SSteven Rostedt (VMware) }
436558fbc3c6SSteven Rostedt (VMware) }
436677ae365eSSteven Rostedt
436710464b4aSSteven Rostedt (VMware) /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts);
4368a389d86fSSteven Rostedt (VMware)
4369a389d86fSSteven Rostedt (VMware) /*C*/ write = local_add_return(info->length, &tail_page->write);
437077ae365eSSteven Rostedt
437177ae365eSSteven Rostedt /* set write to only the index of the write */
437277ae365eSSteven Rostedt write &= RB_WRITE_MASK;
4373a389d86fSSteven Rostedt (VMware)
4374fcc742eaSSteven Rostedt (Red Hat) tail = write - info->length;
43756634ff26SSteven Rostedt
4376a389d86fSSteven Rostedt (VMware) /* See if we shot past the end of this buffer page */
4377139f8400STzvetomir Stoyanov (VMware) if (unlikely(write > cpu_buffer->buffer->subbuf_size)) {
43785b7be9c7SSteven Rostedt (VMware) check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
4379a389d86fSSteven Rostedt (VMware) return rb_move_tail(cpu_buffer, tail, info);
4380a389d86fSSteven Rostedt (VMware) }
4381a389d86fSSteven Rostedt (VMware)
4382a389d86fSSteven Rostedt (VMware) if (likely(tail == w)) {
4383a389d86fSSteven Rostedt (VMware) /* Nothing interrupted us between A and C */
438410464b4aSSteven Rostedt (VMware) /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts);
4385dd939425SSteven Rostedt (Google) /*
4386dd939425SSteven Rostedt (Google) * If something came in between C and D, the write stamp
4387dd939425SSteven Rostedt (Google) * may now not be in sync. But that's fine as the before_stamp
4388dd939425SSteven Rostedt (Google) * will be different and then next event will just be forced
4389dd939425SSteven Rostedt (Google) * to use an absolute timestamp.
4390dd939425SSteven Rostedt (Google) */
43917c4b4a51SSteven Rostedt (VMware) if (likely(!(info->add_timestamp &
43927c4b4a51SSteven Rostedt (VMware) (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
4393a389d86fSSteven Rostedt (VMware) /* This did not interrupt any time update */
439458fbc3c6SSteven Rostedt (VMware) info->delta = info->ts - info->after;
4395a389d86fSSteven Rostedt (VMware) else
439682db909eSQiujun Huang /* Just use full timestamp for interrupting event */
4397a389d86fSSteven Rostedt (VMware) info->delta = info->ts;
43985b7be9c7SSteven Rostedt (VMware) check_buffer(cpu_buffer, info, tail);
4399a389d86fSSteven Rostedt (VMware) } else {
4400a389d86fSSteven Rostedt (VMware) u64 ts;
4401a389d86fSSteven Rostedt (VMware) /* SLOW PATH - Interrupted between A and C */
4402b803d7c6SSteven Rostedt (Google)
4403b803d7c6SSteven Rostedt (Google) /* Save the old before_stamp */
4404c84897c0SSteven Rostedt (Google) rb_time_read(&cpu_buffer->before_stamp, &info->before);
4405b803d7c6SSteven Rostedt (Google)
4406b803d7c6SSteven Rostedt (Google) /*
4407b803d7c6SSteven Rostedt (Google) * Read a new timestamp and update the before_stamp to make
4408b803d7c6SSteven Rostedt (Google) * the next event after this one force using an absolute
4409b803d7c6SSteven Rostedt (Google) * timestamp. This is in case an interrupt were to come in
4410b803d7c6SSteven Rostedt (Google) * between E and F.
4411b803d7c6SSteven Rostedt (Google) * between E and F. */
4412b803d7c6SSteven Rostedt (Google) ts = rb_time_stamp(cpu_buffer->buffer);
4413b803d7c6SSteven Rostedt (Google) rb_time_set(&cpu_buffer->before_stamp, ts);
4414b803d7c6SSteven Rostedt (Google)
4415b803d7c6SSteven Rostedt (Google) barrier();
4416c84897c0SSteven Rostedt (Google) /*E*/ rb_time_read(&cpu_buffer->write_stamp, &info->after);
4417a389d86fSSteven Rostedt (VMware) barrier();
4418b803d7c6SSteven Rostedt (Google) /*F*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
4419b803d7c6SSteven Rostedt (Google) info->after == info->before && info->after < ts) {
4420b803d7c6SSteven Rostedt (Google) /*
4421b803d7c6SSteven Rostedt (Google) * Nothing came after this event between C and F, it is
4422b803d7c6SSteven Rostedt (Google) * safe to use info->after for the delta as it
4423b803d7c6SSteven Rostedt (Google) * matched info->before and is still valid.
4424b803d7c6SSteven Rostedt (Google) */
442558fbc3c6SSteven Rostedt (VMware) info->delta = ts - info->after;
4426a389d86fSSteven Rostedt (VMware) } else {
4427a389d86fSSteven Rostedt (VMware) /*
4428b803d7c6SSteven Rostedt (Google) * Interrupted between C and F:
4429a389d86fSSteven Rostedt (VMware) * Lost the previous events time stamp. Just set the
4430a389d86fSSteven Rostedt (VMware) * delta to zero, and this will be the same time as
4431a389d86fSSteven Rostedt (VMware) * the event this event interrupted. And the events that
4432a389d86fSSteven Rostedt (VMware) * came after this will still be correct (as they would
4433a389d86fSSteven Rostedt (VMware) * have built their delta on the previous event).
4434a389d86fSSteven Rostedt (VMware) */
4435a389d86fSSteven Rostedt (VMware) info->delta = 0;
4436a389d86fSSteven Rostedt (VMware) }
44378672e494SSteven Rostedt (VMware) info->ts = ts;
44387c4b4a51SSteven Rostedt (VMware) info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
4439a389d86fSSteven Rostedt (VMware) }
4440a389d86fSSteven Rostedt (VMware)
4441b7dc42fdSSteven Rostedt (Red Hat) /*
4442b7dc42fdSSteven Rostedt (Red Hat) * If this is the first commit on the page, then it has the same
4443b7dc42fdSSteven Rostedt (Red Hat) * timestamp as the page itself.
4444b7dc42fdSSteven Rostedt (Red Hat) */
44457c4b4a51SSteven Rostedt (VMware) if (unlikely(!tail && !(info->add_timestamp &
44467c4b4a51SSteven Rostedt (VMware) (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
4447b7dc42fdSSteven Rostedt (Red Hat) info->delta = 0;
4448b7dc42fdSSteven Rostedt (Red Hat)
44496634ff26SSteven Rostedt /* We reserved something on the buffer */
4450b7dc42fdSSteven Rostedt (Red Hat)
44516634ff26SSteven Rostedt event = __rb_page_index(tail_page, tail);
4452fcc742eaSSteven Rostedt (Red Hat) rb_update_event(cpu_buffer, event, info);
44536634ff26SSteven Rostedt
44546634ff26SSteven Rostedt local_inc(&tail_page->entries);
44556634ff26SSteven Rostedt
4456b7dc42fdSSteven Rostedt (Red Hat) /*
4457b7dc42fdSSteven Rostedt (Red Hat) * If this is the first commit on the page, then update
4458b7dc42fdSSteven Rostedt (Red Hat) * its timestamp.
4459b7dc42fdSSteven Rostedt (Red Hat) */
446075b21c6dSSteven Rostedt (VMware) if (unlikely(!tail))
4461b7dc42fdSSteven Rostedt (Red Hat) tail_page->page->time_stamp = info->ts;
4462b7dc42fdSSteven Rostedt (Red Hat)
4463c64e148aSVaibhav Nagarnaik /* account for these added bytes */
4464fcc742eaSSteven Rostedt (Red Hat) local_add(info->length, &cpu_buffer->entries_bytes);
4465c64e148aSVaibhav Nagarnaik
44666634ff26SSteven Rostedt return event;
44676634ff26SSteven Rostedt }
44686634ff26SSteven Rostedt
4469fa7ffb39SSteven Rostedt (Red Hat) static __always_inline struct ring_buffer_event *
447013292494SSteven Rostedt (VMware) rb_reserve_next_event(struct trace_buffer *buffer,
447162f0b3ebSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer,
44721cd8d735SSteven Rostedt unsigned long length)
44737a8e76a3SSteven Rostedt {
44747a8e76a3SSteven Rostedt struct ring_buffer_event *event;
4475fcc742eaSSteven Rostedt (Red Hat) struct rb_event_info info;
4476818e3dd3SSteven Rostedt int nr_loops = 0;
447758fbc3c6SSteven Rostedt (VMware) int add_ts_default;
44787a8e76a3SSteven Rostedt
4479cd2375a3SSteven Rostedt /*
4480cd2375a3SSteven Rostedt * ring buffer does cmpxchg as well as atomic64 operations
4481cd2375a3SSteven Rostedt * (which some archs use locking for atomic64), make sure this
4482cd2375a3SSteven Rostedt * is safe in NMI context
4483cd2375a3SSteven Rostedt */
4484cd2375a3SSteven Rostedt if ((!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) ||
4485cd2375a3SSteven Rostedt IS_ENABLED(CONFIG_GENERIC_ATOMIC64)) &&
448671229230SSteven Rostedt (Google) (unlikely(in_nmi()))) {
448771229230SSteven Rostedt (Google) return NULL;
448871229230SSteven Rostedt (Google) }
448971229230SSteven Rostedt (Google)
4490fa743953SSteven Rostedt rb_start_commit(cpu_buffer);
4491a389d86fSSteven Rostedt (VMware) /* The commit page can not change after this */
4492fa743953SSteven Rostedt
449385bac32cSSteven Rostedt #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
449462f0b3ebSSteven Rostedt /*
449562f0b3ebSSteven Rostedt * Due to the ability to swap a cpu buffer between buffers,
449662f0b3ebSSteven Rostedt * it is possible it was swapped before we committed.
449762f0b3ebSSteven Rostedt * (committing stops a swap). We check for it here and
449862f0b3ebSSteven Rostedt * if it happened, we have to fail the write.
449962f0b3ebSSteven Rostedt */
450062f0b3ebSSteven Rostedt barrier();
45016aa7de05SMark Rutland if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
450262f0b3ebSSteven Rostedt local_dec(&cpu_buffer->committing);
450362f0b3ebSSteven Rostedt local_dec(&cpu_buffer->commits);
450462f0b3ebSSteven Rostedt return NULL;
450562f0b3ebSSteven Rostedt }
450685bac32cSSteven Rostedt #endif
4507b7dc42fdSSteven Rostedt (Red Hat)
4508fcc742eaSSteven Rostedt (Red Hat) info.length = rb_calculate_event_length(length);
450958fbc3c6SSteven Rostedt (VMware)
451058fbc3c6SSteven Rostedt (VMware) if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
451158fbc3c6SSteven Rostedt (VMware) add_ts_default = RB_ADD_STAMP_ABSOLUTE;
451258fbc3c6SSteven Rostedt (VMware) info.length += RB_LEN_TIME_EXTEND;
4513139f8400STzvetomir Stoyanov (VMware) if (info.length > cpu_buffer->buffer->max_data_size)
4514b3ae7b67SSteven Rostedt (Google) goto out_fail;
451558fbc3c6SSteven Rostedt (VMware) } else {
451658fbc3c6SSteven Rostedt (VMware) add_ts_default = RB_ADD_STAMP_NONE;
451758fbc3c6SSteven Rostedt (VMware) }
451858fbc3c6SSteven Rostedt (VMware)
4519a4543a2fSSteven Rostedt (Red Hat) again:
452058fbc3c6SSteven Rostedt (VMware) info.add_timestamp = add_ts_default;
4521b7dc42fdSSteven Rostedt (Red Hat) info.delta = 0;
4522b7dc42fdSSteven Rostedt (Red Hat)
4523818e3dd3SSteven Rostedt /*
4524818e3dd3SSteven Rostedt * We allow for interrupts to reenter here and do a trace.
4525818e3dd3SSteven Rostedt * If one does, it will cause this original code to loop
4526818e3dd3SSteven Rostedt * back here. Even with heavy interrupts happening, this
4527818e3dd3SSteven Rostedt * should only happen a few times in a row. If this happens
4528818e3dd3SSteven Rostedt * 1000 times in a row, there must be either an interrupt
4529818e3dd3SSteven Rostedt * storm or we have something buggy.
4530818e3dd3SSteven Rostedt * Bail!
4531818e3dd3SSteven Rostedt */
45323e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
4533fa743953SSteven Rostedt goto out_fail;
4534818e3dd3SSteven Rostedt
4535fcc742eaSSteven Rostedt (Red Hat) event = __rb_reserve_next(cpu_buffer, &info);
4536fcc742eaSSteven Rostedt (Red Hat)
4537bd1b7cd3SSteven Rostedt (Red Hat) if (unlikely(PTR_ERR(event) == -EAGAIN)) {
453858fbc3c6SSteven Rostedt (VMware) if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
4539bd1b7cd3SSteven Rostedt (Red Hat) info.length -= RB_LEN_TIME_EXTEND;
4540bf41a158SSteven Rostedt goto again;
4541bd1b7cd3SSteven Rostedt (Red Hat) }
45427a8e76a3SSteven Rostedt
4543a389d86fSSteven Rostedt (VMware) if (likely(event))
45447a8e76a3SSteven Rostedt return event;
4545fa743953SSteven Rostedt out_fail:
4546fa743953SSteven Rostedt rb_end_commit(cpu_buffer);
4547fa743953SSteven Rostedt return NULL;
45487a8e76a3SSteven Rostedt }
45497a8e76a3SSteven Rostedt
45507a8e76a3SSteven Rostedt /**
45517a8e76a3SSteven Rostedt * ring_buffer_lock_reserve - reserve a part of the buffer
45527a8e76a3SSteven Rostedt * @buffer: the ring buffer to reserve from
45537a8e76a3SSteven Rostedt * @length: the length of the data to reserve (excluding event header)
45547a8e76a3SSteven Rostedt *
45556167c205SSteven Rostedt (VMware) * Returns a reserved event on the ring buffer to copy directly to.
45567a8e76a3SSteven Rostedt * The user of this interface will need to get the body to write into
45577a8e76a3SSteven Rostedt * and can use the ring_buffer_event_data() interface.
45587a8e76a3SSteven Rostedt *
45597a8e76a3SSteven Rostedt * The length is the length of the data needed, not the event length
45607a8e76a3SSteven Rostedt * which also includes the event header.
45617a8e76a3SSteven Rostedt *
45627a8e76a3SSteven Rostedt * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
45637a8e76a3SSteven Rostedt * If NULL is returned, then nothing has been allocated or locked.
45647a8e76a3SSteven Rostedt */
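/*
 * Illustrative sketch of the reserve/commit pattern (struct my_data is
 * a hypothetical caller-side type):
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(struct my_data));
 *	if (event) {
 *		memcpy(ring_buffer_event_data(event), &data, sizeof(data));
 *		ring_buffer_unlock_commit(buffer);
 *	}
 */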
45657a8e76a3SSteven Rostedt struct ring_buffer_event *
456613292494SSteven Rostedt (VMware) ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
45677a8e76a3SSteven Rostedt {
45687a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer;
45697a8e76a3SSteven Rostedt struct ring_buffer_event *event;
45705168ae50SSteven Rostedt int cpu;
45717a8e76a3SSteven Rostedt
4572bf41a158SSteven Rostedt /* If we are tracing schedule, we don't want to recurse */
45735168ae50SSteven Rostedt preempt_disable_notrace();
4574bf41a158SSteven Rostedt
45753205f806SSteven Rostedt (Red Hat) if (unlikely(atomic_read(&buffer->record_disabled)))
457658a09ec6SSteven Rostedt (Red Hat) goto out;
4577261842b7SSteven Rostedt
45787a8e76a3SSteven Rostedt cpu = raw_smp_processor_id();
45797a8e76a3SSteven Rostedt
45803205f806SSteven Rostedt (Red Hat) if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
4581d769041fSSteven Rostedt goto out;
45827a8e76a3SSteven Rostedt
45837a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu];
45847a8e76a3SSteven Rostedt
45853205f806SSteven Rostedt (Red Hat) if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
4586d769041fSSteven Rostedt goto out;
45877a8e76a3SSteven Rostedt
4588139f8400STzvetomir Stoyanov (VMware) if (unlikely(length > buffer->max_data_size))
4589bf41a158SSteven Rostedt goto out;
45907a8e76a3SSteven Rostedt
459158a09ec6SSteven Rostedt (Red Hat) if (unlikely(trace_recursive_lock(cpu_buffer)))
459258a09ec6SSteven Rostedt (Red Hat) goto out;
459358a09ec6SSteven Rostedt (Red Hat)
459462f0b3ebSSteven Rostedt event = rb_reserve_next_event(buffer, cpu_buffer, length);
45957a8e76a3SSteven Rostedt if (!event)
459658a09ec6SSteven Rostedt (Red Hat) goto out_unlock;
45977a8e76a3SSteven Rostedt
45987a8e76a3SSteven Rostedt return event;
45997a8e76a3SSteven Rostedt
460058a09ec6SSteven Rostedt (Red Hat) out_unlock:
460158a09ec6SSteven Rostedt (Red Hat) trace_recursive_unlock(cpu_buffer);
4602d769041fSSteven Rostedt out:
46035168ae50SSteven Rostedt preempt_enable_notrace();
46047a8e76a3SSteven Rostedt return NULL;
46057a8e76a3SSteven Rostedt }
4606c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
46077a8e76a3SSteven Rostedt
4608a1863c21SSteven Rostedt /*
4609a1863c21SSteven Rostedt * Decrement the entry counter of the page that an event is on.
4610a1863c21SSteven Rostedt * The event does not even need to exist, only the pointer
4611a1863c21SSteven Rostedt * to the page it is on. This may only be called before the commit
4612a1863c21SSteven Rostedt * takes place.
4613a1863c21SSteven Rostedt */
4614a1863c21SSteven Rostedt static inline void
4615a1863c21SSteven Rostedt rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
4616a1863c21SSteven Rostedt struct ring_buffer_event *event)
4617a1863c21SSteven Rostedt {
4618a1863c21SSteven Rostedt unsigned long addr = (unsigned long)event;
4619a1863c21SSteven Rostedt struct buffer_page *bpage = cpu_buffer->commit_page;
4620a1863c21SSteven Rostedt struct buffer_page *start;
4621a1863c21SSteven Rostedt
46223cb30911SSteven Rostedt (Google) addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
4623a1863c21SSteven Rostedt
4624a1863c21SSteven Rostedt /* Do the likely case first */
4625a1863c21SSteven Rostedt if (likely(bpage->page == (void *)addr)) {
4626a1863c21SSteven Rostedt local_dec(&bpage->entries);
4627a1863c21SSteven Rostedt return;
4628a1863c21SSteven Rostedt }
4629a1863c21SSteven Rostedt
4630a1863c21SSteven Rostedt /*
4631a1863c21SSteven Rostedt * Because the commit page may be on the reader page we
4632a1863c21SSteven Rostedt * start with the next page and check the end loop there.
4633a1863c21SSteven Rostedt */
46346689bed3SQiujun Huang rb_inc_page(&bpage);
4635a1863c21SSteven Rostedt start = bpage;
4636a1863c21SSteven Rostedt do {
4637a1863c21SSteven Rostedt if (bpage->page == (void *)addr) {
4638a1863c21SSteven Rostedt local_dec(&bpage->entries);
4639a1863c21SSteven Rostedt return;
4640a1863c21SSteven Rostedt }
46416689bed3SQiujun Huang rb_inc_page(&bpage);
4642a1863c21SSteven Rostedt } while (bpage != start);
4643a1863c21SSteven Rostedt
4644a1863c21SSteven Rostedt /* commit not part of this buffer?? */
4645a1863c21SSteven Rostedt RB_WARN_ON(cpu_buffer, 1);
4646a1863c21SSteven Rostedt }
4647a1863c21SSteven Rostedt
46487a8e76a3SSteven Rostedt /**
464988883490SQiujun Huang * ring_buffer_discard_commit - discard an event that has not been committed
4650fa1b47ddSSteven Rostedt * @buffer: the ring buffer
4651fa1b47ddSSteven Rostedt * @event: non committed event to discard
4652fa1b47ddSSteven Rostedt *
4653dc892f73SSteven Rostedt * Sometimes an event that is in the ring buffer needs to be ignored.
4654dc892f73SSteven Rostedt * This function lets the user discard an event in the ring buffer
4655dc892f73SSteven Rostedt * and then that event will not be read later.
4656dc892f73SSteven Rostedt *
46576167c205SSteven Rostedt (VMware) * This function only works if it is called before the item has been
4658dc892f73SSteven Rostedt * committed. It will try to free the event from the ring buffer
4659fa1b47ddSSteven Rostedt * if another event has not been added behind it.
4660fa1b47ddSSteven Rostedt *
4661fa1b47ddSSteven Rostedt * If another event has been added behind it, it will set the event
4662fa1b47ddSSteven Rostedt * up as discarded, and perform the commit.
4663fa1b47ddSSteven Rostedt *
4664fa1b47ddSSteven Rostedt * If this function is called, do not call ring_buffer_unlock_commit on
4665fa1b47ddSSteven Rostedt * the event.
4666fa1b47ddSSteven Rostedt */
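/*
 * Illustrative sketch (should_drop() is a made-up caller predicate):
 *
 *	event = ring_buffer_lock_reserve(buffer, length);
 *	if (event) {
 *		if (should_drop(ring_buffer_event_data(event)))
 *			ring_buffer_discard_commit(buffer, event);
 *		else
 *			ring_buffer_unlock_commit(buffer);
 *	}
 */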
466713292494SSteven Rostedt (VMware) void ring_buffer_discard_commit(struct trace_buffer *buffer,
4668fa1b47ddSSteven Rostedt struct ring_buffer_event *event)
4669fa1b47ddSSteven Rostedt {
4670fa1b47ddSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer;
4671fa1b47ddSSteven Rostedt int cpu;
4672fa1b47ddSSteven Rostedt
4673fa1b47ddSSteven Rostedt /* The event is discarded regardless */
4674f3b9aae1SFrederic Weisbecker rb_event_discard(event);
4675fa1b47ddSSteven Rostedt
4676fa743953SSteven Rostedt cpu = smp_processor_id();
4677fa743953SSteven Rostedt cpu_buffer = buffer->buffers[cpu];
4678fa743953SSteven Rostedt
4679fa1b47ddSSteven Rostedt /*
4680fa1b47ddSSteven Rostedt * This must only be called if the event has not been
4681fa1b47ddSSteven Rostedt * committed yet. Thus we can assume that preemption
4682fa1b47ddSSteven Rostedt * is still disabled.
4683fa1b47ddSSteven Rostedt */
4684fa743953SSteven Rostedt RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
4685fa1b47ddSSteven Rostedt
4686a1863c21SSteven Rostedt rb_decrement_entry(cpu_buffer, event);
46870f2541d2SSteven Rostedt if (rb_try_to_discard(cpu_buffer, event))
4688fa1b47ddSSteven Rostedt goto out;
4689fa1b47ddSSteven Rostedt
4690fa1b47ddSSteven Rostedt out:
4691fa743953SSteven Rostedt rb_end_commit(cpu_buffer);
4692fa1b47ddSSteven Rostedt
469358a09ec6SSteven Rostedt (Red Hat) trace_recursive_unlock(cpu_buffer);
4694f3b9aae1SFrederic Weisbecker
46955168ae50SSteven Rostedt preempt_enable_notrace();
4696fa1b47ddSSteven Rostedt
4697fa1b47ddSSteven Rostedt }
4698fa1b47ddSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
4699fa1b47ddSSteven Rostedt
4700fa1b47ddSSteven Rostedt /**
47017a8e76a3SSteven Rostedt * ring_buffer_write - write data to the buffer without reserving
47027a8e76a3SSteven Rostedt * @buffer: The ring buffer to write to.
47037a8e76a3SSteven Rostedt * @length: The length of the data being written (excluding the event header)
47047a8e76a3SSteven Rostedt * @data: The data to write to the buffer.
47057a8e76a3SSteven Rostedt *
47067a8e76a3SSteven Rostedt * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
47077a8e76a3SSteven Rostedt * one function. If you already have the data to write to the buffer, it
47087a8e76a3SSteven Rostedt * may be easier to simply call this function.
47097a8e76a3SSteven Rostedt *
47107a8e76a3SSteven Rostedt * Note, like ring_buffer_lock_reserve, the length is the length of the data
47117a8e76a3SSteven Rostedt * and not the length of the event which would hold the header.
47127a8e76a3SSteven Rostedt */
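/*
 * Illustrative sketch (struct my_payload is a hypothetical caller-side
 * type; a non-zero return means the write could not be completed):
 *
 *	struct my_payload p = { .val = 42 };
 *	int err = ring_buffer_write(buffer, sizeof(p), &p);
 */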
471313292494SSteven Rostedt (VMware) int ring_buffer_write(struct trace_buffer *buffer,
47147a8e76a3SSteven Rostedt unsigned long length,
47157a8e76a3SSteven Rostedt void *data)
47167a8e76a3SSteven Rostedt {
47177a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer;
47187a8e76a3SSteven Rostedt struct ring_buffer_event *event;
47197a8e76a3SSteven Rostedt void *body;
47207a8e76a3SSteven Rostedt int ret = -EBUSY;
47215168ae50SSteven Rostedt int cpu;
47227a8e76a3SSteven Rostedt
47235168ae50SSteven Rostedt preempt_disable_notrace();
4724bf41a158SSteven Rostedt
472552fbe9cdSLai Jiangshan if (atomic_read(&buffer->record_disabled))
472652fbe9cdSLai Jiangshan goto out;
472752fbe9cdSLai Jiangshan
47287a8e76a3SSteven Rostedt cpu = raw_smp_processor_id();
47297a8e76a3SSteven Rostedt
47309e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask))
4731d769041fSSteven Rostedt goto out;
47327a8e76a3SSteven Rostedt
47337a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu];
47347a8e76a3SSteven Rostedt
47357a8e76a3SSteven Rostedt if (atomic_read(&cpu_buffer->record_disabled))
47367a8e76a3SSteven Rostedt goto out;
47377a8e76a3SSteven Rostedt
4738139f8400STzvetomir Stoyanov (VMware) if (length > buffer->max_data_size)
4739be957c44SSteven Rostedt goto out;
4740be957c44SSteven Rostedt
4741985e871bSSteven Rostedt (Red Hat) if (unlikely(trace_recursive_lock(cpu_buffer)))
4742985e871bSSteven Rostedt (Red Hat) goto out;
4743985e871bSSteven Rostedt (Red Hat)
474462f0b3ebSSteven Rostedt event = rb_reserve_next_event(buffer, cpu_buffer, length);
47457a8e76a3SSteven Rostedt if (!event)
4746985e871bSSteven Rostedt (Red Hat) goto out_unlock;
47477a8e76a3SSteven Rostedt
47487a8e76a3SSteven Rostedt body = rb_event_data(event);
47497a8e76a3SSteven Rostedt
47507a8e76a3SSteven Rostedt memcpy(body, data, length);
47517a8e76a3SSteven Rostedt
475204aabc32SSong Chen rb_commit(cpu_buffer);
47537a8e76a3SSteven Rostedt
475415693458SSteven Rostedt (Red Hat) rb_wakeups(buffer, cpu_buffer);
475515693458SSteven Rostedt (Red Hat)
47567a8e76a3SSteven Rostedt ret = 0;
4757985e871bSSteven Rostedt (Red Hat)
4758985e871bSSteven Rostedt (Red Hat) out_unlock:
4759985e871bSSteven Rostedt (Red Hat) trace_recursive_unlock(cpu_buffer);
4760985e871bSSteven Rostedt (Red Hat)
47617a8e76a3SSteven Rostedt out:
47625168ae50SSteven Rostedt preempt_enable_notrace();
47637a8e76a3SSteven Rostedt
47647a8e76a3SSteven Rostedt return ret;
47657a8e76a3SSteven Rostedt }
4766c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_write);
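/*
 * Usage sketch (added for illustration, not part of the original source):
 * writing a fixed-size record in one call. struct my_rec is a
 * hypothetical type; the 0 / -EBUSY return convention is the one
 * implemented above.
 *
 *	struct my_rec rec = { .value = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(rec), &rec))
 *		pr_warn("record dropped: recording disabled, CPU not in
 *			 the buffer mask, oversized, or recursion hit\n");
 */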
47677a8e76a3SSteven Rostedt
47680568c6ebSVincent Donnefort /*
47690568c6ebSVincent Donnefort * The total entries in the ring buffer is the running counter
47700568c6ebSVincent Donnefort * of entries entered into the ring buffer, minus the sum of
47710568c6ebSVincent Donnefort * the entries read from the ring buffer and the number of
47720568c6ebSVincent Donnefort * entries that were overwritten.
47730568c6ebSVincent Donnefort */
47740568c6ebSVincent Donnefort static inline unsigned long
47750568c6ebSVincent Donnefort rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
47760568c6ebSVincent Donnefort {
47770568c6ebSVincent Donnefort return local_read(&cpu_buffer->entries) -
47780568c6ebSVincent Donnefort (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
47790568c6ebSVincent Donnefort }
47800568c6ebSVincent Donnefort
4781da58834cSYaowei Bai static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
4782bf41a158SSteven Rostedt {
47830568c6ebSVincent Donnefort return !rb_num_of_entries(cpu_buffer);
4784bf41a158SSteven Rostedt }
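/*
 * Worked example (added for illustration): with entries = 100 events
 * written, overrun = 30 events lost to the writer wrapping, and
 * read = 50 events consumed, rb_num_of_entries() returns
 * 100 - (30 + 50) = 20 unconsumed events, and rb_per_cpu_empty() stays
 * false until that count reaches zero.
 */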
4785bf41a158SSteven Rostedt
47867a8e76a3SSteven Rostedt /**
47877a8e76a3SSteven Rostedt * ring_buffer_record_disable - stop all writes into the buffer
47887a8e76a3SSteven Rostedt * @buffer: The ring buffer to stop writes to.
47897a8e76a3SSteven Rostedt *
47907a8e76a3SSteven Rostedt * This prevents all writes to the buffer. Any attempt to write
47917a8e76a3SSteven Rostedt * to the buffer after this will fail and return NULL.
47927a8e76a3SSteven Rostedt *
479374401729SPaul E. McKenney * The caller should call synchronize_rcu() after this.
47947a8e76a3SSteven Rostedt */
479513292494SSteven Rostedt (VMware) void ring_buffer_record_disable(struct trace_buffer *buffer)
47967a8e76a3SSteven Rostedt {
47977a8e76a3SSteven Rostedt atomic_inc(&buffer->record_disabled);
47987a8e76a3SSteven Rostedt }
4799c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
48007a8e76a3SSteven Rostedt
48017a8e76a3SSteven Rostedt /**
48027a8e76a3SSteven Rostedt * ring_buffer_record_enable - enable writes to the buffer
48037a8e76a3SSteven Rostedt * @buffer: The ring buffer to enable writes
48047a8e76a3SSteven Rostedt *
48057a8e76a3SSteven Rostedt * Note, multiple disables will need the same number of enables
4806c41b20e7SAdam Buchbinder * to truly enable the writing (much like preempt_disable).
48077a8e76a3SSteven Rostedt */
480813292494SSteven Rostedt (VMware) void ring_buffer_record_enable(struct trace_buffer *buffer)
48097a8e76a3SSteven Rostedt {
48107a8e76a3SSteven Rostedt atomic_dec(&buffer->record_disabled);
48117a8e76a3SSteven Rostedt }
4812c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
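/*
 * Usage sketch (added for illustration): the disable/enable pair nests
 * like preempt_disable(), and a reader that needs a quiescent buffer
 * pairs the disable with synchronize_rcu() as the kernel-doc above
 * suggests:
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_rcu();		wait for in-flight writers
 *	... inspect or reset the buffer ...
 *	ring_buffer_record_enable(buffer);
 */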
48137a8e76a3SSteven Rostedt
48147a8e76a3SSteven Rostedt /**
4815499e5470SSteven Rostedt * ring_buffer_record_off - stop all writes into the buffer
4816499e5470SSteven Rostedt * @buffer: The ring buffer to stop writes to.
4817499e5470SSteven Rostedt *
4818499e5470SSteven Rostedt * This prevents all writes to the buffer. Any attempt to write
4819499e5470SSteven Rostedt * to the buffer after this will fail and return NULL.
4820499e5470SSteven Rostedt *
4821499e5470SSteven Rostedt * This is different than ring_buffer_record_disable() as
482287abb3b1SWang Tianhong  * it works like an on/off switch, whereas the disable() version
4823499e5470SSteven Rostedt  * must be paired with an enable().
4824499e5470SSteven Rostedt */
482513292494SSteven Rostedt (VMware) void ring_buffer_record_off(struct trace_buffer *buffer)
4826499e5470SSteven Rostedt {
4827499e5470SSteven Rostedt unsigned int rd;
4828499e5470SSteven Rostedt unsigned int new_rd;
4829499e5470SSteven Rostedt
4830499e5470SSteven Rostedt rd = atomic_read(&buffer->record_disabled);
48318328e36dSUros Bizjak do {
4832499e5470SSteven Rostedt new_rd = rd | RB_BUFFER_OFF;
48338328e36dSUros Bizjak } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
4834499e5470SSteven Rostedt }
4835499e5470SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_record_off);
4836499e5470SSteven Rostedt
4837499e5470SSteven Rostedt /**
4838499e5470SSteven Rostedt * ring_buffer_record_on - restart writes into the buffer
4839499e5470SSteven Rostedt * @buffer: The ring buffer to start writes to.
4840499e5470SSteven Rostedt *
4841499e5470SSteven Rostedt * This enables all writes to the buffer that was disabled by
4842499e5470SSteven Rostedt * ring_buffer_record_off().
4843499e5470SSteven Rostedt *
4844499e5470SSteven Rostedt * This is different than ring_buffer_record_enable() as
484587abb3b1SWang Tianhong  * it works like an on/off switch, whereas the enable() version
4846499e5470SSteven Rostedt * must be paired with a disable().
4847499e5470SSteven Rostedt */
484813292494SSteven Rostedt (VMware) void ring_buffer_record_on(struct trace_buffer *buffer)
4849499e5470SSteven Rostedt {
4850499e5470SSteven Rostedt unsigned int rd;
4851499e5470SSteven Rostedt unsigned int new_rd;
4852499e5470SSteven Rostedt
4853499e5470SSteven Rostedt rd = atomic_read(&buffer->record_disabled);
48548328e36dSUros Bizjak do {
4855499e5470SSteven Rostedt new_rd = rd & ~RB_BUFFER_OFF;
48568328e36dSUros Bizjak } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
4857499e5470SSteven Rostedt }
4858499e5470SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_record_on);
4859499e5470SSteven Rostedt
4860499e5470SSteven Rostedt /**
4861499e5470SSteven Rostedt * ring_buffer_record_is_on - return true if the ring buffer can write
4862499e5470SSteven Rostedt * @buffer: The ring buffer to see if write is enabled
4863499e5470SSteven Rostedt *
4864499e5470SSteven Rostedt * Returns true if the ring buffer is in a state that it accepts writes.
4865499e5470SSteven Rostedt */
486613292494SSteven Rostedt (VMware) bool ring_buffer_record_is_on(struct trace_buffer *buffer)
4867499e5470SSteven Rostedt {
4868499e5470SSteven Rostedt return !atomic_read(&buffer->record_disabled);
4869499e5470SSteven Rostedt }
4870499e5470SSteven Rostedt
4871499e5470SSteven Rostedt /**
487273c8d894SMasami Hiramatsu * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
487373c8d894SMasami Hiramatsu * @buffer: The ring buffer to see if write is set enabled
487473c8d894SMasami Hiramatsu *
487573c8d894SMasami Hiramatsu * Returns true if the ring buffer is set writable by ring_buffer_record_on().
487673c8d894SMasami Hiramatsu * Note that this does NOT mean it is in a writable state.
487773c8d894SMasami Hiramatsu *
487873c8d894SMasami Hiramatsu * It may return true when the ring buffer has been disabled by
487973c8d894SMasami Hiramatsu * ring_buffer_record_disable(), as that is a temporary disabling of
488073c8d894SMasami Hiramatsu * the ring buffer.
488173c8d894SMasami Hiramatsu */
488213292494SSteven Rostedt (VMware) bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
488373c8d894SMasami Hiramatsu {
488473c8d894SMasami Hiramatsu return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
488573c8d894SMasami Hiramatsu }
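/*
 * Semantics sketch (added for illustration): record_off()/record_on()
 * toggle the RB_BUFFER_OFF bit, while disable()/enable() count, and the
 * two predicates above distinguish them:
 *
 *	ring_buffer_record_off(buffer);		sets RB_BUFFER_OFF
 *	ring_buffer_record_is_on(buffer);	-> false
 *	ring_buffer_record_is_set_on(buffer);	-> false
 *	ring_buffer_record_on(buffer);		clears RB_BUFFER_OFF
 *	ring_buffer_record_disable(buffer);	temporary, counted
 *	ring_buffer_record_is_on(buffer);	-> false
 *	ring_buffer_record_is_set_on(buffer);	-> true (checks only
 *						   the on/off switch)
 */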
488673c8d894SMasami Hiramatsu
488773c8d894SMasami Hiramatsu /**
48887a8e76a3SSteven Rostedt * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
48897a8e76a3SSteven Rostedt * @buffer: The ring buffer to stop writes to.
48907a8e76a3SSteven Rostedt * @cpu: The CPU buffer to stop
48917a8e76a3SSteven Rostedt *
48927a8e76a3SSteven Rostedt * This prevents all writes to the buffer. Any attempt to write
48937a8e76a3SSteven Rostedt * to the buffer after this will fail and return NULL.
48947a8e76a3SSteven Rostedt *
489574401729SPaul E. McKenney * The caller should call synchronize_rcu() after this.
48967a8e76a3SSteven Rostedt */
489713292494SSteven Rostedt (VMware) void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
48987a8e76a3SSteven Rostedt {
48997a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer;
49007a8e76a3SSteven Rostedt
49019e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask))
49028aabee57SSteven Rostedt return;
49037a8e76a3SSteven Rostedt
49047a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu];
49057a8e76a3SSteven Rostedt atomic_inc(&cpu_buffer->record_disabled);
49067a8e76a3SSteven Rostedt }
4907c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
49087a8e76a3SSteven Rostedt
49097a8e76a3SSteven Rostedt /**
49107a8e76a3SSteven Rostedt * ring_buffer_record_enable_cpu - enable writes to the buffer
49117a8e76a3SSteven Rostedt * @buffer: The ring buffer to enable writes
49127a8e76a3SSteven Rostedt * @cpu: The CPU to enable.
49137a8e76a3SSteven Rostedt *
49147a8e76a3SSteven Rostedt * Note, multiple disables will need the same number of enables
4915c41b20e7SAdam Buchbinder * to truly enable the writing (much like preempt_disable).
49167a8e76a3SSteven Rostedt */
491713292494SSteven Rostedt (VMware) void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
49187a8e76a3SSteven Rostedt {
49197a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer;
49207a8e76a3SSteven Rostedt
49219e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask))
49228aabee57SSteven Rostedt return;
49237a8e76a3SSteven Rostedt
49247a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu];
49257a8e76a3SSteven Rostedt atomic_dec(&cpu_buffer->record_disabled);
49267a8e76a3SSteven Rostedt }
4927c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
49287a8e76a3SSteven Rostedt
49297a8e76a3SSteven Rostedt /**
4930c64e148aSVaibhav Nagarnaik * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4931c64e148aSVaibhav Nagarnaik * @buffer: The ring buffer
4932c64e148aSVaibhav Nagarnaik * @cpu: The per CPU buffer to read from.
4933c64e148aSVaibhav Nagarnaik */
493413292494SSteven Rostedt (VMware) u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
4935c64e148aSVaibhav Nagarnaik {
4936c64e148aSVaibhav Nagarnaik unsigned long flags;
4937c64e148aSVaibhav Nagarnaik struct ring_buffer_per_cpu *cpu_buffer;
4938c64e148aSVaibhav Nagarnaik struct buffer_page *bpage;
4939da830e58SLinus Torvalds u64 ret = 0;
4940c64e148aSVaibhav Nagarnaik
4941c64e148aSVaibhav Nagarnaik if (!cpumask_test_cpu(cpu, buffer->cpumask))
4942c64e148aSVaibhav Nagarnaik return 0;
4943c64e148aSVaibhav Nagarnaik
4944c64e148aSVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu];
49457115e3fcSLinus Torvalds raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4946c64e148aSVaibhav Nagarnaik /*
4947c64e148aSVaibhav Nagarnaik * if the tail is on reader_page, oldest time stamp is on the reader
4948c64e148aSVaibhav Nagarnaik * page
4949c64e148aSVaibhav Nagarnaik */
4950c64e148aSVaibhav Nagarnaik if (cpu_buffer->tail_page == cpu_buffer->reader_page)
4951c64e148aSVaibhav Nagarnaik bpage = cpu_buffer->reader_page;
4952c64e148aSVaibhav Nagarnaik else
4953c64e148aSVaibhav Nagarnaik bpage = rb_set_head_page(cpu_buffer);
495454f7be5bSSteven Rostedt if (bpage)
4955c64e148aSVaibhav Nagarnaik ret = bpage->page->time_stamp;
49567115e3fcSLinus Torvalds raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4957c64e148aSVaibhav Nagarnaik
4958c64e148aSVaibhav Nagarnaik return ret;
4959c64e148aSVaibhav Nagarnaik }
4960c64e148aSVaibhav Nagarnaik EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
4961c64e148aSVaibhav Nagarnaik
4962c64e148aSVaibhav Nagarnaik /**
496345d99ea4SZheng Yejian * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
4964c64e148aSVaibhav Nagarnaik * @buffer: The ring buffer
4965c64e148aSVaibhav Nagarnaik * @cpu: The per CPU buffer to read from.
4966c64e148aSVaibhav Nagarnaik */
496713292494SSteven Rostedt (VMware) unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
4968c64e148aSVaibhav Nagarnaik {
4969c64e148aSVaibhav Nagarnaik struct ring_buffer_per_cpu *cpu_buffer;
4970c64e148aSVaibhav Nagarnaik unsigned long ret;
4971c64e148aSVaibhav Nagarnaik
4972c64e148aSVaibhav Nagarnaik if (!cpumask_test_cpu(cpu, buffer->cpumask))
4973c64e148aSVaibhav Nagarnaik return 0;
4974c64e148aSVaibhav Nagarnaik
4975c64e148aSVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu];
4976c64e148aSVaibhav Nagarnaik ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
4977c64e148aSVaibhav Nagarnaik
4978c64e148aSVaibhav Nagarnaik return ret;
4979c64e148aSVaibhav Nagarnaik }
4980c64e148aSVaibhav Nagarnaik EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
4981c64e148aSVaibhav Nagarnaik
4982c64e148aSVaibhav Nagarnaik /**
49837a8e76a3SSteven Rostedt * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
49847a8e76a3SSteven Rostedt * @buffer: The ring buffer
49857a8e76a3SSteven Rostedt * @cpu: The per CPU buffer to get the entries from.
49867a8e76a3SSteven Rostedt */
498713292494SSteven Rostedt (VMware) unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
49887a8e76a3SSteven Rostedt {
49897a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer;
49907a8e76a3SSteven Rostedt
49919e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask))
49928aabee57SSteven Rostedt return 0;
49937a8e76a3SSteven Rostedt
49947a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu];
4995554f786eSSteven Rostedt
4996f6195aa0SSteven Rostedt return rb_num_of_entries(cpu_buffer);
49977a8e76a3SSteven Rostedt }
4998c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
49997a8e76a3SSteven Rostedt
50007a8e76a3SSteven Rostedt /**
5001884bfe89SSlava Pestov * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
5002884bfe89SSlava Pestov * buffer wrapping around (only if RB_FL_OVERWRITE is on).
50037a8e76a3SSteven Rostedt * @buffer: The ring buffer
50047a8e76a3SSteven Rostedt * @cpu: The per CPU buffer to get the number of overruns from
50057a8e76a3SSteven Rostedt */
500613292494SSteven Rostedt (VMware) unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
50077a8e76a3SSteven Rostedt {
50087a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer;
50098aabee57SSteven Rostedt unsigned long ret;
50107a8e76a3SSteven Rostedt
50119e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask))
50128aabee57SSteven Rostedt return 0;
50137a8e76a3SSteven Rostedt
50147a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu];
501577ae365eSSteven Rostedt ret = local_read(&cpu_buffer->overrun);
5016554f786eSSteven Rostedt
5017554f786eSSteven Rostedt return ret;
50187a8e76a3SSteven Rostedt }
5019c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
50207a8e76a3SSteven Rostedt
50217a8e76a3SSteven Rostedt /**
5022884bfe89SSlava Pestov * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
5023884bfe89SSlava Pestov * commits failing due to the buffer wrapping around while there are uncommitted
5024884bfe89SSlava Pestov * events, such as during an interrupt storm.
5025f0d2c681SSteven Rostedt * @buffer: The ring buffer
5026f0d2c681SSteven Rostedt * @cpu: The per CPU buffer to get the number of overruns from
5027f0d2c681SSteven Rostedt */
5028f0d2c681SSteven Rostedt unsigned long
502913292494SSteven Rostedt (VMware) ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
5030f0d2c681SSteven Rostedt {
5031f0d2c681SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer;
5032f0d2c681SSteven Rostedt unsigned long ret;
5033f0d2c681SSteven Rostedt
5034f0d2c681SSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask))
5035f0d2c681SSteven Rostedt return 0;
5036f0d2c681SSteven Rostedt
5037f0d2c681SSteven Rostedt cpu_buffer = buffer->buffers[cpu];
503877ae365eSSteven Rostedt ret = local_read(&cpu_buffer->commit_overrun);
5039f0d2c681SSteven Rostedt
5040f0d2c681SSteven Rostedt return ret;
5041f0d2c681SSteven Rostedt }
5042f0d2c681SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
5043f0d2c681SSteven Rostedt
5044f0d2c681SSteven Rostedt /**
5045884bfe89SSlava Pestov * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
5046884bfe89SSlava Pestov * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
5047884bfe89SSlava Pestov * @buffer: The ring buffer
5048884bfe89SSlava Pestov  * @cpu: The per CPU buffer to get the number of dropped events from
5049884bfe89SSlava Pestov */
5050884bfe89SSlava Pestov unsigned long
505113292494SSteven Rostedt (VMware) ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
5052884bfe89SSlava Pestov {
5053884bfe89SSlava Pestov struct ring_buffer_per_cpu *cpu_buffer;
5054884bfe89SSlava Pestov unsigned long ret;
5055884bfe89SSlava Pestov
5056884bfe89SSlava Pestov if (!cpumask_test_cpu(cpu, buffer->cpumask))
5057884bfe89SSlava Pestov return 0;
5058884bfe89SSlava Pestov
5059884bfe89SSlava Pestov cpu_buffer = buffer->buffers[cpu];
5060884bfe89SSlava Pestov ret = local_read(&cpu_buffer->dropped_events);
5061884bfe89SSlava Pestov
5062884bfe89SSlava Pestov return ret;
5063884bfe89SSlava Pestov }
5064884bfe89SSlava Pestov EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
5065884bfe89SSlava Pestov
5066884bfe89SSlava Pestov /**
5067ad964704SSteven Rostedt (Red Hat) * ring_buffer_read_events_cpu - get the number of events successfully read
5068ad964704SSteven Rostedt (Red Hat) * @buffer: The ring buffer
5069ad964704SSteven Rostedt (Red Hat) * @cpu: The per CPU buffer to get the number of events read
5070ad964704SSteven Rostedt (Red Hat) */
5071ad964704SSteven Rostedt (Red Hat) unsigned long
507213292494SSteven Rostedt (VMware) ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
5073ad964704SSteven Rostedt (Red Hat) {
5074ad964704SSteven Rostedt (Red Hat) struct ring_buffer_per_cpu *cpu_buffer;
5075ad964704SSteven Rostedt (Red Hat)
5076ad964704SSteven Rostedt (Red Hat) if (!cpumask_test_cpu(cpu, buffer->cpumask))
5077ad964704SSteven Rostedt (Red Hat) return 0;
5078ad964704SSteven Rostedt (Red Hat)
5079ad964704SSteven Rostedt (Red Hat) cpu_buffer = buffer->buffers[cpu];
5080ad964704SSteven Rostedt (Red Hat) return cpu_buffer->read;
5081ad964704SSteven Rostedt (Red Hat) }
5082ad964704SSteven Rostedt (Red Hat) EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
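/*
 * Usage sketch (added for illustration): dumping the per-CPU counters
 * exposed above. Each accessor returns 0 for a CPU outside the
 * buffer's cpumask, so no extra validity check is needed here.
 *
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		pr_info("cpu%d: entries=%lu bytes=%lu overrun=%lu commit_overrun=%lu dropped=%lu read=%lu\n",
 *			cpu,
 *			ring_buffer_entries_cpu(buffer, cpu),
 *			ring_buffer_bytes_cpu(buffer, cpu),
 *			ring_buffer_overrun_cpu(buffer, cpu),
 *			ring_buffer_commit_overrun_cpu(buffer, cpu),
 *			ring_buffer_dropped_events_cpu(buffer, cpu),
 *			ring_buffer_read_events_cpu(buffer, cpu));
 */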
5083ad964704SSteven Rostedt (Red Hat)
5084ad964704SSteven Rostedt (Red Hat) /**
50857a8e76a3SSteven Rostedt * ring_buffer_entries - get the number of entries in a buffer
50867a8e76a3SSteven Rostedt * @buffer: The ring buffer
50877a8e76a3SSteven Rostedt *
50887a8e76a3SSteven Rostedt * Returns the total number of entries in the ring buffer
50897a8e76a3SSteven Rostedt * (all CPU entries)
50907a8e76a3SSteven Rostedt */
509113292494SSteven Rostedt (VMware) unsigned long ring_buffer_entries(struct trace_buffer *buffer)
50927a8e76a3SSteven Rostedt {
50937a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer;
50947a8e76a3SSteven Rostedt unsigned long entries = 0;
50957a8e76a3SSteven Rostedt int cpu;
50967a8e76a3SSteven Rostedt
50977a8e76a3SSteven Rostedt /* if you care about this being correct, lock the buffer */
50987a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) {
50997a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu];
5100f6195aa0SSteven Rostedt entries += rb_num_of_entries(cpu_buffer);
51017a8e76a3SSteven Rostedt }
51027a8e76a3SSteven Rostedt
51037a8e76a3SSteven Rostedt return entries;
51047a8e76a3SSteven Rostedt }
5105c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries);
51067a8e76a3SSteven Rostedt
51077a8e76a3SSteven Rostedt /**
510867b394f7SJiri Olsa * ring_buffer_overruns - get the number of overruns in buffer
51097a8e76a3SSteven Rostedt * @buffer: The ring buffer
51107a8e76a3SSteven Rostedt *
51117a8e76a3SSteven Rostedt * Returns the total number of overruns in the ring buffer
51127a8e76a3SSteven Rostedt * (all CPU entries)
51137a8e76a3SSteven Rostedt */
511413292494SSteven Rostedt (VMware) unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
51157a8e76a3SSteven Rostedt {
51167a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer;
51177a8e76a3SSteven Rostedt unsigned long overruns = 0;
51187a8e76a3SSteven Rostedt int cpu;
51197a8e76a3SSteven Rostedt
51207a8e76a3SSteven Rostedt /* if you care about this being correct, lock the buffer */
51217a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) {
51227a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu];
512377ae365eSSteven Rostedt overruns += local_read(&cpu_buffer->overrun);
51247a8e76a3SSteven Rostedt }
51257a8e76a3SSteven Rostedt
51267a8e76a3SSteven Rostedt return overruns;
51277a8e76a3SSteven Rostedt }
5128c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overruns);
51297a8e76a3SSteven Rostedt
5130642edba5SSteven Rostedt static void rb_iter_reset(struct ring_buffer_iter *iter)
51317a8e76a3SSteven Rostedt {
51327a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
51337a8e76a3SSteven Rostedt
5134d769041fSSteven Rostedt /* Iterator usage is expected to have record disabled */
5135d769041fSSteven Rostedt iter->head_page = cpu_buffer->reader_page;
51366f807acdSSteven Rostedt iter->head = cpu_buffer->reader_page->read;
5137785888c5SSteven Rostedt (VMware) iter->next_event = iter->head;
5138651e22f2SSteven Rostedt (Red Hat)
5139651e22f2SSteven Rostedt (Red Hat) iter->cache_reader_page = iter->head_page;
514024607f11SSteven Rostedt (Red Hat) iter->cache_read = cpu_buffer->read;
51412d093282SZheng Yejian iter->cache_pages_removed = cpu_buffer->pages_removed;
5142651e22f2SSteven Rostedt (Red Hat)
514328e3fc56SSteven Rostedt (VMware) if (iter->head) {
5144d769041fSSteven Rostedt iter->read_stamp = cpu_buffer->read_stamp;
514528e3fc56SSteven Rostedt (VMware) iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
514628e3fc56SSteven Rostedt (VMware) } else {
5147abc9b56dSSteven Rostedt iter->read_stamp = iter->head_page->page->time_stamp;
514828e3fc56SSteven Rostedt (VMware) iter->page_stamp = iter->read_stamp;
514928e3fc56SSteven Rostedt (VMware) }
5150642edba5SSteven Rostedt }
5151f83c9d0fSSteven Rostedt
5152642edba5SSteven Rostedt /**
5153642edba5SSteven Rostedt * ring_buffer_iter_reset - reset an iterator
5154642edba5SSteven Rostedt * @iter: The iterator to reset
5155642edba5SSteven Rostedt *
5156642edba5SSteven Rostedt * Resets the iterator, so that it will start from the beginning
5157642edba5SSteven Rostedt * again.
5158642edba5SSteven Rostedt */
5159642edba5SSteven Rostedt void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
5160642edba5SSteven Rostedt {
5161554f786eSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer;
5162642edba5SSteven Rostedt unsigned long flags;
5163642edba5SSteven Rostedt
5164554f786eSSteven Rostedt if (!iter)
5165554f786eSSteven Rostedt return;
5166554f786eSSteven Rostedt
5167554f786eSSteven Rostedt cpu_buffer = iter->cpu_buffer;
5168554f786eSSteven Rostedt
51695389f6faSThomas Gleixner raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5170642edba5SSteven Rostedt rb_iter_reset(iter);
51715389f6faSThomas Gleixner raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
51727a8e76a3SSteven Rostedt }
5173c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
51747a8e76a3SSteven Rostedt
51757a8e76a3SSteven Rostedt /**
51767a8e76a3SSteven Rostedt * ring_buffer_iter_empty - check if an iterator has no more to read
51777a8e76a3SSteven Rostedt * @iter: The iterator to check
51787a8e76a3SSteven Rostedt */
51797a8e76a3SSteven Rostedt int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
51807a8e76a3SSteven Rostedt {
51817a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer;
518278f7a45dSSteven Rostedt (VMware) struct buffer_page *reader;
518378f7a45dSSteven Rostedt (VMware) struct buffer_page *head_page;
518478f7a45dSSteven Rostedt (VMware) struct buffer_page *commit_page;
5185ead6ecfdSSteven Rostedt (VMware) struct buffer_page *curr_commit_page;
518678f7a45dSSteven Rostedt (VMware) unsigned commit;
5187ead6ecfdSSteven Rostedt (VMware) u64 curr_commit_ts;
5188ead6ecfdSSteven Rostedt (VMware) u64 commit_ts;
51897a8e76a3SSteven Rostedt
51907a8e76a3SSteven Rostedt cpu_buffer = iter->cpu_buffer;
519178f7a45dSSteven Rostedt (VMware) reader = cpu_buffer->reader_page;
519278f7a45dSSteven Rostedt (VMware) head_page = cpu_buffer->head_page;
5193f1e30cb6Slinke li commit_page = READ_ONCE(cpu_buffer->commit_page);
5194ead6ecfdSSteven Rostedt (VMware) commit_ts = commit_page->page->time_stamp;
519578f7a45dSSteven Rostedt (VMware)
5196ead6ecfdSSteven Rostedt (VMware) /*
5197ead6ecfdSSteven Rostedt (VMware) * When the writer goes across pages, it issues a cmpxchg which
5198ead6ecfdSSteven Rostedt (VMware) * is a mb(), which will synchronize with the rmb here.
5199ead6ecfdSSteven Rostedt (VMware) * (see rb_tail_page_update())
5200ead6ecfdSSteven Rostedt (VMware) */
5201ead6ecfdSSteven Rostedt (VMware) smp_rmb();
5202ead6ecfdSSteven Rostedt (VMware) commit = rb_page_commit(commit_page);
5203ead6ecfdSSteven Rostedt (VMware) /* We want to make sure that the commit page doesn't change */
5204ead6ecfdSSteven Rostedt (VMware) smp_rmb();
5205ead6ecfdSSteven Rostedt (VMware)
5206ead6ecfdSSteven Rostedt (VMware) /* Make sure commit page didn't change */
5207ead6ecfdSSteven Rostedt (VMware) curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
5208ead6ecfdSSteven Rostedt (VMware) curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
5209ead6ecfdSSteven Rostedt (VMware)
5210ead6ecfdSSteven Rostedt (VMware) /* If the commit page changed, then there's more data */
5211ead6ecfdSSteven Rostedt (VMware) if (curr_commit_page != commit_page ||
5212ead6ecfdSSteven Rostedt (VMware) curr_commit_ts != commit_ts)
5213ead6ecfdSSteven Rostedt (VMware) return 0;
5214ead6ecfdSSteven Rostedt (VMware)
5215ead6ecfdSSteven Rostedt (VMware) /* Still racy, as it may return a false positive, but that's OK */
5216785888c5SSteven Rostedt (VMware) return ((iter->head_page == commit_page && iter->head >= commit) ||
521778f7a45dSSteven Rostedt (VMware) (iter->head_page == reader && commit_page == head_page &&
521878f7a45dSSteven Rostedt (VMware) head_page->read == commit &&
5219fe832be0SSteven Rostedt (Google) iter->head == rb_page_size(cpu_buffer->reader_page)));
52207a8e76a3SSteven Rostedt }
5221c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
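/*
 * Usage sketch (added for illustration): a non-consuming read loop with
 * the iterator API declared in linux/ring_buffer.h. Error handling is
 * elided for brevity.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while (!ring_buffer_iter_empty(iter)) {
 *		event = ring_buffer_iter_peek(iter, &ts);
 *		if (!event)
 *			break;
 *		... decode ring_buffer_event_data(event) ...
 *		ring_buffer_iter_advance(iter);
 *	}
 *	ring_buffer_read_finish(iter);
 */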
52227a8e76a3SSteven Rostedt
52237a8e76a3SSteven Rostedt static void
52247a8e76a3SSteven Rostedt rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
52257a8e76a3SSteven Rostedt struct ring_buffer_event *event)
52267a8e76a3SSteven Rostedt {
52277a8e76a3SSteven Rostedt u64 delta;
52287a8e76a3SSteven Rostedt
5229334d4169SLai Jiangshan switch (event->type_len) {
52307a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING:
52317a8e76a3SSteven Rostedt return;
52327a8e76a3SSteven Rostedt
52337a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND:
5234e20044f7SSteven Rostedt (VMware) delta = rb_event_time_stamp(event);
52357a8e76a3SSteven Rostedt cpu_buffer->read_stamp += delta;
52367a8e76a3SSteven Rostedt return;
52377a8e76a3SSteven Rostedt
52387a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP:
5239e20044f7SSteven Rostedt (VMware) delta = rb_event_time_stamp(event);
52406695da58SSteven Rostedt (Google) delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp);
5241dc4e2801STom Zanussi cpu_buffer->read_stamp = delta;
52427a8e76a3SSteven Rostedt return;
52437a8e76a3SSteven Rostedt
52447a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA:
52457a8e76a3SSteven Rostedt cpu_buffer->read_stamp += event->time_delta;
52467a8e76a3SSteven Rostedt return;
52477a8e76a3SSteven Rostedt
52487a8e76a3SSteven Rostedt default:
5249da4d401aSSteven Rostedt (VMware) RB_WARN_ON(cpu_buffer, 1);
52507a8e76a3SSteven Rostedt }
52517a8e76a3SSteven Rostedt }
52527a8e76a3SSteven Rostedt
52537a8e76a3SSteven Rostedt static void
52547a8e76a3SSteven Rostedt rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
52557a8e76a3SSteven Rostedt struct ring_buffer_event *event)
52567a8e76a3SSteven Rostedt {
52577a8e76a3SSteven Rostedt u64 delta;
52587a8e76a3SSteven Rostedt
5259334d4169SLai Jiangshan switch (event->type_len) {
52607a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING:
52617a8e76a3SSteven Rostedt return;
52627a8e76a3SSteven Rostedt
52637a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND:
5264e20044f7SSteven Rostedt (VMware) delta = rb_event_time_stamp(event);
52657a8e76a3SSteven Rostedt iter->read_stamp += delta;
52667a8e76a3SSteven Rostedt return;
52677a8e76a3SSteven Rostedt
52687a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP:
5269e20044f7SSteven Rostedt (VMware) delta = rb_event_time_stamp(event);
52706695da58SSteven Rostedt (Google) delta = rb_fix_abs_ts(delta, iter->read_stamp);
5271dc4e2801STom Zanussi iter->read_stamp = delta;
52727a8e76a3SSteven Rostedt return;
52737a8e76a3SSteven Rostedt
52747a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA:
52757a8e76a3SSteven Rostedt iter->read_stamp += event->time_delta;
52767a8e76a3SSteven Rostedt return;
52777a8e76a3SSteven Rostedt
52787a8e76a3SSteven Rostedt default:
5279da4d401aSSteven Rostedt (VMware) RB_WARN_ON(iter->cpu_buffer, 1);
52807a8e76a3SSteven Rostedt }
52817a8e76a3SSteven Rostedt }
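/*
 * Worked example (added for illustration) of the stamp updates above:
 * starting from a page time_stamp of 1000, three DATA events with
 * time_delta values 5, 12 and 3 leave read_stamp at
 * 1000 + 5 + 12 + 3 = 1020. A TIME_EXTEND event adds a delta too large
 * for a DATA event's 27-bit field, while a TIME_STAMP event replaces
 * read_stamp with an absolute value, rb_fix_abs_ts() reinserting the
 * saved upper bits.
 */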
52827a8e76a3SSteven Rostedt
5283d769041fSSteven Rostedt static struct buffer_page *
5284d769041fSSteven Rostedt rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
52857a8e76a3SSteven Rostedt {
5286d769041fSSteven Rostedt struct buffer_page *reader = NULL;
5287139f8400STzvetomir Stoyanov (VMware) unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
528866a8cb95SSteven Rostedt unsigned long overwrite;
5289d769041fSSteven Rostedt unsigned long flags;
5290818e3dd3SSteven Rostedt int nr_loops = 0;
5291bc92b956SUros Bizjak bool ret;
5292d769041fSSteven Rostedt
52933e03fb7fSSteven Rostedt local_irq_save(flags);
52940199c4e6SThomas Gleixner arch_spin_lock(&cpu_buffer->lock);
5295d769041fSSteven Rostedt
5296d769041fSSteven Rostedt again:
5297818e3dd3SSteven Rostedt /*
5298818e3dd3SSteven Rostedt * This should normally only loop twice. But because the
5299818e3dd3SSteven Rostedt * start of the reader inserts an empty page, it causes
5300818e3dd3SSteven Rostedt * a case where we will loop three times. There should be no
5301818e3dd3SSteven Rostedt * reason to loop four times (that I know of).
5302818e3dd3SSteven Rostedt */
53033e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
5304818e3dd3SSteven Rostedt reader = NULL;
5305818e3dd3SSteven Rostedt goto out;
5306818e3dd3SSteven Rostedt }
5307818e3dd3SSteven Rostedt
5308d769041fSSteven Rostedt reader = cpu_buffer->reader_page;
5309d769041fSSteven Rostedt
5310d769041fSSteven Rostedt /* If there's more to read, return this page */
5311bf41a158SSteven Rostedt if (cpu_buffer->reader_page->read < rb_page_size(reader))
5312d769041fSSteven Rostedt goto out;
5313d769041fSSteven Rostedt
5314d769041fSSteven Rostedt /* Never should we have an index greater than the size */
53153e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer,
53163e89c7bbSSteven Rostedt cpu_buffer->reader_page->read > rb_page_size(reader)))
53173e89c7bbSSteven Rostedt goto out;
5318d769041fSSteven Rostedt
5319d769041fSSteven Rostedt /* check if we caught up to the tail */
5320d769041fSSteven Rostedt reader = NULL;
5321bf41a158SSteven Rostedt if (cpu_buffer->commit_page == cpu_buffer->reader_page)
5322d769041fSSteven Rostedt goto out;
53237a8e76a3SSteven Rostedt
5324a5fb8331SSteven Rostedt /* Don't bother swapping if the ring buffer is empty */
5325a5fb8331SSteven Rostedt if (rb_num_of_entries(cpu_buffer) == 0)
5326a5fb8331SSteven Rostedt goto out;
5327a5fb8331SSteven Rostedt
53287a8e76a3SSteven Rostedt /*
5329d769041fSSteven Rostedt * Reset the reader page to size zero.
53307a8e76a3SSteven Rostedt */
533177ae365eSSteven Rostedt local_set(&cpu_buffer->reader_page->write, 0);
533277ae365eSSteven Rostedt local_set(&cpu_buffer->reader_page->entries, 0);
533377ae365eSSteven Rostedt local_set(&cpu_buffer->reader_page->page->commit, 0);
5334ff0ff84aSSteven Rostedt cpu_buffer->reader_page->real_end = 0;
5335d769041fSSteven Rostedt
533677ae365eSSteven Rostedt spin:
533777ae365eSSteven Rostedt /*
533877ae365eSSteven Rostedt * Splice the empty reader page into the list around the head.
533977ae365eSSteven Rostedt */
534077ae365eSSteven Rostedt reader = rb_set_head_page(cpu_buffer);
534154f7be5bSSteven Rostedt if (!reader)
534254f7be5bSSteven Rostedt goto out;
53430e1ff5d7SSteven Rostedt cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
5344d769041fSSteven Rostedt cpu_buffer->reader_page->list.prev = reader->list.prev;
5345bf41a158SSteven Rostedt
53463adc54faSSteven Rostedt /*
53473adc54faSSteven Rostedt * cpu_buffer->pages just needs to point to the buffer, it
53483adc54faSSteven Rostedt * has no specific buffer page to point to. Lets move it out
534925985edcSLucas De Marchi * of our way so we don't accidentally swap it.
53503adc54faSSteven Rostedt */
53513adc54faSSteven Rostedt cpu_buffer->pages = reader->list.prev;
53523adc54faSSteven Rostedt
535377ae365eSSteven Rostedt /* The reader page will be pointing to the new head */
53546689bed3SQiujun Huang rb_set_list_to_head(&cpu_buffer->reader_page->list);
5355d769041fSSteven Rostedt
5356d769041fSSteven Rostedt /*
535766a8cb95SSteven Rostedt * We want to make sure we read the overruns after we set up our
535866a8cb95SSteven Rostedt * pointers to the next object. The writer side does a
535966a8cb95SSteven Rostedt * cmpxchg to cross pages which acts as the mb on the writer
536066a8cb95SSteven Rostedt * side. Note, the reader will constantly fail the swap
536166a8cb95SSteven Rostedt * while the writer is updating the pointers, so this
536266a8cb95SSteven Rostedt * guarantees that the overwrite recorded here is the one we
536366a8cb95SSteven Rostedt * want to compare with the last_overrun.
536466a8cb95SSteven Rostedt */
536566a8cb95SSteven Rostedt smp_mb();
536666a8cb95SSteven Rostedt overwrite = local_read(&(cpu_buffer->overrun));
536766a8cb95SSteven Rostedt
536866a8cb95SSteven Rostedt /*
536977ae365eSSteven Rostedt * Here's the tricky part.
537077ae365eSSteven Rostedt *
537177ae365eSSteven Rostedt * We need to move the pointer past the header page.
537277ae365eSSteven Rostedt * But we can only do that if a writer is not currently
537377ae365eSSteven Rostedt * moving it. The page before the header page has the
537477ae365eSSteven Rostedt * flag bit '1' set if it is pointing to the page we want.
537577ae365eSSteven Rostedt * but if the writer is in the process of moving it
53763ca4d7afSZhouyi Zhou * then it will be '2' or already moved '0'.
5377d769041fSSteven Rostedt */
5378d769041fSSteven Rostedt
537977ae365eSSteven Rostedt ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
538077ae365eSSteven Rostedt
538177ae365eSSteven Rostedt /*
538277ae365eSSteven Rostedt * If we did not convert it, then we must try again.
538377ae365eSSteven Rostedt */
538477ae365eSSteven Rostedt if (!ret)
538577ae365eSSteven Rostedt goto spin;
538677ae365eSSteven Rostedt
5387b14d0329SSteven Rostedt (Google) if (cpu_buffer->ring_meta)
5388b14d0329SSteven Rostedt (Google) rb_update_meta_reader(cpu_buffer, reader);
5389b14d0329SSteven Rostedt (Google)
539077ae365eSSteven Rostedt /*
53912c2b0a78SSteven Rostedt (VMware) * Yay! We succeeded in replacing the page.
539277ae365eSSteven Rostedt *
539377ae365eSSteven Rostedt * Now make the new head point back to the reader page.
539477ae365eSSteven Rostedt */
53955ded3dc6SDavid Sharp rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
53966689bed3SQiujun Huang rb_inc_page(&cpu_buffer->head_page);
5397d769041fSSteven Rostedt
5398b237e1f7SPetr Pavlu cpu_buffer->cnt++;
53992c2b0a78SSteven Rostedt (VMware) local_inc(&cpu_buffer->pages_read);
54002c2b0a78SSteven Rostedt (VMware)
5401d769041fSSteven Rostedt /* Finally update the reader page to the new head */
5402d769041fSSteven Rostedt cpu_buffer->reader_page = reader;
5403b81f472aSSteven Rostedt (Red Hat) cpu_buffer->reader_page->read = 0;
5404d769041fSSteven Rostedt
540566a8cb95SSteven Rostedt if (overwrite != cpu_buffer->last_overrun) {
540666a8cb95SSteven Rostedt cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
540766a8cb95SSteven Rostedt cpu_buffer->last_overrun = overwrite;
540866a8cb95SSteven Rostedt }
540966a8cb95SSteven Rostedt
5410d769041fSSteven Rostedt goto again;
5411d769041fSSteven Rostedt
5412d769041fSSteven Rostedt out:
5413b81f472aSSteven Rostedt (Red Hat) /* Update the read_stamp on the first event */
5414b81f472aSSteven Rostedt (Red Hat) if (reader && reader->read == 0)
5415b81f472aSSteven Rostedt (Red Hat) cpu_buffer->read_stamp = reader->page->time_stamp;
5416b81f472aSSteven Rostedt (Red Hat)
54170199c4e6SThomas Gleixner arch_spin_unlock(&cpu_buffer->lock);
54183e03fb7fSSteven Rostedt local_irq_restore(flags);
5419d769041fSSteven Rostedt
5420a0fcaaedSSteven Rostedt (Google) /*
5421a0fcaaedSSteven Rostedt (Google) 	 * The writer has preemption disabled, so wait for it, but not forever;
5422a0fcaaedSSteven Rostedt (Google) 	 * although 1 second is pretty much "forever".
5423a0fcaaedSSteven Rostedt (Google) */
5424a0fcaaedSSteven Rostedt (Google) #define USECS_WAIT 1000000
5425a0fcaaedSSteven Rostedt (Google) for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
5426a0fcaaedSSteven Rostedt (Google) /* If the write is past the end of page, a writer is still updating it */
5427139f8400STzvetomir Stoyanov (VMware) if (likely(!reader || rb_page_write(reader) <= bsize))
5428a0fcaaedSSteven Rostedt (Google) break;
5429a0fcaaedSSteven Rostedt (Google)
5430a0fcaaedSSteven Rostedt (Google) udelay(1);
5431a0fcaaedSSteven Rostedt (Google)
5432a0fcaaedSSteven Rostedt (Google) /* Get the latest version of the reader write value */
5433a0fcaaedSSteven Rostedt (Google) smp_rmb();
5434a0fcaaedSSteven Rostedt (Google) }
5435a0fcaaedSSteven Rostedt (Google)
5436a0fcaaedSSteven Rostedt (Google) /* The writer is not moving forward? Something is wrong */
5437a0fcaaedSSteven Rostedt (Google) if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
5438a0fcaaedSSteven Rostedt (Google) reader = NULL;
5439a0fcaaedSSteven Rostedt (Google)
5440a0fcaaedSSteven Rostedt (Google) /*
5441a0fcaaedSSteven Rostedt (Google) * Make sure we see any padding after the write update
54426455b616SZheng Yejian * (see rb_reset_tail()).
54436455b616SZheng Yejian *
54446455b616SZheng Yejian * In addition, a writer may be writing on the reader page
54456455b616SZheng Yejian * if the page has not been fully filled, so the read barrier
54466455b616SZheng Yejian * is also needed to make sure we see the content of what is
54476455b616SZheng Yejian * committed by the writer (see rb_set_commit_to_write()).
5448a0fcaaedSSteven Rostedt (Google) */
5449a0fcaaedSSteven Rostedt (Google) smp_rmb();
5450a0fcaaedSSteven Rostedt (Google)
5451a0fcaaedSSteven Rostedt (Google)
5452d769041fSSteven Rostedt return reader;
54537a8e76a3SSteven Rostedt }
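/*
 * Sketch of the swap performed above (added for illustration), in terms
 * of the RB_PAGE_* flag values defined earlier in this file:
 *
 *	prev->next == head | RB_PAGE_HEAD	flag '1': head is stable
 *	cmpxchg(prev->next, head | RB_PAGE_HEAD, reader_page)
 *		on success, the empty reader page is spliced in as the
 *		new head and the old head becomes the page handed to
 *		the reader
 *
 * If the writer is concurrently moving the head, the flag reads as
 * RB_PAGE_UPDATE ('2') or RB_PAGE_NORMAL ('0'), the cmpxchg fails, and
 * the reader loops back to the "spin:" label to retry.
 */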
54547a8e76a3SSteven Rostedt
5455d769041fSSteven Rostedt static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
5456d769041fSSteven Rostedt {
5457d769041fSSteven Rostedt struct ring_buffer_event *event;
5458d769041fSSteven Rostedt struct buffer_page *reader;
5459d769041fSSteven Rostedt unsigned length;
5460d769041fSSteven Rostedt
5461d769041fSSteven Rostedt reader = rb_get_reader_page(cpu_buffer);
5462d769041fSSteven Rostedt
5463d769041fSSteven Rostedt /* This function should not be called when buffer is empty */
54643e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, !reader))
54653e89c7bbSSteven Rostedt return;
5466d769041fSSteven Rostedt
5467d769041fSSteven Rostedt event = rb_reader_event(cpu_buffer);
54687a8e76a3SSteven Rostedt
5469a1863c21SSteven Rostedt if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
5470e4906effSSteven Rostedt cpu_buffer->read++;
54717a8e76a3SSteven Rostedt
54727a8e76a3SSteven Rostedt rb_update_read_stamp(cpu_buffer, event);
54737a8e76a3SSteven Rostedt
5474d769041fSSteven Rostedt length = rb_event_length(event);
54756f807acdSSteven Rostedt cpu_buffer->reader_page->read += length;
547645d99ea4SZheng Yejian cpu_buffer->read_bytes += length;
54777a8e76a3SSteven Rostedt }
54787a8e76a3SSteven Rostedt
54797a8e76a3SSteven Rostedt static void rb_advance_iter(struct ring_buffer_iter *iter)
54807a8e76a3SSteven Rostedt {
54817a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer;
54827a8e76a3SSteven Rostedt
54837a8e76a3SSteven Rostedt cpu_buffer = iter->cpu_buffer;
54847a8e76a3SSteven Rostedt
5485785888c5SSteven Rostedt (VMware) /* If head == next_event then we need to jump to the next event */
5486785888c5SSteven Rostedt (VMware) if (iter->head == iter->next_event) {
5487785888c5SSteven Rostedt (VMware) /* If the event gets overwritten again, there's nothing to do */
5488785888c5SSteven Rostedt (VMware) if (rb_iter_head_event(iter) == NULL)
5489785888c5SSteven Rostedt (VMware) return;
5490785888c5SSteven Rostedt (VMware) }
5491785888c5SSteven Rostedt (VMware)
5492785888c5SSteven Rostedt (VMware) iter->head = iter->next_event;
5493785888c5SSteven Rostedt (VMware)
54947a8e76a3SSteven Rostedt /*
54957a8e76a3SSteven Rostedt * Check if we are at the end of the buffer.
54967a8e76a3SSteven Rostedt */
5497785888c5SSteven Rostedt (VMware) if (iter->next_event >= rb_page_size(iter->head_page)) {
5498ea05b57cSSteven Rostedt /* discarded commits can make the page empty */
5499ea05b57cSSteven Rostedt if (iter->head_page == cpu_buffer->commit_page)
55003e89c7bbSSteven Rostedt return;
5501d769041fSSteven Rostedt rb_inc_iter(iter);
55027a8e76a3SSteven Rostedt return;
55037a8e76a3SSteven Rostedt }
55047a8e76a3SSteven Rostedt
5505785888c5SSteven Rostedt (VMware) rb_update_iter_read_stamp(iter, iter->event);
55067a8e76a3SSteven Rostedt }
55077a8e76a3SSteven Rostedt
550866a8cb95SSteven Rostedt static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
550966a8cb95SSteven Rostedt {
551066a8cb95SSteven Rostedt return cpu_buffer->lost_events;
551166a8cb95SSteven Rostedt }
551266a8cb95SSteven Rostedt
5513f83c9d0fSSteven Rostedt static struct ring_buffer_event *
551466a8cb95SSteven Rostedt rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
551566a8cb95SSteven Rostedt unsigned long *lost_events)
55167a8e76a3SSteven Rostedt {
55177a8e76a3SSteven Rostedt struct ring_buffer_event *event;
5518d769041fSSteven Rostedt struct buffer_page *reader;
5519818e3dd3SSteven Rostedt int nr_loops = 0;
55207a8e76a3SSteven Rostedt
5521dc4e2801STom Zanussi if (ts)
5522dc4e2801STom Zanussi *ts = 0;
55237a8e76a3SSteven Rostedt again:
5524818e3dd3SSteven Rostedt /*
552569d1b839SSteven Rostedt * We repeat when a time extend is encountered.
552669d1b839SSteven Rostedt * Since the time extend is always attached to a data event,
552769d1b839SSteven Rostedt * we should never loop more than once.
552869d1b839SSteven Rostedt * (We never hit the following condition more than twice).
5529818e3dd3SSteven Rostedt */
553069d1b839SSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
5531818e3dd3SSteven Rostedt return NULL;
5532818e3dd3SSteven Rostedt
5533d769041fSSteven Rostedt reader = rb_get_reader_page(cpu_buffer);
5534d769041fSSteven Rostedt if (!reader)
55357a8e76a3SSteven Rostedt return NULL;
55367a8e76a3SSteven Rostedt
5537d769041fSSteven Rostedt event = rb_reader_event(cpu_buffer);
55387a8e76a3SSteven Rostedt
5539334d4169SLai Jiangshan switch (event->type_len) {
55407a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING:
55412d622719STom Zanussi if (rb_null_event(event))
5542bf41a158SSteven Rostedt RB_WARN_ON(cpu_buffer, 1);
55432d622719STom Zanussi /*
55442d622719STom Zanussi * Because the writer could be discarding every
55452d622719STom Zanussi * event it creates (which would probably be bad)
55462d622719STom Zanussi * if we were to go back to "again" then we may never
55472d622719STom Zanussi * catch up, and will trigger the warn on, or lock
55482d622719STom Zanussi * the box. Return the padding, and we will release
55492d622719STom Zanussi * the current locks, and try again.
55502d622719STom Zanussi */
55512d622719STom Zanussi return event;
55527a8e76a3SSteven Rostedt
55537a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND:
55547a8e76a3SSteven Rostedt /* Internal data, OK to advance */
5555d769041fSSteven Rostedt rb_advance_reader(cpu_buffer);
55567a8e76a3SSteven Rostedt goto again;
55577a8e76a3SSteven Rostedt
55587a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP:
5559dc4e2801STom Zanussi if (ts) {
5560e20044f7SSteven Rostedt (VMware) *ts = rb_event_time_stamp(event);
55616695da58SSteven Rostedt (Google) *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp);
5562dc4e2801STom Zanussi ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
5563dc4e2801STom Zanussi cpu_buffer->cpu, ts);
5564dc4e2801STom Zanussi }
5565dc4e2801STom Zanussi /* Internal data, OK to advance */
5566d769041fSSteven Rostedt rb_advance_reader(cpu_buffer);
55677a8e76a3SSteven Rostedt goto again;
55687a8e76a3SSteven Rostedt
55697a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA:
5570dc4e2801STom Zanussi if (ts && !(*ts)) {
55717a8e76a3SSteven Rostedt *ts = cpu_buffer->read_stamp + event->time_delta;
5572d8eeb2d3SRobert Richter ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
557337886f6aSSteven Rostedt cpu_buffer->cpu, ts);
55747a8e76a3SSteven Rostedt }
557566a8cb95SSteven Rostedt if (lost_events)
557666a8cb95SSteven Rostedt *lost_events = rb_lost_events(cpu_buffer);
55777a8e76a3SSteven Rostedt return event;
55787a8e76a3SSteven Rostedt
55797a8e76a3SSteven Rostedt default:
5580da4d401aSSteven Rostedt (VMware) RB_WARN_ON(cpu_buffer, 1);
55817a8e76a3SSteven Rostedt }
55827a8e76a3SSteven Rostedt
55837a8e76a3SSteven Rostedt return NULL;
55847a8e76a3SSteven Rostedt }
5585c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_peek);
55867a8e76a3SSteven Rostedt
5587f83c9d0fSSteven Rostedt static struct ring_buffer_event *
5588f83c9d0fSSteven Rostedt rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
55897a8e76a3SSteven Rostedt {
559013292494SSteven Rostedt (VMware) struct trace_buffer *buffer;
55917a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer;
55927a8e76a3SSteven Rostedt struct ring_buffer_event *event;
5593818e3dd3SSteven Rostedt int nr_loops = 0;
55947a8e76a3SSteven Rostedt
5595dc4e2801STom Zanussi if (ts)
5596dc4e2801STom Zanussi *ts = 0;
5597dc4e2801STom Zanussi
55987a8e76a3SSteven Rostedt cpu_buffer = iter->cpu_buffer;
55997a8e76a3SSteven Rostedt buffer = cpu_buffer->buffer;
56007a8e76a3SSteven Rostedt
5601492a74f4SSteven Rostedt /*
56022d093282SZheng Yejian * Check if someone performed a consuming read to the buffer
56032d093282SZheng Yejian * or removed some pages from the buffer. In these cases,
56042d093282SZheng Yejian 	 * the iterator was invalidated and we need to reset it.
5605492a74f4SSteven Rostedt */
5606492a74f4SSteven Rostedt if (unlikely(iter->cache_read != cpu_buffer->read ||
56072d093282SZheng Yejian iter->cache_reader_page != cpu_buffer->reader_page ||
56082d093282SZheng Yejian iter->cache_pages_removed != cpu_buffer->pages_removed))
5609492a74f4SSteven Rostedt rb_iter_reset(iter);
5610492a74f4SSteven Rostedt
56117a8e76a3SSteven Rostedt again:
56123c05d748SSteven Rostedt if (ring_buffer_iter_empty(iter))
56133c05d748SSteven Rostedt return NULL;
56143c05d748SSteven Rostedt
5615818e3dd3SSteven Rostedt /*
56163d2353deSSteven Rostedt (VMware) * As the writer can mess with what the iterator is trying
56173d2353deSSteven Rostedt (VMware) * to read, just give up if we fail to get an event after
56183d2353deSSteven Rostedt (VMware) * three tries. The iterator is not as reliable when reading
56193d2353deSSteven Rostedt (VMware) * the ring buffer with an active write as the consumer is.
56203d2353deSSteven Rostedt (VMware) 	 * Do not warn when the three failures are reached.
5621818e3dd3SSteven Rostedt */
56223d2353deSSteven Rostedt (VMware) if (++nr_loops > 3)
5623818e3dd3SSteven Rostedt return NULL;
5624818e3dd3SSteven Rostedt
56257a8e76a3SSteven Rostedt if (rb_per_cpu_empty(cpu_buffer))
56267a8e76a3SSteven Rostedt return NULL;
56277a8e76a3SSteven Rostedt
562810e83fd0SSteven Rostedt (Red Hat) if (iter->head >= rb_page_size(iter->head_page)) {
56293c05d748SSteven Rostedt rb_inc_iter(iter);
56303c05d748SSteven Rostedt goto again;
56313c05d748SSteven Rostedt }
56323c05d748SSteven Rostedt
56337a8e76a3SSteven Rostedt event = rb_iter_head_event(iter);
56343d2353deSSteven Rostedt (VMware) if (!event)
5635785888c5SSteven Rostedt (VMware) goto again;
56367a8e76a3SSteven Rostedt
5637334d4169SLai Jiangshan switch (event->type_len) {
56387a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING:
56392d622719STom Zanussi if (rb_null_event(event)) {
5640d769041fSSteven Rostedt rb_inc_iter(iter);
56417a8e76a3SSteven Rostedt goto again;
56422d622719STom Zanussi }
56432d622719STom Zanussi rb_advance_iter(iter);
56442d622719STom Zanussi return event;
56457a8e76a3SSteven Rostedt
56467a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND:
56477a8e76a3SSteven Rostedt /* Internal data, OK to advance */
56487a8e76a3SSteven Rostedt rb_advance_iter(iter);
56497a8e76a3SSteven Rostedt goto again;
56507a8e76a3SSteven Rostedt
56517a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP:
5652dc4e2801STom Zanussi if (ts) {
5653e20044f7SSteven Rostedt (VMware) *ts = rb_event_time_stamp(event);
56546695da58SSteven Rostedt (Google) *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
5655dc4e2801STom Zanussi ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
5656dc4e2801STom Zanussi cpu_buffer->cpu, ts);
5657dc4e2801STom Zanussi }
5658dc4e2801STom Zanussi /* Internal data, OK to advance */
56597a8e76a3SSteven Rostedt rb_advance_iter(iter);
56607a8e76a3SSteven Rostedt goto again;
56617a8e76a3SSteven Rostedt
56627a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA:
5663dc4e2801STom Zanussi if (ts && !(*ts)) {
56647a8e76a3SSteven Rostedt *ts = iter->read_stamp + event->time_delta;
566537886f6aSSteven Rostedt ring_buffer_normalize_time_stamp(buffer,
566637886f6aSSteven Rostedt cpu_buffer->cpu, ts);
56677a8e76a3SSteven Rostedt }
56687a8e76a3SSteven Rostedt return event;
56697a8e76a3SSteven Rostedt
56707a8e76a3SSteven Rostedt default:
5671da4d401aSSteven Rostedt (VMware) RB_WARN_ON(cpu_buffer, 1);
56727a8e76a3SSteven Rostedt }
56737a8e76a3SSteven Rostedt
56747a8e76a3SSteven Rostedt return NULL;
56757a8e76a3SSteven Rostedt }
5676c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
56777a8e76a3SSteven Rostedt
5678289a5a25SSteven Rostedt (Red Hat) static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
56798d707e8eSSteven Rostedt {
5680289a5a25SSteven Rostedt (Red Hat) if (likely(!in_nmi())) {
5681289a5a25SSteven Rostedt (Red Hat) raw_spin_lock(&cpu_buffer->reader_lock);
5682289a5a25SSteven Rostedt (Red Hat) return true;
5683289a5a25SSteven Rostedt (Red Hat) }
5684289a5a25SSteven Rostedt (Red Hat)
56858d707e8eSSteven Rostedt /*
56868d707e8eSSteven Rostedt 	 * If an NMI die dumps out the content of the ring buffer, a
5687289a5a25SSteven Rostedt (Red Hat) 	 * trylock must be used to prevent a deadlock if the NMI
5688289a5a25SSteven Rostedt (Red Hat) * preempted a task that holds the ring buffer locks. If
5689289a5a25SSteven Rostedt (Red Hat) * we get the lock then all is fine, if not, then continue
5690289a5a25SSteven Rostedt (Red Hat) * to do the read, but this can corrupt the ring buffer,
5691289a5a25SSteven Rostedt (Red Hat) * so it must be permanently disabled from future writes.
5692289a5a25SSteven Rostedt (Red Hat) 	 * Reading from NMI context is a one-shot deal.
56938d707e8eSSteven Rostedt */
5694289a5a25SSteven Rostedt (Red Hat) if (raw_spin_trylock(&cpu_buffer->reader_lock))
5695289a5a25SSteven Rostedt (Red Hat) return true;
56968d707e8eSSteven Rostedt
5697289a5a25SSteven Rostedt (Red Hat) /* Continue without locking, but disable the ring buffer */
5698289a5a25SSteven Rostedt (Red Hat) atomic_inc(&cpu_buffer->record_disabled);
5699289a5a25SSteven Rostedt (Red Hat) return false;
5700289a5a25SSteven Rostedt (Red Hat) }
5701289a5a25SSteven Rostedt (Red Hat)
5702289a5a25SSteven Rostedt (Red Hat) static inline void
5703289a5a25SSteven Rostedt (Red Hat) rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
5704289a5a25SSteven Rostedt (Red Hat) {
5705289a5a25SSteven Rostedt (Red Hat) if (likely(locked))
5706289a5a25SSteven Rostedt (Red Hat) raw_spin_unlock(&cpu_buffer->reader_lock);
57078d707e8eSSteven Rostedt }
57088d707e8eSSteven Rostedt
57097a8e76a3SSteven Rostedt /**
5710f83c9d0fSSteven Rostedt * ring_buffer_peek - peek at the next event to be read
5711f83c9d0fSSteven Rostedt * @buffer: The ring buffer to read
5712f83c9d0fSSteven Rostedt  * @cpu: The cpu to peek at
5713f83c9d0fSSteven Rostedt * @ts: The timestamp counter of this event.
571466a8cb95SSteven Rostedt * @lost_events: a variable to store if events were lost (may be NULL)
5715f83c9d0fSSteven Rostedt *
5716f83c9d0fSSteven Rostedt * This will return the event that will be read next, but does
5717f83c9d0fSSteven Rostedt * not consume the data.
5718f83c9d0fSSteven Rostedt */
struct ring_buffer_event *
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;
	bool dolock;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

 again:
	local_irq_save(flags);
	dolock = rb_reader_lock(cpu_buffer);
	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		rb_advance_reader(cpu_buffer);
	rb_reader_unlock(cpu_buffer, dolock);
	local_irq_restore(flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}
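
/*
 * A minimal usage sketch (illustrative only): inspect the next event on
 * @cpu without consuming it. ring_buffer_event_data() and
 * ring_buffer_event_length() are the public accessors for the payload:
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, &lost);
 *	if (event)
 *		pr_info("next event: %u bytes at %llu (%lu lost)\n",
 *			ring_buffer_event_length(event), ts, lost);
 */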

/**
 * ring_buffer_iter_dropped - report if there are dropped events
 * @iter: The ring buffer iterator
 *
 * Returns true if there were dropped events since the last peek.
 */
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
{
	bool ret = iter->missed_events != 0;

	iter->missed_events = 0;
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

 again:
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the timestamp (may be NULL)
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event = NULL;
	unsigned long flags;
	bool dolock;

 again:
	/* might be called in atomic */
	preempt_disable();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	dolock = rb_reader_lock(cpu_buffer);

	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
	if (event) {
		cpu_buffer->lost_events = 0;
		rb_advance_reader(cpu_buffer);
	}

	rb_reader_unlock(cpu_buffer, dolock);
	local_irq_restore(flags);

 out:
	preempt_enable();

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
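
/*
 * A minimal usage sketch (illustrative only): drain all pending events
 * on one CPU with consuming reads. The handler process_event() is
 * hypothetical; any function taking a payload pointer and length works:
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *		process_event(ring_buffer_event_data(event),
 *			      ring_buffer_event_length(event));
 *		if (lost)
 *			pr_warn("%lu events were lost\n", lost);
 *	}
 */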

/**
 * ring_buffer_read_prepare - Prepare for a non-consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 * @flags: gfp flags to use for memory allocation
 *
 * This performs the initial preparations necessary to iterate
 * through the buffer. Memory is allocated, buffer resizing
 * is disabled, and the iterator pointer is returned to the caller.
 *
 * After a sequence of ring_buffer_read_prepare calls, the user is
 * expected to make at least one call to ring_buffer_read_prepare_sync.
 * Afterwards, ring_buffer_read_start is invoked to get things going
 * for real.
 *
 * This overall must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kzalloc(sizeof(*iter), flags);
	if (!iter)
		return NULL;

	/* Holds the entire event: data and meta data */
	iter->event_size = buffer->subbuf_size;
	iter->event = kmalloc(iter->event_size, flags);
	if (!iter->event) {
		kfree(iter);
		return NULL;
	}

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->resize_disabled);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);

/**
 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
 *
 * All previously invoked ring_buffer_read_prepare calls to prepare
 * iterators will be synchronized. Afterwards, ring_buffer_read_start
 * calls on those iterators are allowed.
 */
void
ring_buffer_read_prepare_sync(void)
{
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);

/**
 * ring_buffer_read_start - start a non-consuming read of the buffer
 * @iter: The iterator returned by ring_buffer_read_prepare
 *
 * This finalizes the startup of an iteration through the buffer.
 * The iterator comes from a call to ring_buffer_read_prepare and
 * an intervening ring_buffer_read_prepare_sync must have been
 * performed.
 *
 * Must be paired with ring_buffer_read_finish.
 */
void
ring_buffer_read_start(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	arch_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	arch_spin_unlock(&cpu_buffer->lock);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_prepare
 *
 * This re-enables resizing of the buffer, and frees the iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Use this opportunity to check the integrity of the ring buffer. */
	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->resize_disabled);
	kfree(iter->event);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
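
/*
 * A minimal sketch (illustrative only) of the full non-consuming read
 * sequence that the kernel-doc comments above describe, for one CPU.
 * process_event() is a hypothetical handler:
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	if (!iter)
 *		return -ENOMEM;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *
 *	while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *		process_event(ring_buffer_event_data(event),
 *			      ring_buffer_event_length(event));
 *		ring_buffer_iter_advance(iter);
 *	}
 *
 *	ring_buffer_read_finish(iter);
 */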

/**
 * ring_buffer_iter_advance - advance the iterator to the next location
 * @iter: The ring buffer iterator
 *
 * Move the iterator forward, so that the next read will return the
 * event that follows the current one.
 */
void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	rb_advance_iter(iter);

	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 * @cpu: The CPU to get ring buffer size from.
 */
unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
{
	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);

/**
 * ring_buffer_max_event_size - return the max data size of an event
 * @buffer: The ring buffer.
 *
 * Returns the maximum size an event can be.
 */
unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer)
{
	/* If abs timestamp is requested, events have a timestamp too */
	if (ring_buffer_time_stamp_abs(buffer))
		return buffer->max_data_size - RB_LEN_TIME_EXTEND;
	return buffer->max_data_size;
}
EXPORT_SYMBOL_GPL(ring_buffer_max_event_size);
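
/*
 * A minimal sketch (illustrative only): writers can use this to reject
 * a payload that can never fit in a single event before attempting to
 * reserve space for it:
 *
 *	if (size > ring_buffer_max_event_size(buffer))
 *		return -EINVAL;
 *	event = ring_buffer_lock_reserve(buffer, size);
 */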

static void rb_clear_buffer_page(struct buffer_page *page)
{
	local_set(&page->write, 0);
	local_set(&page->entries, 0);
	rb_init_page(page->page);
	page->read = 0;
}

static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct trace_buffer_meta *meta = cpu_buffer->meta_page;

	if (!meta)
		return;

	meta->reader.read = cpu_buffer->reader_page->read;
	meta->reader.id = cpu_buffer->reader_page->id;
	meta->reader.lost_events = cpu_buffer->lost_events;

	meta->entries = local_read(&cpu_buffer->entries);
	meta->overrun = local_read(&cpu_buffer->overrun);
	meta->read = cpu_buffer->read;

	/* Some archs do not have data cache coherency between kernel and user-space */
	flush_kernel_vmap_range(cpu_buffer->meta_page, PAGE_SIZE);
}

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *page;

	rb_head_page_deactivate(cpu_buffer);

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	rb_clear_buffer_page(cpu_buffer->head_page);
	list_for_each_entry(page, cpu_buffer->pages, list) {
		rb_clear_buffer_page(page);
	}

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);
	rb_clear_buffer_page(cpu_buffer->reader_page);

	local_set(&cpu_buffer->entries_bytes, 0);
	local_set(&cpu_buffer->overrun, 0);
	local_set(&cpu_buffer->commit_overrun, 0);
	local_set(&cpu_buffer->dropped_events, 0);
	local_set(&cpu_buffer->entries, 0);
	local_set(&cpu_buffer->committing, 0);
	local_set(&cpu_buffer->commits, 0);
	local_set(&cpu_buffer->pages_touched, 0);
	local_set(&cpu_buffer->pages_lost, 0);
	local_set(&cpu_buffer->pages_read, 0);
	cpu_buffer->last_pages_touch = 0;
	cpu_buffer->shortest_full = 0;
	cpu_buffer->read = 0;
	cpu_buffer->read_bytes = 0;

	rb_time_set(&cpu_buffer->write_stamp, 0);
	rb_time_set(&cpu_buffer->before_stamp, 0);

	memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp));

	cpu_buffer->lost_events = 0;
	cpu_buffer->last_overrun = 0;

	rb_head_page_activate(cpu_buffer);
	cpu_buffer->pages_removed = 0;

	if (cpu_buffer->mapped) {
		rb_update_meta_page(cpu_buffer);
		if (cpu_buffer->ring_meta) {
			struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta;
			meta->commit_buffer = meta->head_buffer;
		}
	}
}

/* Must have disabled the cpu buffer then done a synchronize_rcu */
static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
		goto out;

	arch_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	arch_spin_unlock(&cpu_buffer->lock);

 out:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	/* prevent another thread from changing buffer sizes */
	mutex_lock(&buffer->mutex);

	atomic_inc(&cpu_buffer->resize_disabled);
	atomic_inc(&cpu_buffer->record_disabled);

	/* Make sure all commits have finished */
	synchronize_rcu();

	reset_disabled_cpu_buffer(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
	atomic_dec(&cpu_buffer->resize_disabled);

	mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/* Flag to ensure proper resetting of atomic variables */
#define RESET_BIT	(1 << 30)

/**
 * ring_buffer_reset_online_cpus - reset the online per-CPU buffers of a ring buffer
 * @buffer: The ring buffer whose online per-CPU buffers to reset
 */
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* prevent another thread from changing buffer sizes */
	mutex_lock(&buffer->mutex);

	for_each_online_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];

		atomic_add(RESET_BIT, &cpu_buffer->resize_disabled);
		atomic_inc(&cpu_buffer->record_disabled);
	}

	/* Make sure all commits have finished */
	synchronize_rcu();

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];

		/*
		 * If a CPU came online during the synchronize_rcu(), then
		 * ignore it.
		 */
		if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT))
			continue;

		reset_disabled_cpu_buffer(cpu_buffer);

		atomic_dec(&cpu_buffer->record_disabled);
		atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
	}

	mutex_unlock(&buffer->mutex);
}

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct trace_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* prevent another thread from changing buffer sizes */
	mutex_lock(&buffer->mutex);

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];

		atomic_inc(&cpu_buffer->resize_disabled);
		atomic_inc(&cpu_buffer->record_disabled);
	}

	/* Make sure all commits have finished */
	synchronize_rcu();

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];

		reset_disabled_cpu_buffer(cpu_buffer);

		atomic_dec(&cpu_buffer->record_disabled);
		atomic_dec(&cpu_buffer->resize_disabled);
	}

	mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
bool ring_buffer_empty(struct trace_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	bool dolock;
	bool ret;
	int cpu;

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		local_irq_save(flags);
		dolock = rb_reader_lock(cpu_buffer);
		ret = rb_per_cpu_empty(cpu_buffer);
		rb_reader_unlock(cpu_buffer, dolock);
		local_irq_restore(flags);

		if (!ret)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	bool dolock;
	bool ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return true;

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	dolock = rb_reader_lock(cpu_buffer);
	ret = rb_per_cpu_empty(cpu_buffer);
	rb_reader_unlock(cpu_buffer, dolock);
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
			 struct trace_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	/* It's up to the callers to not try to swap mapped buffers */
	if (WARN_ON_ONCE(cpu_buffer_a->mapped || cpu_buffer_b->mapped)) {
		ret = -EBUSY;
		goto out;
	}

	/* At least make sure the two buffers are somewhat the same */
	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
		goto out;

	if (buffer_a->subbuf_order != buffer_b->subbuf_order)
		goto out;

	ret = -EAGAIN;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_rcu here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	ret = -EBUSY;
	if (local_read(&cpu_buffer_a->committing))
		goto out_dec;
	if (local_read(&cpu_buffer_b->committing))
		goto out_dec;

	/*
	 * When resize is in progress, we cannot swap it because
	 * it will mess the state of the cpu buffer.
	 */
	if (atomic_read(&buffer_a->resizing))
		goto out_dec;
	if (atomic_read(&buffer_b->resizing))
		goto out_dec;

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	ret = 0;

 out_dec:
	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);
 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
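
/*
 * A minimal sketch (illustrative only) of the snapshot pattern that the
 * kernel-doc above describes: swap the live per-CPU buffer with a spare
 * one. max_buffer is a hypothetical second trace_buffer owned by the
 * caller; -EAGAIN and -EBUSY report transient conditions (recording
 * disabled, a commit or a resize in flight), so the caller may retry:
 *
 *	int ret;
 *
 *	ret = ring_buffer_swap_cpu(max_buffer, buffer, cpu);
 *	if (ret)
 *		return ret;
 *
 * On success the events of @cpu now live in max_buffer and can be read
 * at leisure while tracing continues into the previously spare pages.
 */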

/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 * @cpu: the cpu buffer to allocate for.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or ERR_PTR
 */
struct buffer_data_read_page *
ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_data_read_page *bpage = NULL;
	unsigned long flags;
	struct page *page;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return ERR_PTR(-ENODEV);

	bpage = kzalloc(sizeof(*bpage), GFP_KERNEL);
	if (!bpage)
		return ERR_PTR(-ENOMEM);

	bpage->order = buffer->subbuf_order;
	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	arch_spin_lock(&cpu_buffer->lock);

	if (cpu_buffer->free_page) {
		bpage->data = cpu_buffer->free_page;
		cpu_buffer->free_page = NULL;
	}

	arch_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	if (bpage->data)
		goto out;

	page = alloc_pages_node(cpu_to_node(cpu),
				GFP_KERNEL | __GFP_NORETRY | __GFP_COMP | __GFP_ZERO,
				cpu_buffer->buffer->subbuf_order);
	if (!page) {
		kfree(bpage);
		return ERR_PTR(-ENOMEM);
	}

	bpage->data = page_address(page);

 out:
	rb_init_page(bpage->data);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @cpu: the cpu buffer the page came from
 * @data_page: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
				struct buffer_data_read_page *data_page)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_data_page *bpage = data_page->data;
	struct page *page = virt_to_page(bpage);
	unsigned long flags;

	if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
		return;

	cpu_buffer = buffer->buffers[cpu];

	/*
	 * If the page is still in use someplace else, or order of the page
	 * is different from the subbuffer order of the buffer -
	 * we can't reuse it
	 */
	if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order)
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&cpu_buffer->lock);

	if (!cpu_buffer->free_page) {
		cpu_buffer->free_page = bpage;
		bpage = NULL;
	}

	arch_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

 out:
	free_pages((unsigned long)bpage, data_page->order);
	kfree(data_page);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);

/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (IS_ERR(rpage))
 *		return PTR_ERR(rpage);
 *	ret = ring_buffer_read_page(buffer, rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(ring_buffer_read_page_data(rpage), ret);
 *	ring_buffer_free_read_page(buffer, cpu, rpage);
 *
 * When @full is set, the function will not succeed unless
 * the writer is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct trace_buffer *buffer,
			  struct buffer_data_read_page *data_page,
			  size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long missed_events;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we can not copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page || !data_page->data)
		goto out;
	if (data_page->order != buffer->subbuf_order)
		goto out;

	bpage = data_page->data;
	if (!bpage)
		goto out;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_size(reader);

	/* Check if any events were dropped */
	missed_events = cpu_buffer->lost_events;

	/*
	 * If this page has been partially read or
	 * if len is not big enough to read the rest of the page or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page ||
	    cpu_buffer->mapped) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		/*
		 * If a full page is expected, this can still be returned
		 * if there's been a previous partial read and the
		 * rest of the page can be read and the commit page is off
		 * the reader page.
		 */
		if (full &&
		    (!read || (len < (commit - read)) ||
		     cpu_buffer->reader_page == cpu_buffer->commit_page))
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		/* Always keep the time extend and data together */
		size = rb_event_ts_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			/*
			 * We need the size of one event, because
			 * rb_advance_reader only advances by one event,
			 * whereas rb_event_ts_length may include the size of
			 * one or two events.
			 * We have already ensured there's enough space if this
			 * is a time extend.
			 */
			size = rb_event_length(event);
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			if (rpos >= commit)
				break;

			event = rb_reader_event(cpu_buffer);
			/* Always keep the time extend and data together */
			size = rb_event_ts_length(event);
		} while (len >= size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += rb_page_entries(reader);
		cpu_buffer->read_bytes += rb_page_size(reader);

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = data_page->data;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		data_page->data = bpage;

		/*
		 * Use the real_end for the data size.
		 * This gives us a chance to store the lost events
		 * on the page.
		 */
		if (reader->real_end)
			local_set(&bpage->commit, reader->real_end);
	}
	ret = read;

	cpu_buffer->lost_events = 0;

	commit = local_read(&bpage->commit);
	/*
	 * Set a flag in the commit field if we lost events
	 */
	if (missed_events) {
		/*
		 * If there is room at the end of the page to save the
		 * missed events, then record it there.
		 */
		if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
			memcpy(&bpage->data[commit], &missed_events,
			       sizeof(missed_events));
			local_add(RB_MISSED_STORED, &bpage->commit);
			commit += sizeof(missed_events);
		}
		local_add(RB_MISSED_EVENTS, &bpage->commit);
	}

	/*
	 * This page may be off to user land. Zero it out here.
	 */
	if (commit < buffer->subbuf_size)
		memset(&bpage->data[commit], 0, buffer->subbuf_size - commit);

 out_unlock:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
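
/*
 * A minimal sketch (illustrative only): drain one CPU a full sub-buffer
 * at a time, reusing a single read page. write_out() is a hypothetical
 * sink for the extracted data:
 *
 *	struct buffer_data_read_page *rpage;
 *	size_t len = ring_buffer_subbuf_size_get(buffer);
 *	int ret;
 *
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (IS_ERR(rpage))
 *		return PTR_ERR(rpage);
 *	while ((ret = ring_buffer_read_page(buffer, rpage, len, cpu, 1)) >= 0)
 *		write_out(ring_buffer_read_page_data(rpage), len);
 *	ring_buffer_free_read_page(buffer, cpu, rpage);
 */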

/**
 * ring_buffer_read_page_data - get pointer to the data in the page.
 * @page: the page to get the data from
 *
 * Returns pointer to the actual data in this page.
 */
void *ring_buffer_read_page_data(struct buffer_data_read_page *page)
{
	return page->data;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page_data);

/**
 * ring_buffer_subbuf_size_get - get size of the sub buffer.
 * @buffer: the buffer to get the sub buffer size from
 *
 * Returns size of the sub buffer, in bytes.
 */
int ring_buffer_subbuf_size_get(struct trace_buffer *buffer)
{
	return buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
}
EXPORT_SYMBOL_GPL(ring_buffer_subbuf_size_get);
67012808e31eSTzvetomir Stoyanov (VMware)
67022808e31eSTzvetomir Stoyanov (VMware) /**
67032808e31eSTzvetomir Stoyanov (VMware) * ring_buffer_subbuf_order_get - get order of system sub pages in one buffer page.
67042808e31eSTzvetomir Stoyanov (VMware) * @buffer: The ring_buffer to get the system sub page order from
67052808e31eSTzvetomir Stoyanov (VMware) *
67062808e31eSTzvetomir Stoyanov (VMware) * By default, one ring buffer sub page equals one system page. This parameter
67072808e31eSTzvetomir Stoyanov (VMware) * is configurable per ring buffer. The size of a ring buffer sub page can be
67082808e31eSTzvetomir Stoyanov (VMware) * increased, but it must be a power-of-two multiple of the system page size.
67092808e31eSTzvetomir Stoyanov (VMware) *
67102808e31eSTzvetomir Stoyanov (VMware) * Returns the order of buffer sub page size, in system pages:
67112808e31eSTzvetomir Stoyanov (VMware) * 0 means the sub buffer size is 1 system page and so forth.
67122808e31eSTzvetomir Stoyanov (VMware) * In case of an error < 0 is returned.
67132808e31eSTzvetomir Stoyanov (VMware) */
67142808e31eSTzvetomir Stoyanov (VMware) int ring_buffer_subbuf_order_get(struct trace_buffer *buffer)
67152808e31eSTzvetomir Stoyanov (VMware) {
67162808e31eSTzvetomir Stoyanov (VMware) if (!buffer)
67172808e31eSTzvetomir Stoyanov (VMware) return -EINVAL;
67182808e31eSTzvetomir Stoyanov (VMware)
67192808e31eSTzvetomir Stoyanov (VMware) return buffer->subbuf_order;
67202808e31eSTzvetomir Stoyanov (VMware) }
67212808e31eSTzvetomir Stoyanov (VMware) EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_get);
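/*
 * Illustrative sketch: how the two getters above relate. Since
 * ring_buffer_subbuf_order_set() below computes the payload size as
 * (PAGE_SIZE << order) - BUF_PAGE_HDR_SIZE, the size getter returns the
 * whole sub buffer, header included:
 *
 *	int order = ring_buffer_subbuf_order_get(buffer);
 *	int size  = ring_buffer_subbuf_size_get(buffer);
 *
 *	if (order >= 0)
 *		WARN_ON(size != (PAGE_SIZE << order));
 */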
67222808e31eSTzvetomir Stoyanov (VMware)
67232808e31eSTzvetomir Stoyanov (VMware) /**
67242808e31eSTzvetomir Stoyanov (VMware) * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
67252808e31eSTzvetomir Stoyanov (VMware) * @buffer: The ring_buffer to set the new page size.
67262808e31eSTzvetomir Stoyanov (VMware) * @order: Order of the system pages in one sub buffer page
67272808e31eSTzvetomir Stoyanov (VMware) *
67282808e31eSTzvetomir Stoyanov (VMware) * By default, one ring buffer page equals one system page. This API can be
67292808e31eSTzvetomir Stoyanov (VMware) * used to set a new size for the ring buffer pages. The size must be a
67302808e31eSTzvetomir Stoyanov (VMware) * power-of-two multiple of the system page size, which is why the input
67312808e31eSTzvetomir Stoyanov (VMware) * parameter @order is the order of system pages allocated for one ring buffer page:
67322808e31eSTzvetomir Stoyanov (VMware) * 0 - 1 system page
67332808e31eSTzvetomir Stoyanov (VMware) * 1 - 2 system pages
67342808e31eSTzvetomir Stoyanov (VMware) * 2 - 4 system pages
67352808e31eSTzvetomir Stoyanov (VMware) * ...
67362808e31eSTzvetomir Stoyanov (VMware) *
67372808e31eSTzvetomir Stoyanov (VMware) * Returns 0 on success or < 0 in case of an error.
67382808e31eSTzvetomir Stoyanov (VMware) */
67392808e31eSTzvetomir Stoyanov (VMware) int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
67402808e31eSTzvetomir Stoyanov (VMware) {
67418e7b58c2SSteven Rostedt (Google) struct ring_buffer_per_cpu *cpu_buffer;
67428e7b58c2SSteven Rostedt (Google) struct buffer_page *bpage, *tmp;
6743f9b94daaSTzvetomir Stoyanov (VMware) int old_order, old_size;
6744f9b94daaSTzvetomir Stoyanov (VMware) int nr_pages;
67452808e31eSTzvetomir Stoyanov (VMware) int psize;
6746f9b94daaSTzvetomir Stoyanov (VMware) int err;
6747f9b94daaSTzvetomir Stoyanov (VMware) int cpu;
67482808e31eSTzvetomir Stoyanov (VMware)
67492808e31eSTzvetomir Stoyanov (VMware) if (!buffer || order < 0)
67502808e31eSTzvetomir Stoyanov (VMware) return -EINVAL;
67512808e31eSTzvetomir Stoyanov (VMware)
67522808e31eSTzvetomir Stoyanov (VMware) if (buffer->subbuf_order == order)
67532808e31eSTzvetomir Stoyanov (VMware) return 0;
67542808e31eSTzvetomir Stoyanov (VMware)
67552808e31eSTzvetomir Stoyanov (VMware) psize = (1 << order) * PAGE_SIZE;
67562808e31eSTzvetomir Stoyanov (VMware) if (psize <= BUF_PAGE_HDR_SIZE)
67572808e31eSTzvetomir Stoyanov (VMware) return -EINVAL;
67582808e31eSTzvetomir Stoyanov (VMware)
6759e78fb4eaSSteven Rostedt (Google) /* Size of a subbuf cannot be greater than the write counter */
6760e78fb4eaSSteven Rostedt (Google) if (psize > RB_WRITE_MASK + 1)
6761e78fb4eaSSteven Rostedt (Google) return -EINVAL;
6762e78fb4eaSSteven Rostedt (Google)
6763f9b94daaSTzvetomir Stoyanov (VMware) old_order = buffer->subbuf_order;
6764f9b94daaSTzvetomir Stoyanov (VMware) old_size = buffer->subbuf_size;
6765f9b94daaSTzvetomir Stoyanov (VMware)
6766f9b94daaSTzvetomir Stoyanov (VMware) /* prevent another thread from changing buffer sizes */
6767f9b94daaSTzvetomir Stoyanov (VMware) mutex_lock(&buffer->mutex);
6768f9b94daaSTzvetomir Stoyanov (VMware) atomic_inc(&buffer->record_disabled);
6769f9b94daaSTzvetomir Stoyanov (VMware)
6770f9b94daaSTzvetomir Stoyanov (VMware) /* Make sure all commits have finished */
6771f9b94daaSTzvetomir Stoyanov (VMware) synchronize_rcu();
6772f9b94daaSTzvetomir Stoyanov (VMware)
67732808e31eSTzvetomir Stoyanov (VMware) buffer->subbuf_order = order;
67742808e31eSTzvetomir Stoyanov (VMware) buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE;
67752808e31eSTzvetomir Stoyanov (VMware)
6776f9b94daaSTzvetomir Stoyanov (VMware) /* Make sure all new buffers are allocated, before deleting the old ones */
6777f9b94daaSTzvetomir Stoyanov (VMware) for_each_buffer_cpu(buffer, cpu) {
67788e7b58c2SSteven Rostedt (Google)
6779f9b94daaSTzvetomir Stoyanov (VMware) if (!cpumask_test_cpu(cpu, buffer->cpumask))
6780f9b94daaSTzvetomir Stoyanov (VMware) continue;
6781f9b94daaSTzvetomir Stoyanov (VMware)
67828e7b58c2SSteven Rostedt (Google) cpu_buffer = buffer->buffers[cpu];
67838e7b58c2SSteven Rostedt (Google)
6784117c3920SVincent Donnefort if (cpu_buffer->mapped) {
6785117c3920SVincent Donnefort err = -EBUSY;
6786117c3920SVincent Donnefort goto error;
6787117c3920SVincent Donnefort }
6788117c3920SVincent Donnefort
6789353cc219SSteven Rostedt (Google) /* Update the number of pages to match the new size */
6790353cc219SSteven Rostedt (Google) nr_pages = old_size * buffer->buffers[cpu]->nr_pages;
6791353cc219SSteven Rostedt (Google) nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size);
6792353cc219SSteven Rostedt (Google)
67938e7b58c2SSteven Rostedt (Google) /* we need a minimum of two pages */
67948e7b58c2SSteven Rostedt (Google) if (nr_pages < 2)
67958e7b58c2SSteven Rostedt (Google) nr_pages = 2;
67968e7b58c2SSteven Rostedt (Google)
67978e7b58c2SSteven Rostedt (Google) cpu_buffer->nr_pages_to_update = nr_pages;
67988e7b58c2SSteven Rostedt (Google)
67998e7b58c2SSteven Rostedt (Google) /* Include the reader page */
68008e7b58c2SSteven Rostedt (Google) nr_pages++;
68018e7b58c2SSteven Rostedt (Google)
68028e7b58c2SSteven Rostedt (Google) /* Allocate the new size buffer */
68038e7b58c2SSteven Rostedt (Google) INIT_LIST_HEAD(&cpu_buffer->new_pages);
68048e7b58c2SSteven Rostedt (Google) if (__rb_allocate_pages(cpu_buffer, nr_pages,
68058e7b58c2SSteven Rostedt (Google) &cpu_buffer->new_pages)) {
68068e7b58c2SSteven Rostedt (Google) /* not enough memory for new pages */
6807f9b94daaSTzvetomir Stoyanov (VMware) err = -ENOMEM;
6808f9b94daaSTzvetomir Stoyanov (VMware) goto error;
6809f9b94daaSTzvetomir Stoyanov (VMware) }
6810f9b94daaSTzvetomir Stoyanov (VMware) }
6811f9b94daaSTzvetomir Stoyanov (VMware)
6812f9b94daaSTzvetomir Stoyanov (VMware) for_each_buffer_cpu(buffer, cpu) {
681309661f75SPetr Pavlu struct buffer_data_page *old_free_data_page;
681409661f75SPetr Pavlu struct list_head old_pages;
681509661f75SPetr Pavlu unsigned long flags;
68168e7b58c2SSteven Rostedt (Google)
6817f9b94daaSTzvetomir Stoyanov (VMware) if (!cpumask_test_cpu(cpu, buffer->cpumask))
6818f9b94daaSTzvetomir Stoyanov (VMware) continue;
6819f9b94daaSTzvetomir Stoyanov (VMware)
68208e7b58c2SSteven Rostedt (Google) cpu_buffer = buffer->buffers[cpu];
68218e7b58c2SSteven Rostedt (Google)
682209661f75SPetr Pavlu raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
682309661f75SPetr Pavlu
68248e7b58c2SSteven Rostedt (Google) /* Clear the head bit to make the link list normal to read */
68258e7b58c2SSteven Rostedt (Google) /* Clear the head bit so the linked list can be read normally */
68268e7b58c2SSteven Rostedt (Google)
682709661f75SPetr Pavlu /*
682809661f75SPetr Pavlu * Collect buffers from the cpu_buffer pages list and the
682909661f75SPetr Pavlu * reader_page on old_pages, so they can be freed later when not
683009661f75SPetr Pavlu * under a spinlock. The pages list is a linked list with no
683109661f75SPetr Pavlu * head, adding old_pages turns it into a regular list with
683209661f75SPetr Pavlu * old_pages being the head.
683309661f75SPetr Pavlu */
683409661f75SPetr Pavlu list_add(&old_pages, cpu_buffer->pages);
683509661f75SPetr Pavlu list_add(&cpu_buffer->reader_page->list, &old_pages);
68368e7b58c2SSteven Rostedt (Google)
68378e7b58c2SSteven Rostedt (Google) /* One page was allocated for the reader page */
68388e7b58c2SSteven Rostedt (Google) cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
68398e7b58c2SSteven Rostedt (Google) struct buffer_page, list);
68408e7b58c2SSteven Rostedt (Google) list_del_init(&cpu_buffer->reader_page->list);
68418e7b58c2SSteven Rostedt (Google)
684209661f75SPetr Pavlu /* Install the new pages, remove the head from the list */
68438e7b58c2SSteven Rostedt (Google) cpu_buffer->pages = cpu_buffer->new_pages.next;
684409661f75SPetr Pavlu list_del_init(&cpu_buffer->new_pages);
6845b237e1f7SPetr Pavlu cpu_buffer->cnt++;
68468e7b58c2SSteven Rostedt (Google)
68478e7b58c2SSteven Rostedt (Google) cpu_buffer->head_page
68488e7b58c2SSteven Rostedt (Google) = list_entry(cpu_buffer->pages, struct buffer_page, list);
68498e7b58c2SSteven Rostedt (Google) cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
68508e7b58c2SSteven Rostedt (Google)
68518e7b58c2SSteven Rostedt (Google) cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
68528e7b58c2SSteven Rostedt (Google) cpu_buffer->nr_pages_to_update = 0;
68538e7b58c2SSteven Rostedt (Google)
685409661f75SPetr Pavlu old_free_data_page = cpu_buffer->free_page;
68558e7b58c2SSteven Rostedt (Google) cpu_buffer->free_page = NULL;
68568e7b58c2SSteven Rostedt (Google)
68578e7b58c2SSteven Rostedt (Google) rb_head_page_activate(cpu_buffer);
68588e7b58c2SSteven Rostedt (Google)
685909661f75SPetr Pavlu raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
686009661f75SPetr Pavlu
686109661f75SPetr Pavlu /* Free old sub buffers */
686209661f75SPetr Pavlu list_for_each_entry_safe(bpage, tmp, &old_pages, list) {
686309661f75SPetr Pavlu list_del_init(&bpage->list);
686409661f75SPetr Pavlu free_buffer_page(bpage);
686509661f75SPetr Pavlu }
686609661f75SPetr Pavlu free_pages((unsigned long)old_free_data_page, old_order);
686709661f75SPetr Pavlu
68688e7b58c2SSteven Rostedt (Google) rb_check_pages(cpu_buffer);
6869f9b94daaSTzvetomir Stoyanov (VMware) }
6870f9b94daaSTzvetomir Stoyanov (VMware)
6871f9b94daaSTzvetomir Stoyanov (VMware) atomic_dec(&buffer->record_disabled);
6872f9b94daaSTzvetomir Stoyanov (VMware) mutex_unlock(&buffer->mutex);
6873f9b94daaSTzvetomir Stoyanov (VMware)
68742808e31eSTzvetomir Stoyanov (VMware) return 0;
6875f9b94daaSTzvetomir Stoyanov (VMware)
6876f9b94daaSTzvetomir Stoyanov (VMware) error:
6877f9b94daaSTzvetomir Stoyanov (VMware) buffer->subbuf_order = old_order;
6878f9b94daaSTzvetomir Stoyanov (VMware) buffer->subbuf_size = old_size;
6879f9b94daaSTzvetomir Stoyanov (VMware)
6880f9b94daaSTzvetomir Stoyanov (VMware) atomic_dec(&buffer->record_disabled);
6881f9b94daaSTzvetomir Stoyanov (VMware) mutex_unlock(&buffer->mutex);
6882f9b94daaSTzvetomir Stoyanov (VMware)
6883f9b94daaSTzvetomir Stoyanov (VMware) for_each_buffer_cpu(buffer, cpu) {
68848e7b58c2SSteven Rostedt (Google) cpu_buffer = buffer->buffers[cpu];
68858e7b58c2SSteven Rostedt (Google)
68868e7b58c2SSteven Rostedt (Google) if (!cpu_buffer->nr_pages_to_update)
6887f9b94daaSTzvetomir Stoyanov (VMware) continue;
68888e7b58c2SSteven Rostedt (Google)
68898e7b58c2SSteven Rostedt (Google) list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) {
68908e7b58c2SSteven Rostedt (Google) list_del_init(&bpage->list);
68918e7b58c2SSteven Rostedt (Google) free_buffer_page(bpage);
6892f9b94daaSTzvetomir Stoyanov (VMware) }
68938e7b58c2SSteven Rostedt (Google) }
6894f9b94daaSTzvetomir Stoyanov (VMware)
6895f9b94daaSTzvetomir Stoyanov (VMware) return err;
68962808e31eSTzvetomir Stoyanov (VMware) }
68972808e31eSTzvetomir Stoyanov (VMware) EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_set);
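/*
 * Illustrative sketch of resizing the sub buffers of a buffer obtained from
 * ring_buffer_alloc() (a minimal example; the error cases are the ones
 * handled in the function above):
 *
 *	int err;
 *
 *	err = ring_buffer_subbuf_order_set(buffer, 1); // 2 system pages each
 *	if (err) // -EINVAL, -EBUSY (a CPU buffer is mapped) or -ENOMEM
 *		pr_err("subbuf resize failed: %d\n", err);
 */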
68982808e31eSTzvetomir Stoyanov (VMware)
6899117c3920SVincent Donnefort static int rb_alloc_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
6900117c3920SVincent Donnefort {
6901117c3920SVincent Donnefort struct page *page;
6902117c3920SVincent Donnefort
6903117c3920SVincent Donnefort if (cpu_buffer->meta_page)
6904117c3920SVincent Donnefort return 0;
6905117c3920SVincent Donnefort
6906117c3920SVincent Donnefort page = alloc_page(GFP_USER | __GFP_ZERO);
6907117c3920SVincent Donnefort if (!page)
6908117c3920SVincent Donnefort return -ENOMEM;
6909117c3920SVincent Donnefort
6910117c3920SVincent Donnefort cpu_buffer->meta_page = page_to_virt(page);
6911117c3920SVincent Donnefort
6912117c3920SVincent Donnefort return 0;
6913117c3920SVincent Donnefort }
6914117c3920SVincent Donnefort
6915117c3920SVincent Donnefort static void rb_free_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
6916117c3920SVincent Donnefort {
6917117c3920SVincent Donnefort unsigned long addr = (unsigned long)cpu_buffer->meta_page;
6918117c3920SVincent Donnefort
6919117c3920SVincent Donnefort free_page(addr);
6920117c3920SVincent Donnefort cpu_buffer->meta_page = NULL;
6921117c3920SVincent Donnefort }
6922117c3920SVincent Donnefort
6923117c3920SVincent Donnefort static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
6924117c3920SVincent Donnefort unsigned long *subbuf_ids)
6925117c3920SVincent Donnefort {
6926117c3920SVincent Donnefort struct trace_buffer_meta *meta = cpu_buffer->meta_page;
6927117c3920SVincent Donnefort unsigned int nr_subbufs = cpu_buffer->nr_pages + 1;
6928117c3920SVincent Donnefort struct buffer_page *first_subbuf, *subbuf;
6929117c3920SVincent Donnefort int id = 0;
6930117c3920SVincent Donnefort
6931117c3920SVincent Donnefort subbuf_ids[id] = (unsigned long)cpu_buffer->reader_page->page;
6932117c3920SVincent Donnefort cpu_buffer->reader_page->id = id++;
6933117c3920SVincent Donnefort
6934117c3920SVincent Donnefort first_subbuf = subbuf = rb_set_head_page(cpu_buffer);
6935117c3920SVincent Donnefort do {
6936117c3920SVincent Donnefort if (WARN_ON(id >= nr_subbufs))
6937117c3920SVincent Donnefort break;
6938117c3920SVincent Donnefort
6939117c3920SVincent Donnefort subbuf_ids[id] = (unsigned long)subbuf->page;
6940117c3920SVincent Donnefort subbuf->id = id;
6941117c3920SVincent Donnefort
6942117c3920SVincent Donnefort rb_inc_page(&subbuf);
6943117c3920SVincent Donnefort id++;
6944117c3920SVincent Donnefort } while (subbuf != first_subbuf);
6945117c3920SVincent Donnefort
6946117c3920SVincent Donnefort /* install subbuf ID to kern VA translation */
6947117c3920SVincent Donnefort cpu_buffer->subbuf_ids = subbuf_ids;
6948117c3920SVincent Donnefort
6949117c3920SVincent Donnefort meta->meta_struct_len = sizeof(*meta);
6950117c3920SVincent Donnefort meta->nr_subbufs = nr_subbufs;
6951117c3920SVincent Donnefort meta->subbuf_size = cpu_buffer->buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
6952eb2dcde9SVincent Donnefort meta->meta_page_size = meta->subbuf_size;
6953117c3920SVincent Donnefort
6954117c3920SVincent Donnefort rb_update_meta_page(cpu_buffer);
6955117c3920SVincent Donnefort }
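/*
 * Illustrative note: once the IDs are set up, subbuf_ids[] gives a direct
 * sub-buffer ID to kernel virtual address translation, which is exactly how
 * __rb_map_vma() below finds the pages to insert:
 *
 *	void *va = (void *)cpu_buffer->subbuf_ids[id];
 */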
6956117c3920SVincent Donnefort
6957117c3920SVincent Donnefort static struct ring_buffer_per_cpu *
6958117c3920SVincent Donnefort rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu)
6959117c3920SVincent Donnefort {
6960117c3920SVincent Donnefort struct ring_buffer_per_cpu *cpu_buffer;
6961117c3920SVincent Donnefort
6962117c3920SVincent Donnefort if (!cpumask_test_cpu(cpu, buffer->cpumask))
6963117c3920SVincent Donnefort return ERR_PTR(-EINVAL);
6964117c3920SVincent Donnefort
6965117c3920SVincent Donnefort cpu_buffer = buffer->buffers[cpu];
6966117c3920SVincent Donnefort
6967117c3920SVincent Donnefort mutex_lock(&cpu_buffer->mapping_lock);
6968117c3920SVincent Donnefort
6969dd4900d9SSteven Rostedt (Google) if (!cpu_buffer->user_mapped) {
6970117c3920SVincent Donnefort mutex_unlock(&cpu_buffer->mapping_lock);
6971117c3920SVincent Donnefort return ERR_PTR(-ENODEV);
6972117c3920SVincent Donnefort }
6973117c3920SVincent Donnefort
6974117c3920SVincent Donnefort return cpu_buffer;
6975117c3920SVincent Donnefort }
6976117c3920SVincent Donnefort
6977117c3920SVincent Donnefort static void rb_put_mapped_buffer(struct ring_buffer_per_cpu *cpu_buffer)
6978117c3920SVincent Donnefort {
6979117c3920SVincent Donnefort mutex_unlock(&cpu_buffer->mapping_lock);
6980117c3920SVincent Donnefort }
6981117c3920SVincent Donnefort
6982117c3920SVincent Donnefort /*
6983117c3920SVincent Donnefort * Fast path for ring_buffer_(un)map(). Called whenever the meta-page doesn't
6984117c3920SVincent Donnefort * need to be set up or torn down.
6985117c3920SVincent Donnefort */
6986117c3920SVincent Donnefort static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer,
6987117c3920SVincent Donnefort bool inc)
6988117c3920SVincent Donnefort {
6989117c3920SVincent Donnefort unsigned long flags;
6990117c3920SVincent Donnefort
6991117c3920SVincent Donnefort lockdep_assert_held(&cpu_buffer->mapping_lock);
6992117c3920SVincent Donnefort
6993dd4900d9SSteven Rostedt (Google) /* mapped is always greater or equal to user_mapped */
6994dd4900d9SSteven Rostedt (Google) if (WARN_ON(cpu_buffer->mapped < cpu_buffer->user_mapped))
6995dd4900d9SSteven Rostedt (Google) return -EINVAL;
6996dd4900d9SSteven Rostedt (Google)
6997117c3920SVincent Donnefort if (inc && cpu_buffer->mapped == UINT_MAX)
6998117c3920SVincent Donnefort return -EBUSY;
6999117c3920SVincent Donnefort
7000dd4900d9SSteven Rostedt (Google) if (WARN_ON(!inc && cpu_buffer->user_mapped == 0))
7001117c3920SVincent Donnefort return -EINVAL;
7002117c3920SVincent Donnefort
7003117c3920SVincent Donnefort mutex_lock(&cpu_buffer->buffer->mutex);
7004117c3920SVincent Donnefort raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7005117c3920SVincent Donnefort
7006dd4900d9SSteven Rostedt (Google) if (inc) {
7007dd4900d9SSteven Rostedt (Google) cpu_buffer->user_mapped++;
7008117c3920SVincent Donnefort cpu_buffer->mapped++;
7009dd4900d9SSteven Rostedt (Google) } else {
7010dd4900d9SSteven Rostedt (Google) cpu_buffer->user_mapped--;
7011117c3920SVincent Donnefort cpu_buffer->mapped--;
7012dd4900d9SSteven Rostedt (Google) }
7013117c3920SVincent Donnefort
7014117c3920SVincent Donnefort raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7015117c3920SVincent Donnefort mutex_unlock(&cpu_buffer->buffer->mutex);
7016117c3920SVincent Donnefort
7017117c3920SVincent Donnefort return 0;
7018117c3920SVincent Donnefort }
7019117c3920SVincent Donnefort
7020117c3920SVincent Donnefort /*
7021117c3920SVincent Donnefort * +--------------+ pgoff == 0
7022117c3920SVincent Donnefort * | meta page | (zero-page padded up to a full sub-buffer)
7023117c3920SVincent Donnefort * +--------------+ pgoff == (1 << subbuf_order)
7024117c3920SVincent Donnefort * | subbuffer 0 |
7025117c3920SVincent Donnefort * | |
7026117c3920SVincent Donnefort * +--------------+ pgoff == (2 * (1 << subbuf_order))
7027117c3920SVincent Donnefort * | subbuffer 1 |
7028117c3920SVincent Donnefort * | |
7029117c3920SVincent Donnefort * ...
7030117c3920SVincent Donnefort */
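/*
 * Illustrative sketch (user-space view of the layout above, using the
 * meta-page fields filled in by rb_setup_ids_meta_page()): since the meta
 * page is padded out to a full sub-buffer, sub-buffer @s starts at the byte
 * offset
 *
 *	off = meta->meta_page_size + s * meta->subbuf_size;
 *
 * within the mapping.
 */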
7031117c3920SVincent Donnefort #ifdef CONFIG_MMU
7032117c3920SVincent Donnefort static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
7033117c3920SVincent Donnefort struct vm_area_struct *vma)
7034117c3920SVincent Donnefort {
7035b96c3125SThorsten Blum unsigned long nr_subbufs, nr_pages, nr_vma_pages, pgoff = vma->vm_pgoff;
7036117c3920SVincent Donnefort unsigned int subbuf_pages, subbuf_order;
7037117c3920SVincent Donnefort struct page **pages;
7038117c3920SVincent Donnefort int p = 0, s = 0;
7039117c3920SVincent Donnefort int err;
7040117c3920SVincent Donnefort
7041117c3920SVincent Donnefort /* Refuse MAP_PRIVATE or writable mappings */
7042117c3920SVincent Donnefort if (vma->vm_flags & VM_WRITE || vma->vm_flags & VM_EXEC ||
7043117c3920SVincent Donnefort !(vma->vm_flags & VM_MAYSHARE))
7044117c3920SVincent Donnefort return -EPERM;
7045117c3920SVincent Donnefort
7046eb2dcde9SVincent Donnefort subbuf_order = cpu_buffer->buffer->subbuf_order;
7047eb2dcde9SVincent Donnefort subbuf_pages = 1 << subbuf_order;
7048eb2dcde9SVincent Donnefort
7049eb2dcde9SVincent Donnefort if (subbuf_order && pgoff % subbuf_pages)
7050eb2dcde9SVincent Donnefort return -EINVAL;
7051eb2dcde9SVincent Donnefort
7052117c3920SVincent Donnefort /*
7053117c3920SVincent Donnefort * Make sure the mapping cannot become writable later. Also tell the VM
7054117c3920SVincent Donnefort * to not touch these pages (VM_DONTCOPY | VM_DONTEXPAND).
7055117c3920SVincent Donnefort */
7056117c3920SVincent Donnefort vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP,
7057117c3920SVincent Donnefort VM_MAYWRITE);
7058117c3920SVincent Donnefort
7059117c3920SVincent Donnefort lockdep_assert_held(&cpu_buffer->mapping_lock);
7060117c3920SVincent Donnefort
7061117c3920SVincent Donnefort nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */
7062c58a812cSEdward Adam Davis nr_pages = ((nr_subbufs + 1) << subbuf_order); /* + meta-page */
7063c58a812cSEdward Adam Davis if (nr_pages <= pgoff)
7064c58a812cSEdward Adam Davis return -EINVAL;
7065c58a812cSEdward Adam Davis
7066c58a812cSEdward Adam Davis nr_pages -= pgoff;
7067117c3920SVincent Donnefort
7068b96c3125SThorsten Blum nr_vma_pages = vma_pages(vma);
7069b96c3125SThorsten Blum if (!nr_vma_pages || nr_vma_pages > nr_pages)
7070117c3920SVincent Donnefort return -EINVAL;
7071117c3920SVincent Donnefort
7072b96c3125SThorsten Blum nr_pages = nr_vma_pages;
7073117c3920SVincent Donnefort
7074117c3920SVincent Donnefort pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
7075117c3920SVincent Donnefort if (!pages)
7076117c3920SVincent Donnefort return -ENOMEM;
7077117c3920SVincent Donnefort
7078117c3920SVincent Donnefort if (!pgoff) {
7079eb2dcde9SVincent Donnefort unsigned long meta_page_padding;
7080eb2dcde9SVincent Donnefort
7081117c3920SVincent Donnefort pages[p++] = virt_to_page(cpu_buffer->meta_page);
7082117c3920SVincent Donnefort
7083117c3920SVincent Donnefort /*
7084eb2dcde9SVincent Donnefort * Pad with the zero-page to align the meta-page with the
7085eb2dcde9SVincent Donnefort * sub-buffers.
7086117c3920SVincent Donnefort */
7087eb2dcde9SVincent Donnefort meta_page_padding = subbuf_pages - 1;
7088eb2dcde9SVincent Donnefort while (meta_page_padding-- && p < nr_pages) {
7089eb2dcde9SVincent Donnefort unsigned long __maybe_unused zero_addr =
7090eb2dcde9SVincent Donnefort vma->vm_start + (PAGE_SIZE * p);
7091eb2dcde9SVincent Donnefort
7092eb2dcde9SVincent Donnefort pages[p++] = ZERO_PAGE(zero_addr);
7093eb2dcde9SVincent Donnefort }
7094117c3920SVincent Donnefort } else {
7095117c3920SVincent Donnefort /* Skip the meta-page */
7096eb2dcde9SVincent Donnefort pgoff -= subbuf_pages;
7097117c3920SVincent Donnefort
7098117c3920SVincent Donnefort s += pgoff / subbuf_pages;
7099117c3920SVincent Donnefort }
7100117c3920SVincent Donnefort
7101117c3920SVincent Donnefort while (p < nr_pages) {
71026e31b759SJeongjun Park struct page *page;
7103117c3920SVincent Donnefort int off = 0;
7104117c3920SVincent Donnefort
7105117c3920SVincent Donnefort if (WARN_ON_ONCE(s >= nr_subbufs)) {
7106117c3920SVincent Donnefort err = -EINVAL;
7107117c3920SVincent Donnefort goto out;
7108117c3920SVincent Donnefort }
7109117c3920SVincent Donnefort
71106e31b759SJeongjun Park page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]);
71116e31b759SJeongjun Park
7112117c3920SVincent Donnefort for (; off < (1 << (subbuf_order)); off++, page++) {
7113117c3920SVincent Donnefort if (p >= nr_pages)
7114117c3920SVincent Donnefort break;
7115117c3920SVincent Donnefort
7116117c3920SVincent Donnefort pages[p++] = page;
7117117c3920SVincent Donnefort }
7118117c3920SVincent Donnefort s++;
7119117c3920SVincent Donnefort }
7120117c3920SVincent Donnefort
7121117c3920SVincent Donnefort err = vm_insert_pages(vma, vma->vm_start, pages, &nr_pages);
7122117c3920SVincent Donnefort
7123117c3920SVincent Donnefort out:
7124117c3920SVincent Donnefort kfree(pages);
7125117c3920SVincent Donnefort
7126117c3920SVincent Donnefort return err;
7127117c3920SVincent Donnefort }
7128117c3920SVincent Donnefort #else
7129117c3920SVincent Donnefort static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
7130117c3920SVincent Donnefort struct vm_area_struct *vma)
7131117c3920SVincent Donnefort {
7132117c3920SVincent Donnefort return -EOPNOTSUPP;
7133117c3920SVincent Donnefort }
7134117c3920SVincent Donnefort #endif
7135117c3920SVincent Donnefort
7136117c3920SVincent Donnefort int ring_buffer_map(struct trace_buffer *buffer, int cpu,
7137117c3920SVincent Donnefort struct vm_area_struct *vma)
7138117c3920SVincent Donnefort {
7139117c3920SVincent Donnefort struct ring_buffer_per_cpu *cpu_buffer;
7140117c3920SVincent Donnefort unsigned long flags, *subbuf_ids;
7141117c3920SVincent Donnefort int err = 0;
7142117c3920SVincent Donnefort
7143117c3920SVincent Donnefort if (!cpumask_test_cpu(cpu, buffer->cpumask))
7144117c3920SVincent Donnefort return -EINVAL;
7145117c3920SVincent Donnefort
7146117c3920SVincent Donnefort cpu_buffer = buffer->buffers[cpu];
7147117c3920SVincent Donnefort
7148117c3920SVincent Donnefort mutex_lock(&cpu_buffer->mapping_lock);
7149117c3920SVincent Donnefort
7150dd4900d9SSteven Rostedt (Google) if (cpu_buffer->user_mapped) {
7151117c3920SVincent Donnefort err = __rb_map_vma(cpu_buffer, vma);
7152117c3920SVincent Donnefort if (!err)
7153117c3920SVincent Donnefort err = __rb_inc_dec_mapped(cpu_buffer, true);
7154117c3920SVincent Donnefort mutex_unlock(&cpu_buffer->mapping_lock);
7155117c3920SVincent Donnefort return err;
7156117c3920SVincent Donnefort }
7157117c3920SVincent Donnefort
7158117c3920SVincent Donnefort /* prevent another thread from changing buffer/sub-buffer sizes */
7159117c3920SVincent Donnefort mutex_lock(&buffer->mutex);
7160117c3920SVincent Donnefort
7161117c3920SVincent Donnefort err = rb_alloc_meta_page(cpu_buffer);
7162117c3920SVincent Donnefort if (err)
7163117c3920SVincent Donnefort goto unlock;
7164117c3920SVincent Donnefort
7165117c3920SVincent Donnefort /* subbuf_ids include the reader while nr_pages does not */
7166117c3920SVincent Donnefort subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1, sizeof(*subbuf_ids), GFP_KERNEL);
7167117c3920SVincent Donnefort if (!subbuf_ids) {
7168117c3920SVincent Donnefort rb_free_meta_page(cpu_buffer);
7169117c3920SVincent Donnefort err = -ENOMEM;
7170117c3920SVincent Donnefort goto unlock;
7171117c3920SVincent Donnefort }
7172117c3920SVincent Donnefort
7173117c3920SVincent Donnefort atomic_inc(&cpu_buffer->resize_disabled);
7174117c3920SVincent Donnefort
7175117c3920SVincent Donnefort /*
7176117c3920SVincent Donnefort * Lock all readers to block any subbuf swap until the subbuf IDs are
7177117c3920SVincent Donnefort * assigned.
7178117c3920SVincent Donnefort */
7179117c3920SVincent Donnefort raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7180117c3920SVincent Donnefort rb_setup_ids_meta_page(cpu_buffer, subbuf_ids);
7181dd4900d9SSteven Rostedt (Google)
7182117c3920SVincent Donnefort raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7183117c3920SVincent Donnefort
7184117c3920SVincent Donnefort err = __rb_map_vma(cpu_buffer, vma);
7185117c3920SVincent Donnefort if (!err) {
7186117c3920SVincent Donnefort raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7187dd4900d9SSteven Rostedt (Google) /* This is the first time it is mapped by user */
7188dd4900d9SSteven Rostedt (Google) cpu_buffer->mapped++;
7189dd4900d9SSteven Rostedt (Google) cpu_buffer->user_mapped = 1;
7190117c3920SVincent Donnefort raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7191117c3920SVincent Donnefort } else {
7192117c3920SVincent Donnefort kfree(cpu_buffer->subbuf_ids);
7193117c3920SVincent Donnefort cpu_buffer->subbuf_ids = NULL;
7194117c3920SVincent Donnefort rb_free_meta_page(cpu_buffer);
71959ba0e175SSteven Rostedt atomic_dec(&cpu_buffer->resize_disabled);
7196117c3920SVincent Donnefort }
7197117c3920SVincent Donnefort
7198117c3920SVincent Donnefort unlock:
7199117c3920SVincent Donnefort mutex_unlock(&buffer->mutex);
7200117c3920SVincent Donnefort mutex_unlock(&cpu_buffer->mapping_lock);
7201117c3920SVincent Donnefort
7202117c3920SVincent Donnefort return err;
7203117c3920SVincent Donnefort }
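/*
 * Illustrative sketch (assumed caller, not defined in this file): a tracefs
 * ->mmap() handler would wire a per-CPU buffer to user space roughly like
 * this, with @buffer and @cpu coming from the opened file:
 *
 *	static int example_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		return ring_buffer_map(buffer, cpu, vma);
 *	}
 *
 * ring_buffer_unmap() below is then expected to run once the last user
 * mapping of that CPU buffer goes away.
 */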
7204117c3920SVincent Donnefort
7205117c3920SVincent Donnefort int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
7206117c3920SVincent Donnefort {
7207117c3920SVincent Donnefort struct ring_buffer_per_cpu *cpu_buffer;
7208117c3920SVincent Donnefort unsigned long flags;
7209117c3920SVincent Donnefort int err = 0;
7210117c3920SVincent Donnefort
7211117c3920SVincent Donnefort if (!cpumask_test_cpu(cpu, buffer->cpumask))
7212117c3920SVincent Donnefort return -EINVAL;
7213117c3920SVincent Donnefort
7214117c3920SVincent Donnefort cpu_buffer = buffer->buffers[cpu];
7215117c3920SVincent Donnefort
7216117c3920SVincent Donnefort mutex_lock(&cpu_buffer->mapping_lock);
7217117c3920SVincent Donnefort
7218dd4900d9SSteven Rostedt (Google) if (!cpu_buffer->user_mapped) {
7219117c3920SVincent Donnefort err = -ENODEV;
7220117c3920SVincent Donnefort goto out;
7221dd4900d9SSteven Rostedt (Google) } else if (cpu_buffer->user_mapped > 1) {
7222117c3920SVincent Donnefort __rb_inc_dec_mapped(cpu_buffer, false);
7223117c3920SVincent Donnefort goto out;
7224117c3920SVincent Donnefort }
7225117c3920SVincent Donnefort
7226117c3920SVincent Donnefort mutex_lock(&buffer->mutex);
7227117c3920SVincent Donnefort raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7228117c3920SVincent Donnefort
7229dd4900d9SSteven Rostedt (Google) /* This is the last user space mapping */
7230dd4900d9SSteven Rostedt (Google) if (!WARN_ON_ONCE(cpu_buffer->mapped < cpu_buffer->user_mapped))
7231dd4900d9SSteven Rostedt (Google) cpu_buffer->mapped--;
7232dd4900d9SSteven Rostedt (Google) cpu_buffer->user_mapped = 0;
7233117c3920SVincent Donnefort
7234117c3920SVincent Donnefort raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7235117c3920SVincent Donnefort
7236117c3920SVincent Donnefort kfree(cpu_buffer->subbuf_ids);
7237117c3920SVincent Donnefort cpu_buffer->subbuf_ids = NULL;
7238117c3920SVincent Donnefort rb_free_meta_page(cpu_buffer);
7239117c3920SVincent Donnefort atomic_dec(&cpu_buffer->resize_disabled);
7240117c3920SVincent Donnefort
7241117c3920SVincent Donnefort mutex_unlock(&buffer->mutex);
7242117c3920SVincent Donnefort
7243117c3920SVincent Donnefort out:
7244117c3920SVincent Donnefort mutex_unlock(&cpu_buffer->mapping_lock);
7245117c3920SVincent Donnefort
7246117c3920SVincent Donnefort return err;
7247117c3920SVincent Donnefort }
7248117c3920SVincent Donnefort
7249117c3920SVincent Donnefort int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu)
7250117c3920SVincent Donnefort {
7251117c3920SVincent Donnefort struct ring_buffer_per_cpu *cpu_buffer;
7252fe832be0SSteven Rostedt (Google) struct buffer_page *reader;
7253fe832be0SSteven Rostedt (Google) unsigned long missed_events;
7254117c3920SVincent Donnefort unsigned long reader_size;
7255117c3920SVincent Donnefort unsigned long flags;
7256117c3920SVincent Donnefort
7257117c3920SVincent Donnefort cpu_buffer = rb_get_mapped_buffer(buffer, cpu);
7258117c3920SVincent Donnefort if (IS_ERR(cpu_buffer))
7259117c3920SVincent Donnefort return (int)PTR_ERR(cpu_buffer);
7260117c3920SVincent Donnefort
7261117c3920SVincent Donnefort raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
7262117c3920SVincent Donnefort
7263117c3920SVincent Donnefort consume:
7264117c3920SVincent Donnefort if (rb_per_cpu_empty(cpu_buffer))
7265117c3920SVincent Donnefort goto out;
7266117c3920SVincent Donnefort
7267117c3920SVincent Donnefort reader_size = rb_page_size(cpu_buffer->reader_page);
7268117c3920SVincent Donnefort
7269117c3920SVincent Donnefort /*
7270117c3920SVincent Donnefort * If there is data to be read on the current reader page, we can
7271117c3920SVincent Donnefort * return to the caller. But before that, we assume the caller will
7272117c3920SVincent Donnefort * read everything, so update the kernel reader accordingly.
7273117c3920SVincent Donnefort */
7274117c3920SVincent Donnefort if (cpu_buffer->reader_page->read < reader_size) {
7275117c3920SVincent Donnefort while (cpu_buffer->reader_page->read < reader_size)
7276117c3920SVincent Donnefort rb_advance_reader(cpu_buffer);
7277117c3920SVincent Donnefort goto out;
7278117c3920SVincent Donnefort }
7279117c3920SVincent Donnefort
7280fe832be0SSteven Rostedt (Google) reader = rb_get_reader_page(cpu_buffer);
7281fe832be0SSteven Rostedt (Google) if (WARN_ON(!reader))
7282117c3920SVincent Donnefort goto out;
7283117c3920SVincent Donnefort
7284fe832be0SSteven Rostedt (Google) /* Check if any events were dropped */
7285fe832be0SSteven Rostedt (Google) missed_events = cpu_buffer->lost_events;
7286fe832be0SSteven Rostedt (Google)
7287fe832be0SSteven Rostedt (Google) if (cpu_buffer->reader_page != cpu_buffer->commit_page) {
7288fe832be0SSteven Rostedt (Google) if (missed_events) {
7289fe832be0SSteven Rostedt (Google) struct buffer_data_page *bpage = reader->page;
7290fe832be0SSteven Rostedt (Google) unsigned int commit;
7291fe832be0SSteven Rostedt (Google) /*
7292fe832be0SSteven Rostedt (Google) * Use the real_end for the data size.
7293fe832be0SSteven Rostedt (Google) * This gives us a chance to store the lost events
7294fe832be0SSteven Rostedt (Google) * on the page.
7295fe832be0SSteven Rostedt (Google) */
7296fe832be0SSteven Rostedt (Google) if (reader->real_end)
7297fe832be0SSteven Rostedt (Google) local_set(&bpage->commit, reader->real_end);
7298fe832be0SSteven Rostedt (Google) /*
7299fe832be0SSteven Rostedt (Google) * If there is room at the end of the page to save the
7300fe832be0SSteven Rostedt (Google) * missed events, then record it there.
7301fe832be0SSteven Rostedt (Google) */
7302fe832be0SSteven Rostedt (Google) commit = rb_page_size(reader);
7303fe832be0SSteven Rostedt (Google) if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
7304fe832be0SSteven Rostedt (Google) memcpy(&bpage->data[commit], &missed_events,
7305fe832be0SSteven Rostedt (Google) sizeof(missed_events));
7306fe832be0SSteven Rostedt (Google) local_add(RB_MISSED_STORED, &bpage->commit);
7307fe832be0SSteven Rostedt (Google) }
7308fe832be0SSteven Rostedt (Google) local_add(RB_MISSED_EVENTS, &bpage->commit);
7309fe832be0SSteven Rostedt (Google) }
7310fe832be0SSteven Rostedt (Google) } else {
7311fe832be0SSteven Rostedt (Google) /*
7312fe832be0SSteven Rostedt (Google) * There really shouldn't be any missed events if the commit
7313fe832be0SSteven Rostedt (Google) * is on the reader page.
7314fe832be0SSteven Rostedt (Google) */
7315fe832be0SSteven Rostedt (Google) WARN_ON_ONCE(missed_events);
7316fe832be0SSteven Rostedt (Google) }
7317fe832be0SSteven Rostedt (Google)
7318fe832be0SSteven Rostedt (Google) cpu_buffer->lost_events = 0;
7319fe832be0SSteven Rostedt (Google)
7320117c3920SVincent Donnefort goto consume;
7321117c3920SVincent Donnefort
7322117c3920SVincent Donnefort out:
7323117c3920SVincent Donnefort /* Some archs do not have data cache coherency between kernel and user-space */
7324e4d4b867SSteven Rostedt flush_kernel_vmap_range(cpu_buffer->reader_page->page,
7325e4d4b867SSteven Rostedt buffer->subbuf_size + BUF_PAGE_HDR_SIZE);
7326117c3920SVincent Donnefort
7327117c3920SVincent Donnefort rb_update_meta_page(cpu_buffer);
7328117c3920SVincent Donnefort
7329117c3920SVincent Donnefort raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
7330117c3920SVincent Donnefort rb_put_mapped_buffer(cpu_buffer);
7331117c3920SVincent Donnefort
7332117c3920SVincent Donnefort return 0;
7333117c3920SVincent Donnefort }
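/*
 * Illustrative user-space sketch (assuming the tracefs trace_pipe_raw
 * interface that drives this function; the ioctl name is the one declared
 * in include/uapi/linux/trace_mmap.h):
 *
 *	int fd = open("trace_pipe_raw", O_RDONLY);
 *	struct trace_buffer_meta *meta =
 *		mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
 *
 *	ioctl(fd, TRACE_MMAP_IOCTL_GET_READER);  // ends up here, above
 *
 * After the ioctl returns, the meta page tells the reader which sub-buffer
 * to consume next.
 */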
7334117c3920SVincent Donnefort
7335b32614c0SSebastian Andrzej Siewior /*
7336b32614c0SSebastian Andrzej Siewior * We only allocate new buffers, never free them if the CPU goes down.
7337b32614c0SSebastian Andrzej Siewior * If we were to free the buffer, then the user would lose any trace that was in
7338b32614c0SSebastian Andrzej Siewior * the buffer.
7339b32614c0SSebastian Andrzej Siewior */
7340b32614c0SSebastian Andrzej Siewior int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
7341554f786eSSteven Rostedt {
734213292494SSteven Rostedt (VMware) struct trace_buffer *buffer;
73439b94a8fbSSteven Rostedt (Red Hat) long nr_pages_same;
73449b94a8fbSSteven Rostedt (Red Hat) int cpu_i;
73459b94a8fbSSteven Rostedt (Red Hat) unsigned long nr_pages;
7346554f786eSSteven Rostedt
734713292494SSteven Rostedt (VMware) buffer = container_of(node, struct trace_buffer, node);
73483f237a79SRusty Russell if (cpumask_test_cpu(cpu, buffer->cpumask))
7349b32614c0SSebastian Andrzej Siewior return 0;
7350554f786eSSteven Rostedt
7351438ced17SVaibhav Nagarnaik nr_pages = 0;
7352438ced17SVaibhav Nagarnaik nr_pages_same = 1;
7353438ced17SVaibhav Nagarnaik /* check if all cpu sizes are same */
7354438ced17SVaibhav Nagarnaik for_each_buffer_cpu(buffer, cpu_i) {
7355438ced17SVaibhav Nagarnaik /* fill in the size from first enabled cpu */
7356438ced17SVaibhav Nagarnaik if (nr_pages == 0)
7357438ced17SVaibhav Nagarnaik nr_pages = buffer->buffers[cpu_i]->nr_pages;
7358438ced17SVaibhav Nagarnaik if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
7359438ced17SVaibhav Nagarnaik nr_pages_same = 0;
7360438ced17SVaibhav Nagarnaik break;
7361438ced17SVaibhav Nagarnaik }
7362438ced17SVaibhav Nagarnaik }
7363438ced17SVaibhav Nagarnaik /* allocate minimum pages, user can later expand it */
7364438ced17SVaibhav Nagarnaik if (!nr_pages_same)
7365438ced17SVaibhav Nagarnaik nr_pages = 2;
7366554f786eSSteven Rostedt buffer->buffers[cpu] =
7367438ced17SVaibhav Nagarnaik rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
7368554f786eSSteven Rostedt if (!buffer->buffers[cpu]) {
7369b32614c0SSebastian Andrzej Siewior WARN(1, "failed to allocate ring buffer on CPU %u\n",
7370554f786eSSteven Rostedt cpu);
7371b32614c0SSebastian Andrzej Siewior return -ENOMEM;
7372554f786eSSteven Rostedt }
7373554f786eSSteven Rostedt smp_wmb();
73743f237a79SRusty Russell cpumask_set_cpu(cpu, buffer->cpumask);
7375b32614c0SSebastian Andrzej Siewior return 0;
7376554f786eSSteven Rostedt }
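/*
 * Illustrative sketch (registration lives in the tracing core, not here):
 * the callback above is meant to be hooked into the CPU hotplug state
 * machine, roughly:
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
 *				      "trace/RB:prepare",
 *				      trace_rb_cpu_prepare, NULL);
 *
 * so a ring buffer exists for a newly onlined CPU before it can trace.
 */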
73776c43e554SSteven Rostedt (Red Hat)
73786c43e554SSteven Rostedt (Red Hat) #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
73796c43e554SSteven Rostedt (Red Hat) /*
73806c43e554SSteven Rostedt (Red Hat) * This is a basic integrity check of the ring buffer.
73816c43e554SSteven Rostedt (Red Hat) * Late in the boot cycle this test will run when configured in.
73826c43e554SSteven Rostedt (Red Hat) * It will kick off a thread per CPU that will go into a loop
73836c43e554SSteven Rostedt (Red Hat) * writing to the per cpu ring buffer various sizes of data.
73846c43e554SSteven Rostedt (Red Hat) * Some of the data will be large items, some small.
73856c43e554SSteven Rostedt (Red Hat) *
73866c43e554SSteven Rostedt (Red Hat) * Another thread is created that goes into a spin, sending out
73876c43e554SSteven Rostedt (Red Hat) * IPIs to the other CPUs to also write into the ring buffer.
73886c43e554SSteven Rostedt (Red Hat) * This is to test the nesting ability of the buffer.
73896c43e554SSteven Rostedt (Red Hat) *
73906c43e554SSteven Rostedt (Red Hat) * Basic stats are recorded and reported. If something unexpected
73916c43e554SSteven Rostedt (Red Hat) * happens in the ring buffer, a big warning is displayed and all
73926c43e554SSteven Rostedt (Red Hat) * ring buffers are disabled.
73936c43e554SSteven Rostedt (Red Hat) */
73946c43e554SSteven Rostedt (Red Hat) static struct task_struct *rb_threads[NR_CPUS] __initdata;
73956c43e554SSteven Rostedt (Red Hat)
73966c43e554SSteven Rostedt (Red Hat) struct rb_test_data {
739713292494SSteven Rostedt (VMware) struct trace_buffer *buffer;
73986c43e554SSteven Rostedt (Red Hat) unsigned long events;
73996c43e554SSteven Rostedt (Red Hat) unsigned long bytes_written;
74006c43e554SSteven Rostedt (Red Hat) unsigned long bytes_alloc;
74016c43e554SSteven Rostedt (Red Hat) unsigned long bytes_dropped;
74026c43e554SSteven Rostedt (Red Hat) unsigned long events_nested;
74036c43e554SSteven Rostedt (Red Hat) unsigned long bytes_written_nested;
74046c43e554SSteven Rostedt (Red Hat) unsigned long bytes_alloc_nested;
74056c43e554SSteven Rostedt (Red Hat) unsigned long bytes_dropped_nested;
74066c43e554SSteven Rostedt (Red Hat) int min_size_nested;
74076c43e554SSteven Rostedt (Red Hat) int max_size_nested;
74086c43e554SSteven Rostedt (Red Hat) int max_size;
74096c43e554SSteven Rostedt (Red Hat) int min_size;
74106c43e554SSteven Rostedt (Red Hat) int cpu;
74116c43e554SSteven Rostedt (Red Hat) int cnt;
74126c43e554SSteven Rostedt (Red Hat) };
74136c43e554SSteven Rostedt (Red Hat)
74146c43e554SSteven Rostedt (Red Hat) static struct rb_test_data rb_data[NR_CPUS] __initdata;
74156c43e554SSteven Rostedt (Red Hat)
74166c43e554SSteven Rostedt (Red Hat) /* 1 meg per cpu */
74176c43e554SSteven Rostedt (Red Hat) #define RB_TEST_BUFFER_SIZE 1048576
74186c43e554SSteven Rostedt (Red Hat)
74196c43e554SSteven Rostedt (Red Hat) static char rb_string[] __initdata =
74206c43e554SSteven Rostedt (Red Hat) "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
74216c43e554SSteven Rostedt (Red Hat) "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
74226c43e554SSteven Rostedt (Red Hat) "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
74236c43e554SSteven Rostedt (Red Hat)
74246c43e554SSteven Rostedt (Red Hat) static bool rb_test_started __initdata;
74256c43e554SSteven Rostedt (Red Hat)
74266c43e554SSteven Rostedt (Red Hat) struct rb_item {
74276c43e554SSteven Rostedt (Red Hat) int size;
74286c43e554SSteven Rostedt (Red Hat) char str[];
74296c43e554SSteven Rostedt (Red Hat) };
74306c43e554SSteven Rostedt (Red Hat)
74316c43e554SSteven Rostedt (Red Hat) static __init int rb_write_something(struct rb_test_data *data, bool nested)
74326c43e554SSteven Rostedt (Red Hat) {
74336c43e554SSteven Rostedt (Red Hat) struct ring_buffer_event *event;
74346c43e554SSteven Rostedt (Red Hat) struct rb_item *item;
74356c43e554SSteven Rostedt (Red Hat) bool started;
74366c43e554SSteven Rostedt (Red Hat) int event_len;
74376c43e554SSteven Rostedt (Red Hat) int size;
74386c43e554SSteven Rostedt (Red Hat) int len;
74396c43e554SSteven Rostedt (Red Hat) int cnt;
74406c43e554SSteven Rostedt (Red Hat)
74416c43e554SSteven Rostedt (Red Hat) /* Have nested writes differ from what is normally written */
74426c43e554SSteven Rostedt (Red Hat) cnt = data->cnt + (nested ? 27 : 0);
74436c43e554SSteven Rostedt (Red Hat)
74446c43e554SSteven Rostedt (Red Hat) /* Multiply cnt by ~e (68/25) to make some unique increment */
744540ed29b3SYueHaibing size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
74466c43e554SSteven Rostedt (Red Hat)
74476c43e554SSteven Rostedt (Red Hat) len = size + sizeof(struct rb_item);
74486c43e554SSteven Rostedt (Red Hat)
74496c43e554SSteven Rostedt (Red Hat) started = rb_test_started;
74506c43e554SSteven Rostedt (Red Hat) /* read rb_test_started before checking buffer enabled */
74516c43e554SSteven Rostedt (Red Hat) smp_rmb();
74526c43e554SSteven Rostedt (Red Hat)
74536c43e554SSteven Rostedt (Red Hat) event = ring_buffer_lock_reserve(data->buffer, len);
74546c43e554SSteven Rostedt (Red Hat) if (!event) {
74556c43e554SSteven Rostedt (Red Hat) /* Ignore dropped events before test starts. */
74566c43e554SSteven Rostedt (Red Hat) if (started) {
74576c43e554SSteven Rostedt (Red Hat) if (nested)
74586c43e554SSteven Rostedt (Red Hat) data->bytes_dropped_nested += len;
7459c73f0b69SFeng Yang else
7460c73f0b69SFeng Yang data->bytes_dropped += len;
74616c43e554SSteven Rostedt (Red Hat) }
74626c43e554SSteven Rostedt (Red Hat) return len;
74636c43e554SSteven Rostedt (Red Hat) }
74646c43e554SSteven Rostedt (Red Hat)
74656c43e554SSteven Rostedt (Red Hat) event_len = ring_buffer_event_length(event);
74666c43e554SSteven Rostedt (Red Hat)
74676c43e554SSteven Rostedt (Red Hat) if (RB_WARN_ON(data->buffer, event_len < len))
74686c43e554SSteven Rostedt (Red Hat) goto out;
74696c43e554SSteven Rostedt (Red Hat)
74706c43e554SSteven Rostedt (Red Hat) item = ring_buffer_event_data(event);
74716c43e554SSteven Rostedt (Red Hat) item->size = size;
74726c43e554SSteven Rostedt (Red Hat) memcpy(item->str, rb_string, size);
74736c43e554SSteven Rostedt (Red Hat)
74746c43e554SSteven Rostedt (Red Hat) if (nested) {
74756c43e554SSteven Rostedt (Red Hat) data->bytes_alloc_nested += event_len;
74766c43e554SSteven Rostedt (Red Hat) data->bytes_written_nested += len;
74776c43e554SSteven Rostedt (Red Hat) data->events_nested++;
74786c43e554SSteven Rostedt (Red Hat) if (!data->min_size_nested || len < data->min_size_nested)
74796c43e554SSteven Rostedt (Red Hat) data->min_size_nested = len;
74806c43e554SSteven Rostedt (Red Hat) if (len > data->max_size_nested)
74816c43e554SSteven Rostedt (Red Hat) data->max_size_nested = len;
74826c43e554SSteven Rostedt (Red Hat) } else {
74836c43e554SSteven Rostedt (Red Hat) data->bytes_alloc += event_len;
74846c43e554SSteven Rostedt (Red Hat) data->bytes_written += len;
74856c43e554SSteven Rostedt (Red Hat) data->events++;
74866c43e554SSteven Rostedt (Red Hat) if (!data->min_size || len < data->min_size)
74876c43e554SSteven Rostedt (Red Hat) data->min_size = len;
74886c43e554SSteven Rostedt (Red Hat) if (len > data->max_size)
74896c43e554SSteven Rostedt (Red Hat) data->max_size = len;
74906c43e554SSteven Rostedt (Red Hat) }
74916c43e554SSteven Rostedt (Red Hat)
74926c43e554SSteven Rostedt (Red Hat) out:
749304aabc32SSong Chen ring_buffer_unlock_commit(data->buffer);
74946c43e554SSteven Rostedt (Red Hat)
74956c43e554SSteven Rostedt (Red Hat) return 0;
74966c43e554SSteven Rostedt (Red Hat) }
74976c43e554SSteven Rostedt (Red Hat)
74986c43e554SSteven Rostedt (Red Hat) static __init int rb_test(void *arg)
74996c43e554SSteven Rostedt (Red Hat) {
75006c43e554SSteven Rostedt (Red Hat) struct rb_test_data *data = arg;
75016c43e554SSteven Rostedt (Red Hat)
75026c43e554SSteven Rostedt (Red Hat) while (!kthread_should_stop()) {
75036c43e554SSteven Rostedt (Red Hat) rb_write_something(data, false);
75046c43e554SSteven Rostedt (Red Hat) data->cnt++;
75056c43e554SSteven Rostedt (Red Hat)
75066c43e554SSteven Rostedt (Red Hat) set_current_state(TASK_INTERRUPTIBLE);
75076c43e554SSteven Rostedt (Red Hat) /* Now sleep between a min of 100-300us and a max of 1ms */
75086c43e554SSteven Rostedt (Red Hat) usleep_range(((data->cnt % 3) + 1) * 100, 1000);
75096c43e554SSteven Rostedt (Red Hat) }
75106c43e554SSteven Rostedt (Red Hat)
75116c43e554SSteven Rostedt (Red Hat) return 0;
75126c43e554SSteven Rostedt (Red Hat) }
75136c43e554SSteven Rostedt (Red Hat)
75146c43e554SSteven Rostedt (Red Hat) static __init void rb_ipi(void *ignore)
75156c43e554SSteven Rostedt (Red Hat) {
75166c43e554SSteven Rostedt (Red Hat) struct rb_test_data *data;
75176c43e554SSteven Rostedt (Red Hat) int cpu = smp_processor_id();
75186c43e554SSteven Rostedt (Red Hat)
75196c43e554SSteven Rostedt (Red Hat) data = &rb_data[cpu];
75206c43e554SSteven Rostedt (Red Hat) rb_write_something(data, true);
75216c43e554SSteven Rostedt (Red Hat) }
75226c43e554SSteven Rostedt (Red Hat)
75236c43e554SSteven Rostedt (Red Hat) static __init int rb_hammer_test(void *arg)
75246c43e554SSteven Rostedt (Red Hat) {
75256c43e554SSteven Rostedt (Red Hat) while (!kthread_should_stop()) {
75266c43e554SSteven Rostedt (Red Hat)
75276c43e554SSteven Rostedt (Red Hat) /* Send an IPI to all cpus to write data! */
75286c43e554SSteven Rostedt (Red Hat) smp_call_function(rb_ipi, NULL, 1);
75296c43e554SSteven Rostedt (Red Hat) /* No sleep, but on non-preempt kernels, let others run */
75306c43e554SSteven Rostedt (Red Hat) schedule();
75316c43e554SSteven Rostedt (Red Hat) }
75326c43e554SSteven Rostedt (Red Hat)
75336c43e554SSteven Rostedt (Red Hat) return 0;
75346c43e554SSteven Rostedt (Red Hat) }
75356c43e554SSteven Rostedt (Red Hat)
75366c43e554SSteven Rostedt (Red Hat) static __init int test_ringbuffer(void)
75376c43e554SSteven Rostedt (Red Hat) {
75386c43e554SSteven Rostedt (Red Hat) struct task_struct *rb_hammer;
753913292494SSteven Rostedt (VMware) struct trace_buffer *buffer;
75406c43e554SSteven Rostedt (Red Hat) int cpu;
75416c43e554SSteven Rostedt (Red Hat) int ret = 0;
75426c43e554SSteven Rostedt (Red Hat)
7543a356646aSSteven Rostedt (VMware) if (security_locked_down(LOCKDOWN_TRACEFS)) {
7544ee195452SStephen Rothwell pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
7545a356646aSSteven Rostedt (VMware) return 0;
7546a356646aSSteven Rostedt (VMware) }
7547a356646aSSteven Rostedt (VMware)
75486c43e554SSteven Rostedt (Red Hat) pr_info("Running ring buffer tests...\n");
75496c43e554SSteven Rostedt (Red Hat)
75506c43e554SSteven Rostedt (Red Hat) buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
75516c43e554SSteven Rostedt (Red Hat) if (WARN_ON(!buffer))
75526c43e554SSteven Rostedt (Red Hat) return 0;
75536c43e554SSteven Rostedt (Red Hat)
75546c43e554SSteven Rostedt (Red Hat) /* Disable buffer so that threads can't write to it yet */
75556c43e554SSteven Rostedt (Red Hat) ring_buffer_record_off(buffer);
75566c43e554SSteven Rostedt (Red Hat)
75576c43e554SSteven Rostedt (Red Hat) for_each_online_cpu(cpu) {
75586c43e554SSteven Rostedt (Red Hat) rb_data[cpu].buffer = buffer;
75596c43e554SSteven Rostedt (Red Hat) rb_data[cpu].cpu = cpu;
75606c43e554SSteven Rostedt (Red Hat) rb_data[cpu].cnt = cpu;
756164ed3a04SCai Huoqing rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
756264ed3a04SCai Huoqing cpu, "rbtester/%u");
756362277de7SWei Yongjun if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
75646c43e554SSteven Rostedt (Red Hat) pr_cont("FAILED\n");
756562277de7SWei Yongjun ret = PTR_ERR(rb_threads[cpu]);
75666c43e554SSteven Rostedt (Red Hat) goto out_free;
75676c43e554SSteven Rostedt (Red Hat) }
75686c43e554SSteven Rostedt (Red Hat) }
75696c43e554SSteven Rostedt (Red Hat)
75706c43e554SSteven Rostedt (Red Hat) /* Now create the rb hammer! */
75716c43e554SSteven Rostedt (Red Hat) rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
757262277de7SWei Yongjun if (WARN_ON(IS_ERR(rb_hammer))) {
75736c43e554SSteven Rostedt (Red Hat) pr_cont("FAILED\n");
757462277de7SWei Yongjun ret = PTR_ERR(rb_hammer);
75756c43e554SSteven Rostedt (Red Hat) goto out_free;
75766c43e554SSteven Rostedt (Red Hat) }
75776c43e554SSteven Rostedt (Red Hat)
75786c43e554SSteven Rostedt (Red Hat) ring_buffer_record_on(buffer);
75796c43e554SSteven Rostedt (Red Hat) /*
75806c43e554SSteven Rostedt (Red Hat) * Show buffer is enabled before setting rb_test_started.
75816c43e554SSteven Rostedt (Red Hat) * Yes, there's a small race window where events could be
75826c43e554SSteven Rostedt (Red Hat) * dropped and the thread won't catch it. But when a ring
75836c43e554SSteven Rostedt (Red Hat) * buffer gets enabled, there will always be some kind of
75846c43e554SSteven Rostedt (Red Hat) * delay before other CPUs see it. Thus, we don't care about
75856c43e554SSteven Rostedt (Red Hat) * those dropped events. We care about events dropped after
75866c43e554SSteven Rostedt (Red Hat) * the threads see that the buffer is active.
75876c43e554SSteven Rostedt (Red Hat) */
75886c43e554SSteven Rostedt (Red Hat) smp_wmb();
75896c43e554SSteven Rostedt (Red Hat) rb_test_started = true;
75906c43e554SSteven Rostedt (Red Hat)
75916c43e554SSteven Rostedt (Red Hat) set_current_state(TASK_INTERRUPTIBLE);
75926c43e554SSteven Rostedt (Red Hat) /* Just run for 10 seconds */
75936c43e554SSteven Rostedt (Red Hat) schedule_timeout(10 * HZ);
75946c43e554SSteven Rostedt (Red Hat)
75956c43e554SSteven Rostedt (Red Hat) kthread_stop(rb_hammer);
75966c43e554SSteven Rostedt (Red Hat)
75976c43e554SSteven Rostedt (Red Hat) out_free:
75986c43e554SSteven Rostedt (Red Hat) for_each_online_cpu(cpu) {
75996c43e554SSteven Rostedt (Red Hat) if (!rb_threads[cpu])
76006c43e554SSteven Rostedt (Red Hat) break;
76016c43e554SSteven Rostedt (Red Hat) kthread_stop(rb_threads[cpu]);
76026c43e554SSteven Rostedt (Red Hat) }
76036c43e554SSteven Rostedt (Red Hat) if (ret) {
76046c43e554SSteven Rostedt (Red Hat) ring_buffer_free(buffer);
76056c43e554SSteven Rostedt (Red Hat) return ret;
76066c43e554SSteven Rostedt (Red Hat) }
76076c43e554SSteven Rostedt (Red Hat)
76086c43e554SSteven Rostedt (Red Hat) /* Report! */
76096c43e554SSteven Rostedt (Red Hat) pr_info("finished\n");
76106c43e554SSteven Rostedt (Red Hat) for_each_online_cpu(cpu) {
76116c43e554SSteven Rostedt (Red Hat) struct ring_buffer_event *event;
76126c43e554SSteven Rostedt (Red Hat) struct rb_test_data *data = &rb_data[cpu];
76136c43e554SSteven Rostedt (Red Hat) struct rb_item *item;
76146c43e554SSteven Rostedt (Red Hat) unsigned long total_events;
76156c43e554SSteven Rostedt (Red Hat) unsigned long total_dropped;
76166c43e554SSteven Rostedt (Red Hat) unsigned long total_written;
76176c43e554SSteven Rostedt (Red Hat) unsigned long total_alloc;
76186c43e554SSteven Rostedt (Red Hat) unsigned long total_read = 0;
76196c43e554SSteven Rostedt (Red Hat) unsigned long total_size = 0;
76206c43e554SSteven Rostedt (Red Hat) unsigned long total_len = 0;
76216c43e554SSteven Rostedt (Red Hat) unsigned long total_lost = 0;
76226c43e554SSteven Rostedt (Red Hat) unsigned long lost;
76236c43e554SSteven Rostedt (Red Hat) int big_event_size;
76246c43e554SSteven Rostedt (Red Hat) int small_event_size;
76256c43e554SSteven Rostedt (Red Hat)
76266c43e554SSteven Rostedt (Red Hat) ret = -1;
76276c43e554SSteven Rostedt (Red Hat)
76286c43e554SSteven Rostedt (Red Hat) total_events = data->events + data->events_nested;
76296c43e554SSteven Rostedt (Red Hat) total_written = data->bytes_written + data->bytes_written_nested;
76306c43e554SSteven Rostedt (Red Hat) total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
76316c43e554SSteven Rostedt (Red Hat) total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
76326c43e554SSteven Rostedt (Red Hat)
76336c43e554SSteven Rostedt (Red Hat) big_event_size = data->max_size + data->max_size_nested;
76346c43e554SSteven Rostedt (Red Hat) small_event_size = data->min_size + data->min_size_nested;
76356c43e554SSteven Rostedt (Red Hat)
76366c43e554SSteven Rostedt (Red Hat) pr_info("CPU %d:\n", cpu);
76376c43e554SSteven Rostedt (Red Hat) pr_info(" events: %ld\n", total_events);
76386c43e554SSteven Rostedt (Red Hat) pr_info(" dropped bytes: %ld\n", total_dropped);
76396c43e554SSteven Rostedt (Red Hat) pr_info(" alloced bytes: %ld\n", total_alloc);
76406c43e554SSteven Rostedt (Red Hat) pr_info(" written bytes: %ld\n", total_written);
76416c43e554SSteven Rostedt (Red Hat) pr_info(" biggest event: %d\n", big_event_size);
76426c43e554SSteven Rostedt (Red Hat) pr_info(" smallest event: %d\n", small_event_size);
76436c43e554SSteven Rostedt (Red Hat)
76446c43e554SSteven Rostedt (Red Hat) if (RB_WARN_ON(buffer, total_dropped))
76456c43e554SSteven Rostedt (Red Hat) break;
76466c43e554SSteven Rostedt (Red Hat)
76476c43e554SSteven Rostedt (Red Hat) ret = 0;
76486c43e554SSteven Rostedt (Red Hat)
76496c43e554SSteven Rostedt (Red Hat) while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
76506c43e554SSteven Rostedt (Red Hat) total_lost += lost;
76516c43e554SSteven Rostedt (Red Hat) item = ring_buffer_event_data(event);
76526c43e554SSteven Rostedt (Red Hat) total_len += ring_buffer_event_length(event);
76536c43e554SSteven Rostedt (Red Hat) total_size += item->size + sizeof(struct rb_item);
76546c43e554SSteven Rostedt (Red Hat) if (memcmp(&item->str[0], rb_string, item->size) != 0) {
76556c43e554SSteven Rostedt (Red Hat) pr_info("FAILED!\n");
76566c43e554SSteven Rostedt (Red Hat) pr_info("buffer had: %.*s\n", item->size, item->str);
76576c43e554SSteven Rostedt (Red Hat) pr_info("expected: %.*s\n", item->size, rb_string);
76586c43e554SSteven Rostedt (Red Hat) RB_WARN_ON(buffer, 1);
76596c43e554SSteven Rostedt (Red Hat) ret = -1;
76606c43e554SSteven Rostedt (Red Hat) break;
76616c43e554SSteven Rostedt (Red Hat) }
76626c43e554SSteven Rostedt (Red Hat) total_read++;
76636c43e554SSteven Rostedt (Red Hat) }
76646c43e554SSteven Rostedt (Red Hat) if (ret)
76656c43e554SSteven Rostedt (Red Hat) break;
76666c43e554SSteven Rostedt (Red Hat)
76676c43e554SSteven Rostedt (Red Hat) ret = -1;
76686c43e554SSteven Rostedt (Red Hat)
76696c43e554SSteven Rostedt (Red Hat) pr_info(" read events: %ld\n", total_read);
76706c43e554SSteven Rostedt (Red Hat) pr_info(" lost events: %ld\n", total_lost);
76716c43e554SSteven Rostedt (Red Hat) pr_info(" total events: %ld\n", total_lost + total_read);
76726c43e554SSteven Rostedt (Red Hat) pr_info(" recorded len bytes: %ld\n", total_len);
76736c43e554SSteven Rostedt (Red Hat) pr_info(" recorded size bytes: %ld\n", total_size);
7674ed888241SWan Jiabing if (total_lost) {
76756c43e554SSteven Rostedt (Red Hat) pr_info(" With dropped events, record len and size may not match\n"
76766c43e554SSteven Rostedt (Red Hat) " alloced and written from above\n");
7677ed888241SWan Jiabing } else {
76786c43e554SSteven Rostedt (Red Hat) if (RB_WARN_ON(buffer, total_len != total_alloc ||
76796c43e554SSteven Rostedt (Red Hat) total_size != total_written))
76806c43e554SSteven Rostedt (Red Hat) break;
76816c43e554SSteven Rostedt (Red Hat) }
76826c43e554SSteven Rostedt (Red Hat) if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
76836c43e554SSteven Rostedt (Red Hat) break;
76846c43e554SSteven Rostedt (Red Hat)
76856c43e554SSteven Rostedt (Red Hat) ret = 0;
76866c43e554SSteven Rostedt (Red Hat) }
76876c43e554SSteven Rostedt (Red Hat) if (!ret)
76886c43e554SSteven Rostedt (Red Hat) pr_info("Ring buffer PASSED!\n");
76896c43e554SSteven Rostedt (Red Hat)
76906c43e554SSteven Rostedt (Red Hat) ring_buffer_free(buffer);
76916c43e554SSteven Rostedt (Red Hat) return 0;
76926c43e554SSteven Rostedt (Red Hat) }
76936c43e554SSteven Rostedt (Red Hat)
76946c43e554SSteven Rostedt (Red Hat) late_initcall(test_ringbuffer);
76956c43e554SSteven Rostedt (Red Hat) #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
7696