17a8e76a3SSteven Rostedt /* 27a8e76a3SSteven Rostedt * Generic ring buffer 37a8e76a3SSteven Rostedt * 47a8e76a3SSteven Rostedt * Copyright (C) 2008 Steven Rostedt <[email protected]> 57a8e76a3SSteven Rostedt */ 60b07436dSSteven Rostedt #include <linux/ftrace_event.h> 77a8e76a3SSteven Rostedt #include <linux/ring_buffer.h> 814131f2fSIngo Molnar #include <linux/trace_clock.h> 90b07436dSSteven Rostedt #include <linux/trace_seq.h> 107a8e76a3SSteven Rostedt #include <linux/spinlock.h> 1115693458SSteven Rostedt (Red Hat) #include <linux/irq_work.h> 127a8e76a3SSteven Rostedt #include <linux/debugfs.h> 137a8e76a3SSteven Rostedt #include <linux/uaccess.h> 14a81bd80aSSteven Rostedt #include <linux/hardirq.h> 156c43e554SSteven Rostedt (Red Hat) #include <linux/kthread.h> /* for self test */ 161744a21dSVegard Nossum #include <linux/kmemcheck.h> 177a8e76a3SSteven Rostedt #include <linux/module.h> 187a8e76a3SSteven Rostedt #include <linux/percpu.h> 197a8e76a3SSteven Rostedt #include <linux/mutex.h> 206c43e554SSteven Rostedt (Red Hat) #include <linux/delay.h> 215a0e3ad6STejun Heo #include <linux/slab.h> 227a8e76a3SSteven Rostedt #include <linux/init.h> 237a8e76a3SSteven Rostedt #include <linux/hash.h> 247a8e76a3SSteven Rostedt #include <linux/list.h> 25554f786eSSteven Rostedt #include <linux/cpu.h> 267a8e76a3SSteven Rostedt #include <linux/fs.h> 277a8e76a3SSteven Rostedt 2879615760SChristoph Lameter #include <asm/local.h> 29182e9f5fSSteven Rostedt 3083f40318SVaibhav Nagarnaik static void update_pages_handler(struct work_struct *work); 3183f40318SVaibhav Nagarnaik 32033601a3SSteven Rostedt /* 33d1b182a8SSteven Rostedt * The ring buffer header is special. We must manually up keep it. 34d1b182a8SSteven Rostedt */ 35d1b182a8SSteven Rostedt int ring_buffer_print_entry_header(struct trace_seq *s) 36d1b182a8SSteven Rostedt { 37d1b182a8SSteven Rostedt int ret; 38d1b182a8SSteven Rostedt 39146c3442Szhangwei(Jovi) ret = trace_seq_puts(s, "# compressed entry header\n"); 40146c3442Szhangwei(Jovi) ret = trace_seq_puts(s, "\ttype_len : 5 bits\n"); 41146c3442Szhangwei(Jovi) ret = trace_seq_puts(s, "\ttime_delta : 27 bits\n"); 42146c3442Szhangwei(Jovi) ret = trace_seq_puts(s, "\tarray : 32 bits\n"); 43146c3442Szhangwei(Jovi) ret = trace_seq_putc(s, '\n'); 44d1b182a8SSteven Rostedt ret = trace_seq_printf(s, "\tpadding : type == %d\n", 45d1b182a8SSteven Rostedt RINGBUF_TYPE_PADDING); 46d1b182a8SSteven Rostedt ret = trace_seq_printf(s, "\ttime_extend : type == %d\n", 47d1b182a8SSteven Rostedt RINGBUF_TYPE_TIME_EXTEND); 48334d4169SLai Jiangshan ret = trace_seq_printf(s, "\tdata max type_len == %d\n", 49334d4169SLai Jiangshan RINGBUF_TYPE_DATA_TYPE_LEN_MAX); 50d1b182a8SSteven Rostedt 51d1b182a8SSteven Rostedt return ret; 52d1b182a8SSteven Rostedt } 53d1b182a8SSteven Rostedt 54d1b182a8SSteven Rostedt /* 555cc98548SSteven Rostedt * The ring buffer is made up of a list of pages. A separate list of pages is 565cc98548SSteven Rostedt * allocated for each CPU. A writer may only write to a buffer that is 575cc98548SSteven Rostedt * associated with the CPU it is currently executing on. A reader may read 585cc98548SSteven Rostedt * from any per cpu buffer. 595cc98548SSteven Rostedt * 605cc98548SSteven Rostedt * The reader is special. For each per cpu buffer, the reader has its own 615cc98548SSteven Rostedt * reader page. When a reader has read the entire reader page, this reader 625cc98548SSteven Rostedt * page is swapped with another page in the ring buffer. 635cc98548SSteven Rostedt * 645cc98548SSteven Rostedt * Now, as long as the writer is off the reader page, the reader can do what 655cc98548SSteven Rostedt * ever it wants with that page. The writer will never write to that page 665cc98548SSteven Rostedt * again (as long as it is out of the ring buffer). 675cc98548SSteven Rostedt * 685cc98548SSteven Rostedt * Here's some silly ASCII art. 695cc98548SSteven Rostedt * 705cc98548SSteven Rostedt * +------+ 715cc98548SSteven Rostedt * |reader| RING BUFFER 725cc98548SSteven Rostedt * |page | 735cc98548SSteven Rostedt * +------+ +---+ +---+ +---+ 745cc98548SSteven Rostedt * | |-->| |-->| | 755cc98548SSteven Rostedt * +---+ +---+ +---+ 765cc98548SSteven Rostedt * ^ | 775cc98548SSteven Rostedt * | | 785cc98548SSteven Rostedt * +---------------+ 795cc98548SSteven Rostedt * 805cc98548SSteven Rostedt * 815cc98548SSteven Rostedt * +------+ 825cc98548SSteven Rostedt * |reader| RING BUFFER 835cc98548SSteven Rostedt * |page |------------------v 845cc98548SSteven Rostedt * +------+ +---+ +---+ +---+ 855cc98548SSteven Rostedt * | |-->| |-->| | 865cc98548SSteven Rostedt * +---+ +---+ +---+ 875cc98548SSteven Rostedt * ^ | 885cc98548SSteven Rostedt * | | 895cc98548SSteven Rostedt * +---------------+ 905cc98548SSteven Rostedt * 915cc98548SSteven Rostedt * 925cc98548SSteven Rostedt * +------+ 935cc98548SSteven Rostedt * |reader| RING BUFFER 945cc98548SSteven Rostedt * |page |------------------v 955cc98548SSteven Rostedt * +------+ +---+ +---+ +---+ 965cc98548SSteven Rostedt * ^ | |-->| |-->| | 975cc98548SSteven Rostedt * | +---+ +---+ +---+ 985cc98548SSteven Rostedt * | | 995cc98548SSteven Rostedt * | | 1005cc98548SSteven Rostedt * +------------------------------+ 1015cc98548SSteven Rostedt * 1025cc98548SSteven Rostedt * 1035cc98548SSteven Rostedt * +------+ 1045cc98548SSteven Rostedt * |buffer| RING BUFFER 1055cc98548SSteven Rostedt * |page |------------------v 1065cc98548SSteven Rostedt * +------+ +---+ +---+ +---+ 1075cc98548SSteven Rostedt * ^ | | | |-->| | 1085cc98548SSteven Rostedt * | New +---+ +---+ +---+ 1095cc98548SSteven Rostedt * | Reader------^ | 1105cc98548SSteven Rostedt * | page | 1115cc98548SSteven Rostedt * +------------------------------+ 1125cc98548SSteven Rostedt * 1135cc98548SSteven Rostedt * 1145cc98548SSteven Rostedt * After we make this swap, the reader can hand this page off to the splice 1155cc98548SSteven Rostedt * code and be done with it. It can even allocate a new page if it needs to 1165cc98548SSteven Rostedt * and swap that into the ring buffer. 1175cc98548SSteven Rostedt * 1185cc98548SSteven Rostedt * We will be using cmpxchg soon to make all this lockless. 1195cc98548SSteven Rostedt * 1205cc98548SSteven Rostedt */ 1215cc98548SSteven Rostedt 1225cc98548SSteven Rostedt /* 123033601a3SSteven Rostedt * A fast way to enable or disable all ring buffers is to 124033601a3SSteven Rostedt * call tracing_on or tracing_off. Turning off the ring buffers 125033601a3SSteven Rostedt * prevents all ring buffers from being recorded to. 126033601a3SSteven Rostedt * Turning this switch on, makes it OK to write to the 127033601a3SSteven Rostedt * ring buffer, if the ring buffer is enabled itself. 128033601a3SSteven Rostedt * 129033601a3SSteven Rostedt * There's three layers that must be on in order to write 130033601a3SSteven Rostedt * to the ring buffer. 131033601a3SSteven Rostedt * 132033601a3SSteven Rostedt * 1) This global flag must be set. 133033601a3SSteven Rostedt * 2) The ring buffer must be enabled for recording. 134033601a3SSteven Rostedt * 3) The per cpu buffer must be enabled for recording. 135033601a3SSteven Rostedt * 136033601a3SSteven Rostedt * In case of an anomaly, this global flag has a bit set that 137033601a3SSteven Rostedt * will permantly disable all ring buffers. 138033601a3SSteven Rostedt */ 139033601a3SSteven Rostedt 140033601a3SSteven Rostedt /* 141033601a3SSteven Rostedt * Global flag to disable all recording to ring buffers 142033601a3SSteven Rostedt * This has two bits: ON, DISABLED 143033601a3SSteven Rostedt * 144033601a3SSteven Rostedt * ON DISABLED 145033601a3SSteven Rostedt * ---- ---------- 146033601a3SSteven Rostedt * 0 0 : ring buffers are off 147033601a3SSteven Rostedt * 1 0 : ring buffers are on 148033601a3SSteven Rostedt * X 1 : ring buffers are permanently disabled 149033601a3SSteven Rostedt */ 150033601a3SSteven Rostedt 151033601a3SSteven Rostedt enum { 152033601a3SSteven Rostedt RB_BUFFERS_ON_BIT = 0, 153033601a3SSteven Rostedt RB_BUFFERS_DISABLED_BIT = 1, 154033601a3SSteven Rostedt }; 155033601a3SSteven Rostedt 156033601a3SSteven Rostedt enum { 157033601a3SSteven Rostedt RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT, 158033601a3SSteven Rostedt RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT, 159033601a3SSteven Rostedt }; 160033601a3SSteven Rostedt 1615e39841cSHannes Eder static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; 162a3583244SSteven Rostedt 163499e5470SSteven Rostedt /* Used for individual buffers (after the counter) */ 164499e5470SSteven Rostedt #define RB_BUFFER_OFF (1 << 20) 165499e5470SSteven Rostedt 166474d32b6SSteven Rostedt #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data) 167474d32b6SSteven Rostedt 168a3583244SSteven Rostedt /** 169033601a3SSteven Rostedt * tracing_off_permanent - permanently disable ring buffers 170033601a3SSteven Rostedt * 171033601a3SSteven Rostedt * This function, once called, will disable all ring buffers 172c3706f00SWenji Huang * permanently. 173033601a3SSteven Rostedt */ 174033601a3SSteven Rostedt void tracing_off_permanent(void) 175033601a3SSteven Rostedt { 176033601a3SSteven Rostedt set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); 177a3583244SSteven Rostedt } 178a3583244SSteven Rostedt 179e3d6bf0aSSteven Rostedt #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) 18067d34724SAndrew Morton #define RB_ALIGNMENT 4U 181334d4169SLai Jiangshan #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 182c7b09308SSteven Rostedt #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */ 183334d4169SLai Jiangshan 184649508f6SJames Hogan #ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS 1852271048dSSteven Rostedt # define RB_FORCE_8BYTE_ALIGNMENT 0 1862271048dSSteven Rostedt # define RB_ARCH_ALIGNMENT RB_ALIGNMENT 1872271048dSSteven Rostedt #else 1882271048dSSteven Rostedt # define RB_FORCE_8BYTE_ALIGNMENT 1 1892271048dSSteven Rostedt # define RB_ARCH_ALIGNMENT 8U 1902271048dSSteven Rostedt #endif 1912271048dSSteven Rostedt 192649508f6SJames Hogan #define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT) 193649508f6SJames Hogan 194334d4169SLai Jiangshan /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ 195334d4169SLai Jiangshan #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX 1967a8e76a3SSteven Rostedt 1977a8e76a3SSteven Rostedt enum { 1987a8e76a3SSteven Rostedt RB_LEN_TIME_EXTEND = 8, 1997a8e76a3SSteven Rostedt RB_LEN_TIME_STAMP = 16, 2007a8e76a3SSteven Rostedt }; 2017a8e76a3SSteven Rostedt 20269d1b839SSteven Rostedt #define skip_time_extend(event) \ 20369d1b839SSteven Rostedt ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND)) 20469d1b839SSteven Rostedt 2052d622719STom Zanussi static inline int rb_null_event(struct ring_buffer_event *event) 2062d622719STom Zanussi { 207a1863c21SSteven Rostedt return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta; 2082d622719STom Zanussi } 2092d622719STom Zanussi 2102d622719STom Zanussi static void rb_event_set_padding(struct ring_buffer_event *event) 2112d622719STom Zanussi { 212a1863c21SSteven Rostedt /* padding has a NULL time_delta */ 213334d4169SLai Jiangshan event->type_len = RINGBUF_TYPE_PADDING; 2142d622719STom Zanussi event->time_delta = 0; 2152d622719STom Zanussi } 2162d622719STom Zanussi 2172d622719STom Zanussi static unsigned 2182d622719STom Zanussi rb_event_data_length(struct ring_buffer_event *event) 2192d622719STom Zanussi { 2202d622719STom Zanussi unsigned length; 2212d622719STom Zanussi 222334d4169SLai Jiangshan if (event->type_len) 223334d4169SLai Jiangshan length = event->type_len * RB_ALIGNMENT; 2242d622719STom Zanussi else 2252d622719STom Zanussi length = event->array[0]; 2262d622719STom Zanussi return length + RB_EVNT_HDR_SIZE; 2272d622719STom Zanussi } 2282d622719STom Zanussi 22969d1b839SSteven Rostedt /* 23069d1b839SSteven Rostedt * Return the length of the given event. Will return 23169d1b839SSteven Rostedt * the length of the time extend if the event is a 23269d1b839SSteven Rostedt * time extend. 23369d1b839SSteven Rostedt */ 23469d1b839SSteven Rostedt static inline unsigned 2357a8e76a3SSteven Rostedt rb_event_length(struct ring_buffer_event *event) 2367a8e76a3SSteven Rostedt { 237334d4169SLai Jiangshan switch (event->type_len) { 2387a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 2392d622719STom Zanussi if (rb_null_event(event)) 2407a8e76a3SSteven Rostedt /* undefined */ 2417a8e76a3SSteven Rostedt return -1; 242334d4169SLai Jiangshan return event->array[0] + RB_EVNT_HDR_SIZE; 2437a8e76a3SSteven Rostedt 2447a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 2457a8e76a3SSteven Rostedt return RB_LEN_TIME_EXTEND; 2467a8e76a3SSteven Rostedt 2477a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 2487a8e76a3SSteven Rostedt return RB_LEN_TIME_STAMP; 2497a8e76a3SSteven Rostedt 2507a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 2512d622719STom Zanussi return rb_event_data_length(event); 2527a8e76a3SSteven Rostedt default: 2537a8e76a3SSteven Rostedt BUG(); 2547a8e76a3SSteven Rostedt } 2557a8e76a3SSteven Rostedt /* not hit */ 2567a8e76a3SSteven Rostedt return 0; 2577a8e76a3SSteven Rostedt } 2587a8e76a3SSteven Rostedt 25969d1b839SSteven Rostedt /* 26069d1b839SSteven Rostedt * Return total length of time extend and data, 26169d1b839SSteven Rostedt * or just the event length for all other events. 26269d1b839SSteven Rostedt */ 26369d1b839SSteven Rostedt static inline unsigned 26469d1b839SSteven Rostedt rb_event_ts_length(struct ring_buffer_event *event) 26569d1b839SSteven Rostedt { 26669d1b839SSteven Rostedt unsigned len = 0; 26769d1b839SSteven Rostedt 26869d1b839SSteven Rostedt if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { 26969d1b839SSteven Rostedt /* time extends include the data event after it */ 27069d1b839SSteven Rostedt len = RB_LEN_TIME_EXTEND; 27169d1b839SSteven Rostedt event = skip_time_extend(event); 27269d1b839SSteven Rostedt } 27369d1b839SSteven Rostedt return len + rb_event_length(event); 27469d1b839SSteven Rostedt } 27569d1b839SSteven Rostedt 2767a8e76a3SSteven Rostedt /** 2777a8e76a3SSteven Rostedt * ring_buffer_event_length - return the length of the event 2787a8e76a3SSteven Rostedt * @event: the event to get the length of 27969d1b839SSteven Rostedt * 28069d1b839SSteven Rostedt * Returns the size of the data load of a data event. 28169d1b839SSteven Rostedt * If the event is something other than a data event, it 28269d1b839SSteven Rostedt * returns the size of the event itself. With the exception 28369d1b839SSteven Rostedt * of a TIME EXTEND, where it still returns the size of the 28469d1b839SSteven Rostedt * data load of the data event after it. 2857a8e76a3SSteven Rostedt */ 2867a8e76a3SSteven Rostedt unsigned ring_buffer_event_length(struct ring_buffer_event *event) 2877a8e76a3SSteven Rostedt { 28869d1b839SSteven Rostedt unsigned length; 28969d1b839SSteven Rostedt 29069d1b839SSteven Rostedt if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) 29169d1b839SSteven Rostedt event = skip_time_extend(event); 29269d1b839SSteven Rostedt 29369d1b839SSteven Rostedt length = rb_event_length(event); 294334d4169SLai Jiangshan if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 295465634adSRobert Richter return length; 296465634adSRobert Richter length -= RB_EVNT_HDR_SIZE; 297465634adSRobert Richter if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) 298465634adSRobert Richter length -= sizeof(event->array[0]); 299465634adSRobert Richter return length; 3007a8e76a3SSteven Rostedt } 301c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_event_length); 3027a8e76a3SSteven Rostedt 3037a8e76a3SSteven Rostedt /* inline for ring buffer fast paths */ 30434a148bfSAndrew Morton static void * 3057a8e76a3SSteven Rostedt rb_event_data(struct ring_buffer_event *event) 3067a8e76a3SSteven Rostedt { 30769d1b839SSteven Rostedt if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) 30869d1b839SSteven Rostedt event = skip_time_extend(event); 309334d4169SLai Jiangshan BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); 3107a8e76a3SSteven Rostedt /* If length is in len field, then array[0] has the data */ 311334d4169SLai Jiangshan if (event->type_len) 3127a8e76a3SSteven Rostedt return (void *)&event->array[0]; 3137a8e76a3SSteven Rostedt /* Otherwise length is in array[0] and array[1] has the data */ 3147a8e76a3SSteven Rostedt return (void *)&event->array[1]; 3157a8e76a3SSteven Rostedt } 3167a8e76a3SSteven Rostedt 3177a8e76a3SSteven Rostedt /** 3187a8e76a3SSteven Rostedt * ring_buffer_event_data - return the data of the event 3197a8e76a3SSteven Rostedt * @event: the event to get the data from 3207a8e76a3SSteven Rostedt */ 3217a8e76a3SSteven Rostedt void *ring_buffer_event_data(struct ring_buffer_event *event) 3227a8e76a3SSteven Rostedt { 3237a8e76a3SSteven Rostedt return rb_event_data(event); 3247a8e76a3SSteven Rostedt } 325c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_event_data); 3267a8e76a3SSteven Rostedt 3277a8e76a3SSteven Rostedt #define for_each_buffer_cpu(buffer, cpu) \ 3289e01c1b7SRusty Russell for_each_cpu(cpu, buffer->cpumask) 3297a8e76a3SSteven Rostedt 3307a8e76a3SSteven Rostedt #define TS_SHIFT 27 3317a8e76a3SSteven Rostedt #define TS_MASK ((1ULL << TS_SHIFT) - 1) 3327a8e76a3SSteven Rostedt #define TS_DELTA_TEST (~TS_MASK) 3337a8e76a3SSteven Rostedt 33466a8cb95SSteven Rostedt /* Flag when events were overwritten */ 33566a8cb95SSteven Rostedt #define RB_MISSED_EVENTS (1 << 31) 336ff0ff84aSSteven Rostedt /* Missed count stored at end */ 337ff0ff84aSSteven Rostedt #define RB_MISSED_STORED (1 << 30) 33866a8cb95SSteven Rostedt 339abc9b56dSSteven Rostedt struct buffer_data_page { 3407a8e76a3SSteven Rostedt u64 time_stamp; /* page time stamp */ 341c3706f00SWenji Huang local_t commit; /* write committed index */ 342649508f6SJames Hogan unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */ 343abc9b56dSSteven Rostedt }; 344abc9b56dSSteven Rostedt 34577ae365eSSteven Rostedt /* 34677ae365eSSteven Rostedt * Note, the buffer_page list must be first. The buffer pages 34777ae365eSSteven Rostedt * are allocated in cache lines, which means that each buffer 34877ae365eSSteven Rostedt * page will be at the beginning of a cache line, and thus 34977ae365eSSteven Rostedt * the least significant bits will be zero. We use this to 35077ae365eSSteven Rostedt * add flags in the list struct pointers, to make the ring buffer 35177ae365eSSteven Rostedt * lockless. 35277ae365eSSteven Rostedt */ 353abc9b56dSSteven Rostedt struct buffer_page { 354778c55d4SSteven Rostedt struct list_head list; /* list of buffer pages */ 355abc9b56dSSteven Rostedt local_t write; /* index for next write */ 3566f807acdSSteven Rostedt unsigned read; /* index for next read */ 357778c55d4SSteven Rostedt local_t entries; /* entries on this page */ 358ff0ff84aSSteven Rostedt unsigned long real_end; /* real end of data */ 359abc9b56dSSteven Rostedt struct buffer_data_page *page; /* Actual data page */ 3607a8e76a3SSteven Rostedt }; 3617a8e76a3SSteven Rostedt 36277ae365eSSteven Rostedt /* 36377ae365eSSteven Rostedt * The buffer page counters, write and entries, must be reset 36477ae365eSSteven Rostedt * atomically when crossing page boundaries. To synchronize this 36577ae365eSSteven Rostedt * update, two counters are inserted into the number. One is 36677ae365eSSteven Rostedt * the actual counter for the write position or count on the page. 36777ae365eSSteven Rostedt * 36877ae365eSSteven Rostedt * The other is a counter of updaters. Before an update happens 36977ae365eSSteven Rostedt * the update partition of the counter is incremented. This will 37077ae365eSSteven Rostedt * allow the updater to update the counter atomically. 37177ae365eSSteven Rostedt * 37277ae365eSSteven Rostedt * The counter is 20 bits, and the state data is 12. 37377ae365eSSteven Rostedt */ 37477ae365eSSteven Rostedt #define RB_WRITE_MASK 0xfffff 37577ae365eSSteven Rostedt #define RB_WRITE_INTCNT (1 << 20) 37677ae365eSSteven Rostedt 377044fa782SSteven Rostedt static void rb_init_page(struct buffer_data_page *bpage) 378abc9b56dSSteven Rostedt { 379044fa782SSteven Rostedt local_set(&bpage->commit, 0); 380abc9b56dSSteven Rostedt } 381abc9b56dSSteven Rostedt 382474d32b6SSteven Rostedt /** 383474d32b6SSteven Rostedt * ring_buffer_page_len - the size of data on the page. 384474d32b6SSteven Rostedt * @page: The page to read 385474d32b6SSteven Rostedt * 386474d32b6SSteven Rostedt * Returns the amount of data on the page, including buffer page header. 387474d32b6SSteven Rostedt */ 388ef7a4a16SSteven Rostedt size_t ring_buffer_page_len(void *page) 389ef7a4a16SSteven Rostedt { 390474d32b6SSteven Rostedt return local_read(&((struct buffer_data_page *)page)->commit) 391474d32b6SSteven Rostedt + BUF_PAGE_HDR_SIZE; 392ef7a4a16SSteven Rostedt } 393ef7a4a16SSteven Rostedt 3947a8e76a3SSteven Rostedt /* 395ed56829cSSteven Rostedt * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing 396ed56829cSSteven Rostedt * this issue out. 397ed56829cSSteven Rostedt */ 39834a148bfSAndrew Morton static void free_buffer_page(struct buffer_page *bpage) 399ed56829cSSteven Rostedt { 4006ae2a076SSteven Rostedt free_page((unsigned long)bpage->page); 401e4c2ce82SSteven Rostedt kfree(bpage); 402ed56829cSSteven Rostedt } 403ed56829cSSteven Rostedt 404ed56829cSSteven Rostedt /* 4057a8e76a3SSteven Rostedt * We need to fit the time_stamp delta into 27 bits. 4067a8e76a3SSteven Rostedt */ 4077a8e76a3SSteven Rostedt static inline int test_time_stamp(u64 delta) 4087a8e76a3SSteven Rostedt { 4097a8e76a3SSteven Rostedt if (delta & TS_DELTA_TEST) 4107a8e76a3SSteven Rostedt return 1; 4117a8e76a3SSteven Rostedt return 0; 4127a8e76a3SSteven Rostedt } 4137a8e76a3SSteven Rostedt 414474d32b6SSteven Rostedt #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE) 4157a8e76a3SSteven Rostedt 416be957c44SSteven Rostedt /* Max payload is BUF_PAGE_SIZE - header (8bytes) */ 417be957c44SSteven Rostedt #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2)) 418be957c44SSteven Rostedt 419d1b182a8SSteven Rostedt int ring_buffer_print_page_header(struct trace_seq *s) 420d1b182a8SSteven Rostedt { 421d1b182a8SSteven Rostedt struct buffer_data_page field; 422d1b182a8SSteven Rostedt int ret; 423d1b182a8SSteven Rostedt 424d1b182a8SSteven Rostedt ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t" 42526a50744STom Zanussi "offset:0;\tsize:%u;\tsigned:%u;\n", 42626a50744STom Zanussi (unsigned int)sizeof(field.time_stamp), 42726a50744STom Zanussi (unsigned int)is_signed_type(u64)); 428d1b182a8SSteven Rostedt 429d1b182a8SSteven Rostedt ret = trace_seq_printf(s, "\tfield: local_t commit;\t" 43026a50744STom Zanussi "offset:%u;\tsize:%u;\tsigned:%u;\n", 431d1b182a8SSteven Rostedt (unsigned int)offsetof(typeof(field), commit), 43226a50744STom Zanussi (unsigned int)sizeof(field.commit), 43326a50744STom Zanussi (unsigned int)is_signed_type(long)); 434d1b182a8SSteven Rostedt 43566a8cb95SSteven Rostedt ret = trace_seq_printf(s, "\tfield: int overwrite;\t" 43666a8cb95SSteven Rostedt "offset:%u;\tsize:%u;\tsigned:%u;\n", 43766a8cb95SSteven Rostedt (unsigned int)offsetof(typeof(field), commit), 43866a8cb95SSteven Rostedt 1, 43966a8cb95SSteven Rostedt (unsigned int)is_signed_type(long)); 44066a8cb95SSteven Rostedt 441d1b182a8SSteven Rostedt ret = trace_seq_printf(s, "\tfield: char data;\t" 44226a50744STom Zanussi "offset:%u;\tsize:%u;\tsigned:%u;\n", 443d1b182a8SSteven Rostedt (unsigned int)offsetof(typeof(field), data), 44426a50744STom Zanussi (unsigned int)BUF_PAGE_SIZE, 44526a50744STom Zanussi (unsigned int)is_signed_type(char)); 446d1b182a8SSteven Rostedt 447d1b182a8SSteven Rostedt return ret; 448d1b182a8SSteven Rostedt } 449d1b182a8SSteven Rostedt 45015693458SSteven Rostedt (Red Hat) struct rb_irq_work { 45115693458SSteven Rostedt (Red Hat) struct irq_work work; 45215693458SSteven Rostedt (Red Hat) wait_queue_head_t waiters; 45315693458SSteven Rostedt (Red Hat) bool waiters_pending; 45415693458SSteven Rostedt (Red Hat) }; 45515693458SSteven Rostedt (Red Hat) 4567a8e76a3SSteven Rostedt /* 4577a8e76a3SSteven Rostedt * head_page == tail_page && head == tail then buffer is empty. 4587a8e76a3SSteven Rostedt */ 4597a8e76a3SSteven Rostedt struct ring_buffer_per_cpu { 4607a8e76a3SSteven Rostedt int cpu; 461985023deSRichard Kennedy atomic_t record_disabled; 4627a8e76a3SSteven Rostedt struct ring_buffer *buffer; 4635389f6faSThomas Gleixner raw_spinlock_t reader_lock; /* serialize readers */ 464445c8951SThomas Gleixner arch_spinlock_t lock; 4657a8e76a3SSteven Rostedt struct lock_class_key lock_key; 466438ced17SVaibhav Nagarnaik unsigned int nr_pages; 4673adc54faSSteven Rostedt struct list_head *pages; 4686f807acdSSteven Rostedt struct buffer_page *head_page; /* read from head */ 4696f807acdSSteven Rostedt struct buffer_page *tail_page; /* write to tail */ 470c3706f00SWenji Huang struct buffer_page *commit_page; /* committed pages */ 471d769041fSSteven Rostedt struct buffer_page *reader_page; 47266a8cb95SSteven Rostedt unsigned long lost_events; 47366a8cb95SSteven Rostedt unsigned long last_overrun; 474c64e148aSVaibhav Nagarnaik local_t entries_bytes; 475e4906effSSteven Rostedt local_t entries; 476884bfe89SSlava Pestov local_t overrun; 477884bfe89SSlava Pestov local_t commit_overrun; 478884bfe89SSlava Pestov local_t dropped_events; 479fa743953SSteven Rostedt local_t committing; 480fa743953SSteven Rostedt local_t commits; 48177ae365eSSteven Rostedt unsigned long read; 482c64e148aSVaibhav Nagarnaik unsigned long read_bytes; 4837a8e76a3SSteven Rostedt u64 write_stamp; 4847a8e76a3SSteven Rostedt u64 read_stamp; 485438ced17SVaibhav Nagarnaik /* ring buffer pages to update, > 0 to add, < 0 to remove */ 486438ced17SVaibhav Nagarnaik int nr_pages_to_update; 487438ced17SVaibhav Nagarnaik struct list_head new_pages; /* new pages to add */ 48883f40318SVaibhav Nagarnaik struct work_struct update_pages_work; 48905fdd70dSVaibhav Nagarnaik struct completion update_done; 49015693458SSteven Rostedt (Red Hat) 49115693458SSteven Rostedt (Red Hat) struct rb_irq_work irq_work; 4927a8e76a3SSteven Rostedt }; 4937a8e76a3SSteven Rostedt 4947a8e76a3SSteven Rostedt struct ring_buffer { 4957a8e76a3SSteven Rostedt unsigned flags; 4967a8e76a3SSteven Rostedt int cpus; 4977a8e76a3SSteven Rostedt atomic_t record_disabled; 49883f40318SVaibhav Nagarnaik atomic_t resize_disabled; 49900f62f61SArnaldo Carvalho de Melo cpumask_var_t cpumask; 5007a8e76a3SSteven Rostedt 5011f8a6a10SPeter Zijlstra struct lock_class_key *reader_lock_key; 5021f8a6a10SPeter Zijlstra 5037a8e76a3SSteven Rostedt struct mutex mutex; 5047a8e76a3SSteven Rostedt 5057a8e76a3SSteven Rostedt struct ring_buffer_per_cpu **buffers; 506554f786eSSteven Rostedt 50759222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU 508554f786eSSteven Rostedt struct notifier_block cpu_notify; 509554f786eSSteven Rostedt #endif 51037886f6aSSteven Rostedt u64 (*clock)(void); 51115693458SSteven Rostedt (Red Hat) 51215693458SSteven Rostedt (Red Hat) struct rb_irq_work irq_work; 5137a8e76a3SSteven Rostedt }; 5147a8e76a3SSteven Rostedt 5157a8e76a3SSteven Rostedt struct ring_buffer_iter { 5167a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 5177a8e76a3SSteven Rostedt unsigned long head; 5187a8e76a3SSteven Rostedt struct buffer_page *head_page; 519492a74f4SSteven Rostedt struct buffer_page *cache_reader_page; 520492a74f4SSteven Rostedt unsigned long cache_read; 5217a8e76a3SSteven Rostedt u64 read_stamp; 5227a8e76a3SSteven Rostedt }; 5237a8e76a3SSteven Rostedt 52415693458SSteven Rostedt (Red Hat) /* 52515693458SSteven Rostedt (Red Hat) * rb_wake_up_waiters - wake up tasks waiting for ring buffer input 52615693458SSteven Rostedt (Red Hat) * 52715693458SSteven Rostedt (Red Hat) * Schedules a delayed work to wake up any task that is blocked on the 52815693458SSteven Rostedt (Red Hat) * ring buffer waiters queue. 52915693458SSteven Rostedt (Red Hat) */ 53015693458SSteven Rostedt (Red Hat) static void rb_wake_up_waiters(struct irq_work *work) 53115693458SSteven Rostedt (Red Hat) { 53215693458SSteven Rostedt (Red Hat) struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work); 53315693458SSteven Rostedt (Red Hat) 53415693458SSteven Rostedt (Red Hat) wake_up_all(&rbwork->waiters); 53515693458SSteven Rostedt (Red Hat) } 53615693458SSteven Rostedt (Red Hat) 53715693458SSteven Rostedt (Red Hat) /** 53815693458SSteven Rostedt (Red Hat) * ring_buffer_wait - wait for input to the ring buffer 53915693458SSteven Rostedt (Red Hat) * @buffer: buffer to wait on 54015693458SSteven Rostedt (Red Hat) * @cpu: the cpu buffer to wait on 54115693458SSteven Rostedt (Red Hat) * 54215693458SSteven Rostedt (Red Hat) * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon 54315693458SSteven Rostedt (Red Hat) * as data is added to any of the @buffer's cpu buffers. Otherwise 54415693458SSteven Rostedt (Red Hat) * it will wait for data to be added to a specific cpu buffer. 54515693458SSteven Rostedt (Red Hat) */ 54615693458SSteven Rostedt (Red Hat) void ring_buffer_wait(struct ring_buffer *buffer, int cpu) 54715693458SSteven Rostedt (Red Hat) { 54815693458SSteven Rostedt (Red Hat) struct ring_buffer_per_cpu *cpu_buffer; 54915693458SSteven Rostedt (Red Hat) DEFINE_WAIT(wait); 55015693458SSteven Rostedt (Red Hat) struct rb_irq_work *work; 55115693458SSteven Rostedt (Red Hat) 55215693458SSteven Rostedt (Red Hat) /* 55315693458SSteven Rostedt (Red Hat) * Depending on what the caller is waiting for, either any 55415693458SSteven Rostedt (Red Hat) * data in any cpu buffer, or a specific buffer, put the 55515693458SSteven Rostedt (Red Hat) * caller on the appropriate wait queue. 55615693458SSteven Rostedt (Red Hat) */ 55715693458SSteven Rostedt (Red Hat) if (cpu == RING_BUFFER_ALL_CPUS) 55815693458SSteven Rostedt (Red Hat) work = &buffer->irq_work; 55915693458SSteven Rostedt (Red Hat) else { 56015693458SSteven Rostedt (Red Hat) cpu_buffer = buffer->buffers[cpu]; 56115693458SSteven Rostedt (Red Hat) work = &cpu_buffer->irq_work; 56215693458SSteven Rostedt (Red Hat) } 56315693458SSteven Rostedt (Red Hat) 56415693458SSteven Rostedt (Red Hat) 56515693458SSteven Rostedt (Red Hat) prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); 56615693458SSteven Rostedt (Red Hat) 56715693458SSteven Rostedt (Red Hat) /* 56815693458SSteven Rostedt (Red Hat) * The events can happen in critical sections where 56915693458SSteven Rostedt (Red Hat) * checking a work queue can cause deadlocks. 57015693458SSteven Rostedt (Red Hat) * After adding a task to the queue, this flag is set 57115693458SSteven Rostedt (Red Hat) * only to notify events to try to wake up the queue 57215693458SSteven Rostedt (Red Hat) * using irq_work. 57315693458SSteven Rostedt (Red Hat) * 57415693458SSteven Rostedt (Red Hat) * We don't clear it even if the buffer is no longer 57515693458SSteven Rostedt (Red Hat) * empty. The flag only causes the next event to run 57615693458SSteven Rostedt (Red Hat) * irq_work to do the work queue wake up. The worse 57715693458SSteven Rostedt (Red Hat) * that can happen if we race with !trace_empty() is that 57815693458SSteven Rostedt (Red Hat) * an event will cause an irq_work to try to wake up 57915693458SSteven Rostedt (Red Hat) * an empty queue. 58015693458SSteven Rostedt (Red Hat) * 58115693458SSteven Rostedt (Red Hat) * There's no reason to protect this flag either, as 58215693458SSteven Rostedt (Red Hat) * the work queue and irq_work logic will do the necessary 58315693458SSteven Rostedt (Red Hat) * synchronization for the wake ups. The only thing 58415693458SSteven Rostedt (Red Hat) * that is necessary is that the wake up happens after 58515693458SSteven Rostedt (Red Hat) * a task has been queued. It's OK for spurious wake ups. 58615693458SSteven Rostedt (Red Hat) */ 58715693458SSteven Rostedt (Red Hat) work->waiters_pending = true; 58815693458SSteven Rostedt (Red Hat) 58915693458SSteven Rostedt (Red Hat) if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) || 59015693458SSteven Rostedt (Red Hat) (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu))) 59115693458SSteven Rostedt (Red Hat) schedule(); 59215693458SSteven Rostedt (Red Hat) 59315693458SSteven Rostedt (Red Hat) finish_wait(&work->waiters, &wait); 59415693458SSteven Rostedt (Red Hat) } 59515693458SSteven Rostedt (Red Hat) 59615693458SSteven Rostedt (Red Hat) /** 59715693458SSteven Rostedt (Red Hat) * ring_buffer_poll_wait - poll on buffer input 59815693458SSteven Rostedt (Red Hat) * @buffer: buffer to wait on 59915693458SSteven Rostedt (Red Hat) * @cpu: the cpu buffer to wait on 60015693458SSteven Rostedt (Red Hat) * @filp: the file descriptor 60115693458SSteven Rostedt (Red Hat) * @poll_table: The poll descriptor 60215693458SSteven Rostedt (Red Hat) * 60315693458SSteven Rostedt (Red Hat) * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon 60415693458SSteven Rostedt (Red Hat) * as data is added to any of the @buffer's cpu buffers. Otherwise 60515693458SSteven Rostedt (Red Hat) * it will wait for data to be added to a specific cpu buffer. 60615693458SSteven Rostedt (Red Hat) * 60715693458SSteven Rostedt (Red Hat) * Returns POLLIN | POLLRDNORM if data exists in the buffers, 60815693458SSteven Rostedt (Red Hat) * zero otherwise. 60915693458SSteven Rostedt (Red Hat) */ 61015693458SSteven Rostedt (Red Hat) int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, 61115693458SSteven Rostedt (Red Hat) struct file *filp, poll_table *poll_table) 61215693458SSteven Rostedt (Red Hat) { 61315693458SSteven Rostedt (Red Hat) struct ring_buffer_per_cpu *cpu_buffer; 61415693458SSteven Rostedt (Red Hat) struct rb_irq_work *work; 61515693458SSteven Rostedt (Red Hat) 61615693458SSteven Rostedt (Red Hat) if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || 61715693458SSteven Rostedt (Red Hat) (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) 61815693458SSteven Rostedt (Red Hat) return POLLIN | POLLRDNORM; 61915693458SSteven Rostedt (Red Hat) 62015693458SSteven Rostedt (Red Hat) if (cpu == RING_BUFFER_ALL_CPUS) 62115693458SSteven Rostedt (Red Hat) work = &buffer->irq_work; 62215693458SSteven Rostedt (Red Hat) else { 6236721cb60SSteven Rostedt (Red Hat) if (!cpumask_test_cpu(cpu, buffer->cpumask)) 6246721cb60SSteven Rostedt (Red Hat) return -EINVAL; 6256721cb60SSteven Rostedt (Red Hat) 62615693458SSteven Rostedt (Red Hat) cpu_buffer = buffer->buffers[cpu]; 62715693458SSteven Rostedt (Red Hat) work = &cpu_buffer->irq_work; 62815693458SSteven Rostedt (Red Hat) } 62915693458SSteven Rostedt (Red Hat) 63015693458SSteven Rostedt (Red Hat) work->waiters_pending = true; 63115693458SSteven Rostedt (Red Hat) poll_wait(filp, &work->waiters, poll_table); 63215693458SSteven Rostedt (Red Hat) 63315693458SSteven Rostedt (Red Hat) if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || 63415693458SSteven Rostedt (Red Hat) (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) 63515693458SSteven Rostedt (Red Hat) return POLLIN | POLLRDNORM; 63615693458SSteven Rostedt (Red Hat) return 0; 63715693458SSteven Rostedt (Red Hat) } 63815693458SSteven Rostedt (Red Hat) 639f536aafcSSteven Rostedt /* buffer may be either ring_buffer or ring_buffer_per_cpu */ 640077c5407SSteven Rostedt #define RB_WARN_ON(b, cond) \ 6413e89c7bbSSteven Rostedt ({ \ 6423e89c7bbSSteven Rostedt int _____ret = unlikely(cond); \ 6433e89c7bbSSteven Rostedt if (_____ret) { \ 644077c5407SSteven Rostedt if (__same_type(*(b), struct ring_buffer_per_cpu)) { \ 645077c5407SSteven Rostedt struct ring_buffer_per_cpu *__b = \ 646077c5407SSteven Rostedt (void *)b; \ 647077c5407SSteven Rostedt atomic_inc(&__b->buffer->record_disabled); \ 648077c5407SSteven Rostedt } else \ 649077c5407SSteven Rostedt atomic_inc(&b->record_disabled); \ 650bf41a158SSteven Rostedt WARN_ON(1); \ 651bf41a158SSteven Rostedt } \ 6523e89c7bbSSteven Rostedt _____ret; \ 6533e89c7bbSSteven Rostedt }) 654f536aafcSSteven Rostedt 65537886f6aSSteven Rostedt /* Up this if you want to test the TIME_EXTENTS and normalization */ 65637886f6aSSteven Rostedt #define DEBUG_SHIFT 0 65737886f6aSSteven Rostedt 6586d3f1e12SJiri Olsa static inline u64 rb_time_stamp(struct ring_buffer *buffer) 65988eb0125SSteven Rostedt { 66088eb0125SSteven Rostedt /* shift to debug/test normalization and TIME_EXTENTS */ 66188eb0125SSteven Rostedt return buffer->clock() << DEBUG_SHIFT; 66288eb0125SSteven Rostedt } 66388eb0125SSteven Rostedt 66437886f6aSSteven Rostedt u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu) 66537886f6aSSteven Rostedt { 66637886f6aSSteven Rostedt u64 time; 66737886f6aSSteven Rostedt 66837886f6aSSteven Rostedt preempt_disable_notrace(); 6696d3f1e12SJiri Olsa time = rb_time_stamp(buffer); 67037886f6aSSteven Rostedt preempt_enable_no_resched_notrace(); 67137886f6aSSteven Rostedt 67237886f6aSSteven Rostedt return time; 67337886f6aSSteven Rostedt } 67437886f6aSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_time_stamp); 67537886f6aSSteven Rostedt 67637886f6aSSteven Rostedt void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, 67737886f6aSSteven Rostedt int cpu, u64 *ts) 67837886f6aSSteven Rostedt { 67937886f6aSSteven Rostedt /* Just stupid testing the normalize function and deltas */ 68037886f6aSSteven Rostedt *ts >>= DEBUG_SHIFT; 68137886f6aSSteven Rostedt } 68237886f6aSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp); 68337886f6aSSteven Rostedt 68477ae365eSSteven Rostedt /* 68577ae365eSSteven Rostedt * Making the ring buffer lockless makes things tricky. 68677ae365eSSteven Rostedt * Although writes only happen on the CPU that they are on, 68777ae365eSSteven Rostedt * and they only need to worry about interrupts. Reads can 68877ae365eSSteven Rostedt * happen on any CPU. 68977ae365eSSteven Rostedt * 69077ae365eSSteven Rostedt * The reader page is always off the ring buffer, but when the 69177ae365eSSteven Rostedt * reader finishes with a page, it needs to swap its page with 69277ae365eSSteven Rostedt * a new one from the buffer. The reader needs to take from 69377ae365eSSteven Rostedt * the head (writes go to the tail). But if a writer is in overwrite 69477ae365eSSteven Rostedt * mode and wraps, it must push the head page forward. 69577ae365eSSteven Rostedt * 69677ae365eSSteven Rostedt * Here lies the problem. 69777ae365eSSteven Rostedt * 69877ae365eSSteven Rostedt * The reader must be careful to replace only the head page, and 69977ae365eSSteven Rostedt * not another one. As described at the top of the file in the 70077ae365eSSteven Rostedt * ASCII art, the reader sets its old page to point to the next 70177ae365eSSteven Rostedt * page after head. It then sets the page after head to point to 70277ae365eSSteven Rostedt * the old reader page. But if the writer moves the head page 70377ae365eSSteven Rostedt * during this operation, the reader could end up with the tail. 70477ae365eSSteven Rostedt * 70577ae365eSSteven Rostedt * We use cmpxchg to help prevent this race. We also do something 70677ae365eSSteven Rostedt * special with the page before head. We set the LSB to 1. 70777ae365eSSteven Rostedt * 70877ae365eSSteven Rostedt * When the writer must push the page forward, it will clear the 70977ae365eSSteven Rostedt * bit that points to the head page, move the head, and then set 71077ae365eSSteven Rostedt * the bit that points to the new head page. 71177ae365eSSteven Rostedt * 71277ae365eSSteven Rostedt * We also don't want an interrupt coming in and moving the head 71377ae365eSSteven Rostedt * page on another writer. Thus we use the second LSB to catch 71477ae365eSSteven Rostedt * that too. Thus: 71577ae365eSSteven Rostedt * 71677ae365eSSteven Rostedt * head->list->prev->next bit 1 bit 0 71777ae365eSSteven Rostedt * ------- ------- 71877ae365eSSteven Rostedt * Normal page 0 0 71977ae365eSSteven Rostedt * Points to head page 0 1 72077ae365eSSteven Rostedt * New head page 1 0 72177ae365eSSteven Rostedt * 72277ae365eSSteven Rostedt * Note we can not trust the prev pointer of the head page, because: 72377ae365eSSteven Rostedt * 72477ae365eSSteven Rostedt * +----+ +-----+ +-----+ 72577ae365eSSteven Rostedt * | |------>| T |---X--->| N | 72677ae365eSSteven Rostedt * | |<------| | | | 72777ae365eSSteven Rostedt * +----+ +-----+ +-----+ 72877ae365eSSteven Rostedt * ^ ^ | 72977ae365eSSteven Rostedt * | +-----+ | | 73077ae365eSSteven Rostedt * +----------| R |----------+ | 73177ae365eSSteven Rostedt * | |<-----------+ 73277ae365eSSteven Rostedt * +-----+ 73377ae365eSSteven Rostedt * 73477ae365eSSteven Rostedt * Key: ---X--> HEAD flag set in pointer 73577ae365eSSteven Rostedt * T Tail page 73677ae365eSSteven Rostedt * R Reader page 73777ae365eSSteven Rostedt * N Next page 73877ae365eSSteven Rostedt * 73977ae365eSSteven Rostedt * (see __rb_reserve_next() to see where this happens) 74077ae365eSSteven Rostedt * 74177ae365eSSteven Rostedt * What the above shows is that the reader just swapped out 74277ae365eSSteven Rostedt * the reader page with a page in the buffer, but before it 74377ae365eSSteven Rostedt * could make the new header point back to the new page added 74477ae365eSSteven Rostedt * it was preempted by a writer. The writer moved forward onto 74577ae365eSSteven Rostedt * the new page added by the reader and is about to move forward 74677ae365eSSteven Rostedt * again. 74777ae365eSSteven Rostedt * 74877ae365eSSteven Rostedt * You can see, it is legitimate for the previous pointer of 74977ae365eSSteven Rostedt * the head (or any page) not to point back to itself. But only 75077ae365eSSteven Rostedt * temporarially. 75177ae365eSSteven Rostedt */ 75277ae365eSSteven Rostedt 75377ae365eSSteven Rostedt #define RB_PAGE_NORMAL 0UL 75477ae365eSSteven Rostedt #define RB_PAGE_HEAD 1UL 75577ae365eSSteven Rostedt #define RB_PAGE_UPDATE 2UL 75677ae365eSSteven Rostedt 75777ae365eSSteven Rostedt 75877ae365eSSteven Rostedt #define RB_FLAG_MASK 3UL 75977ae365eSSteven Rostedt 76077ae365eSSteven Rostedt /* PAGE_MOVED is not part of the mask */ 76177ae365eSSteven Rostedt #define RB_PAGE_MOVED 4UL 76277ae365eSSteven Rostedt 76377ae365eSSteven Rostedt /* 76477ae365eSSteven Rostedt * rb_list_head - remove any bit 76577ae365eSSteven Rostedt */ 76677ae365eSSteven Rostedt static struct list_head *rb_list_head(struct list_head *list) 76777ae365eSSteven Rostedt { 76877ae365eSSteven Rostedt unsigned long val = (unsigned long)list; 76977ae365eSSteven Rostedt 77077ae365eSSteven Rostedt return (struct list_head *)(val & ~RB_FLAG_MASK); 77177ae365eSSteven Rostedt } 77277ae365eSSteven Rostedt 77377ae365eSSteven Rostedt /* 7746d3f1e12SJiri Olsa * rb_is_head_page - test if the given page is the head page 77577ae365eSSteven Rostedt * 77677ae365eSSteven Rostedt * Because the reader may move the head_page pointer, we can 77777ae365eSSteven Rostedt * not trust what the head page is (it may be pointing to 77877ae365eSSteven Rostedt * the reader page). But if the next page is a header page, 77977ae365eSSteven Rostedt * its flags will be non zero. 78077ae365eSSteven Rostedt */ 78142b16b3fSJesper Juhl static inline int 78277ae365eSSteven Rostedt rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer, 78377ae365eSSteven Rostedt struct buffer_page *page, struct list_head *list) 78477ae365eSSteven Rostedt { 78577ae365eSSteven Rostedt unsigned long val; 78677ae365eSSteven Rostedt 78777ae365eSSteven Rostedt val = (unsigned long)list->next; 78877ae365eSSteven Rostedt 78977ae365eSSteven Rostedt if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list) 79077ae365eSSteven Rostedt return RB_PAGE_MOVED; 79177ae365eSSteven Rostedt 79277ae365eSSteven Rostedt return val & RB_FLAG_MASK; 79377ae365eSSteven Rostedt } 79477ae365eSSteven Rostedt 79577ae365eSSteven Rostedt /* 79677ae365eSSteven Rostedt * rb_is_reader_page 79777ae365eSSteven Rostedt * 79877ae365eSSteven Rostedt * The unique thing about the reader page, is that, if the 79977ae365eSSteven Rostedt * writer is ever on it, the previous pointer never points 80077ae365eSSteven Rostedt * back to the reader page. 80177ae365eSSteven Rostedt */ 80277ae365eSSteven Rostedt static int rb_is_reader_page(struct buffer_page *page) 80377ae365eSSteven Rostedt { 80477ae365eSSteven Rostedt struct list_head *list = page->list.prev; 80577ae365eSSteven Rostedt 80677ae365eSSteven Rostedt return rb_list_head(list->next) != &page->list; 80777ae365eSSteven Rostedt } 80877ae365eSSteven Rostedt 80977ae365eSSteven Rostedt /* 81077ae365eSSteven Rostedt * rb_set_list_to_head - set a list_head to be pointing to head. 81177ae365eSSteven Rostedt */ 81277ae365eSSteven Rostedt static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer, 81377ae365eSSteven Rostedt struct list_head *list) 81477ae365eSSteven Rostedt { 81577ae365eSSteven Rostedt unsigned long *ptr; 81677ae365eSSteven Rostedt 81777ae365eSSteven Rostedt ptr = (unsigned long *)&list->next; 81877ae365eSSteven Rostedt *ptr |= RB_PAGE_HEAD; 81977ae365eSSteven Rostedt *ptr &= ~RB_PAGE_UPDATE; 82077ae365eSSteven Rostedt } 82177ae365eSSteven Rostedt 82277ae365eSSteven Rostedt /* 82377ae365eSSteven Rostedt * rb_head_page_activate - sets up head page 82477ae365eSSteven Rostedt */ 82577ae365eSSteven Rostedt static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) 82677ae365eSSteven Rostedt { 82777ae365eSSteven Rostedt struct buffer_page *head; 82877ae365eSSteven Rostedt 82977ae365eSSteven Rostedt head = cpu_buffer->head_page; 83077ae365eSSteven Rostedt if (!head) 83177ae365eSSteven Rostedt return; 83277ae365eSSteven Rostedt 83377ae365eSSteven Rostedt /* 83477ae365eSSteven Rostedt * Set the previous list pointer to have the HEAD flag. 83577ae365eSSteven Rostedt */ 83677ae365eSSteven Rostedt rb_set_list_to_head(cpu_buffer, head->list.prev); 83777ae365eSSteven Rostedt } 83877ae365eSSteven Rostedt 83977ae365eSSteven Rostedt static void rb_list_head_clear(struct list_head *list) 84077ae365eSSteven Rostedt { 84177ae365eSSteven Rostedt unsigned long *ptr = (unsigned long *)&list->next; 84277ae365eSSteven Rostedt 84377ae365eSSteven Rostedt *ptr &= ~RB_FLAG_MASK; 84477ae365eSSteven Rostedt } 84577ae365eSSteven Rostedt 84677ae365eSSteven Rostedt /* 84777ae365eSSteven Rostedt * rb_head_page_dactivate - clears head page ptr (for free list) 84877ae365eSSteven Rostedt */ 84977ae365eSSteven Rostedt static void 85077ae365eSSteven Rostedt rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) 85177ae365eSSteven Rostedt { 85277ae365eSSteven Rostedt struct list_head *hd; 85377ae365eSSteven Rostedt 85477ae365eSSteven Rostedt /* Go through the whole list and clear any pointers found. */ 85577ae365eSSteven Rostedt rb_list_head_clear(cpu_buffer->pages); 85677ae365eSSteven Rostedt 85777ae365eSSteven Rostedt list_for_each(hd, cpu_buffer->pages) 85877ae365eSSteven Rostedt rb_list_head_clear(hd); 85977ae365eSSteven Rostedt } 86077ae365eSSteven Rostedt 86177ae365eSSteven Rostedt static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, 86277ae365eSSteven Rostedt struct buffer_page *head, 86377ae365eSSteven Rostedt struct buffer_page *prev, 86477ae365eSSteven Rostedt int old_flag, int new_flag) 86577ae365eSSteven Rostedt { 86677ae365eSSteven Rostedt struct list_head *list; 86777ae365eSSteven Rostedt unsigned long val = (unsigned long)&head->list; 86877ae365eSSteven Rostedt unsigned long ret; 86977ae365eSSteven Rostedt 87077ae365eSSteven Rostedt list = &prev->list; 87177ae365eSSteven Rostedt 87277ae365eSSteven Rostedt val &= ~RB_FLAG_MASK; 87377ae365eSSteven Rostedt 87408a40816SSteven Rostedt ret = cmpxchg((unsigned long *)&list->next, 87577ae365eSSteven Rostedt val | old_flag, val | new_flag); 87677ae365eSSteven Rostedt 87777ae365eSSteven Rostedt /* check if the reader took the page */ 87877ae365eSSteven Rostedt if ((ret & ~RB_FLAG_MASK) != val) 87977ae365eSSteven Rostedt return RB_PAGE_MOVED; 88077ae365eSSteven Rostedt 88177ae365eSSteven Rostedt return ret & RB_FLAG_MASK; 88277ae365eSSteven Rostedt } 88377ae365eSSteven Rostedt 88477ae365eSSteven Rostedt static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, 88577ae365eSSteven Rostedt struct buffer_page *head, 88677ae365eSSteven Rostedt struct buffer_page *prev, 88777ae365eSSteven Rostedt int old_flag) 88877ae365eSSteven Rostedt { 88977ae365eSSteven Rostedt return rb_head_page_set(cpu_buffer, head, prev, 89077ae365eSSteven Rostedt old_flag, RB_PAGE_UPDATE); 89177ae365eSSteven Rostedt } 89277ae365eSSteven Rostedt 89377ae365eSSteven Rostedt static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, 89477ae365eSSteven Rostedt struct buffer_page *head, 89577ae365eSSteven Rostedt struct buffer_page *prev, 89677ae365eSSteven Rostedt int old_flag) 89777ae365eSSteven Rostedt { 89877ae365eSSteven Rostedt return rb_head_page_set(cpu_buffer, head, prev, 89977ae365eSSteven Rostedt old_flag, RB_PAGE_HEAD); 90077ae365eSSteven Rostedt } 90177ae365eSSteven Rostedt 90277ae365eSSteven Rostedt static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, 90377ae365eSSteven Rostedt struct buffer_page *head, 90477ae365eSSteven Rostedt struct buffer_page *prev, 90577ae365eSSteven Rostedt int old_flag) 90677ae365eSSteven Rostedt { 90777ae365eSSteven Rostedt return rb_head_page_set(cpu_buffer, head, prev, 90877ae365eSSteven Rostedt old_flag, RB_PAGE_NORMAL); 90977ae365eSSteven Rostedt } 91077ae365eSSteven Rostedt 91177ae365eSSteven Rostedt static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, 91277ae365eSSteven Rostedt struct buffer_page **bpage) 91377ae365eSSteven Rostedt { 91477ae365eSSteven Rostedt struct list_head *p = rb_list_head((*bpage)->list.next); 91577ae365eSSteven Rostedt 91677ae365eSSteven Rostedt *bpage = list_entry(p, struct buffer_page, list); 91777ae365eSSteven Rostedt } 91877ae365eSSteven Rostedt 91977ae365eSSteven Rostedt static struct buffer_page * 92077ae365eSSteven Rostedt rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) 92177ae365eSSteven Rostedt { 92277ae365eSSteven Rostedt struct buffer_page *head; 92377ae365eSSteven Rostedt struct buffer_page *page; 92477ae365eSSteven Rostedt struct list_head *list; 92577ae365eSSteven Rostedt int i; 92677ae365eSSteven Rostedt 92777ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) 92877ae365eSSteven Rostedt return NULL; 92977ae365eSSteven Rostedt 93077ae365eSSteven Rostedt /* sanity check */ 93177ae365eSSteven Rostedt list = cpu_buffer->pages; 93277ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) 93377ae365eSSteven Rostedt return NULL; 93477ae365eSSteven Rostedt 93577ae365eSSteven Rostedt page = head = cpu_buffer->head_page; 93677ae365eSSteven Rostedt /* 93777ae365eSSteven Rostedt * It is possible that the writer moves the header behind 93877ae365eSSteven Rostedt * where we started, and we miss in one loop. 93977ae365eSSteven Rostedt * A second loop should grab the header, but we'll do 94077ae365eSSteven Rostedt * three loops just because I'm paranoid. 94177ae365eSSteven Rostedt */ 94277ae365eSSteven Rostedt for (i = 0; i < 3; i++) { 94377ae365eSSteven Rostedt do { 94477ae365eSSteven Rostedt if (rb_is_head_page(cpu_buffer, page, page->list.prev)) { 94577ae365eSSteven Rostedt cpu_buffer->head_page = page; 94677ae365eSSteven Rostedt return page; 94777ae365eSSteven Rostedt } 94877ae365eSSteven Rostedt rb_inc_page(cpu_buffer, &page); 94977ae365eSSteven Rostedt } while (page != head); 95077ae365eSSteven Rostedt } 95177ae365eSSteven Rostedt 95277ae365eSSteven Rostedt RB_WARN_ON(cpu_buffer, 1); 95377ae365eSSteven Rostedt 95477ae365eSSteven Rostedt return NULL; 95577ae365eSSteven Rostedt } 95677ae365eSSteven Rostedt 95777ae365eSSteven Rostedt static int rb_head_page_replace(struct buffer_page *old, 95877ae365eSSteven Rostedt struct buffer_page *new) 95977ae365eSSteven Rostedt { 96077ae365eSSteven Rostedt unsigned long *ptr = (unsigned long *)&old->list.prev->next; 96177ae365eSSteven Rostedt unsigned long val; 96277ae365eSSteven Rostedt unsigned long ret; 96377ae365eSSteven Rostedt 96477ae365eSSteven Rostedt val = *ptr & ~RB_FLAG_MASK; 96577ae365eSSteven Rostedt val |= RB_PAGE_HEAD; 96677ae365eSSteven Rostedt 96708a40816SSteven Rostedt ret = cmpxchg(ptr, val, (unsigned long)&new->list); 96877ae365eSSteven Rostedt 96977ae365eSSteven Rostedt return ret == val; 97077ae365eSSteven Rostedt } 97177ae365eSSteven Rostedt 97277ae365eSSteven Rostedt /* 97377ae365eSSteven Rostedt * rb_tail_page_update - move the tail page forward 97477ae365eSSteven Rostedt * 97577ae365eSSteven Rostedt * Returns 1 if moved tail page, 0 if someone else did. 97677ae365eSSteven Rostedt */ 97777ae365eSSteven Rostedt static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, 97877ae365eSSteven Rostedt struct buffer_page *tail_page, 97977ae365eSSteven Rostedt struct buffer_page *next_page) 98077ae365eSSteven Rostedt { 98177ae365eSSteven Rostedt struct buffer_page *old_tail; 98277ae365eSSteven Rostedt unsigned long old_entries; 98377ae365eSSteven Rostedt unsigned long old_write; 98477ae365eSSteven Rostedt int ret = 0; 98577ae365eSSteven Rostedt 98677ae365eSSteven Rostedt /* 98777ae365eSSteven Rostedt * The tail page now needs to be moved forward. 98877ae365eSSteven Rostedt * 98977ae365eSSteven Rostedt * We need to reset the tail page, but without messing 99077ae365eSSteven Rostedt * with possible erasing of data brought in by interrupts 99177ae365eSSteven Rostedt * that have moved the tail page and are currently on it. 99277ae365eSSteven Rostedt * 99377ae365eSSteven Rostedt * We add a counter to the write field to denote this. 99477ae365eSSteven Rostedt */ 99577ae365eSSteven Rostedt old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write); 99677ae365eSSteven Rostedt old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); 99777ae365eSSteven Rostedt 99877ae365eSSteven Rostedt /* 99977ae365eSSteven Rostedt * Just make sure we have seen our old_write and synchronize 100077ae365eSSteven Rostedt * with any interrupts that come in. 100177ae365eSSteven Rostedt */ 100277ae365eSSteven Rostedt barrier(); 100377ae365eSSteven Rostedt 100477ae365eSSteven Rostedt /* 100577ae365eSSteven Rostedt * If the tail page is still the same as what we think 100677ae365eSSteven Rostedt * it is, then it is up to us to update the tail 100777ae365eSSteven Rostedt * pointer. 100877ae365eSSteven Rostedt */ 100977ae365eSSteven Rostedt if (tail_page == cpu_buffer->tail_page) { 101077ae365eSSteven Rostedt /* Zero the write counter */ 101177ae365eSSteven Rostedt unsigned long val = old_write & ~RB_WRITE_MASK; 101277ae365eSSteven Rostedt unsigned long eval = old_entries & ~RB_WRITE_MASK; 101377ae365eSSteven Rostedt 101477ae365eSSteven Rostedt /* 101577ae365eSSteven Rostedt * This will only succeed if an interrupt did 101677ae365eSSteven Rostedt * not come in and change it. In which case, we 101777ae365eSSteven Rostedt * do not want to modify it. 1018da706d8bSLai Jiangshan * 1019da706d8bSLai Jiangshan * We add (void) to let the compiler know that we do not care 1020da706d8bSLai Jiangshan * about the return value of these functions. We use the 1021da706d8bSLai Jiangshan * cmpxchg to only update if an interrupt did not already 1022da706d8bSLai Jiangshan * do it for us. If the cmpxchg fails, we don't care. 102377ae365eSSteven Rostedt */ 1024da706d8bSLai Jiangshan (void)local_cmpxchg(&next_page->write, old_write, val); 1025da706d8bSLai Jiangshan (void)local_cmpxchg(&next_page->entries, old_entries, eval); 102677ae365eSSteven Rostedt 102777ae365eSSteven Rostedt /* 102877ae365eSSteven Rostedt * No need to worry about races with clearing out the commit. 102977ae365eSSteven Rostedt * it only can increment when a commit takes place. But that 103077ae365eSSteven Rostedt * only happens in the outer most nested commit. 103177ae365eSSteven Rostedt */ 103277ae365eSSteven Rostedt local_set(&next_page->page->commit, 0); 103377ae365eSSteven Rostedt 103477ae365eSSteven Rostedt old_tail = cmpxchg(&cpu_buffer->tail_page, 103577ae365eSSteven Rostedt tail_page, next_page); 103677ae365eSSteven Rostedt 103777ae365eSSteven Rostedt if (old_tail == tail_page) 103877ae365eSSteven Rostedt ret = 1; 103977ae365eSSteven Rostedt } 104077ae365eSSteven Rostedt 104177ae365eSSteven Rostedt return ret; 104277ae365eSSteven Rostedt } 104377ae365eSSteven Rostedt 104477ae365eSSteven Rostedt static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, 104577ae365eSSteven Rostedt struct buffer_page *bpage) 104677ae365eSSteven Rostedt { 104777ae365eSSteven Rostedt unsigned long val = (unsigned long)bpage; 104877ae365eSSteven Rostedt 104977ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK)) 105077ae365eSSteven Rostedt return 1; 105177ae365eSSteven Rostedt 105277ae365eSSteven Rostedt return 0; 105377ae365eSSteven Rostedt } 105477ae365eSSteven Rostedt 105577ae365eSSteven Rostedt /** 105677ae365eSSteven Rostedt * rb_check_list - make sure a pointer to a list has the last bits zero 105777ae365eSSteven Rostedt */ 105877ae365eSSteven Rostedt static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, 105977ae365eSSteven Rostedt struct list_head *list) 106077ae365eSSteven Rostedt { 106177ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev)) 106277ae365eSSteven Rostedt return 1; 106377ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next)) 106477ae365eSSteven Rostedt return 1; 106577ae365eSSteven Rostedt return 0; 106677ae365eSSteven Rostedt } 106777ae365eSSteven Rostedt 10687a8e76a3SSteven Rostedt /** 1069d611851bSzhangwei(Jovi) * rb_check_pages - integrity check of buffer pages 10707a8e76a3SSteven Rostedt * @cpu_buffer: CPU buffer with pages to test 10717a8e76a3SSteven Rostedt * 1072c3706f00SWenji Huang * As a safety measure we check to make sure the data pages have not 10737a8e76a3SSteven Rostedt * been corrupted. 10747a8e76a3SSteven Rostedt */ 10757a8e76a3SSteven Rostedt static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) 10767a8e76a3SSteven Rostedt { 10773adc54faSSteven Rostedt struct list_head *head = cpu_buffer->pages; 1078044fa782SSteven Rostedt struct buffer_page *bpage, *tmp; 10797a8e76a3SSteven Rostedt 1080308f7eebSSteven Rostedt /* Reset the head page if it exists */ 1081308f7eebSSteven Rostedt if (cpu_buffer->head_page) 1082308f7eebSSteven Rostedt rb_set_head_page(cpu_buffer); 1083308f7eebSSteven Rostedt 108477ae365eSSteven Rostedt rb_head_page_deactivate(cpu_buffer); 108577ae365eSSteven Rostedt 10863e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) 10873e89c7bbSSteven Rostedt return -1; 10883e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) 10893e89c7bbSSteven Rostedt return -1; 10907a8e76a3SSteven Rostedt 109177ae365eSSteven Rostedt if (rb_check_list(cpu_buffer, head)) 109277ae365eSSteven Rostedt return -1; 109377ae365eSSteven Rostedt 1094044fa782SSteven Rostedt list_for_each_entry_safe(bpage, tmp, head, list) { 10953e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 1096044fa782SSteven Rostedt bpage->list.next->prev != &bpage->list)) 10973e89c7bbSSteven Rostedt return -1; 10983e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 1099044fa782SSteven Rostedt bpage->list.prev->next != &bpage->list)) 11003e89c7bbSSteven Rostedt return -1; 110177ae365eSSteven Rostedt if (rb_check_list(cpu_buffer, &bpage->list)) 110277ae365eSSteven Rostedt return -1; 11037a8e76a3SSteven Rostedt } 11047a8e76a3SSteven Rostedt 110577ae365eSSteven Rostedt rb_head_page_activate(cpu_buffer); 110677ae365eSSteven Rostedt 11077a8e76a3SSteven Rostedt return 0; 11087a8e76a3SSteven Rostedt } 11097a8e76a3SSteven Rostedt 1110438ced17SVaibhav Nagarnaik static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu) 11117a8e76a3SSteven Rostedt { 1112438ced17SVaibhav Nagarnaik int i; 1113044fa782SSteven Rostedt struct buffer_page *bpage, *tmp; 11143adc54faSSteven Rostedt 11157a8e76a3SSteven Rostedt for (i = 0; i < nr_pages; i++) { 11167ea59064SVaibhav Nagarnaik struct page *page; 1117d7ec4bfeSVaibhav Nagarnaik /* 1118d7ec4bfeSVaibhav Nagarnaik * __GFP_NORETRY flag makes sure that the allocation fails 1119d7ec4bfeSVaibhav Nagarnaik * gracefully without invoking oom-killer and the system is 1120d7ec4bfeSVaibhav Nagarnaik * not destabilized. 1121d7ec4bfeSVaibhav Nagarnaik */ 1122044fa782SSteven Rostedt bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 1123d7ec4bfeSVaibhav Nagarnaik GFP_KERNEL | __GFP_NORETRY, 1124438ced17SVaibhav Nagarnaik cpu_to_node(cpu)); 1125044fa782SSteven Rostedt if (!bpage) 1126e4c2ce82SSteven Rostedt goto free_pages; 112777ae365eSSteven Rostedt 1128438ced17SVaibhav Nagarnaik list_add(&bpage->list, pages); 112977ae365eSSteven Rostedt 1130438ced17SVaibhav Nagarnaik page = alloc_pages_node(cpu_to_node(cpu), 1131d7ec4bfeSVaibhav Nagarnaik GFP_KERNEL | __GFP_NORETRY, 0); 11327ea59064SVaibhav Nagarnaik if (!page) 11337a8e76a3SSteven Rostedt goto free_pages; 11347ea59064SVaibhav Nagarnaik bpage->page = page_address(page); 1135044fa782SSteven Rostedt rb_init_page(bpage->page); 11367a8e76a3SSteven Rostedt } 11377a8e76a3SSteven Rostedt 1138438ced17SVaibhav Nagarnaik return 0; 1139438ced17SVaibhav Nagarnaik 1140438ced17SVaibhav Nagarnaik free_pages: 1141438ced17SVaibhav Nagarnaik list_for_each_entry_safe(bpage, tmp, pages, list) { 1142438ced17SVaibhav Nagarnaik list_del_init(&bpage->list); 1143438ced17SVaibhav Nagarnaik free_buffer_page(bpage); 1144438ced17SVaibhav Nagarnaik } 1145438ced17SVaibhav Nagarnaik 1146438ced17SVaibhav Nagarnaik return -ENOMEM; 1147438ced17SVaibhav Nagarnaik } 1148438ced17SVaibhav Nagarnaik 1149438ced17SVaibhav Nagarnaik static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, 1150438ced17SVaibhav Nagarnaik unsigned nr_pages) 1151438ced17SVaibhav Nagarnaik { 1152438ced17SVaibhav Nagarnaik LIST_HEAD(pages); 1153438ced17SVaibhav Nagarnaik 1154438ced17SVaibhav Nagarnaik WARN_ON(!nr_pages); 1155438ced17SVaibhav Nagarnaik 1156438ced17SVaibhav Nagarnaik if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu)) 1157438ced17SVaibhav Nagarnaik return -ENOMEM; 1158438ced17SVaibhav Nagarnaik 11593adc54faSSteven Rostedt /* 11603adc54faSSteven Rostedt * The ring buffer page list is a circular list that does not 11613adc54faSSteven Rostedt * start and end with a list head. All page list items point to 11623adc54faSSteven Rostedt * other pages. 11633adc54faSSteven Rostedt */ 11643adc54faSSteven Rostedt cpu_buffer->pages = pages.next; 11653adc54faSSteven Rostedt list_del(&pages); 11667a8e76a3SSteven Rostedt 1167438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages = nr_pages; 1168438ced17SVaibhav Nagarnaik 11697a8e76a3SSteven Rostedt rb_check_pages(cpu_buffer); 11707a8e76a3SSteven Rostedt 11717a8e76a3SSteven Rostedt return 0; 11727a8e76a3SSteven Rostedt } 11737a8e76a3SSteven Rostedt 11747a8e76a3SSteven Rostedt static struct ring_buffer_per_cpu * 1175438ced17SVaibhav Nagarnaik rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu) 11767a8e76a3SSteven Rostedt { 11777a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 1178044fa782SSteven Rostedt struct buffer_page *bpage; 11797ea59064SVaibhav Nagarnaik struct page *page; 11807a8e76a3SSteven Rostedt int ret; 11817a8e76a3SSteven Rostedt 11827a8e76a3SSteven Rostedt cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), 11837a8e76a3SSteven Rostedt GFP_KERNEL, cpu_to_node(cpu)); 11847a8e76a3SSteven Rostedt if (!cpu_buffer) 11857a8e76a3SSteven Rostedt return NULL; 11867a8e76a3SSteven Rostedt 11877a8e76a3SSteven Rostedt cpu_buffer->cpu = cpu; 11887a8e76a3SSteven Rostedt cpu_buffer->buffer = buffer; 11895389f6faSThomas Gleixner raw_spin_lock_init(&cpu_buffer->reader_lock); 11901f8a6a10SPeter Zijlstra lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); 1191edc35bd7SThomas Gleixner cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 119283f40318SVaibhav Nagarnaik INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); 119305fdd70dSVaibhav Nagarnaik init_completion(&cpu_buffer->update_done); 119415693458SSteven Rostedt (Red Hat) init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); 1195f1dc6725SSteven Rostedt (Red Hat) init_waitqueue_head(&cpu_buffer->irq_work.waiters); 11967a8e76a3SSteven Rostedt 1197044fa782SSteven Rostedt bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 1198e4c2ce82SSteven Rostedt GFP_KERNEL, cpu_to_node(cpu)); 1199044fa782SSteven Rostedt if (!bpage) 1200e4c2ce82SSteven Rostedt goto fail_free_buffer; 1201e4c2ce82SSteven Rostedt 120277ae365eSSteven Rostedt rb_check_bpage(cpu_buffer, bpage); 120377ae365eSSteven Rostedt 1204044fa782SSteven Rostedt cpu_buffer->reader_page = bpage; 12057ea59064SVaibhav Nagarnaik page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0); 12067ea59064SVaibhav Nagarnaik if (!page) 1207e4c2ce82SSteven Rostedt goto fail_free_reader; 12087ea59064SVaibhav Nagarnaik bpage->page = page_address(page); 1209044fa782SSteven Rostedt rb_init_page(bpage->page); 1210e4c2ce82SSteven Rostedt 1211d769041fSSteven Rostedt INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 121244b99462SVaibhav Nagarnaik INIT_LIST_HEAD(&cpu_buffer->new_pages); 1213d769041fSSteven Rostedt 1214438ced17SVaibhav Nagarnaik ret = rb_allocate_pages(cpu_buffer, nr_pages); 12157a8e76a3SSteven Rostedt if (ret < 0) 1216d769041fSSteven Rostedt goto fail_free_reader; 12177a8e76a3SSteven Rostedt 12187a8e76a3SSteven Rostedt cpu_buffer->head_page 12193adc54faSSteven Rostedt = list_entry(cpu_buffer->pages, struct buffer_page, list); 1220bf41a158SSteven Rostedt cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; 12217a8e76a3SSteven Rostedt 122277ae365eSSteven Rostedt rb_head_page_activate(cpu_buffer); 122377ae365eSSteven Rostedt 12247a8e76a3SSteven Rostedt return cpu_buffer; 12257a8e76a3SSteven Rostedt 1226d769041fSSteven Rostedt fail_free_reader: 1227d769041fSSteven Rostedt free_buffer_page(cpu_buffer->reader_page); 1228d769041fSSteven Rostedt 12297a8e76a3SSteven Rostedt fail_free_buffer: 12307a8e76a3SSteven Rostedt kfree(cpu_buffer); 12317a8e76a3SSteven Rostedt return NULL; 12327a8e76a3SSteven Rostedt } 12337a8e76a3SSteven Rostedt 12347a8e76a3SSteven Rostedt static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 12357a8e76a3SSteven Rostedt { 12363adc54faSSteven Rostedt struct list_head *head = cpu_buffer->pages; 1237044fa782SSteven Rostedt struct buffer_page *bpage, *tmp; 12387a8e76a3SSteven Rostedt 1239d769041fSSteven Rostedt free_buffer_page(cpu_buffer->reader_page); 1240d769041fSSteven Rostedt 124177ae365eSSteven Rostedt rb_head_page_deactivate(cpu_buffer); 124277ae365eSSteven Rostedt 12433adc54faSSteven Rostedt if (head) { 1244044fa782SSteven Rostedt list_for_each_entry_safe(bpage, tmp, head, list) { 1245044fa782SSteven Rostedt list_del_init(&bpage->list); 1246044fa782SSteven Rostedt free_buffer_page(bpage); 12477a8e76a3SSteven Rostedt } 12483adc54faSSteven Rostedt bpage = list_entry(head, struct buffer_page, list); 12493adc54faSSteven Rostedt free_buffer_page(bpage); 12503adc54faSSteven Rostedt } 12513adc54faSSteven Rostedt 12527a8e76a3SSteven Rostedt kfree(cpu_buffer); 12537a8e76a3SSteven Rostedt } 12547a8e76a3SSteven Rostedt 125559222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU 125609c9e84dSFrederic Weisbecker static int rb_cpu_notify(struct notifier_block *self, 1257554f786eSSteven Rostedt unsigned long action, void *hcpu); 1258554f786eSSteven Rostedt #endif 1259554f786eSSteven Rostedt 12607a8e76a3SSteven Rostedt /** 1261d611851bSzhangwei(Jovi) * __ring_buffer_alloc - allocate a new ring_buffer 126268814b58SRobert Richter * @size: the size in bytes per cpu that is needed. 12637a8e76a3SSteven Rostedt * @flags: attributes to set for the ring buffer. 12647a8e76a3SSteven Rostedt * 12657a8e76a3SSteven Rostedt * Currently the only flag that is available is the RB_FL_OVERWRITE 12667a8e76a3SSteven Rostedt * flag. This flag means that the buffer will overwrite old data 12677a8e76a3SSteven Rostedt * when the buffer wraps. If this flag is not set, the buffer will 12687a8e76a3SSteven Rostedt * drop data when the tail hits the head. 12697a8e76a3SSteven Rostedt */ 12701f8a6a10SPeter Zijlstra struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, 12711f8a6a10SPeter Zijlstra struct lock_class_key *key) 12727a8e76a3SSteven Rostedt { 12737a8e76a3SSteven Rostedt struct ring_buffer *buffer; 12747a8e76a3SSteven Rostedt int bsize; 1275438ced17SVaibhav Nagarnaik int cpu, nr_pages; 12767a8e76a3SSteven Rostedt 12777a8e76a3SSteven Rostedt /* keep it in its own cache line */ 12787a8e76a3SSteven Rostedt buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), 12797a8e76a3SSteven Rostedt GFP_KERNEL); 12807a8e76a3SSteven Rostedt if (!buffer) 12817a8e76a3SSteven Rostedt return NULL; 12827a8e76a3SSteven Rostedt 12839e01c1b7SRusty Russell if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) 12849e01c1b7SRusty Russell goto fail_free_buffer; 12859e01c1b7SRusty Russell 1286438ced17SVaibhav Nagarnaik nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 12877a8e76a3SSteven Rostedt buffer->flags = flags; 128837886f6aSSteven Rostedt buffer->clock = trace_clock_local; 12891f8a6a10SPeter Zijlstra buffer->reader_lock_key = key; 12907a8e76a3SSteven Rostedt 129115693458SSteven Rostedt (Red Hat) init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); 1292f1dc6725SSteven Rostedt (Red Hat) init_waitqueue_head(&buffer->irq_work.waiters); 129315693458SSteven Rostedt (Red Hat) 12947a8e76a3SSteven Rostedt /* need at least two pages */ 1295438ced17SVaibhav Nagarnaik if (nr_pages < 2) 1296438ced17SVaibhav Nagarnaik nr_pages = 2; 12977a8e76a3SSteven Rostedt 12983bf832ceSFrederic Weisbecker /* 12993bf832ceSFrederic Weisbecker * In case of non-hotplug cpu, if the ring-buffer is allocated 13003bf832ceSFrederic Weisbecker * in early initcall, it will not be notified of secondary cpus. 13013bf832ceSFrederic Weisbecker * In that off case, we need to allocate for all possible cpus. 13023bf832ceSFrederic Weisbecker */ 13033bf832ceSFrederic Weisbecker #ifdef CONFIG_HOTPLUG_CPU 1304554f786eSSteven Rostedt get_online_cpus(); 1305554f786eSSteven Rostedt cpumask_copy(buffer->cpumask, cpu_online_mask); 13063bf832ceSFrederic Weisbecker #else 13073bf832ceSFrederic Weisbecker cpumask_copy(buffer->cpumask, cpu_possible_mask); 13083bf832ceSFrederic Weisbecker #endif 13097a8e76a3SSteven Rostedt buffer->cpus = nr_cpu_ids; 13107a8e76a3SSteven Rostedt 13117a8e76a3SSteven Rostedt bsize = sizeof(void *) * nr_cpu_ids; 13127a8e76a3SSteven Rostedt buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), 13137a8e76a3SSteven Rostedt GFP_KERNEL); 13147a8e76a3SSteven Rostedt if (!buffer->buffers) 13159e01c1b7SRusty Russell goto fail_free_cpumask; 13167a8e76a3SSteven Rostedt 13177a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 13187a8e76a3SSteven Rostedt buffer->buffers[cpu] = 1319438ced17SVaibhav Nagarnaik rb_allocate_cpu_buffer(buffer, nr_pages, cpu); 13207a8e76a3SSteven Rostedt if (!buffer->buffers[cpu]) 13217a8e76a3SSteven Rostedt goto fail_free_buffers; 13227a8e76a3SSteven Rostedt } 13237a8e76a3SSteven Rostedt 132459222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU 1325554f786eSSteven Rostedt buffer->cpu_notify.notifier_call = rb_cpu_notify; 1326554f786eSSteven Rostedt buffer->cpu_notify.priority = 0; 1327554f786eSSteven Rostedt register_cpu_notifier(&buffer->cpu_notify); 1328554f786eSSteven Rostedt #endif 1329554f786eSSteven Rostedt 1330554f786eSSteven Rostedt put_online_cpus(); 13317a8e76a3SSteven Rostedt mutex_init(&buffer->mutex); 13327a8e76a3SSteven Rostedt 13337a8e76a3SSteven Rostedt return buffer; 13347a8e76a3SSteven Rostedt 13357a8e76a3SSteven Rostedt fail_free_buffers: 13367a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 13377a8e76a3SSteven Rostedt if (buffer->buffers[cpu]) 13387a8e76a3SSteven Rostedt rb_free_cpu_buffer(buffer->buffers[cpu]); 13397a8e76a3SSteven Rostedt } 13407a8e76a3SSteven Rostedt kfree(buffer->buffers); 13417a8e76a3SSteven Rostedt 13429e01c1b7SRusty Russell fail_free_cpumask: 13439e01c1b7SRusty Russell free_cpumask_var(buffer->cpumask); 1344554f786eSSteven Rostedt put_online_cpus(); 13459e01c1b7SRusty Russell 13467a8e76a3SSteven Rostedt fail_free_buffer: 13477a8e76a3SSteven Rostedt kfree(buffer); 13487a8e76a3SSteven Rostedt return NULL; 13497a8e76a3SSteven Rostedt } 13501f8a6a10SPeter Zijlstra EXPORT_SYMBOL_GPL(__ring_buffer_alloc); 13517a8e76a3SSteven Rostedt 13527a8e76a3SSteven Rostedt /** 13537a8e76a3SSteven Rostedt * ring_buffer_free - free a ring buffer. 13547a8e76a3SSteven Rostedt * @buffer: the buffer to free. 13557a8e76a3SSteven Rostedt */ 13567a8e76a3SSteven Rostedt void 13577a8e76a3SSteven Rostedt ring_buffer_free(struct ring_buffer *buffer) 13587a8e76a3SSteven Rostedt { 13597a8e76a3SSteven Rostedt int cpu; 13607a8e76a3SSteven Rostedt 1361554f786eSSteven Rostedt get_online_cpus(); 1362554f786eSSteven Rostedt 136359222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU 1364554f786eSSteven Rostedt unregister_cpu_notifier(&buffer->cpu_notify); 1365554f786eSSteven Rostedt #endif 1366554f786eSSteven Rostedt 13677a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) 13687a8e76a3SSteven Rostedt rb_free_cpu_buffer(buffer->buffers[cpu]); 13697a8e76a3SSteven Rostedt 1370554f786eSSteven Rostedt put_online_cpus(); 1371554f786eSSteven Rostedt 1372bd3f0221SEric Dumazet kfree(buffer->buffers); 13739e01c1b7SRusty Russell free_cpumask_var(buffer->cpumask); 13749e01c1b7SRusty Russell 13757a8e76a3SSteven Rostedt kfree(buffer); 13767a8e76a3SSteven Rostedt } 1377c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_free); 13787a8e76a3SSteven Rostedt 137937886f6aSSteven Rostedt void ring_buffer_set_clock(struct ring_buffer *buffer, 138037886f6aSSteven Rostedt u64 (*clock)(void)) 138137886f6aSSteven Rostedt { 138237886f6aSSteven Rostedt buffer->clock = clock; 138337886f6aSSteven Rostedt } 138437886f6aSSteven Rostedt 13857a8e76a3SSteven Rostedt static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); 13867a8e76a3SSteven Rostedt 138783f40318SVaibhav Nagarnaik static inline unsigned long rb_page_entries(struct buffer_page *bpage) 13887a8e76a3SSteven Rostedt { 138983f40318SVaibhav Nagarnaik return local_read(&bpage->entries) & RB_WRITE_MASK; 139083f40318SVaibhav Nagarnaik } 139183f40318SVaibhav Nagarnaik 139283f40318SVaibhav Nagarnaik static inline unsigned long rb_page_write(struct buffer_page *bpage) 139383f40318SVaibhav Nagarnaik { 139483f40318SVaibhav Nagarnaik return local_read(&bpage->write) & RB_WRITE_MASK; 139583f40318SVaibhav Nagarnaik } 139683f40318SVaibhav Nagarnaik 13975040b4b7SVaibhav Nagarnaik static int 139883f40318SVaibhav Nagarnaik rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages) 139983f40318SVaibhav Nagarnaik { 140083f40318SVaibhav Nagarnaik struct list_head *tail_page, *to_remove, *next_page; 140183f40318SVaibhav Nagarnaik struct buffer_page *to_remove_page, *tmp_iter_page; 140283f40318SVaibhav Nagarnaik struct buffer_page *last_page, *first_page; 140383f40318SVaibhav Nagarnaik unsigned int nr_removed; 140483f40318SVaibhav Nagarnaik unsigned long head_bit; 140583f40318SVaibhav Nagarnaik int page_entries; 140683f40318SVaibhav Nagarnaik 140783f40318SVaibhav Nagarnaik head_bit = 0; 14087a8e76a3SSteven Rostedt 14095389f6faSThomas Gleixner raw_spin_lock_irq(&cpu_buffer->reader_lock); 141083f40318SVaibhav Nagarnaik atomic_inc(&cpu_buffer->record_disabled); 141183f40318SVaibhav Nagarnaik /* 141283f40318SVaibhav Nagarnaik * We don't race with the readers since we have acquired the reader 141383f40318SVaibhav Nagarnaik * lock. We also don't race with writers after disabling recording. 141483f40318SVaibhav Nagarnaik * This makes it easy to figure out the first and the last page to be 141583f40318SVaibhav Nagarnaik * removed from the list. We unlink all the pages in between including 141683f40318SVaibhav Nagarnaik * the first and last pages. This is done in a busy loop so that we 141783f40318SVaibhav Nagarnaik * lose the least number of traces. 141883f40318SVaibhav Nagarnaik * The pages are freed after we restart recording and unlock readers. 141983f40318SVaibhav Nagarnaik */ 142083f40318SVaibhav Nagarnaik tail_page = &cpu_buffer->tail_page->list; 142177ae365eSSteven Rostedt 142283f40318SVaibhav Nagarnaik /* 142383f40318SVaibhav Nagarnaik * tail page might be on reader page, we remove the next page 142483f40318SVaibhav Nagarnaik * from the ring buffer 142583f40318SVaibhav Nagarnaik */ 142683f40318SVaibhav Nagarnaik if (cpu_buffer->tail_page == cpu_buffer->reader_page) 142783f40318SVaibhav Nagarnaik tail_page = rb_list_head(tail_page->next); 142883f40318SVaibhav Nagarnaik to_remove = tail_page; 142983f40318SVaibhav Nagarnaik 143083f40318SVaibhav Nagarnaik /* start of pages to remove */ 143183f40318SVaibhav Nagarnaik first_page = list_entry(rb_list_head(to_remove->next), 143283f40318SVaibhav Nagarnaik struct buffer_page, list); 143383f40318SVaibhav Nagarnaik 143483f40318SVaibhav Nagarnaik for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) { 143583f40318SVaibhav Nagarnaik to_remove = rb_list_head(to_remove)->next; 143683f40318SVaibhav Nagarnaik head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD; 14377a8e76a3SSteven Rostedt } 14387a8e76a3SSteven Rostedt 143983f40318SVaibhav Nagarnaik next_page = rb_list_head(to_remove)->next; 14407a8e76a3SSteven Rostedt 144183f40318SVaibhav Nagarnaik /* 144283f40318SVaibhav Nagarnaik * Now we remove all pages between tail_page and next_page. 144383f40318SVaibhav Nagarnaik * Make sure that we have head_bit value preserved for the 144483f40318SVaibhav Nagarnaik * next page 144583f40318SVaibhav Nagarnaik */ 144683f40318SVaibhav Nagarnaik tail_page->next = (struct list_head *)((unsigned long)next_page | 144783f40318SVaibhav Nagarnaik head_bit); 144883f40318SVaibhav Nagarnaik next_page = rb_list_head(next_page); 144983f40318SVaibhav Nagarnaik next_page->prev = tail_page; 145083f40318SVaibhav Nagarnaik 145183f40318SVaibhav Nagarnaik /* make sure pages points to a valid page in the ring buffer */ 145283f40318SVaibhav Nagarnaik cpu_buffer->pages = next_page; 145383f40318SVaibhav Nagarnaik 145483f40318SVaibhav Nagarnaik /* update head page */ 145583f40318SVaibhav Nagarnaik if (head_bit) 145683f40318SVaibhav Nagarnaik cpu_buffer->head_page = list_entry(next_page, 145783f40318SVaibhav Nagarnaik struct buffer_page, list); 145883f40318SVaibhav Nagarnaik 145983f40318SVaibhav Nagarnaik /* 146083f40318SVaibhav Nagarnaik * change read pointer to make sure any read iterators reset 146183f40318SVaibhav Nagarnaik * themselves 146283f40318SVaibhav Nagarnaik */ 146383f40318SVaibhav Nagarnaik cpu_buffer->read = 0; 146483f40318SVaibhav Nagarnaik 146583f40318SVaibhav Nagarnaik /* pages are removed, resume tracing and then free the pages */ 146683f40318SVaibhav Nagarnaik atomic_dec(&cpu_buffer->record_disabled); 14675389f6faSThomas Gleixner raw_spin_unlock_irq(&cpu_buffer->reader_lock); 146883f40318SVaibhav Nagarnaik 146983f40318SVaibhav Nagarnaik RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); 147083f40318SVaibhav Nagarnaik 147183f40318SVaibhav Nagarnaik /* last buffer page to remove */ 147283f40318SVaibhav Nagarnaik last_page = list_entry(rb_list_head(to_remove), struct buffer_page, 147383f40318SVaibhav Nagarnaik list); 147483f40318SVaibhav Nagarnaik tmp_iter_page = first_page; 147583f40318SVaibhav Nagarnaik 147683f40318SVaibhav Nagarnaik do { 147783f40318SVaibhav Nagarnaik to_remove_page = tmp_iter_page; 147883f40318SVaibhav Nagarnaik rb_inc_page(cpu_buffer, &tmp_iter_page); 147983f40318SVaibhav Nagarnaik 148083f40318SVaibhav Nagarnaik /* update the counters */ 148183f40318SVaibhav Nagarnaik page_entries = rb_page_entries(to_remove_page); 148283f40318SVaibhav Nagarnaik if (page_entries) { 148383f40318SVaibhav Nagarnaik /* 148483f40318SVaibhav Nagarnaik * If something was added to this page, it was full 148583f40318SVaibhav Nagarnaik * since it is not the tail page. So we deduct the 148683f40318SVaibhav Nagarnaik * bytes consumed in ring buffer from here. 148748fdc72fSVaibhav Nagarnaik * Increment overrun to account for the lost events. 148883f40318SVaibhav Nagarnaik */ 148948fdc72fSVaibhav Nagarnaik local_add(page_entries, &cpu_buffer->overrun); 149083f40318SVaibhav Nagarnaik local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 149183f40318SVaibhav Nagarnaik } 149283f40318SVaibhav Nagarnaik 149383f40318SVaibhav Nagarnaik /* 149483f40318SVaibhav Nagarnaik * We have already removed references to this list item, just 149583f40318SVaibhav Nagarnaik * free up the buffer_page and its page 149683f40318SVaibhav Nagarnaik */ 149783f40318SVaibhav Nagarnaik free_buffer_page(to_remove_page); 149883f40318SVaibhav Nagarnaik nr_removed--; 149983f40318SVaibhav Nagarnaik 150083f40318SVaibhav Nagarnaik } while (to_remove_page != last_page); 150183f40318SVaibhav Nagarnaik 150283f40318SVaibhav Nagarnaik RB_WARN_ON(cpu_buffer, nr_removed); 15035040b4b7SVaibhav Nagarnaik 15045040b4b7SVaibhav Nagarnaik return nr_removed == 0; 15057a8e76a3SSteven Rostedt } 15067a8e76a3SSteven Rostedt 15075040b4b7SVaibhav Nagarnaik static int 15085040b4b7SVaibhav Nagarnaik rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) 15097a8e76a3SSteven Rostedt { 15105040b4b7SVaibhav Nagarnaik struct list_head *pages = &cpu_buffer->new_pages; 15115040b4b7SVaibhav Nagarnaik int retries, success; 15127a8e76a3SSteven Rostedt 15135389f6faSThomas Gleixner raw_spin_lock_irq(&cpu_buffer->reader_lock); 15145040b4b7SVaibhav Nagarnaik /* 15155040b4b7SVaibhav Nagarnaik * We are holding the reader lock, so the reader page won't be swapped 15165040b4b7SVaibhav Nagarnaik * in the ring buffer. Now we are racing with the writer trying to 15175040b4b7SVaibhav Nagarnaik * move head page and the tail page. 15185040b4b7SVaibhav Nagarnaik * We are going to adapt the reader page update process where: 15195040b4b7SVaibhav Nagarnaik * 1. We first splice the start and end of list of new pages between 15205040b4b7SVaibhav Nagarnaik * the head page and its previous page. 15215040b4b7SVaibhav Nagarnaik * 2. We cmpxchg the prev_page->next to point from head page to the 15225040b4b7SVaibhav Nagarnaik * start of new pages list. 15235040b4b7SVaibhav Nagarnaik * 3. Finally, we update the head->prev to the end of new list. 15245040b4b7SVaibhav Nagarnaik * 15255040b4b7SVaibhav Nagarnaik * We will try this process 10 times, to make sure that we don't keep 15265040b4b7SVaibhav Nagarnaik * spinning. 15275040b4b7SVaibhav Nagarnaik */ 15285040b4b7SVaibhav Nagarnaik retries = 10; 15295040b4b7SVaibhav Nagarnaik success = 0; 15305040b4b7SVaibhav Nagarnaik while (retries--) { 15315040b4b7SVaibhav Nagarnaik struct list_head *head_page, *prev_page, *r; 15325040b4b7SVaibhav Nagarnaik struct list_head *last_page, *first_page; 15335040b4b7SVaibhav Nagarnaik struct list_head *head_page_with_bit; 153477ae365eSSteven Rostedt 15355040b4b7SVaibhav Nagarnaik head_page = &rb_set_head_page(cpu_buffer)->list; 153654f7be5bSSteven Rostedt if (!head_page) 153754f7be5bSSteven Rostedt break; 15385040b4b7SVaibhav Nagarnaik prev_page = head_page->prev; 15395040b4b7SVaibhav Nagarnaik 15405040b4b7SVaibhav Nagarnaik first_page = pages->next; 15415040b4b7SVaibhav Nagarnaik last_page = pages->prev; 15425040b4b7SVaibhav Nagarnaik 15435040b4b7SVaibhav Nagarnaik head_page_with_bit = (struct list_head *) 15445040b4b7SVaibhav Nagarnaik ((unsigned long)head_page | RB_PAGE_HEAD); 15455040b4b7SVaibhav Nagarnaik 15465040b4b7SVaibhav Nagarnaik last_page->next = head_page_with_bit; 15475040b4b7SVaibhav Nagarnaik first_page->prev = prev_page; 15485040b4b7SVaibhav Nagarnaik 15495040b4b7SVaibhav Nagarnaik r = cmpxchg(&prev_page->next, head_page_with_bit, first_page); 15505040b4b7SVaibhav Nagarnaik 15515040b4b7SVaibhav Nagarnaik if (r == head_page_with_bit) { 15525040b4b7SVaibhav Nagarnaik /* 15535040b4b7SVaibhav Nagarnaik * yay, we replaced the page pointer to our new list, 15545040b4b7SVaibhav Nagarnaik * now, we just have to update to head page's prev 15555040b4b7SVaibhav Nagarnaik * pointer to point to end of list 15565040b4b7SVaibhav Nagarnaik */ 15575040b4b7SVaibhav Nagarnaik head_page->prev = last_page; 15585040b4b7SVaibhav Nagarnaik success = 1; 15595040b4b7SVaibhav Nagarnaik break; 15607a8e76a3SSteven Rostedt } 15615040b4b7SVaibhav Nagarnaik } 15627a8e76a3SSteven Rostedt 15635040b4b7SVaibhav Nagarnaik if (success) 15645040b4b7SVaibhav Nagarnaik INIT_LIST_HEAD(pages); 15655040b4b7SVaibhav Nagarnaik /* 15665040b4b7SVaibhav Nagarnaik * If we weren't successful in adding in new pages, warn and stop 15675040b4b7SVaibhav Nagarnaik * tracing 15685040b4b7SVaibhav Nagarnaik */ 15695040b4b7SVaibhav Nagarnaik RB_WARN_ON(cpu_buffer, !success); 15705389f6faSThomas Gleixner raw_spin_unlock_irq(&cpu_buffer->reader_lock); 15715040b4b7SVaibhav Nagarnaik 15725040b4b7SVaibhav Nagarnaik /* free pages if they weren't inserted */ 15735040b4b7SVaibhav Nagarnaik if (!success) { 15745040b4b7SVaibhav Nagarnaik struct buffer_page *bpage, *tmp; 15755040b4b7SVaibhav Nagarnaik list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 15765040b4b7SVaibhav Nagarnaik list) { 15775040b4b7SVaibhav Nagarnaik list_del_init(&bpage->list); 15785040b4b7SVaibhav Nagarnaik free_buffer_page(bpage); 15795040b4b7SVaibhav Nagarnaik } 15805040b4b7SVaibhav Nagarnaik } 15815040b4b7SVaibhav Nagarnaik return success; 15827a8e76a3SSteven Rostedt } 15837a8e76a3SSteven Rostedt 158483f40318SVaibhav Nagarnaik static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) 1585438ced17SVaibhav Nagarnaik { 15865040b4b7SVaibhav Nagarnaik int success; 158783f40318SVaibhav Nagarnaik 15885040b4b7SVaibhav Nagarnaik if (cpu_buffer->nr_pages_to_update > 0) 15895040b4b7SVaibhav Nagarnaik success = rb_insert_pages(cpu_buffer); 15905040b4b7SVaibhav Nagarnaik else 15915040b4b7SVaibhav Nagarnaik success = rb_remove_pages(cpu_buffer, 15925040b4b7SVaibhav Nagarnaik -cpu_buffer->nr_pages_to_update); 15935040b4b7SVaibhav Nagarnaik 15945040b4b7SVaibhav Nagarnaik if (success) 1595438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; 159683f40318SVaibhav Nagarnaik } 159783f40318SVaibhav Nagarnaik 159883f40318SVaibhav Nagarnaik static void update_pages_handler(struct work_struct *work) 159983f40318SVaibhav Nagarnaik { 160083f40318SVaibhav Nagarnaik struct ring_buffer_per_cpu *cpu_buffer = container_of(work, 160183f40318SVaibhav Nagarnaik struct ring_buffer_per_cpu, update_pages_work); 160283f40318SVaibhav Nagarnaik rb_update_pages(cpu_buffer); 160305fdd70dSVaibhav Nagarnaik complete(&cpu_buffer->update_done); 1604438ced17SVaibhav Nagarnaik } 1605438ced17SVaibhav Nagarnaik 16067a8e76a3SSteven Rostedt /** 16077a8e76a3SSteven Rostedt * ring_buffer_resize - resize the ring buffer 16087a8e76a3SSteven Rostedt * @buffer: the buffer to resize. 16097a8e76a3SSteven Rostedt * @size: the new size. 1610d611851bSzhangwei(Jovi) * @cpu_id: the cpu buffer to resize 16117a8e76a3SSteven Rostedt * 16127a8e76a3SSteven Rostedt * Minimum size is 2 * BUF_PAGE_SIZE. 16137a8e76a3SSteven Rostedt * 161483f40318SVaibhav Nagarnaik * Returns 0 on success and < 0 on failure. 16157a8e76a3SSteven Rostedt */ 1616438ced17SVaibhav Nagarnaik int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, 1617438ced17SVaibhav Nagarnaik int cpu_id) 16187a8e76a3SSteven Rostedt { 16197a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 1620438ced17SVaibhav Nagarnaik unsigned nr_pages; 162183f40318SVaibhav Nagarnaik int cpu, err = 0; 16227a8e76a3SSteven Rostedt 1623ee51a1deSIngo Molnar /* 1624ee51a1deSIngo Molnar * Always succeed at resizing a non-existent buffer: 1625ee51a1deSIngo Molnar */ 1626ee51a1deSIngo Molnar if (!buffer) 1627ee51a1deSIngo Molnar return size; 1628ee51a1deSIngo Molnar 16296a31e1f1SSteven Rostedt /* Make sure the requested buffer exists */ 16306a31e1f1SSteven Rostedt if (cpu_id != RING_BUFFER_ALL_CPUS && 16316a31e1f1SSteven Rostedt !cpumask_test_cpu(cpu_id, buffer->cpumask)) 16326a31e1f1SSteven Rostedt return size; 16336a31e1f1SSteven Rostedt 16347a8e76a3SSteven Rostedt size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 16357a8e76a3SSteven Rostedt size *= BUF_PAGE_SIZE; 16367a8e76a3SSteven Rostedt 16377a8e76a3SSteven Rostedt /* we need a minimum of two pages */ 16387a8e76a3SSteven Rostedt if (size < BUF_PAGE_SIZE * 2) 16397a8e76a3SSteven Rostedt size = BUF_PAGE_SIZE * 2; 16407a8e76a3SSteven Rostedt 16417a8e76a3SSteven Rostedt nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 16427a8e76a3SSteven Rostedt 164383f40318SVaibhav Nagarnaik /* 164483f40318SVaibhav Nagarnaik * Don't succeed if resizing is disabled, as a reader might be 164583f40318SVaibhav Nagarnaik * manipulating the ring buffer and is expecting a sane state while 164683f40318SVaibhav Nagarnaik * this is true. 164783f40318SVaibhav Nagarnaik */ 164883f40318SVaibhav Nagarnaik if (atomic_read(&buffer->resize_disabled)) 164983f40318SVaibhav Nagarnaik return -EBUSY; 165083f40318SVaibhav Nagarnaik 165183f40318SVaibhav Nagarnaik /* prevent another thread from changing buffer sizes */ 165283f40318SVaibhav Nagarnaik mutex_lock(&buffer->mutex); 165383f40318SVaibhav Nagarnaik 1654438ced17SVaibhav Nagarnaik if (cpu_id == RING_BUFFER_ALL_CPUS) { 1655438ced17SVaibhav Nagarnaik /* calculate the pages to update */ 16567a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 16577a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 1658438ced17SVaibhav Nagarnaik 1659438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = nr_pages - 1660438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages; 1661438ced17SVaibhav Nagarnaik /* 1662438ced17SVaibhav Nagarnaik * nothing more to do for removing pages or no update 1663438ced17SVaibhav Nagarnaik */ 1664438ced17SVaibhav Nagarnaik if (cpu_buffer->nr_pages_to_update <= 0) 1665438ced17SVaibhav Nagarnaik continue; 1666438ced17SVaibhav Nagarnaik /* 1667438ced17SVaibhav Nagarnaik * to add pages, make sure all new pages can be 1668438ced17SVaibhav Nagarnaik * allocated without receiving ENOMEM 1669438ced17SVaibhav Nagarnaik */ 1670438ced17SVaibhav Nagarnaik INIT_LIST_HEAD(&cpu_buffer->new_pages); 1671438ced17SVaibhav Nagarnaik if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update, 167283f40318SVaibhav Nagarnaik &cpu_buffer->new_pages, cpu)) { 1673438ced17SVaibhav Nagarnaik /* not enough memory for new pages */ 167483f40318SVaibhav Nagarnaik err = -ENOMEM; 167583f40318SVaibhav Nagarnaik goto out_err; 167683f40318SVaibhav Nagarnaik } 167783f40318SVaibhav Nagarnaik } 167883f40318SVaibhav Nagarnaik 167983f40318SVaibhav Nagarnaik get_online_cpus(); 168083f40318SVaibhav Nagarnaik /* 168183f40318SVaibhav Nagarnaik * Fire off all the required work handlers 168205fdd70dSVaibhav Nagarnaik * We can't schedule on offline CPUs, but it's not necessary 168383f40318SVaibhav Nagarnaik * since we can change their buffer sizes without any race. 168483f40318SVaibhav Nagarnaik */ 168583f40318SVaibhav Nagarnaik for_each_buffer_cpu(buffer, cpu) { 168683f40318SVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu]; 168705fdd70dSVaibhav Nagarnaik if (!cpu_buffer->nr_pages_to_update) 168883f40318SVaibhav Nagarnaik continue; 168983f40318SVaibhav Nagarnaik 1690f5eb5588SSteven Rostedt (Red Hat) /* The update must run on the CPU that is being updated. */ 1691f5eb5588SSteven Rostedt (Red Hat) preempt_disable(); 1692f5eb5588SSteven Rostedt (Red Hat) if (cpu == smp_processor_id() || !cpu_online(cpu)) { 1693f5eb5588SSteven Rostedt (Red Hat) rb_update_pages(cpu_buffer); 1694f5eb5588SSteven Rostedt (Red Hat) cpu_buffer->nr_pages_to_update = 0; 1695f5eb5588SSteven Rostedt (Red Hat) } else { 1696f5eb5588SSteven Rostedt (Red Hat) /* 1697f5eb5588SSteven Rostedt (Red Hat) * Can not disable preemption for schedule_work_on() 1698f5eb5588SSteven Rostedt (Red Hat) * on PREEMPT_RT. 1699f5eb5588SSteven Rostedt (Red Hat) */ 1700f5eb5588SSteven Rostedt (Red Hat) preempt_enable(); 170105fdd70dSVaibhav Nagarnaik schedule_work_on(cpu, 170205fdd70dSVaibhav Nagarnaik &cpu_buffer->update_pages_work); 1703f5eb5588SSteven Rostedt (Red Hat) preempt_disable(); 1704f5eb5588SSteven Rostedt (Red Hat) } 1705f5eb5588SSteven Rostedt (Red Hat) preempt_enable(); 17067a8e76a3SSteven Rostedt } 1707438ced17SVaibhav Nagarnaik 1708438ced17SVaibhav Nagarnaik /* wait for all the updates to complete */ 1709438ced17SVaibhav Nagarnaik for_each_buffer_cpu(buffer, cpu) { 1710438ced17SVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu]; 171105fdd70dSVaibhav Nagarnaik if (!cpu_buffer->nr_pages_to_update) 171283f40318SVaibhav Nagarnaik continue; 171383f40318SVaibhav Nagarnaik 171405fdd70dSVaibhav Nagarnaik if (cpu_online(cpu)) 171505fdd70dSVaibhav Nagarnaik wait_for_completion(&cpu_buffer->update_done); 171683f40318SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = 0; 1717438ced17SVaibhav Nagarnaik } 171883f40318SVaibhav Nagarnaik 171983f40318SVaibhav Nagarnaik put_online_cpus(); 1720438ced17SVaibhav Nagarnaik } else { 17218e49f418SVaibhav Nagarnaik /* Make sure this CPU has been intitialized */ 17228e49f418SVaibhav Nagarnaik if (!cpumask_test_cpu(cpu_id, buffer->cpumask)) 17238e49f418SVaibhav Nagarnaik goto out; 17248e49f418SVaibhav Nagarnaik 1725438ced17SVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu_id]; 172683f40318SVaibhav Nagarnaik 1727438ced17SVaibhav Nagarnaik if (nr_pages == cpu_buffer->nr_pages) 17287a8e76a3SSteven Rostedt goto out; 1729438ced17SVaibhav Nagarnaik 1730438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = nr_pages - 1731438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages; 1732438ced17SVaibhav Nagarnaik 1733438ced17SVaibhav Nagarnaik INIT_LIST_HEAD(&cpu_buffer->new_pages); 1734438ced17SVaibhav Nagarnaik if (cpu_buffer->nr_pages_to_update > 0 && 1735438ced17SVaibhav Nagarnaik __rb_allocate_pages(cpu_buffer->nr_pages_to_update, 173683f40318SVaibhav Nagarnaik &cpu_buffer->new_pages, cpu_id)) { 173783f40318SVaibhav Nagarnaik err = -ENOMEM; 173883f40318SVaibhav Nagarnaik goto out_err; 173983f40318SVaibhav Nagarnaik } 1740438ced17SVaibhav Nagarnaik 174183f40318SVaibhav Nagarnaik get_online_cpus(); 174283f40318SVaibhav Nagarnaik 1743f5eb5588SSteven Rostedt (Red Hat) preempt_disable(); 1744f5eb5588SSteven Rostedt (Red Hat) /* The update must run on the CPU that is being updated. */ 1745f5eb5588SSteven Rostedt (Red Hat) if (cpu_id == smp_processor_id() || !cpu_online(cpu_id)) 1746f5eb5588SSteven Rostedt (Red Hat) rb_update_pages(cpu_buffer); 1747f5eb5588SSteven Rostedt (Red Hat) else { 1748f5eb5588SSteven Rostedt (Red Hat) /* 1749f5eb5588SSteven Rostedt (Red Hat) * Can not disable preemption for schedule_work_on() 1750f5eb5588SSteven Rostedt (Red Hat) * on PREEMPT_RT. 1751f5eb5588SSteven Rostedt (Red Hat) */ 1752f5eb5588SSteven Rostedt (Red Hat) preempt_enable(); 175383f40318SVaibhav Nagarnaik schedule_work_on(cpu_id, 175483f40318SVaibhav Nagarnaik &cpu_buffer->update_pages_work); 175505fdd70dSVaibhav Nagarnaik wait_for_completion(&cpu_buffer->update_done); 1756f5eb5588SSteven Rostedt (Red Hat) preempt_disable(); 1757f5eb5588SSteven Rostedt (Red Hat) } 1758f5eb5588SSteven Rostedt (Red Hat) preempt_enable(); 175983f40318SVaibhav Nagarnaik 176083f40318SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = 0; 176105fdd70dSVaibhav Nagarnaik put_online_cpus(); 17627a8e76a3SSteven Rostedt } 17637a8e76a3SSteven Rostedt 17647a8e76a3SSteven Rostedt out: 1765659f451fSSteven Rostedt /* 1766659f451fSSteven Rostedt * The ring buffer resize can happen with the ring buffer 1767659f451fSSteven Rostedt * enabled, so that the update disturbs the tracing as little 1768659f451fSSteven Rostedt * as possible. But if the buffer is disabled, we do not need 1769659f451fSSteven Rostedt * to worry about that, and we can take the time to verify 1770659f451fSSteven Rostedt * that the buffer is not corrupt. 1771659f451fSSteven Rostedt */ 1772659f451fSSteven Rostedt if (atomic_read(&buffer->record_disabled)) { 1773659f451fSSteven Rostedt atomic_inc(&buffer->record_disabled); 1774659f451fSSteven Rostedt /* 1775659f451fSSteven Rostedt * Even though the buffer was disabled, we must make sure 1776659f451fSSteven Rostedt * that it is truly disabled before calling rb_check_pages. 1777659f451fSSteven Rostedt * There could have been a race between checking 1778659f451fSSteven Rostedt * record_disable and incrementing it. 1779659f451fSSteven Rostedt */ 1780659f451fSSteven Rostedt synchronize_sched(); 1781659f451fSSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 1782659f451fSSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 1783659f451fSSteven Rostedt rb_check_pages(cpu_buffer); 1784659f451fSSteven Rostedt } 1785659f451fSSteven Rostedt atomic_dec(&buffer->record_disabled); 1786659f451fSSteven Rostedt } 1787659f451fSSteven Rostedt 17887a8e76a3SSteven Rostedt mutex_unlock(&buffer->mutex); 17897a8e76a3SSteven Rostedt return size; 17907a8e76a3SSteven Rostedt 179183f40318SVaibhav Nagarnaik out_err: 1792438ced17SVaibhav Nagarnaik for_each_buffer_cpu(buffer, cpu) { 1793438ced17SVaibhav Nagarnaik struct buffer_page *bpage, *tmp; 179483f40318SVaibhav Nagarnaik 1795438ced17SVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu]; 1796438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = 0; 179783f40318SVaibhav Nagarnaik 1798438ced17SVaibhav Nagarnaik if (list_empty(&cpu_buffer->new_pages)) 1799438ced17SVaibhav Nagarnaik continue; 180083f40318SVaibhav Nagarnaik 1801438ced17SVaibhav Nagarnaik list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 1802438ced17SVaibhav Nagarnaik list) { 1803044fa782SSteven Rostedt list_del_init(&bpage->list); 1804044fa782SSteven Rostedt free_buffer_page(bpage); 18057a8e76a3SSteven Rostedt } 1806438ced17SVaibhav Nagarnaik } 1807641d2f63SVegard Nossum mutex_unlock(&buffer->mutex); 180883f40318SVaibhav Nagarnaik return err; 18097a8e76a3SSteven Rostedt } 1810c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_resize); 18117a8e76a3SSteven Rostedt 1812750912faSDavid Sharp void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val) 1813750912faSDavid Sharp { 1814750912faSDavid Sharp mutex_lock(&buffer->mutex); 1815750912faSDavid Sharp if (val) 1816750912faSDavid Sharp buffer->flags |= RB_FL_OVERWRITE; 1817750912faSDavid Sharp else 1818750912faSDavid Sharp buffer->flags &= ~RB_FL_OVERWRITE; 1819750912faSDavid Sharp mutex_unlock(&buffer->mutex); 1820750912faSDavid Sharp } 1821750912faSDavid Sharp EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); 1822750912faSDavid Sharp 18238789a9e7SSteven Rostedt static inline void * 1824044fa782SSteven Rostedt __rb_data_page_index(struct buffer_data_page *bpage, unsigned index) 18258789a9e7SSteven Rostedt { 1826044fa782SSteven Rostedt return bpage->data + index; 18278789a9e7SSteven Rostedt } 18288789a9e7SSteven Rostedt 1829044fa782SSteven Rostedt static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 18307a8e76a3SSteven Rostedt { 1831044fa782SSteven Rostedt return bpage->page->data + index; 18327a8e76a3SSteven Rostedt } 18337a8e76a3SSteven Rostedt 18347a8e76a3SSteven Rostedt static inline struct ring_buffer_event * 1835d769041fSSteven Rostedt rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) 18367a8e76a3SSteven Rostedt { 18376f807acdSSteven Rostedt return __rb_page_index(cpu_buffer->reader_page, 18386f807acdSSteven Rostedt cpu_buffer->reader_page->read); 18396f807acdSSteven Rostedt } 18406f807acdSSteven Rostedt 18416f807acdSSteven Rostedt static inline struct ring_buffer_event * 18427a8e76a3SSteven Rostedt rb_iter_head_event(struct ring_buffer_iter *iter) 18437a8e76a3SSteven Rostedt { 18446f807acdSSteven Rostedt return __rb_page_index(iter->head_page, iter->head); 18457a8e76a3SSteven Rostedt } 18467a8e76a3SSteven Rostedt 1847bf41a158SSteven Rostedt static inline unsigned rb_page_commit(struct buffer_page *bpage) 1848bf41a158SSteven Rostedt { 1849abc9b56dSSteven Rostedt return local_read(&bpage->page->commit); 1850bf41a158SSteven Rostedt } 1851bf41a158SSteven Rostedt 185225985edcSLucas De Marchi /* Size is determined by what has been committed */ 1853bf41a158SSteven Rostedt static inline unsigned rb_page_size(struct buffer_page *bpage) 1854bf41a158SSteven Rostedt { 1855bf41a158SSteven Rostedt return rb_page_commit(bpage); 1856bf41a158SSteven Rostedt } 1857bf41a158SSteven Rostedt 1858bf41a158SSteven Rostedt static inline unsigned 1859bf41a158SSteven Rostedt rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) 1860bf41a158SSteven Rostedt { 1861bf41a158SSteven Rostedt return rb_page_commit(cpu_buffer->commit_page); 1862bf41a158SSteven Rostedt } 1863bf41a158SSteven Rostedt 1864bf41a158SSteven Rostedt static inline unsigned 1865bf41a158SSteven Rostedt rb_event_index(struct ring_buffer_event *event) 18667a8e76a3SSteven Rostedt { 1867bf41a158SSteven Rostedt unsigned long addr = (unsigned long)event; 1868bf41a158SSteven Rostedt 186922f470f8SSteven Rostedt return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; 18707a8e76a3SSteven Rostedt } 18717a8e76a3SSteven Rostedt 18720f0c85fcSSteven Rostedt static inline int 1873fa743953SSteven Rostedt rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, 1874bf41a158SSteven Rostedt struct ring_buffer_event *event) 18757a8e76a3SSteven Rostedt { 1876bf41a158SSteven Rostedt unsigned long addr = (unsigned long)event; 1877bf41a158SSteven Rostedt unsigned long index; 1878bf41a158SSteven Rostedt 1879bf41a158SSteven Rostedt index = rb_event_index(event); 1880bf41a158SSteven Rostedt addr &= PAGE_MASK; 1881bf41a158SSteven Rostedt 1882bf41a158SSteven Rostedt return cpu_buffer->commit_page->page == (void *)addr && 1883bf41a158SSteven Rostedt rb_commit_index(cpu_buffer) == index; 1884bf41a158SSteven Rostedt } 1885bf41a158SSteven Rostedt 188634a148bfSAndrew Morton static void 1887bf41a158SSteven Rostedt rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) 1888bf41a158SSteven Rostedt { 188977ae365eSSteven Rostedt unsigned long max_count; 189077ae365eSSteven Rostedt 1891bf41a158SSteven Rostedt /* 1892bf41a158SSteven Rostedt * We only race with interrupts and NMIs on this CPU. 1893bf41a158SSteven Rostedt * If we own the commit event, then we can commit 1894bf41a158SSteven Rostedt * all others that interrupted us, since the interruptions 1895bf41a158SSteven Rostedt * are in stack format (they finish before they come 1896bf41a158SSteven Rostedt * back to us). This allows us to do a simple loop to 1897bf41a158SSteven Rostedt * assign the commit to the tail. 1898bf41a158SSteven Rostedt */ 1899a8ccf1d6SSteven Rostedt again: 1900438ced17SVaibhav Nagarnaik max_count = cpu_buffer->nr_pages * 100; 190177ae365eSSteven Rostedt 1902bf41a158SSteven Rostedt while (cpu_buffer->commit_page != cpu_buffer->tail_page) { 190377ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer, !(--max_count))) 190477ae365eSSteven Rostedt return; 190577ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 190677ae365eSSteven Rostedt rb_is_reader_page(cpu_buffer->tail_page))) 190777ae365eSSteven Rostedt return; 190877ae365eSSteven Rostedt local_set(&cpu_buffer->commit_page->page->commit, 190977ae365eSSteven Rostedt rb_page_write(cpu_buffer->commit_page)); 1910bf41a158SSteven Rostedt rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); 1911abc9b56dSSteven Rostedt cpu_buffer->write_stamp = 1912abc9b56dSSteven Rostedt cpu_buffer->commit_page->page->time_stamp; 1913bf41a158SSteven Rostedt /* add barrier to keep gcc from optimizing too much */ 1914bf41a158SSteven Rostedt barrier(); 1915bf41a158SSteven Rostedt } 1916bf41a158SSteven Rostedt while (rb_commit_index(cpu_buffer) != 1917bf41a158SSteven Rostedt rb_page_write(cpu_buffer->commit_page)) { 191877ae365eSSteven Rostedt 191977ae365eSSteven Rostedt local_set(&cpu_buffer->commit_page->page->commit, 192077ae365eSSteven Rostedt rb_page_write(cpu_buffer->commit_page)); 192177ae365eSSteven Rostedt RB_WARN_ON(cpu_buffer, 192277ae365eSSteven Rostedt local_read(&cpu_buffer->commit_page->page->commit) & 192377ae365eSSteven Rostedt ~RB_WRITE_MASK); 1924bf41a158SSteven Rostedt barrier(); 1925bf41a158SSteven Rostedt } 1926a8ccf1d6SSteven Rostedt 1927a8ccf1d6SSteven Rostedt /* again, keep gcc from optimizing */ 1928a8ccf1d6SSteven Rostedt barrier(); 1929a8ccf1d6SSteven Rostedt 1930a8ccf1d6SSteven Rostedt /* 1931a8ccf1d6SSteven Rostedt * If an interrupt came in just after the first while loop 1932a8ccf1d6SSteven Rostedt * and pushed the tail page forward, we will be left with 1933a8ccf1d6SSteven Rostedt * a dangling commit that will never go forward. 1934a8ccf1d6SSteven Rostedt */ 1935a8ccf1d6SSteven Rostedt if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page)) 1936a8ccf1d6SSteven Rostedt goto again; 19377a8e76a3SSteven Rostedt } 19387a8e76a3SSteven Rostedt 1939d769041fSSteven Rostedt static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 19407a8e76a3SSteven Rostedt { 1941abc9b56dSSteven Rostedt cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; 19426f807acdSSteven Rostedt cpu_buffer->reader_page->read = 0; 1943d769041fSSteven Rostedt } 1944d769041fSSteven Rostedt 194534a148bfSAndrew Morton static void rb_inc_iter(struct ring_buffer_iter *iter) 1946d769041fSSteven Rostedt { 1947d769041fSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 1948d769041fSSteven Rostedt 1949d769041fSSteven Rostedt /* 1950d769041fSSteven Rostedt * The iterator could be on the reader page (it starts there). 1951d769041fSSteven Rostedt * But the head could have moved, since the reader was 1952d769041fSSteven Rostedt * found. Check for this case and assign the iterator 1953d769041fSSteven Rostedt * to the head page instead of next. 1954d769041fSSteven Rostedt */ 1955d769041fSSteven Rostedt if (iter->head_page == cpu_buffer->reader_page) 195677ae365eSSteven Rostedt iter->head_page = rb_set_head_page(cpu_buffer); 1957d769041fSSteven Rostedt else 1958d769041fSSteven Rostedt rb_inc_page(cpu_buffer, &iter->head_page); 1959d769041fSSteven Rostedt 1960abc9b56dSSteven Rostedt iter->read_stamp = iter->head_page->page->time_stamp; 19617a8e76a3SSteven Rostedt iter->head = 0; 19627a8e76a3SSteven Rostedt } 19637a8e76a3SSteven Rostedt 196469d1b839SSteven Rostedt /* Slow path, do not inline */ 196569d1b839SSteven Rostedt static noinline struct ring_buffer_event * 196669d1b839SSteven Rostedt rb_add_time_stamp(struct ring_buffer_event *event, u64 delta) 196769d1b839SSteven Rostedt { 196869d1b839SSteven Rostedt event->type_len = RINGBUF_TYPE_TIME_EXTEND; 196969d1b839SSteven Rostedt 197069d1b839SSteven Rostedt /* Not the first event on the page? */ 197169d1b839SSteven Rostedt if (rb_event_index(event)) { 197269d1b839SSteven Rostedt event->time_delta = delta & TS_MASK; 197369d1b839SSteven Rostedt event->array[0] = delta >> TS_SHIFT; 197469d1b839SSteven Rostedt } else { 197569d1b839SSteven Rostedt /* nope, just zero it */ 197669d1b839SSteven Rostedt event->time_delta = 0; 197769d1b839SSteven Rostedt event->array[0] = 0; 197869d1b839SSteven Rostedt } 197969d1b839SSteven Rostedt 198069d1b839SSteven Rostedt return skip_time_extend(event); 198169d1b839SSteven Rostedt } 198269d1b839SSteven Rostedt 19837a8e76a3SSteven Rostedt /** 198401e3e710SDavid Sharp * rb_update_event - update event type and data 19857a8e76a3SSteven Rostedt * @event: the even to update 19867a8e76a3SSteven Rostedt * @type: the type of event 19877a8e76a3SSteven Rostedt * @length: the size of the event field in the ring buffer 19887a8e76a3SSteven Rostedt * 19897a8e76a3SSteven Rostedt * Update the type and data fields of the event. The length 19907a8e76a3SSteven Rostedt * is the actual size that is written to the ring buffer, 19917a8e76a3SSteven Rostedt * and with this, we can determine what to place into the 19927a8e76a3SSteven Rostedt * data field. 19937a8e76a3SSteven Rostedt */ 199434a148bfSAndrew Morton static void 199569d1b839SSteven Rostedt rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, 199669d1b839SSteven Rostedt struct ring_buffer_event *event, unsigned length, 199769d1b839SSteven Rostedt int add_timestamp, u64 delta) 19987a8e76a3SSteven Rostedt { 199969d1b839SSteven Rostedt /* Only a commit updates the timestamp */ 200069d1b839SSteven Rostedt if (unlikely(!rb_event_is_commit(cpu_buffer, event))) 200169d1b839SSteven Rostedt delta = 0; 20027a8e76a3SSteven Rostedt 200369d1b839SSteven Rostedt /* 200469d1b839SSteven Rostedt * If we need to add a timestamp, then we 200569d1b839SSteven Rostedt * add it to the start of the resevered space. 200669d1b839SSteven Rostedt */ 200769d1b839SSteven Rostedt if (unlikely(add_timestamp)) { 200869d1b839SSteven Rostedt event = rb_add_time_stamp(event, delta); 200969d1b839SSteven Rostedt length -= RB_LEN_TIME_EXTEND; 201069d1b839SSteven Rostedt delta = 0; 20117a8e76a3SSteven Rostedt } 201269d1b839SSteven Rostedt 201369d1b839SSteven Rostedt event->time_delta = delta; 201469d1b839SSteven Rostedt length -= RB_EVNT_HDR_SIZE; 201569d1b839SSteven Rostedt if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { 201669d1b839SSteven Rostedt event->type_len = 0; 201769d1b839SSteven Rostedt event->array[0] = length; 201869d1b839SSteven Rostedt } else 201969d1b839SSteven Rostedt event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); 20207a8e76a3SSteven Rostedt } 20217a8e76a3SSteven Rostedt 202277ae365eSSteven Rostedt /* 202377ae365eSSteven Rostedt * rb_handle_head_page - writer hit the head page 202477ae365eSSteven Rostedt * 202577ae365eSSteven Rostedt * Returns: +1 to retry page 202677ae365eSSteven Rostedt * 0 to continue 202777ae365eSSteven Rostedt * -1 on error 202877ae365eSSteven Rostedt */ 202977ae365eSSteven Rostedt static int 203077ae365eSSteven Rostedt rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, 203177ae365eSSteven Rostedt struct buffer_page *tail_page, 203277ae365eSSteven Rostedt struct buffer_page *next_page) 203377ae365eSSteven Rostedt { 203477ae365eSSteven Rostedt struct buffer_page *new_head; 203577ae365eSSteven Rostedt int entries; 203677ae365eSSteven Rostedt int type; 203777ae365eSSteven Rostedt int ret; 203877ae365eSSteven Rostedt 203977ae365eSSteven Rostedt entries = rb_page_entries(next_page); 204077ae365eSSteven Rostedt 204177ae365eSSteven Rostedt /* 204277ae365eSSteven Rostedt * The hard part is here. We need to move the head 204377ae365eSSteven Rostedt * forward, and protect against both readers on 204477ae365eSSteven Rostedt * other CPUs and writers coming in via interrupts. 204577ae365eSSteven Rostedt */ 204677ae365eSSteven Rostedt type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, 204777ae365eSSteven Rostedt RB_PAGE_HEAD); 204877ae365eSSteven Rostedt 204977ae365eSSteven Rostedt /* 205077ae365eSSteven Rostedt * type can be one of four: 205177ae365eSSteven Rostedt * NORMAL - an interrupt already moved it for us 205277ae365eSSteven Rostedt * HEAD - we are the first to get here. 205377ae365eSSteven Rostedt * UPDATE - we are the interrupt interrupting 205477ae365eSSteven Rostedt * a current move. 205577ae365eSSteven Rostedt * MOVED - a reader on another CPU moved the next 205677ae365eSSteven Rostedt * pointer to its reader page. Give up 205777ae365eSSteven Rostedt * and try again. 205877ae365eSSteven Rostedt */ 205977ae365eSSteven Rostedt 206077ae365eSSteven Rostedt switch (type) { 206177ae365eSSteven Rostedt case RB_PAGE_HEAD: 206277ae365eSSteven Rostedt /* 206377ae365eSSteven Rostedt * We changed the head to UPDATE, thus 206477ae365eSSteven Rostedt * it is our responsibility to update 206577ae365eSSteven Rostedt * the counters. 206677ae365eSSteven Rostedt */ 206777ae365eSSteven Rostedt local_add(entries, &cpu_buffer->overrun); 2068c64e148aSVaibhav Nagarnaik local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 206977ae365eSSteven Rostedt 207077ae365eSSteven Rostedt /* 207177ae365eSSteven Rostedt * The entries will be zeroed out when we move the 207277ae365eSSteven Rostedt * tail page. 207377ae365eSSteven Rostedt */ 207477ae365eSSteven Rostedt 207577ae365eSSteven Rostedt /* still more to do */ 207677ae365eSSteven Rostedt break; 207777ae365eSSteven Rostedt 207877ae365eSSteven Rostedt case RB_PAGE_UPDATE: 207977ae365eSSteven Rostedt /* 208077ae365eSSteven Rostedt * This is an interrupt that interrupt the 208177ae365eSSteven Rostedt * previous update. Still more to do. 208277ae365eSSteven Rostedt */ 208377ae365eSSteven Rostedt break; 208477ae365eSSteven Rostedt case RB_PAGE_NORMAL: 208577ae365eSSteven Rostedt /* 208677ae365eSSteven Rostedt * An interrupt came in before the update 208777ae365eSSteven Rostedt * and processed this for us. 208877ae365eSSteven Rostedt * Nothing left to do. 208977ae365eSSteven Rostedt */ 209077ae365eSSteven Rostedt return 1; 209177ae365eSSteven Rostedt case RB_PAGE_MOVED: 209277ae365eSSteven Rostedt /* 209377ae365eSSteven Rostedt * The reader is on another CPU and just did 209477ae365eSSteven Rostedt * a swap with our next_page. 209577ae365eSSteven Rostedt * Try again. 209677ae365eSSteven Rostedt */ 209777ae365eSSteven Rostedt return 1; 209877ae365eSSteven Rostedt default: 209977ae365eSSteven Rostedt RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ 210077ae365eSSteven Rostedt return -1; 210177ae365eSSteven Rostedt } 210277ae365eSSteven Rostedt 210377ae365eSSteven Rostedt /* 210477ae365eSSteven Rostedt * Now that we are here, the old head pointer is 210577ae365eSSteven Rostedt * set to UPDATE. This will keep the reader from 210677ae365eSSteven Rostedt * swapping the head page with the reader page. 210777ae365eSSteven Rostedt * The reader (on another CPU) will spin till 210877ae365eSSteven Rostedt * we are finished. 210977ae365eSSteven Rostedt * 211077ae365eSSteven Rostedt * We just need to protect against interrupts 211177ae365eSSteven Rostedt * doing the job. We will set the next pointer 211277ae365eSSteven Rostedt * to HEAD. After that, we set the old pointer 211377ae365eSSteven Rostedt * to NORMAL, but only if it was HEAD before. 211477ae365eSSteven Rostedt * otherwise we are an interrupt, and only 211577ae365eSSteven Rostedt * want the outer most commit to reset it. 211677ae365eSSteven Rostedt */ 211777ae365eSSteven Rostedt new_head = next_page; 211877ae365eSSteven Rostedt rb_inc_page(cpu_buffer, &new_head); 211977ae365eSSteven Rostedt 212077ae365eSSteven Rostedt ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, 212177ae365eSSteven Rostedt RB_PAGE_NORMAL); 212277ae365eSSteven Rostedt 212377ae365eSSteven Rostedt /* 212477ae365eSSteven Rostedt * Valid returns are: 212577ae365eSSteven Rostedt * HEAD - an interrupt came in and already set it. 212677ae365eSSteven Rostedt * NORMAL - One of two things: 212777ae365eSSteven Rostedt * 1) We really set it. 212877ae365eSSteven Rostedt * 2) A bunch of interrupts came in and moved 212977ae365eSSteven Rostedt * the page forward again. 213077ae365eSSteven Rostedt */ 213177ae365eSSteven Rostedt switch (ret) { 213277ae365eSSteven Rostedt case RB_PAGE_HEAD: 213377ae365eSSteven Rostedt case RB_PAGE_NORMAL: 213477ae365eSSteven Rostedt /* OK */ 213577ae365eSSteven Rostedt break; 213677ae365eSSteven Rostedt default: 213777ae365eSSteven Rostedt RB_WARN_ON(cpu_buffer, 1); 213877ae365eSSteven Rostedt return -1; 213977ae365eSSteven Rostedt } 214077ae365eSSteven Rostedt 214177ae365eSSteven Rostedt /* 214277ae365eSSteven Rostedt * It is possible that an interrupt came in, 214377ae365eSSteven Rostedt * set the head up, then more interrupts came in 214477ae365eSSteven Rostedt * and moved it again. When we get back here, 214577ae365eSSteven Rostedt * the page would have been set to NORMAL but we 214677ae365eSSteven Rostedt * just set it back to HEAD. 214777ae365eSSteven Rostedt * 214877ae365eSSteven Rostedt * How do you detect this? Well, if that happened 214977ae365eSSteven Rostedt * the tail page would have moved. 215077ae365eSSteven Rostedt */ 215177ae365eSSteven Rostedt if (ret == RB_PAGE_NORMAL) { 215277ae365eSSteven Rostedt /* 215377ae365eSSteven Rostedt * If the tail had moved passed next, then we need 215477ae365eSSteven Rostedt * to reset the pointer. 215577ae365eSSteven Rostedt */ 215677ae365eSSteven Rostedt if (cpu_buffer->tail_page != tail_page && 215777ae365eSSteven Rostedt cpu_buffer->tail_page != next_page) 215877ae365eSSteven Rostedt rb_head_page_set_normal(cpu_buffer, new_head, 215977ae365eSSteven Rostedt next_page, 216077ae365eSSteven Rostedt RB_PAGE_HEAD); 216177ae365eSSteven Rostedt } 216277ae365eSSteven Rostedt 216377ae365eSSteven Rostedt /* 216477ae365eSSteven Rostedt * If this was the outer most commit (the one that 216577ae365eSSteven Rostedt * changed the original pointer from HEAD to UPDATE), 216677ae365eSSteven Rostedt * then it is up to us to reset it to NORMAL. 216777ae365eSSteven Rostedt */ 216877ae365eSSteven Rostedt if (type == RB_PAGE_HEAD) { 216977ae365eSSteven Rostedt ret = rb_head_page_set_normal(cpu_buffer, next_page, 217077ae365eSSteven Rostedt tail_page, 217177ae365eSSteven Rostedt RB_PAGE_UPDATE); 217277ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 217377ae365eSSteven Rostedt ret != RB_PAGE_UPDATE)) 217477ae365eSSteven Rostedt return -1; 217577ae365eSSteven Rostedt } 217677ae365eSSteven Rostedt 217777ae365eSSteven Rostedt return 0; 217877ae365eSSteven Rostedt } 217977ae365eSSteven Rostedt 218034a148bfSAndrew Morton static unsigned rb_calculate_event_length(unsigned length) 21817a8e76a3SSteven Rostedt { 21827a8e76a3SSteven Rostedt struct ring_buffer_event event; /* Used only for sizeof array */ 21837a8e76a3SSteven Rostedt 21847a8e76a3SSteven Rostedt /* zero length can cause confusions */ 21857a8e76a3SSteven Rostedt if (!length) 21867a8e76a3SSteven Rostedt length = 1; 21877a8e76a3SSteven Rostedt 21882271048dSSteven Rostedt if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) 21897a8e76a3SSteven Rostedt length += sizeof(event.array[0]); 21907a8e76a3SSteven Rostedt 21917a8e76a3SSteven Rostedt length += RB_EVNT_HDR_SIZE; 21922271048dSSteven Rostedt length = ALIGN(length, RB_ARCH_ALIGNMENT); 21937a8e76a3SSteven Rostedt 21947a8e76a3SSteven Rostedt return length; 21957a8e76a3SSteven Rostedt } 21967a8e76a3SSteven Rostedt 2197c7b09308SSteven Rostedt static inline void 2198c7b09308SSteven Rostedt rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, 2199c7b09308SSteven Rostedt struct buffer_page *tail_page, 2200c7b09308SSteven Rostedt unsigned long tail, unsigned long length) 2201c7b09308SSteven Rostedt { 2202c7b09308SSteven Rostedt struct ring_buffer_event *event; 2203c7b09308SSteven Rostedt 2204c7b09308SSteven Rostedt /* 2205c7b09308SSteven Rostedt * Only the event that crossed the page boundary 2206c7b09308SSteven Rostedt * must fill the old tail_page with padding. 2207c7b09308SSteven Rostedt */ 2208c7b09308SSteven Rostedt if (tail >= BUF_PAGE_SIZE) { 2209b3230c8bSSteven Rostedt /* 2210b3230c8bSSteven Rostedt * If the page was filled, then we still need 2211b3230c8bSSteven Rostedt * to update the real_end. Reset it to zero 2212b3230c8bSSteven Rostedt * and the reader will ignore it. 2213b3230c8bSSteven Rostedt */ 2214b3230c8bSSteven Rostedt if (tail == BUF_PAGE_SIZE) 2215b3230c8bSSteven Rostedt tail_page->real_end = 0; 2216b3230c8bSSteven Rostedt 2217c7b09308SSteven Rostedt local_sub(length, &tail_page->write); 2218c7b09308SSteven Rostedt return; 2219c7b09308SSteven Rostedt } 2220c7b09308SSteven Rostedt 2221c7b09308SSteven Rostedt event = __rb_page_index(tail_page, tail); 2222b0b7065bSLinus Torvalds kmemcheck_annotate_bitfield(event, bitfield); 2223c7b09308SSteven Rostedt 2224c64e148aSVaibhav Nagarnaik /* account for padding bytes */ 2225c64e148aSVaibhav Nagarnaik local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); 2226c64e148aSVaibhav Nagarnaik 2227c7b09308SSteven Rostedt /* 2228ff0ff84aSSteven Rostedt * Save the original length to the meta data. 2229ff0ff84aSSteven Rostedt * This will be used by the reader to add lost event 2230ff0ff84aSSteven Rostedt * counter. 2231ff0ff84aSSteven Rostedt */ 2232ff0ff84aSSteven Rostedt tail_page->real_end = tail; 2233ff0ff84aSSteven Rostedt 2234ff0ff84aSSteven Rostedt /* 2235c7b09308SSteven Rostedt * If this event is bigger than the minimum size, then 2236c7b09308SSteven Rostedt * we need to be careful that we don't subtract the 2237c7b09308SSteven Rostedt * write counter enough to allow another writer to slip 2238c7b09308SSteven Rostedt * in on this page. 2239c7b09308SSteven Rostedt * We put in a discarded commit instead, to make sure 2240c7b09308SSteven Rostedt * that this space is not used again. 2241c7b09308SSteven Rostedt * 2242c7b09308SSteven Rostedt * If we are less than the minimum size, we don't need to 2243c7b09308SSteven Rostedt * worry about it. 2244c7b09308SSteven Rostedt */ 2245c7b09308SSteven Rostedt if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { 2246c7b09308SSteven Rostedt /* No room for any events */ 2247c7b09308SSteven Rostedt 2248c7b09308SSteven Rostedt /* Mark the rest of the page with padding */ 2249c7b09308SSteven Rostedt rb_event_set_padding(event); 2250c7b09308SSteven Rostedt 2251c7b09308SSteven Rostedt /* Set the write back to the previous setting */ 2252c7b09308SSteven Rostedt local_sub(length, &tail_page->write); 2253c7b09308SSteven Rostedt return; 2254c7b09308SSteven Rostedt } 2255c7b09308SSteven Rostedt 2256c7b09308SSteven Rostedt /* Put in a discarded event */ 2257c7b09308SSteven Rostedt event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; 2258c7b09308SSteven Rostedt event->type_len = RINGBUF_TYPE_PADDING; 2259c7b09308SSteven Rostedt /* time delta must be non zero */ 2260c7b09308SSteven Rostedt event->time_delta = 1; 2261c7b09308SSteven Rostedt 2262c7b09308SSteven Rostedt /* Set write to end of buffer */ 2263c7b09308SSteven Rostedt length = (tail + length) - BUF_PAGE_SIZE; 2264c7b09308SSteven Rostedt local_sub(length, &tail_page->write); 2265c7b09308SSteven Rostedt } 22666634ff26SSteven Rostedt 2267747e94aeSSteven Rostedt /* 2268747e94aeSSteven Rostedt * This is the slow path, force gcc not to inline it. 2269747e94aeSSteven Rostedt */ 2270747e94aeSSteven Rostedt static noinline struct ring_buffer_event * 22716634ff26SSteven Rostedt rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 22726634ff26SSteven Rostedt unsigned long length, unsigned long tail, 2273e8bc43e8SSteven Rostedt struct buffer_page *tail_page, u64 ts) 22747a8e76a3SSteven Rostedt { 22755a50e33cSSteven Rostedt struct buffer_page *commit_page = cpu_buffer->commit_page; 22767a8e76a3SSteven Rostedt struct ring_buffer *buffer = cpu_buffer->buffer; 227777ae365eSSteven Rostedt struct buffer_page *next_page; 227877ae365eSSteven Rostedt int ret; 2279aa20ae84SSteven Rostedt 2280aa20ae84SSteven Rostedt next_page = tail_page; 22817a8e76a3SSteven Rostedt 22827a8e76a3SSteven Rostedt rb_inc_page(cpu_buffer, &next_page); 22837a8e76a3SSteven Rostedt 2284bf41a158SSteven Rostedt /* 2285bf41a158SSteven Rostedt * If for some reason, we had an interrupt storm that made 2286bf41a158SSteven Rostedt * it all the way around the buffer, bail, and warn 2287bf41a158SSteven Rostedt * about it. 2288bf41a158SSteven Rostedt */ 228998db8df7SSteven Rostedt if (unlikely(next_page == commit_page)) { 229077ae365eSSteven Rostedt local_inc(&cpu_buffer->commit_overrun); 229145141d46SSteven Rostedt goto out_reset; 2292bf41a158SSteven Rostedt } 2293d769041fSSteven Rostedt 2294bf41a158SSteven Rostedt /* 229577ae365eSSteven Rostedt * This is where the fun begins! 229677ae365eSSteven Rostedt * 229777ae365eSSteven Rostedt * We are fighting against races between a reader that 229877ae365eSSteven Rostedt * could be on another CPU trying to swap its reader 229977ae365eSSteven Rostedt * page with the buffer head. 230077ae365eSSteven Rostedt * 230177ae365eSSteven Rostedt * We are also fighting against interrupts coming in and 230277ae365eSSteven Rostedt * moving the head or tail on us as well. 230377ae365eSSteven Rostedt * 230477ae365eSSteven Rostedt * If the next page is the head page then we have filled 230577ae365eSSteven Rostedt * the buffer, unless the commit page is still on the 230677ae365eSSteven Rostedt * reader page. 2307bf41a158SSteven Rostedt */ 230877ae365eSSteven Rostedt if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) { 2309bf41a158SSteven Rostedt 231077ae365eSSteven Rostedt /* 231177ae365eSSteven Rostedt * If the commit is not on the reader page, then 231277ae365eSSteven Rostedt * move the header page. 231377ae365eSSteven Rostedt */ 231477ae365eSSteven Rostedt if (!rb_is_reader_page(cpu_buffer->commit_page)) { 231577ae365eSSteven Rostedt /* 231677ae365eSSteven Rostedt * If we are not in overwrite mode, 231777ae365eSSteven Rostedt * this is easy, just stop here. 231877ae365eSSteven Rostedt */ 2319884bfe89SSlava Pestov if (!(buffer->flags & RB_FL_OVERWRITE)) { 2320884bfe89SSlava Pestov local_inc(&cpu_buffer->dropped_events); 232177ae365eSSteven Rostedt goto out_reset; 2322884bfe89SSlava Pestov } 232377ae365eSSteven Rostedt 232477ae365eSSteven Rostedt ret = rb_handle_head_page(cpu_buffer, 232577ae365eSSteven Rostedt tail_page, 232677ae365eSSteven Rostedt next_page); 232777ae365eSSteven Rostedt if (ret < 0) 232877ae365eSSteven Rostedt goto out_reset; 232977ae365eSSteven Rostedt if (ret) 233077ae365eSSteven Rostedt goto out_again; 233177ae365eSSteven Rostedt } else { 233277ae365eSSteven Rostedt /* 233377ae365eSSteven Rostedt * We need to be careful here too. The 233477ae365eSSteven Rostedt * commit page could still be on the reader 233577ae365eSSteven Rostedt * page. We could have a small buffer, and 233677ae365eSSteven Rostedt * have filled up the buffer with events 233777ae365eSSteven Rostedt * from interrupts and such, and wrapped. 233877ae365eSSteven Rostedt * 233977ae365eSSteven Rostedt * Note, if the tail page is also the on the 234077ae365eSSteven Rostedt * reader_page, we let it move out. 234177ae365eSSteven Rostedt */ 234277ae365eSSteven Rostedt if (unlikely((cpu_buffer->commit_page != 234377ae365eSSteven Rostedt cpu_buffer->tail_page) && 234477ae365eSSteven Rostedt (cpu_buffer->commit_page == 234577ae365eSSteven Rostedt cpu_buffer->reader_page))) { 234677ae365eSSteven Rostedt local_inc(&cpu_buffer->commit_overrun); 234777ae365eSSteven Rostedt goto out_reset; 234877ae365eSSteven Rostedt } 234977ae365eSSteven Rostedt } 2350bf41a158SSteven Rostedt } 2351bf41a158SSteven Rostedt 235277ae365eSSteven Rostedt ret = rb_tail_page_update(cpu_buffer, tail_page, next_page); 235377ae365eSSteven Rostedt if (ret) { 235477ae365eSSteven Rostedt /* 235577ae365eSSteven Rostedt * Nested commits always have zero deltas, so 235677ae365eSSteven Rostedt * just reread the time stamp 235777ae365eSSteven Rostedt */ 2358e8bc43e8SSteven Rostedt ts = rb_time_stamp(buffer); 2359e8bc43e8SSteven Rostedt next_page->page->time_stamp = ts; 236077ae365eSSteven Rostedt } 23617a8e76a3SSteven Rostedt 236277ae365eSSteven Rostedt out_again: 236377ae365eSSteven Rostedt 236477ae365eSSteven Rostedt rb_reset_tail(cpu_buffer, tail_page, tail, length); 2365bf41a158SSteven Rostedt 2366bf41a158SSteven Rostedt /* fail and let the caller try again */ 2367bf41a158SSteven Rostedt return ERR_PTR(-EAGAIN); 2368bf41a158SSteven Rostedt 236945141d46SSteven Rostedt out_reset: 23706f3b3440SLai Jiangshan /* reset write */ 2371c7b09308SSteven Rostedt rb_reset_tail(cpu_buffer, tail_page, tail, length); 23726f3b3440SLai Jiangshan 2373bf41a158SSteven Rostedt return NULL; 23747a8e76a3SSteven Rostedt } 23757a8e76a3SSteven Rostedt 23766634ff26SSteven Rostedt static struct ring_buffer_event * 23776634ff26SSteven Rostedt __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 237869d1b839SSteven Rostedt unsigned long length, u64 ts, 237969d1b839SSteven Rostedt u64 delta, int add_timestamp) 23806634ff26SSteven Rostedt { 23815a50e33cSSteven Rostedt struct buffer_page *tail_page; 23826634ff26SSteven Rostedt struct ring_buffer_event *event; 23836634ff26SSteven Rostedt unsigned long tail, write; 23846634ff26SSteven Rostedt 238569d1b839SSteven Rostedt /* 238669d1b839SSteven Rostedt * If the time delta since the last event is too big to 238769d1b839SSteven Rostedt * hold in the time field of the event, then we append a 238869d1b839SSteven Rostedt * TIME EXTEND event ahead of the data event. 238969d1b839SSteven Rostedt */ 239069d1b839SSteven Rostedt if (unlikely(add_timestamp)) 239169d1b839SSteven Rostedt length += RB_LEN_TIME_EXTEND; 239269d1b839SSteven Rostedt 23936634ff26SSteven Rostedt tail_page = cpu_buffer->tail_page; 23946634ff26SSteven Rostedt write = local_add_return(length, &tail_page->write); 239577ae365eSSteven Rostedt 239677ae365eSSteven Rostedt /* set write to only the index of the write */ 239777ae365eSSteven Rostedt write &= RB_WRITE_MASK; 23986634ff26SSteven Rostedt tail = write - length; 23996634ff26SSteven Rostedt 2400*d651aa1dSSteven Rostedt (Red Hat) /* 2401*d651aa1dSSteven Rostedt (Red Hat) * If this is the first commit on the page, then it has the same 2402*d651aa1dSSteven Rostedt (Red Hat) * timestamp as the page itself. 2403*d651aa1dSSteven Rostedt (Red Hat) */ 2404*d651aa1dSSteven Rostedt (Red Hat) if (!tail) 2405*d651aa1dSSteven Rostedt (Red Hat) delta = 0; 2406*d651aa1dSSteven Rostedt (Red Hat) 24076634ff26SSteven Rostedt /* See if we shot pass the end of this buffer page */ 2408747e94aeSSteven Rostedt if (unlikely(write > BUF_PAGE_SIZE)) 24096634ff26SSteven Rostedt return rb_move_tail(cpu_buffer, length, tail, 24105a50e33cSSteven Rostedt tail_page, ts); 24116634ff26SSteven Rostedt 24126634ff26SSteven Rostedt /* We reserved something on the buffer */ 24136634ff26SSteven Rostedt 24146634ff26SSteven Rostedt event = __rb_page_index(tail_page, tail); 24151744a21dSVegard Nossum kmemcheck_annotate_bitfield(event, bitfield); 241669d1b839SSteven Rostedt rb_update_event(cpu_buffer, event, length, add_timestamp, delta); 24176634ff26SSteven Rostedt 24186634ff26SSteven Rostedt local_inc(&tail_page->entries); 24196634ff26SSteven Rostedt 24206634ff26SSteven Rostedt /* 2421fa743953SSteven Rostedt * If this is the first commit on the page, then update 2422fa743953SSteven Rostedt * its timestamp. 24236634ff26SSteven Rostedt */ 2424fa743953SSteven Rostedt if (!tail) 2425e8bc43e8SSteven Rostedt tail_page->page->time_stamp = ts; 24266634ff26SSteven Rostedt 2427c64e148aSVaibhav Nagarnaik /* account for these added bytes */ 2428c64e148aSVaibhav Nagarnaik local_add(length, &cpu_buffer->entries_bytes); 2429c64e148aSVaibhav Nagarnaik 24306634ff26SSteven Rostedt return event; 24316634ff26SSteven Rostedt } 24326634ff26SSteven Rostedt 2433edd813bfSSteven Rostedt static inline int 2434edd813bfSSteven Rostedt rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, 2435edd813bfSSteven Rostedt struct ring_buffer_event *event) 2436edd813bfSSteven Rostedt { 2437edd813bfSSteven Rostedt unsigned long new_index, old_index; 2438edd813bfSSteven Rostedt struct buffer_page *bpage; 2439edd813bfSSteven Rostedt unsigned long index; 2440edd813bfSSteven Rostedt unsigned long addr; 2441edd813bfSSteven Rostedt 2442edd813bfSSteven Rostedt new_index = rb_event_index(event); 244369d1b839SSteven Rostedt old_index = new_index + rb_event_ts_length(event); 2444edd813bfSSteven Rostedt addr = (unsigned long)event; 2445edd813bfSSteven Rostedt addr &= PAGE_MASK; 2446edd813bfSSteven Rostedt 2447edd813bfSSteven Rostedt bpage = cpu_buffer->tail_page; 2448edd813bfSSteven Rostedt 2449edd813bfSSteven Rostedt if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { 245077ae365eSSteven Rostedt unsigned long write_mask = 245177ae365eSSteven Rostedt local_read(&bpage->write) & ~RB_WRITE_MASK; 2452c64e148aSVaibhav Nagarnaik unsigned long event_length = rb_event_length(event); 2453edd813bfSSteven Rostedt /* 2454edd813bfSSteven Rostedt * This is on the tail page. It is possible that 2455edd813bfSSteven Rostedt * a write could come in and move the tail page 2456edd813bfSSteven Rostedt * and write to the next page. That is fine 2457edd813bfSSteven Rostedt * because we just shorten what is on this page. 2458edd813bfSSteven Rostedt */ 245977ae365eSSteven Rostedt old_index += write_mask; 246077ae365eSSteven Rostedt new_index += write_mask; 2461edd813bfSSteven Rostedt index = local_cmpxchg(&bpage->write, old_index, new_index); 2462c64e148aSVaibhav Nagarnaik if (index == old_index) { 2463c64e148aSVaibhav Nagarnaik /* update counters */ 2464c64e148aSVaibhav Nagarnaik local_sub(event_length, &cpu_buffer->entries_bytes); 2465edd813bfSSteven Rostedt return 1; 2466edd813bfSSteven Rostedt } 2467c64e148aSVaibhav Nagarnaik } 2468edd813bfSSteven Rostedt 2469edd813bfSSteven Rostedt /* could not discard */ 2470edd813bfSSteven Rostedt return 0; 2471edd813bfSSteven Rostedt } 2472edd813bfSSteven Rostedt 2473fa743953SSteven Rostedt static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) 2474fa743953SSteven Rostedt { 2475fa743953SSteven Rostedt local_inc(&cpu_buffer->committing); 2476fa743953SSteven Rostedt local_inc(&cpu_buffer->commits); 2477fa743953SSteven Rostedt } 2478fa743953SSteven Rostedt 2479d9abde21SSteven Rostedt static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) 2480fa743953SSteven Rostedt { 2481fa743953SSteven Rostedt unsigned long commits; 2482fa743953SSteven Rostedt 2483fa743953SSteven Rostedt if (RB_WARN_ON(cpu_buffer, 2484fa743953SSteven Rostedt !local_read(&cpu_buffer->committing))) 2485fa743953SSteven Rostedt return; 2486fa743953SSteven Rostedt 2487fa743953SSteven Rostedt again: 2488fa743953SSteven Rostedt commits = local_read(&cpu_buffer->commits); 2489fa743953SSteven Rostedt /* synchronize with interrupts */ 2490fa743953SSteven Rostedt barrier(); 2491fa743953SSteven Rostedt if (local_read(&cpu_buffer->committing) == 1) 2492fa743953SSteven Rostedt rb_set_commit_to_write(cpu_buffer); 2493fa743953SSteven Rostedt 2494fa743953SSteven Rostedt local_dec(&cpu_buffer->committing); 2495fa743953SSteven Rostedt 2496fa743953SSteven Rostedt /* synchronize with interrupts */ 2497fa743953SSteven Rostedt barrier(); 2498fa743953SSteven Rostedt 2499fa743953SSteven Rostedt /* 2500fa743953SSteven Rostedt * Need to account for interrupts coming in between the 2501fa743953SSteven Rostedt * updating of the commit page and the clearing of the 2502fa743953SSteven Rostedt * committing counter. 2503fa743953SSteven Rostedt */ 2504fa743953SSteven Rostedt if (unlikely(local_read(&cpu_buffer->commits) != commits) && 2505fa743953SSteven Rostedt !local_read(&cpu_buffer->committing)) { 2506fa743953SSteven Rostedt local_inc(&cpu_buffer->committing); 2507fa743953SSteven Rostedt goto again; 2508fa743953SSteven Rostedt } 2509fa743953SSteven Rostedt } 2510fa743953SSteven Rostedt 25117a8e76a3SSteven Rostedt static struct ring_buffer_event * 251262f0b3ebSSteven Rostedt rb_reserve_next_event(struct ring_buffer *buffer, 251362f0b3ebSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer, 25141cd8d735SSteven Rostedt unsigned long length) 25157a8e76a3SSteven Rostedt { 25167a8e76a3SSteven Rostedt struct ring_buffer_event *event; 251769d1b839SSteven Rostedt u64 ts, delta; 2518818e3dd3SSteven Rostedt int nr_loops = 0; 251969d1b839SSteven Rostedt int add_timestamp; 2520140ff891SSteven Rostedt u64 diff; 25217a8e76a3SSteven Rostedt 2522fa743953SSteven Rostedt rb_start_commit(cpu_buffer); 2523fa743953SSteven Rostedt 252485bac32cSSteven Rostedt #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 252562f0b3ebSSteven Rostedt /* 252662f0b3ebSSteven Rostedt * Due to the ability to swap a cpu buffer from a buffer 252762f0b3ebSSteven Rostedt * it is possible it was swapped before we committed. 252862f0b3ebSSteven Rostedt * (committing stops a swap). We check for it here and 252962f0b3ebSSteven Rostedt * if it happened, we have to fail the write. 253062f0b3ebSSteven Rostedt */ 253162f0b3ebSSteven Rostedt barrier(); 253262f0b3ebSSteven Rostedt if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) { 253362f0b3ebSSteven Rostedt local_dec(&cpu_buffer->committing); 253462f0b3ebSSteven Rostedt local_dec(&cpu_buffer->commits); 253562f0b3ebSSteven Rostedt return NULL; 253662f0b3ebSSteven Rostedt } 253785bac32cSSteven Rostedt #endif 253862f0b3ebSSteven Rostedt 2539be957c44SSteven Rostedt length = rb_calculate_event_length(length); 2540bf41a158SSteven Rostedt again: 254169d1b839SSteven Rostedt add_timestamp = 0; 254269d1b839SSteven Rostedt delta = 0; 254369d1b839SSteven Rostedt 2544818e3dd3SSteven Rostedt /* 2545818e3dd3SSteven Rostedt * We allow for interrupts to reenter here and do a trace. 2546818e3dd3SSteven Rostedt * If one does, it will cause this original code to loop 2547818e3dd3SSteven Rostedt * back here. Even with heavy interrupts happening, this 2548818e3dd3SSteven Rostedt * should only happen a few times in a row. If this happens 2549818e3dd3SSteven Rostedt * 1000 times in a row, there must be either an interrupt 2550818e3dd3SSteven Rostedt * storm or we have something buggy. 2551818e3dd3SSteven Rostedt * Bail! 2552818e3dd3SSteven Rostedt */ 25533e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 2554fa743953SSteven Rostedt goto out_fail; 2555818e3dd3SSteven Rostedt 25566d3f1e12SJiri Olsa ts = rb_time_stamp(cpu_buffer->buffer); 2557168b6b1dSSteven Rostedt diff = ts - cpu_buffer->write_stamp; 25587a8e76a3SSteven Rostedt 2559168b6b1dSSteven Rostedt /* make sure this diff is calculated here */ 2560bf41a158SSteven Rostedt barrier(); 25617a8e76a3SSteven Rostedt 2562bf41a158SSteven Rostedt /* Did the write stamp get updated already? */ 2563140ff891SSteven Rostedt if (likely(ts >= cpu_buffer->write_stamp)) { 2564168b6b1dSSteven Rostedt delta = diff; 2565168b6b1dSSteven Rostedt if (unlikely(test_time_stamp(delta))) { 256631274d72SJiri Olsa int local_clock_stable = 1; 256731274d72SJiri Olsa #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 256835af99e6SPeter Zijlstra local_clock_stable = sched_clock_stable(); 256931274d72SJiri Olsa #endif 257069d1b839SSteven Rostedt WARN_ONCE(delta > (1ULL << 59), 257131274d72SJiri Olsa KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s", 257269d1b839SSteven Rostedt (unsigned long long)delta, 257369d1b839SSteven Rostedt (unsigned long long)ts, 257431274d72SJiri Olsa (unsigned long long)cpu_buffer->write_stamp, 257531274d72SJiri Olsa local_clock_stable ? "" : 257631274d72SJiri Olsa "If you just came from a suspend/resume,\n" 257731274d72SJiri Olsa "please switch to the trace global clock:\n" 257831274d72SJiri Olsa " echo global > /sys/kernel/debug/tracing/trace_clock\n"); 257969d1b839SSteven Rostedt add_timestamp = 1; 25807a8e76a3SSteven Rostedt } 2581168b6b1dSSteven Rostedt } 25827a8e76a3SSteven Rostedt 258369d1b839SSteven Rostedt event = __rb_reserve_next(cpu_buffer, length, ts, 258469d1b839SSteven Rostedt delta, add_timestamp); 2585168b6b1dSSteven Rostedt if (unlikely(PTR_ERR(event) == -EAGAIN)) 2586bf41a158SSteven Rostedt goto again; 25877a8e76a3SSteven Rostedt 2588fa743953SSteven Rostedt if (!event) 2589fa743953SSteven Rostedt goto out_fail; 2590bf41a158SSteven Rostedt 25917a8e76a3SSteven Rostedt return event; 2592fa743953SSteven Rostedt 2593fa743953SSteven Rostedt out_fail: 2594fa743953SSteven Rostedt rb_end_commit(cpu_buffer); 2595fa743953SSteven Rostedt return NULL; 25967a8e76a3SSteven Rostedt } 25977a8e76a3SSteven Rostedt 25981155de47SPaul Mundt #ifdef CONFIG_TRACING 25991155de47SPaul Mundt 2600567cd4daSSteven Rostedt /* 2601567cd4daSSteven Rostedt * The lock and unlock are done within a preempt disable section. 2602567cd4daSSteven Rostedt * The current_context per_cpu variable can only be modified 2603567cd4daSSteven Rostedt * by the current task between lock and unlock. But it can 2604567cd4daSSteven Rostedt * be modified more than once via an interrupt. To pass this 2605567cd4daSSteven Rostedt * information from the lock to the unlock without having to 2606567cd4daSSteven Rostedt * access the 'in_interrupt()' functions again (which do show 2607567cd4daSSteven Rostedt * a bit of overhead in something as critical as function tracing, 2608567cd4daSSteven Rostedt * we use a bitmask trick. 2609567cd4daSSteven Rostedt * 2610567cd4daSSteven Rostedt * bit 0 = NMI context 2611567cd4daSSteven Rostedt * bit 1 = IRQ context 2612567cd4daSSteven Rostedt * bit 2 = SoftIRQ context 2613567cd4daSSteven Rostedt * bit 3 = normal context. 2614567cd4daSSteven Rostedt * 2615567cd4daSSteven Rostedt * This works because this is the order of contexts that can 2616567cd4daSSteven Rostedt * preempt other contexts. A SoftIRQ never preempts an IRQ 2617567cd4daSSteven Rostedt * context. 2618567cd4daSSteven Rostedt * 2619567cd4daSSteven Rostedt * When the context is determined, the corresponding bit is 2620567cd4daSSteven Rostedt * checked and set (if it was set, then a recursion of that context 2621567cd4daSSteven Rostedt * happened). 2622567cd4daSSteven Rostedt * 2623567cd4daSSteven Rostedt * On unlock, we need to clear this bit. To do so, just subtract 2624567cd4daSSteven Rostedt * 1 from the current_context and AND it to itself. 2625567cd4daSSteven Rostedt * 2626567cd4daSSteven Rostedt * (binary) 2627567cd4daSSteven Rostedt * 101 - 1 = 100 2628567cd4daSSteven Rostedt * 101 & 100 = 100 (clearing bit zero) 2629567cd4daSSteven Rostedt * 2630567cd4daSSteven Rostedt * 1010 - 1 = 1001 2631567cd4daSSteven Rostedt * 1010 & 1001 = 1000 (clearing bit 1) 2632567cd4daSSteven Rostedt * 2633567cd4daSSteven Rostedt * The least significant bit can be cleared this way, and it 2634567cd4daSSteven Rostedt * just so happens that it is the same bit corresponding to 2635567cd4daSSteven Rostedt * the current context. 2636567cd4daSSteven Rostedt */ 2637567cd4daSSteven Rostedt static DEFINE_PER_CPU(unsigned int, current_context); 2638261842b7SSteven Rostedt 2639567cd4daSSteven Rostedt static __always_inline int trace_recursive_lock(void) 2640261842b7SSteven Rostedt { 2641567cd4daSSteven Rostedt unsigned int val = this_cpu_read(current_context); 2642567cd4daSSteven Rostedt int bit; 2643e057a5e5SFrederic Weisbecker 2644567cd4daSSteven Rostedt if (in_interrupt()) { 2645567cd4daSSteven Rostedt if (in_nmi()) 2646567cd4daSSteven Rostedt bit = 0; 2647567cd4daSSteven Rostedt else if (in_irq()) 2648567cd4daSSteven Rostedt bit = 1; 2649567cd4daSSteven Rostedt else 2650567cd4daSSteven Rostedt bit = 2; 2651567cd4daSSteven Rostedt } else 2652567cd4daSSteven Rostedt bit = 3; 2653e057a5e5SFrederic Weisbecker 2654567cd4daSSteven Rostedt if (unlikely(val & (1 << bit))) 2655567cd4daSSteven Rostedt return 1; 2656d9abde21SSteven Rostedt 2657567cd4daSSteven Rostedt val |= (1 << bit); 2658567cd4daSSteven Rostedt this_cpu_write(current_context, val); 2659d9abde21SSteven Rostedt 2660d9abde21SSteven Rostedt return 0; 2661261842b7SSteven Rostedt } 2662261842b7SSteven Rostedt 2663567cd4daSSteven Rostedt static __always_inline void trace_recursive_unlock(void) 2664261842b7SSteven Rostedt { 2665567cd4daSSteven Rostedt unsigned int val = this_cpu_read(current_context); 2666261842b7SSteven Rostedt 2667567cd4daSSteven Rostedt val--; 2668567cd4daSSteven Rostedt val &= this_cpu_read(current_context); 2669567cd4daSSteven Rostedt this_cpu_write(current_context, val); 2670261842b7SSteven Rostedt } 2671261842b7SSteven Rostedt 26721155de47SPaul Mundt #else 26731155de47SPaul Mundt 26741155de47SPaul Mundt #define trace_recursive_lock() (0) 26751155de47SPaul Mundt #define trace_recursive_unlock() do { } while (0) 26761155de47SPaul Mundt 26771155de47SPaul Mundt #endif 26781155de47SPaul Mundt 26797a8e76a3SSteven Rostedt /** 26807a8e76a3SSteven Rostedt * ring_buffer_lock_reserve - reserve a part of the buffer 26817a8e76a3SSteven Rostedt * @buffer: the ring buffer to reserve from 26827a8e76a3SSteven Rostedt * @length: the length of the data to reserve (excluding event header) 26837a8e76a3SSteven Rostedt * 26847a8e76a3SSteven Rostedt * Returns a reseverd event on the ring buffer to copy directly to. 26857a8e76a3SSteven Rostedt * The user of this interface will need to get the body to write into 26867a8e76a3SSteven Rostedt * and can use the ring_buffer_event_data() interface. 26877a8e76a3SSteven Rostedt * 26887a8e76a3SSteven Rostedt * The length is the length of the data needed, not the event length 26897a8e76a3SSteven Rostedt * which also includes the event header. 26907a8e76a3SSteven Rostedt * 26917a8e76a3SSteven Rostedt * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. 26927a8e76a3SSteven Rostedt * If NULL is returned, then nothing has been allocated or locked. 26937a8e76a3SSteven Rostedt */ 26947a8e76a3SSteven Rostedt struct ring_buffer_event * 26950a987751SArnaldo Carvalho de Melo ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) 26967a8e76a3SSteven Rostedt { 26977a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 26987a8e76a3SSteven Rostedt struct ring_buffer_event *event; 26995168ae50SSteven Rostedt int cpu; 27007a8e76a3SSteven Rostedt 2701033601a3SSteven Rostedt if (ring_buffer_flags != RB_BUFFERS_ON) 2702a3583244SSteven Rostedt return NULL; 2703a3583244SSteven Rostedt 2704bf41a158SSteven Rostedt /* If we are tracing schedule, we don't want to recurse */ 27055168ae50SSteven Rostedt preempt_disable_notrace(); 2706bf41a158SSteven Rostedt 270752fbe9cdSLai Jiangshan if (atomic_read(&buffer->record_disabled)) 270852fbe9cdSLai Jiangshan goto out_nocheck; 270952fbe9cdSLai Jiangshan 2710261842b7SSteven Rostedt if (trace_recursive_lock()) 2711261842b7SSteven Rostedt goto out_nocheck; 2712261842b7SSteven Rostedt 27137a8e76a3SSteven Rostedt cpu = raw_smp_processor_id(); 27147a8e76a3SSteven Rostedt 27159e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2716d769041fSSteven Rostedt goto out; 27177a8e76a3SSteven Rostedt 27187a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 27197a8e76a3SSteven Rostedt 27207a8e76a3SSteven Rostedt if (atomic_read(&cpu_buffer->record_disabled)) 2721d769041fSSteven Rostedt goto out; 27227a8e76a3SSteven Rostedt 2723be957c44SSteven Rostedt if (length > BUF_MAX_DATA_SIZE) 2724bf41a158SSteven Rostedt goto out; 27257a8e76a3SSteven Rostedt 272662f0b3ebSSteven Rostedt event = rb_reserve_next_event(buffer, cpu_buffer, length); 27277a8e76a3SSteven Rostedt if (!event) 2728d769041fSSteven Rostedt goto out; 27297a8e76a3SSteven Rostedt 27307a8e76a3SSteven Rostedt return event; 27317a8e76a3SSteven Rostedt 2732d769041fSSteven Rostedt out: 2733261842b7SSteven Rostedt trace_recursive_unlock(); 2734261842b7SSteven Rostedt 2735261842b7SSteven Rostedt out_nocheck: 27365168ae50SSteven Rostedt preempt_enable_notrace(); 27377a8e76a3SSteven Rostedt return NULL; 27387a8e76a3SSteven Rostedt } 2739c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); 27407a8e76a3SSteven Rostedt 2741a1863c21SSteven Rostedt static void 2742a1863c21SSteven Rostedt rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer, 27437a8e76a3SSteven Rostedt struct ring_buffer_event *event) 27447a8e76a3SSteven Rostedt { 274569d1b839SSteven Rostedt u64 delta; 274669d1b839SSteven Rostedt 2747fa743953SSteven Rostedt /* 2748fa743953SSteven Rostedt * The event first in the commit queue updates the 2749fa743953SSteven Rostedt * time stamp. 2750fa743953SSteven Rostedt */ 275169d1b839SSteven Rostedt if (rb_event_is_commit(cpu_buffer, event)) { 275269d1b839SSteven Rostedt /* 275369d1b839SSteven Rostedt * A commit event that is first on a page 275469d1b839SSteven Rostedt * updates the write timestamp with the page stamp 275569d1b839SSteven Rostedt */ 275669d1b839SSteven Rostedt if (!rb_event_index(event)) 275769d1b839SSteven Rostedt cpu_buffer->write_stamp = 275869d1b839SSteven Rostedt cpu_buffer->commit_page->page->time_stamp; 275969d1b839SSteven Rostedt else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { 276069d1b839SSteven Rostedt delta = event->array[0]; 276169d1b839SSteven Rostedt delta <<= TS_SHIFT; 276269d1b839SSteven Rostedt delta += event->time_delta; 276369d1b839SSteven Rostedt cpu_buffer->write_stamp += delta; 276469d1b839SSteven Rostedt } else 2765bf41a158SSteven Rostedt cpu_buffer->write_stamp += event->time_delta; 2766a1863c21SSteven Rostedt } 276769d1b839SSteven Rostedt } 2768bf41a158SSteven Rostedt 2769a1863c21SSteven Rostedt static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, 2770a1863c21SSteven Rostedt struct ring_buffer_event *event) 2771a1863c21SSteven Rostedt { 2772a1863c21SSteven Rostedt local_inc(&cpu_buffer->entries); 2773a1863c21SSteven Rostedt rb_update_write_stamp(cpu_buffer, event); 2774fa743953SSteven Rostedt rb_end_commit(cpu_buffer); 27757a8e76a3SSteven Rostedt } 27767a8e76a3SSteven Rostedt 277715693458SSteven Rostedt (Red Hat) static __always_inline void 277815693458SSteven Rostedt (Red Hat) rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) 277915693458SSteven Rostedt (Red Hat) { 278015693458SSteven Rostedt (Red Hat) if (buffer->irq_work.waiters_pending) { 278115693458SSteven Rostedt (Red Hat) buffer->irq_work.waiters_pending = false; 278215693458SSteven Rostedt (Red Hat) /* irq_work_queue() supplies it's own memory barriers */ 278315693458SSteven Rostedt (Red Hat) irq_work_queue(&buffer->irq_work.work); 278415693458SSteven Rostedt (Red Hat) } 278515693458SSteven Rostedt (Red Hat) 278615693458SSteven Rostedt (Red Hat) if (cpu_buffer->irq_work.waiters_pending) { 278715693458SSteven Rostedt (Red Hat) cpu_buffer->irq_work.waiters_pending = false; 278815693458SSteven Rostedt (Red Hat) /* irq_work_queue() supplies it's own memory barriers */ 278915693458SSteven Rostedt (Red Hat) irq_work_queue(&cpu_buffer->irq_work.work); 279015693458SSteven Rostedt (Red Hat) } 279115693458SSteven Rostedt (Red Hat) } 279215693458SSteven Rostedt (Red Hat) 27937a8e76a3SSteven Rostedt /** 27947a8e76a3SSteven Rostedt * ring_buffer_unlock_commit - commit a reserved 27957a8e76a3SSteven Rostedt * @buffer: The buffer to commit to 27967a8e76a3SSteven Rostedt * @event: The event pointer to commit. 27977a8e76a3SSteven Rostedt * 27987a8e76a3SSteven Rostedt * This commits the data to the ring buffer, and releases any locks held. 27997a8e76a3SSteven Rostedt * 28007a8e76a3SSteven Rostedt * Must be paired with ring_buffer_lock_reserve. 28017a8e76a3SSteven Rostedt */ 28027a8e76a3SSteven Rostedt int ring_buffer_unlock_commit(struct ring_buffer *buffer, 28030a987751SArnaldo Carvalho de Melo struct ring_buffer_event *event) 28047a8e76a3SSteven Rostedt { 28057a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 28067a8e76a3SSteven Rostedt int cpu = raw_smp_processor_id(); 28077a8e76a3SSteven Rostedt 28087a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 28097a8e76a3SSteven Rostedt 28107a8e76a3SSteven Rostedt rb_commit(cpu_buffer, event); 28117a8e76a3SSteven Rostedt 281215693458SSteven Rostedt (Red Hat) rb_wakeups(buffer, cpu_buffer); 281315693458SSteven Rostedt (Red Hat) 2814261842b7SSteven Rostedt trace_recursive_unlock(); 2815261842b7SSteven Rostedt 28165168ae50SSteven Rostedt preempt_enable_notrace(); 28177a8e76a3SSteven Rostedt 28187a8e76a3SSteven Rostedt return 0; 28197a8e76a3SSteven Rostedt } 2820c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); 28217a8e76a3SSteven Rostedt 2822f3b9aae1SFrederic Weisbecker static inline void rb_event_discard(struct ring_buffer_event *event) 2823f3b9aae1SFrederic Weisbecker { 282469d1b839SSteven Rostedt if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) 282569d1b839SSteven Rostedt event = skip_time_extend(event); 282669d1b839SSteven Rostedt 2827334d4169SLai Jiangshan /* array[0] holds the actual length for the discarded event */ 2828334d4169SLai Jiangshan event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; 2829334d4169SLai Jiangshan event->type_len = RINGBUF_TYPE_PADDING; 2830f3b9aae1SFrederic Weisbecker /* time delta must be non zero */ 2831f3b9aae1SFrederic Weisbecker if (!event->time_delta) 2832f3b9aae1SFrederic Weisbecker event->time_delta = 1; 2833f3b9aae1SFrederic Weisbecker } 2834f3b9aae1SFrederic Weisbecker 2835a1863c21SSteven Rostedt /* 2836a1863c21SSteven Rostedt * Decrement the entries to the page that an event is on. 2837a1863c21SSteven Rostedt * The event does not even need to exist, only the pointer 2838a1863c21SSteven Rostedt * to the page it is on. This may only be called before the commit 2839a1863c21SSteven Rostedt * takes place. 2840a1863c21SSteven Rostedt */ 2841a1863c21SSteven Rostedt static inline void 2842a1863c21SSteven Rostedt rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, 2843a1863c21SSteven Rostedt struct ring_buffer_event *event) 2844a1863c21SSteven Rostedt { 2845a1863c21SSteven Rostedt unsigned long addr = (unsigned long)event; 2846a1863c21SSteven Rostedt struct buffer_page *bpage = cpu_buffer->commit_page; 2847a1863c21SSteven Rostedt struct buffer_page *start; 2848a1863c21SSteven Rostedt 2849a1863c21SSteven Rostedt addr &= PAGE_MASK; 2850a1863c21SSteven Rostedt 2851a1863c21SSteven Rostedt /* Do the likely case first */ 2852a1863c21SSteven Rostedt if (likely(bpage->page == (void *)addr)) { 2853a1863c21SSteven Rostedt local_dec(&bpage->entries); 2854a1863c21SSteven Rostedt return; 2855a1863c21SSteven Rostedt } 2856a1863c21SSteven Rostedt 2857a1863c21SSteven Rostedt /* 2858a1863c21SSteven Rostedt * Because the commit page may be on the reader page we 2859a1863c21SSteven Rostedt * start with the next page and check the end loop there. 2860a1863c21SSteven Rostedt */ 2861a1863c21SSteven Rostedt rb_inc_page(cpu_buffer, &bpage); 2862a1863c21SSteven Rostedt start = bpage; 2863a1863c21SSteven Rostedt do { 2864a1863c21SSteven Rostedt if (bpage->page == (void *)addr) { 2865a1863c21SSteven Rostedt local_dec(&bpage->entries); 2866a1863c21SSteven Rostedt return; 2867a1863c21SSteven Rostedt } 2868a1863c21SSteven Rostedt rb_inc_page(cpu_buffer, &bpage); 2869a1863c21SSteven Rostedt } while (bpage != start); 2870a1863c21SSteven Rostedt 2871a1863c21SSteven Rostedt /* commit not part of this buffer?? */ 2872a1863c21SSteven Rostedt RB_WARN_ON(cpu_buffer, 1); 2873a1863c21SSteven Rostedt } 2874a1863c21SSteven Rostedt 28757a8e76a3SSteven Rostedt /** 2876fa1b47ddSSteven Rostedt * ring_buffer_commit_discard - discard an event that has not been committed 2877fa1b47ddSSteven Rostedt * @buffer: the ring buffer 2878fa1b47ddSSteven Rostedt * @event: non committed event to discard 2879fa1b47ddSSteven Rostedt * 2880dc892f73SSteven Rostedt * Sometimes an event that is in the ring buffer needs to be ignored. 2881dc892f73SSteven Rostedt * This function lets the user discard an event in the ring buffer 2882dc892f73SSteven Rostedt * and then that event will not be read later. 2883dc892f73SSteven Rostedt * 2884dc892f73SSteven Rostedt * This function only works if it is called before the the item has been 2885dc892f73SSteven Rostedt * committed. It will try to free the event from the ring buffer 2886fa1b47ddSSteven Rostedt * if another event has not been added behind it. 2887fa1b47ddSSteven Rostedt * 2888fa1b47ddSSteven Rostedt * If another event has been added behind it, it will set the event 2889fa1b47ddSSteven Rostedt * up as discarded, and perform the commit. 2890fa1b47ddSSteven Rostedt * 2891fa1b47ddSSteven Rostedt * If this function is called, do not call ring_buffer_unlock_commit on 2892fa1b47ddSSteven Rostedt * the event. 2893fa1b47ddSSteven Rostedt */ 2894fa1b47ddSSteven Rostedt void ring_buffer_discard_commit(struct ring_buffer *buffer, 2895fa1b47ddSSteven Rostedt struct ring_buffer_event *event) 2896fa1b47ddSSteven Rostedt { 2897fa1b47ddSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 2898fa1b47ddSSteven Rostedt int cpu; 2899fa1b47ddSSteven Rostedt 2900fa1b47ddSSteven Rostedt /* The event is discarded regardless */ 2901f3b9aae1SFrederic Weisbecker rb_event_discard(event); 2902fa1b47ddSSteven Rostedt 2903fa743953SSteven Rostedt cpu = smp_processor_id(); 2904fa743953SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 2905fa743953SSteven Rostedt 2906fa1b47ddSSteven Rostedt /* 2907fa1b47ddSSteven Rostedt * This must only be called if the event has not been 2908fa1b47ddSSteven Rostedt * committed yet. Thus we can assume that preemption 2909fa1b47ddSSteven Rostedt * is still disabled. 2910fa1b47ddSSteven Rostedt */ 2911fa743953SSteven Rostedt RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 2912fa1b47ddSSteven Rostedt 2913a1863c21SSteven Rostedt rb_decrement_entry(cpu_buffer, event); 29140f2541d2SSteven Rostedt if (rb_try_to_discard(cpu_buffer, event)) 2915fa1b47ddSSteven Rostedt goto out; 2916fa1b47ddSSteven Rostedt 2917fa1b47ddSSteven Rostedt /* 2918fa1b47ddSSteven Rostedt * The commit is still visible by the reader, so we 2919a1863c21SSteven Rostedt * must still update the timestamp. 2920fa1b47ddSSteven Rostedt */ 2921a1863c21SSteven Rostedt rb_update_write_stamp(cpu_buffer, event); 2922fa1b47ddSSteven Rostedt out: 2923fa743953SSteven Rostedt rb_end_commit(cpu_buffer); 2924fa1b47ddSSteven Rostedt 2925f3b9aae1SFrederic Weisbecker trace_recursive_unlock(); 2926f3b9aae1SFrederic Weisbecker 29275168ae50SSteven Rostedt preempt_enable_notrace(); 2928fa1b47ddSSteven Rostedt 2929fa1b47ddSSteven Rostedt } 2930fa1b47ddSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); 2931fa1b47ddSSteven Rostedt 2932fa1b47ddSSteven Rostedt /** 29337a8e76a3SSteven Rostedt * ring_buffer_write - write data to the buffer without reserving 29347a8e76a3SSteven Rostedt * @buffer: The ring buffer to write to. 29357a8e76a3SSteven Rostedt * @length: The length of the data being written (excluding the event header) 29367a8e76a3SSteven Rostedt * @data: The data to write to the buffer. 29377a8e76a3SSteven Rostedt * 29387a8e76a3SSteven Rostedt * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as 29397a8e76a3SSteven Rostedt * one function. If you already have the data to write to the buffer, it 29407a8e76a3SSteven Rostedt * may be easier to simply call this function. 29417a8e76a3SSteven Rostedt * 29427a8e76a3SSteven Rostedt * Note, like ring_buffer_lock_reserve, the length is the length of the data 29437a8e76a3SSteven Rostedt * and not the length of the event which would hold the header. 29447a8e76a3SSteven Rostedt */ 29457a8e76a3SSteven Rostedt int ring_buffer_write(struct ring_buffer *buffer, 29467a8e76a3SSteven Rostedt unsigned long length, 29477a8e76a3SSteven Rostedt void *data) 29487a8e76a3SSteven Rostedt { 29497a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 29507a8e76a3SSteven Rostedt struct ring_buffer_event *event; 29517a8e76a3SSteven Rostedt void *body; 29527a8e76a3SSteven Rostedt int ret = -EBUSY; 29535168ae50SSteven Rostedt int cpu; 29547a8e76a3SSteven Rostedt 2955033601a3SSteven Rostedt if (ring_buffer_flags != RB_BUFFERS_ON) 2956a3583244SSteven Rostedt return -EBUSY; 2957a3583244SSteven Rostedt 29585168ae50SSteven Rostedt preempt_disable_notrace(); 2959bf41a158SSteven Rostedt 296052fbe9cdSLai Jiangshan if (atomic_read(&buffer->record_disabled)) 296152fbe9cdSLai Jiangshan goto out; 296252fbe9cdSLai Jiangshan 29637a8e76a3SSteven Rostedt cpu = raw_smp_processor_id(); 29647a8e76a3SSteven Rostedt 29659e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2966d769041fSSteven Rostedt goto out; 29677a8e76a3SSteven Rostedt 29687a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 29697a8e76a3SSteven Rostedt 29707a8e76a3SSteven Rostedt if (atomic_read(&cpu_buffer->record_disabled)) 29717a8e76a3SSteven Rostedt goto out; 29727a8e76a3SSteven Rostedt 2973be957c44SSteven Rostedt if (length > BUF_MAX_DATA_SIZE) 2974be957c44SSteven Rostedt goto out; 2975be957c44SSteven Rostedt 297662f0b3ebSSteven Rostedt event = rb_reserve_next_event(buffer, cpu_buffer, length); 29777a8e76a3SSteven Rostedt if (!event) 29787a8e76a3SSteven Rostedt goto out; 29797a8e76a3SSteven Rostedt 29807a8e76a3SSteven Rostedt body = rb_event_data(event); 29817a8e76a3SSteven Rostedt 29827a8e76a3SSteven Rostedt memcpy(body, data, length); 29837a8e76a3SSteven Rostedt 29847a8e76a3SSteven Rostedt rb_commit(cpu_buffer, event); 29857a8e76a3SSteven Rostedt 298615693458SSteven Rostedt (Red Hat) rb_wakeups(buffer, cpu_buffer); 298715693458SSteven Rostedt (Red Hat) 29887a8e76a3SSteven Rostedt ret = 0; 29897a8e76a3SSteven Rostedt out: 29905168ae50SSteven Rostedt preempt_enable_notrace(); 29917a8e76a3SSteven Rostedt 29927a8e76a3SSteven Rostedt return ret; 29937a8e76a3SSteven Rostedt } 2994c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_write); 29957a8e76a3SSteven Rostedt 299634a148bfSAndrew Morton static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 2997bf41a158SSteven Rostedt { 2998bf41a158SSteven Rostedt struct buffer_page *reader = cpu_buffer->reader_page; 299977ae365eSSteven Rostedt struct buffer_page *head = rb_set_head_page(cpu_buffer); 3000bf41a158SSteven Rostedt struct buffer_page *commit = cpu_buffer->commit_page; 3001bf41a158SSteven Rostedt 300277ae365eSSteven Rostedt /* In case of error, head will be NULL */ 300377ae365eSSteven Rostedt if (unlikely(!head)) 300477ae365eSSteven Rostedt return 1; 300577ae365eSSteven Rostedt 3006bf41a158SSteven Rostedt return reader->read == rb_page_commit(reader) && 3007bf41a158SSteven Rostedt (commit == reader || 3008bf41a158SSteven Rostedt (commit == head && 3009bf41a158SSteven Rostedt head->read == rb_page_commit(commit))); 3010bf41a158SSteven Rostedt } 3011bf41a158SSteven Rostedt 30127a8e76a3SSteven Rostedt /** 30137a8e76a3SSteven Rostedt * ring_buffer_record_disable - stop all writes into the buffer 30147a8e76a3SSteven Rostedt * @buffer: The ring buffer to stop writes to. 30157a8e76a3SSteven Rostedt * 30167a8e76a3SSteven Rostedt * This prevents all writes to the buffer. Any attempt to write 30177a8e76a3SSteven Rostedt * to the buffer after this will fail and return NULL. 30187a8e76a3SSteven Rostedt * 30197a8e76a3SSteven Rostedt * The caller should call synchronize_sched() after this. 30207a8e76a3SSteven Rostedt */ 30217a8e76a3SSteven Rostedt void ring_buffer_record_disable(struct ring_buffer *buffer) 30227a8e76a3SSteven Rostedt { 30237a8e76a3SSteven Rostedt atomic_inc(&buffer->record_disabled); 30247a8e76a3SSteven Rostedt } 3025c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable); 30267a8e76a3SSteven Rostedt 30277a8e76a3SSteven Rostedt /** 30287a8e76a3SSteven Rostedt * ring_buffer_record_enable - enable writes to the buffer 30297a8e76a3SSteven Rostedt * @buffer: The ring buffer to enable writes 30307a8e76a3SSteven Rostedt * 30317a8e76a3SSteven Rostedt * Note, multiple disables will need the same number of enables 3032c41b20e7SAdam Buchbinder * to truly enable the writing (much like preempt_disable). 30337a8e76a3SSteven Rostedt */ 30347a8e76a3SSteven Rostedt void ring_buffer_record_enable(struct ring_buffer *buffer) 30357a8e76a3SSteven Rostedt { 30367a8e76a3SSteven Rostedt atomic_dec(&buffer->record_disabled); 30377a8e76a3SSteven Rostedt } 3038c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable); 30397a8e76a3SSteven Rostedt 30407a8e76a3SSteven Rostedt /** 3041499e5470SSteven Rostedt * ring_buffer_record_off - stop all writes into the buffer 3042499e5470SSteven Rostedt * @buffer: The ring buffer to stop writes to. 3043499e5470SSteven Rostedt * 3044499e5470SSteven Rostedt * This prevents all writes to the buffer. Any attempt to write 3045499e5470SSteven Rostedt * to the buffer after this will fail and return NULL. 3046499e5470SSteven Rostedt * 3047499e5470SSteven Rostedt * This is different than ring_buffer_record_disable() as 304887abb3b1SWang Tianhong * it works like an on/off switch, where as the disable() version 3049499e5470SSteven Rostedt * must be paired with a enable(). 3050499e5470SSteven Rostedt */ 3051499e5470SSteven Rostedt void ring_buffer_record_off(struct ring_buffer *buffer) 3052499e5470SSteven Rostedt { 3053499e5470SSteven Rostedt unsigned int rd; 3054499e5470SSteven Rostedt unsigned int new_rd; 3055499e5470SSteven Rostedt 3056499e5470SSteven Rostedt do { 3057499e5470SSteven Rostedt rd = atomic_read(&buffer->record_disabled); 3058499e5470SSteven Rostedt new_rd = rd | RB_BUFFER_OFF; 3059499e5470SSteven Rostedt } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); 3060499e5470SSteven Rostedt } 3061499e5470SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_record_off); 3062499e5470SSteven Rostedt 3063499e5470SSteven Rostedt /** 3064499e5470SSteven Rostedt * ring_buffer_record_on - restart writes into the buffer 3065499e5470SSteven Rostedt * @buffer: The ring buffer to start writes to. 3066499e5470SSteven Rostedt * 3067499e5470SSteven Rostedt * This enables all writes to the buffer that was disabled by 3068499e5470SSteven Rostedt * ring_buffer_record_off(). 3069499e5470SSteven Rostedt * 3070499e5470SSteven Rostedt * This is different than ring_buffer_record_enable() as 307187abb3b1SWang Tianhong * it works like an on/off switch, where as the enable() version 3072499e5470SSteven Rostedt * must be paired with a disable(). 3073499e5470SSteven Rostedt */ 3074499e5470SSteven Rostedt void ring_buffer_record_on(struct ring_buffer *buffer) 3075499e5470SSteven Rostedt { 3076499e5470SSteven Rostedt unsigned int rd; 3077499e5470SSteven Rostedt unsigned int new_rd; 3078499e5470SSteven Rostedt 3079499e5470SSteven Rostedt do { 3080499e5470SSteven Rostedt rd = atomic_read(&buffer->record_disabled); 3081499e5470SSteven Rostedt new_rd = rd & ~RB_BUFFER_OFF; 3082499e5470SSteven Rostedt } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); 3083499e5470SSteven Rostedt } 3084499e5470SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_record_on); 3085499e5470SSteven Rostedt 3086499e5470SSteven Rostedt /** 3087499e5470SSteven Rostedt * ring_buffer_record_is_on - return true if the ring buffer can write 3088499e5470SSteven Rostedt * @buffer: The ring buffer to see if write is enabled 3089499e5470SSteven Rostedt * 3090499e5470SSteven Rostedt * Returns true if the ring buffer is in a state that it accepts writes. 3091499e5470SSteven Rostedt */ 3092499e5470SSteven Rostedt int ring_buffer_record_is_on(struct ring_buffer *buffer) 3093499e5470SSteven Rostedt { 3094499e5470SSteven Rostedt return !atomic_read(&buffer->record_disabled); 3095499e5470SSteven Rostedt } 3096499e5470SSteven Rostedt 3097499e5470SSteven Rostedt /** 30987a8e76a3SSteven Rostedt * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 30997a8e76a3SSteven Rostedt * @buffer: The ring buffer to stop writes to. 31007a8e76a3SSteven Rostedt * @cpu: The CPU buffer to stop 31017a8e76a3SSteven Rostedt * 31027a8e76a3SSteven Rostedt * This prevents all writes to the buffer. Any attempt to write 31037a8e76a3SSteven Rostedt * to the buffer after this will fail and return NULL. 31047a8e76a3SSteven Rostedt * 31057a8e76a3SSteven Rostedt * The caller should call synchronize_sched() after this. 31067a8e76a3SSteven Rostedt */ 31077a8e76a3SSteven Rostedt void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) 31087a8e76a3SSteven Rostedt { 31097a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 31107a8e76a3SSteven Rostedt 31119e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 31128aabee57SSteven Rostedt return; 31137a8e76a3SSteven Rostedt 31147a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 31157a8e76a3SSteven Rostedt atomic_inc(&cpu_buffer->record_disabled); 31167a8e76a3SSteven Rostedt } 3117c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); 31187a8e76a3SSteven Rostedt 31197a8e76a3SSteven Rostedt /** 31207a8e76a3SSteven Rostedt * ring_buffer_record_enable_cpu - enable writes to the buffer 31217a8e76a3SSteven Rostedt * @buffer: The ring buffer to enable writes 31227a8e76a3SSteven Rostedt * @cpu: The CPU to enable. 31237a8e76a3SSteven Rostedt * 31247a8e76a3SSteven Rostedt * Note, multiple disables will need the same number of enables 3125c41b20e7SAdam Buchbinder * to truly enable the writing (much like preempt_disable). 31267a8e76a3SSteven Rostedt */ 31277a8e76a3SSteven Rostedt void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) 31287a8e76a3SSteven Rostedt { 31297a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 31307a8e76a3SSteven Rostedt 31319e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 31328aabee57SSteven Rostedt return; 31337a8e76a3SSteven Rostedt 31347a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 31357a8e76a3SSteven Rostedt atomic_dec(&cpu_buffer->record_disabled); 31367a8e76a3SSteven Rostedt } 3137c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); 31387a8e76a3SSteven Rostedt 3139f6195aa0SSteven Rostedt /* 3140f6195aa0SSteven Rostedt * The total entries in the ring buffer is the running counter 3141f6195aa0SSteven Rostedt * of entries entered into the ring buffer, minus the sum of 3142f6195aa0SSteven Rostedt * the entries read from the ring buffer and the number of 3143f6195aa0SSteven Rostedt * entries that were overwritten. 3144f6195aa0SSteven Rostedt */ 3145f6195aa0SSteven Rostedt static inline unsigned long 3146f6195aa0SSteven Rostedt rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) 3147f6195aa0SSteven Rostedt { 3148f6195aa0SSteven Rostedt return local_read(&cpu_buffer->entries) - 3149f6195aa0SSteven Rostedt (local_read(&cpu_buffer->overrun) + cpu_buffer->read); 3150f6195aa0SSteven Rostedt } 3151f6195aa0SSteven Rostedt 31527a8e76a3SSteven Rostedt /** 3153c64e148aSVaibhav Nagarnaik * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer 3154c64e148aSVaibhav Nagarnaik * @buffer: The ring buffer 3155c64e148aSVaibhav Nagarnaik * @cpu: The per CPU buffer to read from. 3156c64e148aSVaibhav Nagarnaik */ 315750ecf2c3SYoshihiro YUNOMAE u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu) 3158c64e148aSVaibhav Nagarnaik { 3159c64e148aSVaibhav Nagarnaik unsigned long flags; 3160c64e148aSVaibhav Nagarnaik struct ring_buffer_per_cpu *cpu_buffer; 3161c64e148aSVaibhav Nagarnaik struct buffer_page *bpage; 3162da830e58SLinus Torvalds u64 ret = 0; 3163c64e148aSVaibhav Nagarnaik 3164c64e148aSVaibhav Nagarnaik if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3165c64e148aSVaibhav Nagarnaik return 0; 3166c64e148aSVaibhav Nagarnaik 3167c64e148aSVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu]; 31687115e3fcSLinus Torvalds raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3169c64e148aSVaibhav Nagarnaik /* 3170c64e148aSVaibhav Nagarnaik * if the tail is on reader_page, oldest time stamp is on the reader 3171c64e148aSVaibhav Nagarnaik * page 3172c64e148aSVaibhav Nagarnaik */ 3173c64e148aSVaibhav Nagarnaik if (cpu_buffer->tail_page == cpu_buffer->reader_page) 3174c64e148aSVaibhav Nagarnaik bpage = cpu_buffer->reader_page; 3175c64e148aSVaibhav Nagarnaik else 3176c64e148aSVaibhav Nagarnaik bpage = rb_set_head_page(cpu_buffer); 317754f7be5bSSteven Rostedt if (bpage) 3178c64e148aSVaibhav Nagarnaik ret = bpage->page->time_stamp; 31797115e3fcSLinus Torvalds raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3180c64e148aSVaibhav Nagarnaik 3181c64e148aSVaibhav Nagarnaik return ret; 3182c64e148aSVaibhav Nagarnaik } 3183c64e148aSVaibhav Nagarnaik EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); 3184c64e148aSVaibhav Nagarnaik 3185c64e148aSVaibhav Nagarnaik /** 3186c64e148aSVaibhav Nagarnaik * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer 3187c64e148aSVaibhav Nagarnaik * @buffer: The ring buffer 3188c64e148aSVaibhav Nagarnaik * @cpu: The per CPU buffer to read from. 3189c64e148aSVaibhav Nagarnaik */ 3190c64e148aSVaibhav Nagarnaik unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu) 3191c64e148aSVaibhav Nagarnaik { 3192c64e148aSVaibhav Nagarnaik struct ring_buffer_per_cpu *cpu_buffer; 3193c64e148aSVaibhav Nagarnaik unsigned long ret; 3194c64e148aSVaibhav Nagarnaik 3195c64e148aSVaibhav Nagarnaik if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3196c64e148aSVaibhav Nagarnaik return 0; 3197c64e148aSVaibhav Nagarnaik 3198c64e148aSVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu]; 3199c64e148aSVaibhav Nagarnaik ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; 3200c64e148aSVaibhav Nagarnaik 3201c64e148aSVaibhav Nagarnaik return ret; 3202c64e148aSVaibhav Nagarnaik } 3203c64e148aSVaibhav Nagarnaik EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); 3204c64e148aSVaibhav Nagarnaik 3205c64e148aSVaibhav Nagarnaik /** 32067a8e76a3SSteven Rostedt * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 32077a8e76a3SSteven Rostedt * @buffer: The ring buffer 32087a8e76a3SSteven Rostedt * @cpu: The per CPU buffer to get the entries from. 32097a8e76a3SSteven Rostedt */ 32107a8e76a3SSteven Rostedt unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) 32117a8e76a3SSteven Rostedt { 32127a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 32137a8e76a3SSteven Rostedt 32149e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 32158aabee57SSteven Rostedt return 0; 32167a8e76a3SSteven Rostedt 32177a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 3218554f786eSSteven Rostedt 3219f6195aa0SSteven Rostedt return rb_num_of_entries(cpu_buffer); 32207a8e76a3SSteven Rostedt } 3221c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); 32227a8e76a3SSteven Rostedt 32237a8e76a3SSteven Rostedt /** 3224884bfe89SSlava Pestov * ring_buffer_overrun_cpu - get the number of overruns caused by the ring 3225884bfe89SSlava Pestov * buffer wrapping around (only if RB_FL_OVERWRITE is on). 32267a8e76a3SSteven Rostedt * @buffer: The ring buffer 32277a8e76a3SSteven Rostedt * @cpu: The per CPU buffer to get the number of overruns from 32287a8e76a3SSteven Rostedt */ 32297a8e76a3SSteven Rostedt unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) 32307a8e76a3SSteven Rostedt { 32317a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 32328aabee57SSteven Rostedt unsigned long ret; 32337a8e76a3SSteven Rostedt 32349e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 32358aabee57SSteven Rostedt return 0; 32367a8e76a3SSteven Rostedt 32377a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 323877ae365eSSteven Rostedt ret = local_read(&cpu_buffer->overrun); 3239554f786eSSteven Rostedt 3240554f786eSSteven Rostedt return ret; 32417a8e76a3SSteven Rostedt } 3242c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); 32437a8e76a3SSteven Rostedt 32447a8e76a3SSteven Rostedt /** 3245884bfe89SSlava Pestov * ring_buffer_commit_overrun_cpu - get the number of overruns caused by 3246884bfe89SSlava Pestov * commits failing due to the buffer wrapping around while there are uncommitted 3247884bfe89SSlava Pestov * events, such as during an interrupt storm. 3248f0d2c681SSteven Rostedt * @buffer: The ring buffer 3249f0d2c681SSteven Rostedt * @cpu: The per CPU buffer to get the number of overruns from 3250f0d2c681SSteven Rostedt */ 3251f0d2c681SSteven Rostedt unsigned long 3252f0d2c681SSteven Rostedt ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu) 3253f0d2c681SSteven Rostedt { 3254f0d2c681SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 3255f0d2c681SSteven Rostedt unsigned long ret; 3256f0d2c681SSteven Rostedt 3257f0d2c681SSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3258f0d2c681SSteven Rostedt return 0; 3259f0d2c681SSteven Rostedt 3260f0d2c681SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 326177ae365eSSteven Rostedt ret = local_read(&cpu_buffer->commit_overrun); 3262f0d2c681SSteven Rostedt 3263f0d2c681SSteven Rostedt return ret; 3264f0d2c681SSteven Rostedt } 3265f0d2c681SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); 3266f0d2c681SSteven Rostedt 3267f0d2c681SSteven Rostedt /** 3268884bfe89SSlava Pestov * ring_buffer_dropped_events_cpu - get the number of dropped events caused by 3269884bfe89SSlava Pestov * the ring buffer filling up (only if RB_FL_OVERWRITE is off). 3270884bfe89SSlava Pestov * @buffer: The ring buffer 3271884bfe89SSlava Pestov * @cpu: The per CPU buffer to get the number of overruns from 3272884bfe89SSlava Pestov */ 3273884bfe89SSlava Pestov unsigned long 3274884bfe89SSlava Pestov ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu) 3275884bfe89SSlava Pestov { 3276884bfe89SSlava Pestov struct ring_buffer_per_cpu *cpu_buffer; 3277884bfe89SSlava Pestov unsigned long ret; 3278884bfe89SSlava Pestov 3279884bfe89SSlava Pestov if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3280884bfe89SSlava Pestov return 0; 3281884bfe89SSlava Pestov 3282884bfe89SSlava Pestov cpu_buffer = buffer->buffers[cpu]; 3283884bfe89SSlava Pestov ret = local_read(&cpu_buffer->dropped_events); 3284884bfe89SSlava Pestov 3285884bfe89SSlava Pestov return ret; 3286884bfe89SSlava Pestov } 3287884bfe89SSlava Pestov EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); 3288884bfe89SSlava Pestov 3289884bfe89SSlava Pestov /** 3290ad964704SSteven Rostedt (Red Hat) * ring_buffer_read_events_cpu - get the number of events successfully read 3291ad964704SSteven Rostedt (Red Hat) * @buffer: The ring buffer 3292ad964704SSteven Rostedt (Red Hat) * @cpu: The per CPU buffer to get the number of events read 3293ad964704SSteven Rostedt (Red Hat) */ 3294ad964704SSteven Rostedt (Red Hat) unsigned long 3295ad964704SSteven Rostedt (Red Hat) ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu) 3296ad964704SSteven Rostedt (Red Hat) { 3297ad964704SSteven Rostedt (Red Hat) struct ring_buffer_per_cpu *cpu_buffer; 3298ad964704SSteven Rostedt (Red Hat) 3299ad964704SSteven Rostedt (Red Hat) if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3300ad964704SSteven Rostedt (Red Hat) return 0; 3301ad964704SSteven Rostedt (Red Hat) 3302ad964704SSteven Rostedt (Red Hat) cpu_buffer = buffer->buffers[cpu]; 3303ad964704SSteven Rostedt (Red Hat) return cpu_buffer->read; 3304ad964704SSteven Rostedt (Red Hat) } 3305ad964704SSteven Rostedt (Red Hat) EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); 3306ad964704SSteven Rostedt (Red Hat) 3307ad964704SSteven Rostedt (Red Hat) /** 33087a8e76a3SSteven Rostedt * ring_buffer_entries - get the number of entries in a buffer 33097a8e76a3SSteven Rostedt * @buffer: The ring buffer 33107a8e76a3SSteven Rostedt * 33117a8e76a3SSteven Rostedt * Returns the total number of entries in the ring buffer 33127a8e76a3SSteven Rostedt * (all CPU entries) 33137a8e76a3SSteven Rostedt */ 33147a8e76a3SSteven Rostedt unsigned long ring_buffer_entries(struct ring_buffer *buffer) 33157a8e76a3SSteven Rostedt { 33167a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 33177a8e76a3SSteven Rostedt unsigned long entries = 0; 33187a8e76a3SSteven Rostedt int cpu; 33197a8e76a3SSteven Rostedt 33207a8e76a3SSteven Rostedt /* if you care about this being correct, lock the buffer */ 33217a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 33227a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 3323f6195aa0SSteven Rostedt entries += rb_num_of_entries(cpu_buffer); 33247a8e76a3SSteven Rostedt } 33257a8e76a3SSteven Rostedt 33267a8e76a3SSteven Rostedt return entries; 33277a8e76a3SSteven Rostedt } 3328c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries); 33297a8e76a3SSteven Rostedt 33307a8e76a3SSteven Rostedt /** 333167b394f7SJiri Olsa * ring_buffer_overruns - get the number of overruns in buffer 33327a8e76a3SSteven Rostedt * @buffer: The ring buffer 33337a8e76a3SSteven Rostedt * 33347a8e76a3SSteven Rostedt * Returns the total number of overruns in the ring buffer 33357a8e76a3SSteven Rostedt * (all CPU entries) 33367a8e76a3SSteven Rostedt */ 33377a8e76a3SSteven Rostedt unsigned long ring_buffer_overruns(struct ring_buffer *buffer) 33387a8e76a3SSteven Rostedt { 33397a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 33407a8e76a3SSteven Rostedt unsigned long overruns = 0; 33417a8e76a3SSteven Rostedt int cpu; 33427a8e76a3SSteven Rostedt 33437a8e76a3SSteven Rostedt /* if you care about this being correct, lock the buffer */ 33447a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 33457a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 334677ae365eSSteven Rostedt overruns += local_read(&cpu_buffer->overrun); 33477a8e76a3SSteven Rostedt } 33487a8e76a3SSteven Rostedt 33497a8e76a3SSteven Rostedt return overruns; 33507a8e76a3SSteven Rostedt } 3351c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overruns); 33527a8e76a3SSteven Rostedt 3353642edba5SSteven Rostedt static void rb_iter_reset(struct ring_buffer_iter *iter) 33547a8e76a3SSteven Rostedt { 33557a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 33567a8e76a3SSteven Rostedt 3357d769041fSSteven Rostedt /* Iterator usage is expected to have record disabled */ 3358d769041fSSteven Rostedt if (list_empty(&cpu_buffer->reader_page->list)) { 335977ae365eSSteven Rostedt iter->head_page = rb_set_head_page(cpu_buffer); 336077ae365eSSteven Rostedt if (unlikely(!iter->head_page)) 336177ae365eSSteven Rostedt return; 336277ae365eSSteven Rostedt iter->head = iter->head_page->read; 3363d769041fSSteven Rostedt } else { 3364d769041fSSteven Rostedt iter->head_page = cpu_buffer->reader_page; 33656f807acdSSteven Rostedt iter->head = cpu_buffer->reader_page->read; 3366d769041fSSteven Rostedt } 3367d769041fSSteven Rostedt if (iter->head) 3368d769041fSSteven Rostedt iter->read_stamp = cpu_buffer->read_stamp; 3369d769041fSSteven Rostedt else 3370abc9b56dSSteven Rostedt iter->read_stamp = iter->head_page->page->time_stamp; 3371492a74f4SSteven Rostedt iter->cache_reader_page = cpu_buffer->reader_page; 3372492a74f4SSteven Rostedt iter->cache_read = cpu_buffer->read; 3373642edba5SSteven Rostedt } 3374f83c9d0fSSteven Rostedt 3375642edba5SSteven Rostedt /** 3376642edba5SSteven Rostedt * ring_buffer_iter_reset - reset an iterator 3377642edba5SSteven Rostedt * @iter: The iterator to reset 3378642edba5SSteven Rostedt * 3379642edba5SSteven Rostedt * Resets the iterator, so that it will start from the beginning 3380642edba5SSteven Rostedt * again. 3381642edba5SSteven Rostedt */ 3382642edba5SSteven Rostedt void ring_buffer_iter_reset(struct ring_buffer_iter *iter) 3383642edba5SSteven Rostedt { 3384554f786eSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 3385642edba5SSteven Rostedt unsigned long flags; 3386642edba5SSteven Rostedt 3387554f786eSSteven Rostedt if (!iter) 3388554f786eSSteven Rostedt return; 3389554f786eSSteven Rostedt 3390554f786eSSteven Rostedt cpu_buffer = iter->cpu_buffer; 3391554f786eSSteven Rostedt 33925389f6faSThomas Gleixner raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3393642edba5SSteven Rostedt rb_iter_reset(iter); 33945389f6faSThomas Gleixner raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 33957a8e76a3SSteven Rostedt } 3396c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); 33977a8e76a3SSteven Rostedt 33987a8e76a3SSteven Rostedt /** 33997a8e76a3SSteven Rostedt * ring_buffer_iter_empty - check if an iterator has no more to read 34007a8e76a3SSteven Rostedt * @iter: The iterator to check 34017a8e76a3SSteven Rostedt */ 34027a8e76a3SSteven Rostedt int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 34037a8e76a3SSteven Rostedt { 34047a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 34057a8e76a3SSteven Rostedt 34067a8e76a3SSteven Rostedt cpu_buffer = iter->cpu_buffer; 34077a8e76a3SSteven Rostedt 3408bf41a158SSteven Rostedt return iter->head_page == cpu_buffer->commit_page && 3409bf41a158SSteven Rostedt iter->head == rb_commit_index(cpu_buffer); 34107a8e76a3SSteven Rostedt } 3411c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 34127a8e76a3SSteven Rostedt 34137a8e76a3SSteven Rostedt static void 34147a8e76a3SSteven Rostedt rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 34157a8e76a3SSteven Rostedt struct ring_buffer_event *event) 34167a8e76a3SSteven Rostedt { 34177a8e76a3SSteven Rostedt u64 delta; 34187a8e76a3SSteven Rostedt 3419334d4169SLai Jiangshan switch (event->type_len) { 34207a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 34217a8e76a3SSteven Rostedt return; 34227a8e76a3SSteven Rostedt 34237a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 34247a8e76a3SSteven Rostedt delta = event->array[0]; 34257a8e76a3SSteven Rostedt delta <<= TS_SHIFT; 34267a8e76a3SSteven Rostedt delta += event->time_delta; 34277a8e76a3SSteven Rostedt cpu_buffer->read_stamp += delta; 34287a8e76a3SSteven Rostedt return; 34297a8e76a3SSteven Rostedt 34307a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 34317a8e76a3SSteven Rostedt /* FIXME: not implemented */ 34327a8e76a3SSteven Rostedt return; 34337a8e76a3SSteven Rostedt 34347a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 34357a8e76a3SSteven Rostedt cpu_buffer->read_stamp += event->time_delta; 34367a8e76a3SSteven Rostedt return; 34377a8e76a3SSteven Rostedt 34387a8e76a3SSteven Rostedt default: 34397a8e76a3SSteven Rostedt BUG(); 34407a8e76a3SSteven Rostedt } 34417a8e76a3SSteven Rostedt return; 34427a8e76a3SSteven Rostedt } 34437a8e76a3SSteven Rostedt 34447a8e76a3SSteven Rostedt static void 34457a8e76a3SSteven Rostedt rb_update_iter_read_stamp(struct ring_buffer_iter *iter, 34467a8e76a3SSteven Rostedt struct ring_buffer_event *event) 34477a8e76a3SSteven Rostedt { 34487a8e76a3SSteven Rostedt u64 delta; 34497a8e76a3SSteven Rostedt 3450334d4169SLai Jiangshan switch (event->type_len) { 34517a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 34527a8e76a3SSteven Rostedt return; 34537a8e76a3SSteven Rostedt 34547a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 34557a8e76a3SSteven Rostedt delta = event->array[0]; 34567a8e76a3SSteven Rostedt delta <<= TS_SHIFT; 34577a8e76a3SSteven Rostedt delta += event->time_delta; 34587a8e76a3SSteven Rostedt iter->read_stamp += delta; 34597a8e76a3SSteven Rostedt return; 34607a8e76a3SSteven Rostedt 34617a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 34627a8e76a3SSteven Rostedt /* FIXME: not implemented */ 34637a8e76a3SSteven Rostedt return; 34647a8e76a3SSteven Rostedt 34657a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 34667a8e76a3SSteven Rostedt iter->read_stamp += event->time_delta; 34677a8e76a3SSteven Rostedt return; 34687a8e76a3SSteven Rostedt 34697a8e76a3SSteven Rostedt default: 34707a8e76a3SSteven Rostedt BUG(); 34717a8e76a3SSteven Rostedt } 34727a8e76a3SSteven Rostedt return; 34737a8e76a3SSteven Rostedt } 34747a8e76a3SSteven Rostedt 3475d769041fSSteven Rostedt static struct buffer_page * 3476d769041fSSteven Rostedt rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 34777a8e76a3SSteven Rostedt { 3478d769041fSSteven Rostedt struct buffer_page *reader = NULL; 347966a8cb95SSteven Rostedt unsigned long overwrite; 3480d769041fSSteven Rostedt unsigned long flags; 3481818e3dd3SSteven Rostedt int nr_loops = 0; 348277ae365eSSteven Rostedt int ret; 3483d769041fSSteven Rostedt 34843e03fb7fSSteven Rostedt local_irq_save(flags); 34850199c4e6SThomas Gleixner arch_spin_lock(&cpu_buffer->lock); 3486d769041fSSteven Rostedt 3487d769041fSSteven Rostedt again: 3488818e3dd3SSteven Rostedt /* 3489818e3dd3SSteven Rostedt * This should normally only loop twice. But because the 3490818e3dd3SSteven Rostedt * start of the reader inserts an empty page, it causes 3491818e3dd3SSteven Rostedt * a case where we will loop three times. There should be no 3492818e3dd3SSteven Rostedt * reason to loop four times (that I know of). 3493818e3dd3SSteven Rostedt */ 34943e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { 3495818e3dd3SSteven Rostedt reader = NULL; 3496818e3dd3SSteven Rostedt goto out; 3497818e3dd3SSteven Rostedt } 3498818e3dd3SSteven Rostedt 3499d769041fSSteven Rostedt reader = cpu_buffer->reader_page; 3500d769041fSSteven Rostedt 3501d769041fSSteven Rostedt /* If there's more to read, return this page */ 3502bf41a158SSteven Rostedt if (cpu_buffer->reader_page->read < rb_page_size(reader)) 3503d769041fSSteven Rostedt goto out; 3504d769041fSSteven Rostedt 3505d769041fSSteven Rostedt /* Never should we have an index greater than the size */ 35063e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 35073e89c7bbSSteven Rostedt cpu_buffer->reader_page->read > rb_page_size(reader))) 35083e89c7bbSSteven Rostedt goto out; 3509d769041fSSteven Rostedt 3510d769041fSSteven Rostedt /* check if we caught up to the tail */ 3511d769041fSSteven Rostedt reader = NULL; 3512bf41a158SSteven Rostedt if (cpu_buffer->commit_page == cpu_buffer->reader_page) 3513d769041fSSteven Rostedt goto out; 35147a8e76a3SSteven Rostedt 3515a5fb8331SSteven Rostedt /* Don't bother swapping if the ring buffer is empty */ 3516a5fb8331SSteven Rostedt if (rb_num_of_entries(cpu_buffer) == 0) 3517a5fb8331SSteven Rostedt goto out; 3518a5fb8331SSteven Rostedt 35197a8e76a3SSteven Rostedt /* 3520d769041fSSteven Rostedt * Reset the reader page to size zero. 35217a8e76a3SSteven Rostedt */ 352277ae365eSSteven Rostedt local_set(&cpu_buffer->reader_page->write, 0); 352377ae365eSSteven Rostedt local_set(&cpu_buffer->reader_page->entries, 0); 352477ae365eSSteven Rostedt local_set(&cpu_buffer->reader_page->page->commit, 0); 3525ff0ff84aSSteven Rostedt cpu_buffer->reader_page->real_end = 0; 3526d769041fSSteven Rostedt 352777ae365eSSteven Rostedt spin: 352877ae365eSSteven Rostedt /* 352977ae365eSSteven Rostedt * Splice the empty reader page into the list around the head. 353077ae365eSSteven Rostedt */ 353177ae365eSSteven Rostedt reader = rb_set_head_page(cpu_buffer); 353254f7be5bSSteven Rostedt if (!reader) 353354f7be5bSSteven Rostedt goto out; 35340e1ff5d7SSteven Rostedt cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); 3535d769041fSSteven Rostedt cpu_buffer->reader_page->list.prev = reader->list.prev; 3536bf41a158SSteven Rostedt 35373adc54faSSteven Rostedt /* 35383adc54faSSteven Rostedt * cpu_buffer->pages just needs to point to the buffer, it 35393adc54faSSteven Rostedt * has no specific buffer page to point to. Lets move it out 354025985edcSLucas De Marchi * of our way so we don't accidentally swap it. 35413adc54faSSteven Rostedt */ 35423adc54faSSteven Rostedt cpu_buffer->pages = reader->list.prev; 35433adc54faSSteven Rostedt 354477ae365eSSteven Rostedt /* The reader page will be pointing to the new head */ 354577ae365eSSteven Rostedt rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list); 3546d769041fSSteven Rostedt 3547d769041fSSteven Rostedt /* 354866a8cb95SSteven Rostedt * We want to make sure we read the overruns after we set up our 354966a8cb95SSteven Rostedt * pointers to the next object. The writer side does a 355066a8cb95SSteven Rostedt * cmpxchg to cross pages which acts as the mb on the writer 355166a8cb95SSteven Rostedt * side. Note, the reader will constantly fail the swap 355266a8cb95SSteven Rostedt * while the writer is updating the pointers, so this 355366a8cb95SSteven Rostedt * guarantees that the overwrite recorded here is the one we 355466a8cb95SSteven Rostedt * want to compare with the last_overrun. 355566a8cb95SSteven Rostedt */ 355666a8cb95SSteven Rostedt smp_mb(); 355766a8cb95SSteven Rostedt overwrite = local_read(&(cpu_buffer->overrun)); 355866a8cb95SSteven Rostedt 355966a8cb95SSteven Rostedt /* 356077ae365eSSteven Rostedt * Here's the tricky part. 356177ae365eSSteven Rostedt * 356277ae365eSSteven Rostedt * We need to move the pointer past the header page. 356377ae365eSSteven Rostedt * But we can only do that if a writer is not currently 356477ae365eSSteven Rostedt * moving it. The page before the header page has the 356577ae365eSSteven Rostedt * flag bit '1' set if it is pointing to the page we want. 356677ae365eSSteven Rostedt * but if the writer is in the process of moving it 356777ae365eSSteven Rostedt * than it will be '2' or already moved '0'. 3568d769041fSSteven Rostedt */ 3569d769041fSSteven Rostedt 357077ae365eSSteven Rostedt ret = rb_head_page_replace(reader, cpu_buffer->reader_page); 357177ae365eSSteven Rostedt 357277ae365eSSteven Rostedt /* 357377ae365eSSteven Rostedt * If we did not convert it, then we must try again. 357477ae365eSSteven Rostedt */ 357577ae365eSSteven Rostedt if (!ret) 357677ae365eSSteven Rostedt goto spin; 357777ae365eSSteven Rostedt 357877ae365eSSteven Rostedt /* 357977ae365eSSteven Rostedt * Yeah! We succeeded in replacing the page. 358077ae365eSSteven Rostedt * 358177ae365eSSteven Rostedt * Now make the new head point back to the reader page. 358277ae365eSSteven Rostedt */ 35835ded3dc6SDavid Sharp rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; 35847a8e76a3SSteven Rostedt rb_inc_page(cpu_buffer, &cpu_buffer->head_page); 3585d769041fSSteven Rostedt 3586d769041fSSteven Rostedt /* Finally update the reader page to the new head */ 3587d769041fSSteven Rostedt cpu_buffer->reader_page = reader; 3588d769041fSSteven Rostedt rb_reset_reader_page(cpu_buffer); 3589d769041fSSteven Rostedt 359066a8cb95SSteven Rostedt if (overwrite != cpu_buffer->last_overrun) { 359166a8cb95SSteven Rostedt cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; 359266a8cb95SSteven Rostedt cpu_buffer->last_overrun = overwrite; 359366a8cb95SSteven Rostedt } 359466a8cb95SSteven Rostedt 3595d769041fSSteven Rostedt goto again; 3596d769041fSSteven Rostedt 3597d769041fSSteven Rostedt out: 35980199c4e6SThomas Gleixner arch_spin_unlock(&cpu_buffer->lock); 35993e03fb7fSSteven Rostedt local_irq_restore(flags); 3600d769041fSSteven Rostedt 3601d769041fSSteven Rostedt return reader; 36027a8e76a3SSteven Rostedt } 36037a8e76a3SSteven Rostedt 3604d769041fSSteven Rostedt static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) 3605d769041fSSteven Rostedt { 3606d769041fSSteven Rostedt struct ring_buffer_event *event; 3607d769041fSSteven Rostedt struct buffer_page *reader; 3608d769041fSSteven Rostedt unsigned length; 3609d769041fSSteven Rostedt 3610d769041fSSteven Rostedt reader = rb_get_reader_page(cpu_buffer); 3611d769041fSSteven Rostedt 3612d769041fSSteven Rostedt /* This function should not be called when buffer is empty */ 36133e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, !reader)) 36143e89c7bbSSteven Rostedt return; 3615d769041fSSteven Rostedt 3616d769041fSSteven Rostedt event = rb_reader_event(cpu_buffer); 36177a8e76a3SSteven Rostedt 3618a1863c21SSteven Rostedt if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 3619e4906effSSteven Rostedt cpu_buffer->read++; 36207a8e76a3SSteven Rostedt 36217a8e76a3SSteven Rostedt rb_update_read_stamp(cpu_buffer, event); 36227a8e76a3SSteven Rostedt 3623d769041fSSteven Rostedt length = rb_event_length(event); 36246f807acdSSteven Rostedt cpu_buffer->reader_page->read += length; 36257a8e76a3SSteven Rostedt } 36267a8e76a3SSteven Rostedt 36277a8e76a3SSteven Rostedt static void rb_advance_iter(struct ring_buffer_iter *iter) 36287a8e76a3SSteven Rostedt { 36297a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 36307a8e76a3SSteven Rostedt struct ring_buffer_event *event; 36317a8e76a3SSteven Rostedt unsigned length; 36327a8e76a3SSteven Rostedt 36337a8e76a3SSteven Rostedt cpu_buffer = iter->cpu_buffer; 36347a8e76a3SSteven Rostedt 36357a8e76a3SSteven Rostedt /* 36367a8e76a3SSteven Rostedt * Check if we are at the end of the buffer. 36377a8e76a3SSteven Rostedt */ 3638bf41a158SSteven Rostedt if (iter->head >= rb_page_size(iter->head_page)) { 3639ea05b57cSSteven Rostedt /* discarded commits can make the page empty */ 3640ea05b57cSSteven Rostedt if (iter->head_page == cpu_buffer->commit_page) 36413e89c7bbSSteven Rostedt return; 3642d769041fSSteven Rostedt rb_inc_iter(iter); 36437a8e76a3SSteven Rostedt return; 36447a8e76a3SSteven Rostedt } 36457a8e76a3SSteven Rostedt 36467a8e76a3SSteven Rostedt event = rb_iter_head_event(iter); 36477a8e76a3SSteven Rostedt 36487a8e76a3SSteven Rostedt length = rb_event_length(event); 36497a8e76a3SSteven Rostedt 36507a8e76a3SSteven Rostedt /* 36517a8e76a3SSteven Rostedt * This should not be called to advance the header if we are 36527a8e76a3SSteven Rostedt * at the tail of the buffer. 36537a8e76a3SSteven Rostedt */ 36543e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 3655f536aafcSSteven Rostedt (iter->head_page == cpu_buffer->commit_page) && 36563e89c7bbSSteven Rostedt (iter->head + length > rb_commit_index(cpu_buffer)))) 36573e89c7bbSSteven Rostedt return; 36587a8e76a3SSteven Rostedt 36597a8e76a3SSteven Rostedt rb_update_iter_read_stamp(iter, event); 36607a8e76a3SSteven Rostedt 36617a8e76a3SSteven Rostedt iter->head += length; 36627a8e76a3SSteven Rostedt 36637a8e76a3SSteven Rostedt /* check for end of page padding */ 3664bf41a158SSteven Rostedt if ((iter->head >= rb_page_size(iter->head_page)) && 3665bf41a158SSteven Rostedt (iter->head_page != cpu_buffer->commit_page)) 3666771e0384SSteven Rostedt rb_inc_iter(iter); 36677a8e76a3SSteven Rostedt } 36687a8e76a3SSteven Rostedt 366966a8cb95SSteven Rostedt static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) 367066a8cb95SSteven Rostedt { 367166a8cb95SSteven Rostedt return cpu_buffer->lost_events; 367266a8cb95SSteven Rostedt } 367366a8cb95SSteven Rostedt 3674f83c9d0fSSteven Rostedt static struct ring_buffer_event * 367566a8cb95SSteven Rostedt rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, 367666a8cb95SSteven Rostedt unsigned long *lost_events) 36777a8e76a3SSteven Rostedt { 36787a8e76a3SSteven Rostedt struct ring_buffer_event *event; 3679d769041fSSteven Rostedt struct buffer_page *reader; 3680818e3dd3SSteven Rostedt int nr_loops = 0; 36817a8e76a3SSteven Rostedt 36827a8e76a3SSteven Rostedt again: 3683818e3dd3SSteven Rostedt /* 368469d1b839SSteven Rostedt * We repeat when a time extend is encountered. 368569d1b839SSteven Rostedt * Since the time extend is always attached to a data event, 368669d1b839SSteven Rostedt * we should never loop more than once. 368769d1b839SSteven Rostedt * (We never hit the following condition more than twice). 3688818e3dd3SSteven Rostedt */ 368969d1b839SSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 3690818e3dd3SSteven Rostedt return NULL; 3691818e3dd3SSteven Rostedt 3692d769041fSSteven Rostedt reader = rb_get_reader_page(cpu_buffer); 3693d769041fSSteven Rostedt if (!reader) 36947a8e76a3SSteven Rostedt return NULL; 36957a8e76a3SSteven Rostedt 3696d769041fSSteven Rostedt event = rb_reader_event(cpu_buffer); 36977a8e76a3SSteven Rostedt 3698334d4169SLai Jiangshan switch (event->type_len) { 36997a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 37002d622719STom Zanussi if (rb_null_event(event)) 3701bf41a158SSteven Rostedt RB_WARN_ON(cpu_buffer, 1); 37022d622719STom Zanussi /* 37032d622719STom Zanussi * Because the writer could be discarding every 37042d622719STom Zanussi * event it creates (which would probably be bad) 37052d622719STom Zanussi * if we were to go back to "again" then we may never 37062d622719STom Zanussi * catch up, and will trigger the warn on, or lock 37072d622719STom Zanussi * the box. Return the padding, and we will release 37082d622719STom Zanussi * the current locks, and try again. 37092d622719STom Zanussi */ 37102d622719STom Zanussi return event; 37117a8e76a3SSteven Rostedt 37127a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 37137a8e76a3SSteven Rostedt /* Internal data, OK to advance */ 3714d769041fSSteven Rostedt rb_advance_reader(cpu_buffer); 37157a8e76a3SSteven Rostedt goto again; 37167a8e76a3SSteven Rostedt 37177a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 37187a8e76a3SSteven Rostedt /* FIXME: not implemented */ 3719d769041fSSteven Rostedt rb_advance_reader(cpu_buffer); 37207a8e76a3SSteven Rostedt goto again; 37217a8e76a3SSteven Rostedt 37227a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 37237a8e76a3SSteven Rostedt if (ts) { 37247a8e76a3SSteven Rostedt *ts = cpu_buffer->read_stamp + event->time_delta; 3725d8eeb2d3SRobert Richter ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 372637886f6aSSteven Rostedt cpu_buffer->cpu, ts); 37277a8e76a3SSteven Rostedt } 372866a8cb95SSteven Rostedt if (lost_events) 372966a8cb95SSteven Rostedt *lost_events = rb_lost_events(cpu_buffer); 37307a8e76a3SSteven Rostedt return event; 37317a8e76a3SSteven Rostedt 37327a8e76a3SSteven Rostedt default: 37337a8e76a3SSteven Rostedt BUG(); 37347a8e76a3SSteven Rostedt } 37357a8e76a3SSteven Rostedt 37367a8e76a3SSteven Rostedt return NULL; 37377a8e76a3SSteven Rostedt } 3738c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_peek); 37397a8e76a3SSteven Rostedt 3740f83c9d0fSSteven Rostedt static struct ring_buffer_event * 3741f83c9d0fSSteven Rostedt rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 37427a8e76a3SSteven Rostedt { 37437a8e76a3SSteven Rostedt struct ring_buffer *buffer; 37447a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 37457a8e76a3SSteven Rostedt struct ring_buffer_event *event; 3746818e3dd3SSteven Rostedt int nr_loops = 0; 37477a8e76a3SSteven Rostedt 37487a8e76a3SSteven Rostedt cpu_buffer = iter->cpu_buffer; 37497a8e76a3SSteven Rostedt buffer = cpu_buffer->buffer; 37507a8e76a3SSteven Rostedt 3751492a74f4SSteven Rostedt /* 3752492a74f4SSteven Rostedt * Check if someone performed a consuming read to 3753492a74f4SSteven Rostedt * the buffer. A consuming read invalidates the iterator 3754492a74f4SSteven Rostedt * and we need to reset the iterator in this case. 3755492a74f4SSteven Rostedt */ 3756492a74f4SSteven Rostedt if (unlikely(iter->cache_read != cpu_buffer->read || 3757492a74f4SSteven Rostedt iter->cache_reader_page != cpu_buffer->reader_page)) 3758492a74f4SSteven Rostedt rb_iter_reset(iter); 3759492a74f4SSteven Rostedt 37607a8e76a3SSteven Rostedt again: 37613c05d748SSteven Rostedt if (ring_buffer_iter_empty(iter)) 37623c05d748SSteven Rostedt return NULL; 37633c05d748SSteven Rostedt 3764818e3dd3SSteven Rostedt /* 376569d1b839SSteven Rostedt * We repeat when a time extend is encountered. 376669d1b839SSteven Rostedt * Since the time extend is always attached to a data event, 376769d1b839SSteven Rostedt * we should never loop more than once. 376869d1b839SSteven Rostedt * (We never hit the following condition more than twice). 3769818e3dd3SSteven Rostedt */ 377069d1b839SSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 3771818e3dd3SSteven Rostedt return NULL; 3772818e3dd3SSteven Rostedt 37737a8e76a3SSteven Rostedt if (rb_per_cpu_empty(cpu_buffer)) 37747a8e76a3SSteven Rostedt return NULL; 37757a8e76a3SSteven Rostedt 37763c05d748SSteven Rostedt if (iter->head >= local_read(&iter->head_page->page->commit)) { 37773c05d748SSteven Rostedt rb_inc_iter(iter); 37783c05d748SSteven Rostedt goto again; 37793c05d748SSteven Rostedt } 37803c05d748SSteven Rostedt 37817a8e76a3SSteven Rostedt event = rb_iter_head_event(iter); 37827a8e76a3SSteven Rostedt 3783334d4169SLai Jiangshan switch (event->type_len) { 37847a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 37852d622719STom Zanussi if (rb_null_event(event)) { 3786d769041fSSteven Rostedt rb_inc_iter(iter); 37877a8e76a3SSteven Rostedt goto again; 37882d622719STom Zanussi } 37892d622719STom Zanussi rb_advance_iter(iter); 37902d622719STom Zanussi return event; 37917a8e76a3SSteven Rostedt 37927a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 37937a8e76a3SSteven Rostedt /* Internal data, OK to advance */ 37947a8e76a3SSteven Rostedt rb_advance_iter(iter); 37957a8e76a3SSteven Rostedt goto again; 37967a8e76a3SSteven Rostedt 37977a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 37987a8e76a3SSteven Rostedt /* FIXME: not implemented */ 37997a8e76a3SSteven Rostedt rb_advance_iter(iter); 38007a8e76a3SSteven Rostedt goto again; 38017a8e76a3SSteven Rostedt 38027a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 38037a8e76a3SSteven Rostedt if (ts) { 38047a8e76a3SSteven Rostedt *ts = iter->read_stamp + event->time_delta; 380537886f6aSSteven Rostedt ring_buffer_normalize_time_stamp(buffer, 380637886f6aSSteven Rostedt cpu_buffer->cpu, ts); 38077a8e76a3SSteven Rostedt } 38087a8e76a3SSteven Rostedt return event; 38097a8e76a3SSteven Rostedt 38107a8e76a3SSteven Rostedt default: 38117a8e76a3SSteven Rostedt BUG(); 38127a8e76a3SSteven Rostedt } 38137a8e76a3SSteven Rostedt 38147a8e76a3SSteven Rostedt return NULL; 38157a8e76a3SSteven Rostedt } 3816c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); 38177a8e76a3SSteven Rostedt 38188d707e8eSSteven Rostedt static inline int rb_ok_to_lock(void) 38198d707e8eSSteven Rostedt { 38208d707e8eSSteven Rostedt /* 38218d707e8eSSteven Rostedt * If an NMI die dumps out the content of the ring buffer 38228d707e8eSSteven Rostedt * do not grab locks. We also permanently disable the ring 38238d707e8eSSteven Rostedt * buffer too. A one time deal is all you get from reading 38248d707e8eSSteven Rostedt * the ring buffer from an NMI. 38258d707e8eSSteven Rostedt */ 3826464e85ebSSteven Rostedt if (likely(!in_nmi())) 38278d707e8eSSteven Rostedt return 1; 38288d707e8eSSteven Rostedt 38298d707e8eSSteven Rostedt tracing_off_permanent(); 38308d707e8eSSteven Rostedt return 0; 38318d707e8eSSteven Rostedt } 38328d707e8eSSteven Rostedt 38337a8e76a3SSteven Rostedt /** 3834f83c9d0fSSteven Rostedt * ring_buffer_peek - peek at the next event to be read 3835f83c9d0fSSteven Rostedt * @buffer: The ring buffer to read 3836f83c9d0fSSteven Rostedt * @cpu: The cpu to peak at 3837f83c9d0fSSteven Rostedt * @ts: The timestamp counter of this event. 383866a8cb95SSteven Rostedt * @lost_events: a variable to store if events were lost (may be NULL) 3839f83c9d0fSSteven Rostedt * 3840f83c9d0fSSteven Rostedt * This will return the event that will be read next, but does 3841f83c9d0fSSteven Rostedt * not consume the data. 3842f83c9d0fSSteven Rostedt */ 3843f83c9d0fSSteven Rostedt struct ring_buffer_event * 384466a8cb95SSteven Rostedt ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, 384566a8cb95SSteven Rostedt unsigned long *lost_events) 3846f83c9d0fSSteven Rostedt { 3847f83c9d0fSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 38488aabee57SSteven Rostedt struct ring_buffer_event *event; 3849f83c9d0fSSteven Rostedt unsigned long flags; 38508d707e8eSSteven Rostedt int dolock; 3851f83c9d0fSSteven Rostedt 3852554f786eSSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask)) 38538aabee57SSteven Rostedt return NULL; 3854554f786eSSteven Rostedt 38558d707e8eSSteven Rostedt dolock = rb_ok_to_lock(); 38562d622719STom Zanussi again: 38578d707e8eSSteven Rostedt local_irq_save(flags); 38588d707e8eSSteven Rostedt if (dolock) 38595389f6faSThomas Gleixner raw_spin_lock(&cpu_buffer->reader_lock); 386066a8cb95SSteven Rostedt event = rb_buffer_peek(cpu_buffer, ts, lost_events); 3861469535a5SRobert Richter if (event && event->type_len == RINGBUF_TYPE_PADDING) 3862469535a5SRobert Richter rb_advance_reader(cpu_buffer); 38638d707e8eSSteven Rostedt if (dolock) 38645389f6faSThomas Gleixner raw_spin_unlock(&cpu_buffer->reader_lock); 38658d707e8eSSteven Rostedt local_irq_restore(flags); 3866f83c9d0fSSteven Rostedt 38671b959e18SSteven Rostedt if (event && event->type_len == RINGBUF_TYPE_PADDING) 38682d622719STom Zanussi goto again; 38692d622719STom Zanussi 3870f83c9d0fSSteven Rostedt return event; 3871f83c9d0fSSteven Rostedt } 3872f83c9d0fSSteven Rostedt 3873f83c9d0fSSteven Rostedt /** 3874f83c9d0fSSteven Rostedt * ring_buffer_iter_peek - peek at the next event to be read 3875f83c9d0fSSteven Rostedt * @iter: The ring buffer iterator 3876f83c9d0fSSteven Rostedt * @ts: The timestamp counter of this event. 3877f83c9d0fSSteven Rostedt * 3878f83c9d0fSSteven Rostedt * This will return the event that will be read next, but does 3879f83c9d0fSSteven Rostedt * not increment the iterator. 3880f83c9d0fSSteven Rostedt */ 3881f83c9d0fSSteven Rostedt struct ring_buffer_event * 3882f83c9d0fSSteven Rostedt ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 3883f83c9d0fSSteven Rostedt { 3884f83c9d0fSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 3885f83c9d0fSSteven Rostedt struct ring_buffer_event *event; 3886f83c9d0fSSteven Rostedt unsigned long flags; 3887f83c9d0fSSteven Rostedt 38882d622719STom Zanussi again: 38895389f6faSThomas Gleixner raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3890f83c9d0fSSteven Rostedt event = rb_iter_peek(iter, ts); 38915389f6faSThomas Gleixner raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3892f83c9d0fSSteven Rostedt 38931b959e18SSteven Rostedt if (event && event->type_len == RINGBUF_TYPE_PADDING) 38942d622719STom Zanussi goto again; 38952d622719STom Zanussi 3896f83c9d0fSSteven Rostedt return event; 3897f83c9d0fSSteven Rostedt } 3898f83c9d0fSSteven Rostedt 3899f83c9d0fSSteven Rostedt /** 39007a8e76a3SSteven Rostedt * ring_buffer_consume - return an event and consume it 39017a8e76a3SSteven Rostedt * @buffer: The ring buffer to get the next event from 390266a8cb95SSteven Rostedt * @cpu: the cpu to read the buffer from 390366a8cb95SSteven Rostedt * @ts: a variable to store the timestamp (may be NULL) 390466a8cb95SSteven Rostedt * @lost_events: a variable to store if events were lost (may be NULL) 39057a8e76a3SSteven Rostedt * 39067a8e76a3SSteven Rostedt * Returns the next event in the ring buffer, and that event is consumed. 39077a8e76a3SSteven Rostedt * Meaning, that sequential reads will keep returning a different event, 39087a8e76a3SSteven Rostedt * and eventually empty the ring buffer if the producer is slower. 39097a8e76a3SSteven Rostedt */ 39107a8e76a3SSteven Rostedt struct ring_buffer_event * 391166a8cb95SSteven Rostedt ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, 391266a8cb95SSteven Rostedt unsigned long *lost_events) 39137a8e76a3SSteven Rostedt { 3914554f786eSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 3915554f786eSSteven Rostedt struct ring_buffer_event *event = NULL; 3916f83c9d0fSSteven Rostedt unsigned long flags; 39178d707e8eSSteven Rostedt int dolock; 39188d707e8eSSteven Rostedt 39198d707e8eSSteven Rostedt dolock = rb_ok_to_lock(); 39207a8e76a3SSteven Rostedt 39212d622719STom Zanussi again: 3922554f786eSSteven Rostedt /* might be called in atomic */ 3923554f786eSSteven Rostedt preempt_disable(); 39247a8e76a3SSteven Rostedt 3925554f786eSSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3926554f786eSSteven Rostedt goto out; 3927554f786eSSteven Rostedt 3928554f786eSSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 39298d707e8eSSteven Rostedt local_irq_save(flags); 39308d707e8eSSteven Rostedt if (dolock) 39315389f6faSThomas Gleixner raw_spin_lock(&cpu_buffer->reader_lock); 39327a8e76a3SSteven Rostedt 393366a8cb95SSteven Rostedt event = rb_buffer_peek(cpu_buffer, ts, lost_events); 393466a8cb95SSteven Rostedt if (event) { 393566a8cb95SSteven Rostedt cpu_buffer->lost_events = 0; 3936d769041fSSteven Rostedt rb_advance_reader(cpu_buffer); 393766a8cb95SSteven Rostedt } 39387a8e76a3SSteven Rostedt 39398d707e8eSSteven Rostedt if (dolock) 39405389f6faSThomas Gleixner raw_spin_unlock(&cpu_buffer->reader_lock); 39418d707e8eSSteven Rostedt local_irq_restore(flags); 3942f83c9d0fSSteven Rostedt 3943554f786eSSteven Rostedt out: 3944554f786eSSteven Rostedt preempt_enable(); 3945554f786eSSteven Rostedt 39461b959e18SSteven Rostedt if (event && event->type_len == RINGBUF_TYPE_PADDING) 39472d622719STom Zanussi goto again; 39482d622719STom Zanussi 39497a8e76a3SSteven Rostedt return event; 39507a8e76a3SSteven Rostedt } 3951c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_consume); 39527a8e76a3SSteven Rostedt 39537a8e76a3SSteven Rostedt /** 395472c9ddfdSDavid Miller * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer 39557a8e76a3SSteven Rostedt * @buffer: The ring buffer to read from 39567a8e76a3SSteven Rostedt * @cpu: The cpu buffer to iterate over 39577a8e76a3SSteven Rostedt * 395872c9ddfdSDavid Miller * This performs the initial preparations necessary to iterate 395972c9ddfdSDavid Miller * through the buffer. Memory is allocated, buffer recording 396072c9ddfdSDavid Miller * is disabled, and the iterator pointer is returned to the caller. 39617a8e76a3SSteven Rostedt * 396272c9ddfdSDavid Miller * Disabling buffer recordng prevents the reading from being 396372c9ddfdSDavid Miller * corrupted. This is not a consuming read, so a producer is not 396472c9ddfdSDavid Miller * expected. 396572c9ddfdSDavid Miller * 396672c9ddfdSDavid Miller * After a sequence of ring_buffer_read_prepare calls, the user is 3967d611851bSzhangwei(Jovi) * expected to make at least one call to ring_buffer_read_prepare_sync. 396872c9ddfdSDavid Miller * Afterwards, ring_buffer_read_start is invoked to get things going 396972c9ddfdSDavid Miller * for real. 397072c9ddfdSDavid Miller * 3971d611851bSzhangwei(Jovi) * This overall must be paired with ring_buffer_read_finish. 39727a8e76a3SSteven Rostedt */ 39737a8e76a3SSteven Rostedt struct ring_buffer_iter * 397472c9ddfdSDavid Miller ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu) 39757a8e76a3SSteven Rostedt { 39767a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 39778aabee57SSteven Rostedt struct ring_buffer_iter *iter; 39787a8e76a3SSteven Rostedt 39799e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 39808aabee57SSteven Rostedt return NULL; 39817a8e76a3SSteven Rostedt 39827a8e76a3SSteven Rostedt iter = kmalloc(sizeof(*iter), GFP_KERNEL); 39837a8e76a3SSteven Rostedt if (!iter) 39848aabee57SSteven Rostedt return NULL; 39857a8e76a3SSteven Rostedt 39867a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 39877a8e76a3SSteven Rostedt 39887a8e76a3SSteven Rostedt iter->cpu_buffer = cpu_buffer; 39897a8e76a3SSteven Rostedt 399083f40318SVaibhav Nagarnaik atomic_inc(&buffer->resize_disabled); 39917a8e76a3SSteven Rostedt atomic_inc(&cpu_buffer->record_disabled); 399272c9ddfdSDavid Miller 399372c9ddfdSDavid Miller return iter; 399472c9ddfdSDavid Miller } 399572c9ddfdSDavid Miller EXPORT_SYMBOL_GPL(ring_buffer_read_prepare); 399672c9ddfdSDavid Miller 399772c9ddfdSDavid Miller /** 399872c9ddfdSDavid Miller * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls 399972c9ddfdSDavid Miller * 400072c9ddfdSDavid Miller * All previously invoked ring_buffer_read_prepare calls to prepare 400172c9ddfdSDavid Miller * iterators will be synchronized. Afterwards, read_buffer_read_start 400272c9ddfdSDavid Miller * calls on those iterators are allowed. 400372c9ddfdSDavid Miller */ 400472c9ddfdSDavid Miller void 400572c9ddfdSDavid Miller ring_buffer_read_prepare_sync(void) 400672c9ddfdSDavid Miller { 40077a8e76a3SSteven Rostedt synchronize_sched(); 400872c9ddfdSDavid Miller } 400972c9ddfdSDavid Miller EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); 401072c9ddfdSDavid Miller 401172c9ddfdSDavid Miller /** 401272c9ddfdSDavid Miller * ring_buffer_read_start - start a non consuming read of the buffer 401372c9ddfdSDavid Miller * @iter: The iterator returned by ring_buffer_read_prepare 401472c9ddfdSDavid Miller * 401572c9ddfdSDavid Miller * This finalizes the startup of an iteration through the buffer. 401672c9ddfdSDavid Miller * The iterator comes from a call to ring_buffer_read_prepare and 401772c9ddfdSDavid Miller * an intervening ring_buffer_read_prepare_sync must have been 401872c9ddfdSDavid Miller * performed. 401972c9ddfdSDavid Miller * 4020d611851bSzhangwei(Jovi) * Must be paired with ring_buffer_read_finish. 402172c9ddfdSDavid Miller */ 402272c9ddfdSDavid Miller void 402372c9ddfdSDavid Miller ring_buffer_read_start(struct ring_buffer_iter *iter) 402472c9ddfdSDavid Miller { 402572c9ddfdSDavid Miller struct ring_buffer_per_cpu *cpu_buffer; 402672c9ddfdSDavid Miller unsigned long flags; 402772c9ddfdSDavid Miller 402872c9ddfdSDavid Miller if (!iter) 402972c9ddfdSDavid Miller return; 403072c9ddfdSDavid Miller 403172c9ddfdSDavid Miller cpu_buffer = iter->cpu_buffer; 40327a8e76a3SSteven Rostedt 40335389f6faSThomas Gleixner raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 40340199c4e6SThomas Gleixner arch_spin_lock(&cpu_buffer->lock); 4035642edba5SSteven Rostedt rb_iter_reset(iter); 40360199c4e6SThomas Gleixner arch_spin_unlock(&cpu_buffer->lock); 40375389f6faSThomas Gleixner raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 40387a8e76a3SSteven Rostedt } 4039c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_start); 40407a8e76a3SSteven Rostedt 40417a8e76a3SSteven Rostedt /** 4042d611851bSzhangwei(Jovi) * ring_buffer_read_finish - finish reading the iterator of the buffer 40437a8e76a3SSteven Rostedt * @iter: The iterator retrieved by ring_buffer_start 40447a8e76a3SSteven Rostedt * 40457a8e76a3SSteven Rostedt * This re-enables the recording to the buffer, and frees the 40467a8e76a3SSteven Rostedt * iterator. 40477a8e76a3SSteven Rostedt */ 40487a8e76a3SSteven Rostedt void 40497a8e76a3SSteven Rostedt ring_buffer_read_finish(struct ring_buffer_iter *iter) 40507a8e76a3SSteven Rostedt { 40517a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 40529366c1baSSteven Rostedt unsigned long flags; 40537a8e76a3SSteven Rostedt 4054659f451fSSteven Rostedt /* 4055659f451fSSteven Rostedt * Ring buffer is disabled from recording, here's a good place 4056659f451fSSteven Rostedt * to check the integrity of the ring buffer. 40579366c1baSSteven Rostedt * Must prevent readers from trying to read, as the check 40589366c1baSSteven Rostedt * clears the HEAD page and readers require it. 4059659f451fSSteven Rostedt */ 40609366c1baSSteven Rostedt raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4061659f451fSSteven Rostedt rb_check_pages(cpu_buffer); 40629366c1baSSteven Rostedt raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4063659f451fSSteven Rostedt 40647a8e76a3SSteven Rostedt atomic_dec(&cpu_buffer->record_disabled); 406583f40318SVaibhav Nagarnaik atomic_dec(&cpu_buffer->buffer->resize_disabled); 40667a8e76a3SSteven Rostedt kfree(iter); 40677a8e76a3SSteven Rostedt } 4068c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_finish); 40697a8e76a3SSteven Rostedt 40707a8e76a3SSteven Rostedt /** 40717a8e76a3SSteven Rostedt * ring_buffer_read - read the next item in the ring buffer by the iterator 40727a8e76a3SSteven Rostedt * @iter: The ring buffer iterator 40737a8e76a3SSteven Rostedt * @ts: The time stamp of the event read. 40747a8e76a3SSteven Rostedt * 40757a8e76a3SSteven Rostedt * This reads the next event in the ring buffer and increments the iterator. 40767a8e76a3SSteven Rostedt */ 40777a8e76a3SSteven Rostedt struct ring_buffer_event * 40787a8e76a3SSteven Rostedt ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) 40797a8e76a3SSteven Rostedt { 40807a8e76a3SSteven Rostedt struct ring_buffer_event *event; 4081f83c9d0fSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 4082f83c9d0fSSteven Rostedt unsigned long flags; 40837a8e76a3SSteven Rostedt 40845389f6faSThomas Gleixner raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 40857e9391cfSSteven Rostedt again: 4086f83c9d0fSSteven Rostedt event = rb_iter_peek(iter, ts); 40877a8e76a3SSteven Rostedt if (!event) 4088f83c9d0fSSteven Rostedt goto out; 40897a8e76a3SSteven Rostedt 40907e9391cfSSteven Rostedt if (event->type_len == RINGBUF_TYPE_PADDING) 40917e9391cfSSteven Rostedt goto again; 40927e9391cfSSteven Rostedt 40937a8e76a3SSteven Rostedt rb_advance_iter(iter); 4094f83c9d0fSSteven Rostedt out: 40955389f6faSThomas Gleixner raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 40967a8e76a3SSteven Rostedt 40977a8e76a3SSteven Rostedt return event; 40987a8e76a3SSteven Rostedt } 4099c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read); 41007a8e76a3SSteven Rostedt 41017a8e76a3SSteven Rostedt /** 41027a8e76a3SSteven Rostedt * ring_buffer_size - return the size of the ring buffer (in bytes) 41037a8e76a3SSteven Rostedt * @buffer: The ring buffer. 41047a8e76a3SSteven Rostedt */ 4105438ced17SVaibhav Nagarnaik unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu) 41067a8e76a3SSteven Rostedt { 4107438ced17SVaibhav Nagarnaik /* 4108438ced17SVaibhav Nagarnaik * Earlier, this method returned 4109438ced17SVaibhav Nagarnaik * BUF_PAGE_SIZE * buffer->nr_pages 4110438ced17SVaibhav Nagarnaik * Since the nr_pages field is now removed, we have converted this to 4111438ced17SVaibhav Nagarnaik * return the per cpu buffer value. 4112438ced17SVaibhav Nagarnaik */ 4113438ced17SVaibhav Nagarnaik if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4114438ced17SVaibhav Nagarnaik return 0; 4115438ced17SVaibhav Nagarnaik 4116438ced17SVaibhav Nagarnaik return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; 41177a8e76a3SSteven Rostedt } 4118c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_size); 41197a8e76a3SSteven Rostedt 41207a8e76a3SSteven Rostedt static void 41217a8e76a3SSteven Rostedt rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 41227a8e76a3SSteven Rostedt { 412377ae365eSSteven Rostedt rb_head_page_deactivate(cpu_buffer); 412477ae365eSSteven Rostedt 41257a8e76a3SSteven Rostedt cpu_buffer->head_page 41263adc54faSSteven Rostedt = list_entry(cpu_buffer->pages, struct buffer_page, list); 4127bf41a158SSteven Rostedt local_set(&cpu_buffer->head_page->write, 0); 4128778c55d4SSteven Rostedt local_set(&cpu_buffer->head_page->entries, 0); 4129abc9b56dSSteven Rostedt local_set(&cpu_buffer->head_page->page->commit, 0); 41307a8e76a3SSteven Rostedt 41316f807acdSSteven Rostedt cpu_buffer->head_page->read = 0; 4132bf41a158SSteven Rostedt 4133bf41a158SSteven Rostedt cpu_buffer->tail_page = cpu_buffer->head_page; 4134bf41a158SSteven Rostedt cpu_buffer->commit_page = cpu_buffer->head_page; 4135bf41a158SSteven Rostedt 4136bf41a158SSteven Rostedt INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 41375040b4b7SVaibhav Nagarnaik INIT_LIST_HEAD(&cpu_buffer->new_pages); 4138bf41a158SSteven Rostedt local_set(&cpu_buffer->reader_page->write, 0); 4139778c55d4SSteven Rostedt local_set(&cpu_buffer->reader_page->entries, 0); 4140abc9b56dSSteven Rostedt local_set(&cpu_buffer->reader_page->page->commit, 0); 41416f807acdSSteven Rostedt cpu_buffer->reader_page->read = 0; 4142d769041fSSteven Rostedt 4143c64e148aSVaibhav Nagarnaik local_set(&cpu_buffer->entries_bytes, 0); 414477ae365eSSteven Rostedt local_set(&cpu_buffer->overrun, 0); 4145884bfe89SSlava Pestov local_set(&cpu_buffer->commit_overrun, 0); 4146884bfe89SSlava Pestov local_set(&cpu_buffer->dropped_events, 0); 4147e4906effSSteven Rostedt local_set(&cpu_buffer->entries, 0); 4148fa743953SSteven Rostedt local_set(&cpu_buffer->committing, 0); 4149fa743953SSteven Rostedt local_set(&cpu_buffer->commits, 0); 415077ae365eSSteven Rostedt cpu_buffer->read = 0; 4151c64e148aSVaibhav Nagarnaik cpu_buffer->read_bytes = 0; 415269507c06SSteven Rostedt 415369507c06SSteven Rostedt cpu_buffer->write_stamp = 0; 415469507c06SSteven Rostedt cpu_buffer->read_stamp = 0; 415577ae365eSSteven Rostedt 415666a8cb95SSteven Rostedt cpu_buffer->lost_events = 0; 415766a8cb95SSteven Rostedt cpu_buffer->last_overrun = 0; 415866a8cb95SSteven Rostedt 415977ae365eSSteven Rostedt rb_head_page_activate(cpu_buffer); 41607a8e76a3SSteven Rostedt } 41617a8e76a3SSteven Rostedt 41627a8e76a3SSteven Rostedt /** 41637a8e76a3SSteven Rostedt * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer 41647a8e76a3SSteven Rostedt * @buffer: The ring buffer to reset a per cpu buffer of 41657a8e76a3SSteven Rostedt * @cpu: The CPU buffer to be reset 41667a8e76a3SSteven Rostedt */ 41677a8e76a3SSteven Rostedt void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) 41687a8e76a3SSteven Rostedt { 41697a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 41707a8e76a3SSteven Rostedt unsigned long flags; 41717a8e76a3SSteven Rostedt 41729e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 41738aabee57SSteven Rostedt return; 41747a8e76a3SSteven Rostedt 417583f40318SVaibhav Nagarnaik atomic_inc(&buffer->resize_disabled); 417641ede23eSSteven Rostedt atomic_inc(&cpu_buffer->record_disabled); 417741ede23eSSteven Rostedt 417883f40318SVaibhav Nagarnaik /* Make sure all commits have finished */ 417983f40318SVaibhav Nagarnaik synchronize_sched(); 418083f40318SVaibhav Nagarnaik 41815389f6faSThomas Gleixner raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4182f83c9d0fSSteven Rostedt 418341b6a95dSSteven Rostedt if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 418441b6a95dSSteven Rostedt goto out; 418541b6a95dSSteven Rostedt 41860199c4e6SThomas Gleixner arch_spin_lock(&cpu_buffer->lock); 41877a8e76a3SSteven Rostedt 41887a8e76a3SSteven Rostedt rb_reset_cpu(cpu_buffer); 41897a8e76a3SSteven Rostedt 41900199c4e6SThomas Gleixner arch_spin_unlock(&cpu_buffer->lock); 4191f83c9d0fSSteven Rostedt 419241b6a95dSSteven Rostedt out: 41935389f6faSThomas Gleixner raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 419441ede23eSSteven Rostedt 419541ede23eSSteven Rostedt atomic_dec(&cpu_buffer->record_disabled); 419683f40318SVaibhav Nagarnaik atomic_dec(&buffer->resize_disabled); 41977a8e76a3SSteven Rostedt } 4198c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); 41997a8e76a3SSteven Rostedt 42007a8e76a3SSteven Rostedt /** 42017a8e76a3SSteven Rostedt * ring_buffer_reset - reset a ring buffer 42027a8e76a3SSteven Rostedt * @buffer: The ring buffer to reset all cpu buffers 42037a8e76a3SSteven Rostedt */ 42047a8e76a3SSteven Rostedt void ring_buffer_reset(struct ring_buffer *buffer) 42057a8e76a3SSteven Rostedt { 42067a8e76a3SSteven Rostedt int cpu; 42077a8e76a3SSteven Rostedt 42087a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) 4209d769041fSSteven Rostedt ring_buffer_reset_cpu(buffer, cpu); 42107a8e76a3SSteven Rostedt } 4211c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset); 42127a8e76a3SSteven Rostedt 42137a8e76a3SSteven Rostedt /** 42147a8e76a3SSteven Rostedt * rind_buffer_empty - is the ring buffer empty? 42157a8e76a3SSteven Rostedt * @buffer: The ring buffer to test 42167a8e76a3SSteven Rostedt */ 42177a8e76a3SSteven Rostedt int ring_buffer_empty(struct ring_buffer *buffer) 42187a8e76a3SSteven Rostedt { 42197a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 4220d4788207SSteven Rostedt unsigned long flags; 42218d707e8eSSteven Rostedt int dolock; 42227a8e76a3SSteven Rostedt int cpu; 4223d4788207SSteven Rostedt int ret; 42247a8e76a3SSteven Rostedt 42258d707e8eSSteven Rostedt dolock = rb_ok_to_lock(); 42267a8e76a3SSteven Rostedt 42277a8e76a3SSteven Rostedt /* yes this is racy, but if you don't like the race, lock the buffer */ 42287a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 42297a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 42308d707e8eSSteven Rostedt local_irq_save(flags); 42318d707e8eSSteven Rostedt if (dolock) 42325389f6faSThomas Gleixner raw_spin_lock(&cpu_buffer->reader_lock); 4233d4788207SSteven Rostedt ret = rb_per_cpu_empty(cpu_buffer); 42348d707e8eSSteven Rostedt if (dolock) 42355389f6faSThomas Gleixner raw_spin_unlock(&cpu_buffer->reader_lock); 42368d707e8eSSteven Rostedt local_irq_restore(flags); 42378d707e8eSSteven Rostedt 4238d4788207SSteven Rostedt if (!ret) 42397a8e76a3SSteven Rostedt return 0; 42407a8e76a3SSteven Rostedt } 4241554f786eSSteven Rostedt 42427a8e76a3SSteven Rostedt return 1; 42437a8e76a3SSteven Rostedt } 4244c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty); 42457a8e76a3SSteven Rostedt 42467a8e76a3SSteven Rostedt /** 42477a8e76a3SSteven Rostedt * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? 42487a8e76a3SSteven Rostedt * @buffer: The ring buffer 42497a8e76a3SSteven Rostedt * @cpu: The CPU buffer to test 42507a8e76a3SSteven Rostedt */ 42517a8e76a3SSteven Rostedt int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) 42527a8e76a3SSteven Rostedt { 42537a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 4254d4788207SSteven Rostedt unsigned long flags; 42558d707e8eSSteven Rostedt int dolock; 42568aabee57SSteven Rostedt int ret; 42577a8e76a3SSteven Rostedt 42589e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 42598aabee57SSteven Rostedt return 1; 42607a8e76a3SSteven Rostedt 42618d707e8eSSteven Rostedt dolock = rb_ok_to_lock(); 4262554f786eSSteven Rostedt 42637a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 42648d707e8eSSteven Rostedt local_irq_save(flags); 42658d707e8eSSteven Rostedt if (dolock) 42665389f6faSThomas Gleixner raw_spin_lock(&cpu_buffer->reader_lock); 4267554f786eSSteven Rostedt ret = rb_per_cpu_empty(cpu_buffer); 42688d707e8eSSteven Rostedt if (dolock) 42695389f6faSThomas Gleixner raw_spin_unlock(&cpu_buffer->reader_lock); 42708d707e8eSSteven Rostedt local_irq_restore(flags); 4271554f786eSSteven Rostedt 4272554f786eSSteven Rostedt return ret; 42737a8e76a3SSteven Rostedt } 4274c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); 42757a8e76a3SSteven Rostedt 427685bac32cSSteven Rostedt #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 42777a8e76a3SSteven Rostedt /** 42787a8e76a3SSteven Rostedt * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 42797a8e76a3SSteven Rostedt * @buffer_a: One buffer to swap with 42807a8e76a3SSteven Rostedt * @buffer_b: The other buffer to swap with 42817a8e76a3SSteven Rostedt * 42827a8e76a3SSteven Rostedt * This function is useful for tracers that want to take a "snapshot" 42837a8e76a3SSteven Rostedt * of a CPU buffer and has another back up buffer lying around. 42847a8e76a3SSteven Rostedt * it is expected that the tracer handles the cpu buffer not being 42857a8e76a3SSteven Rostedt * used at the moment. 42867a8e76a3SSteven Rostedt */ 42877a8e76a3SSteven Rostedt int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, 42887a8e76a3SSteven Rostedt struct ring_buffer *buffer_b, int cpu) 42897a8e76a3SSteven Rostedt { 42907a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer_a; 42917a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer_b; 4292554f786eSSteven Rostedt int ret = -EINVAL; 4293554f786eSSteven Rostedt 42949e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 42959e01c1b7SRusty Russell !cpumask_test_cpu(cpu, buffer_b->cpumask)) 4296554f786eSSteven Rostedt goto out; 42977a8e76a3SSteven Rostedt 4298438ced17SVaibhav Nagarnaik cpu_buffer_a = buffer_a->buffers[cpu]; 4299438ced17SVaibhav Nagarnaik cpu_buffer_b = buffer_b->buffers[cpu]; 4300438ced17SVaibhav Nagarnaik 43017a8e76a3SSteven Rostedt /* At least make sure the two buffers are somewhat the same */ 4302438ced17SVaibhav Nagarnaik if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) 4303554f786eSSteven Rostedt goto out; 4304554f786eSSteven Rostedt 4305554f786eSSteven Rostedt ret = -EAGAIN; 43067a8e76a3SSteven Rostedt 430797b17efeSSteven Rostedt if (ring_buffer_flags != RB_BUFFERS_ON) 4308554f786eSSteven Rostedt goto out; 430997b17efeSSteven Rostedt 431097b17efeSSteven Rostedt if (atomic_read(&buffer_a->record_disabled)) 4311554f786eSSteven Rostedt goto out; 431297b17efeSSteven Rostedt 431397b17efeSSteven Rostedt if (atomic_read(&buffer_b->record_disabled)) 4314554f786eSSteven Rostedt goto out; 431597b17efeSSteven Rostedt 431697b17efeSSteven Rostedt if (atomic_read(&cpu_buffer_a->record_disabled)) 4317554f786eSSteven Rostedt goto out; 431897b17efeSSteven Rostedt 431997b17efeSSteven Rostedt if (atomic_read(&cpu_buffer_b->record_disabled)) 4320554f786eSSteven Rostedt goto out; 432197b17efeSSteven Rostedt 43227a8e76a3SSteven Rostedt /* 43237a8e76a3SSteven Rostedt * We can't do a synchronize_sched here because this 43247a8e76a3SSteven Rostedt * function can be called in atomic context. 43257a8e76a3SSteven Rostedt * Normally this will be called from the same CPU as cpu. 43267a8e76a3SSteven Rostedt * If not it's up to the caller to protect this. 43277a8e76a3SSteven Rostedt */ 43287a8e76a3SSteven Rostedt atomic_inc(&cpu_buffer_a->record_disabled); 43297a8e76a3SSteven Rostedt atomic_inc(&cpu_buffer_b->record_disabled); 43307a8e76a3SSteven Rostedt 433198277991SSteven Rostedt ret = -EBUSY; 433298277991SSteven Rostedt if (local_read(&cpu_buffer_a->committing)) 433398277991SSteven Rostedt goto out_dec; 433498277991SSteven Rostedt if (local_read(&cpu_buffer_b->committing)) 433598277991SSteven Rostedt goto out_dec; 433698277991SSteven Rostedt 43377a8e76a3SSteven Rostedt buffer_a->buffers[cpu] = cpu_buffer_b; 43387a8e76a3SSteven Rostedt buffer_b->buffers[cpu] = cpu_buffer_a; 43397a8e76a3SSteven Rostedt 43407a8e76a3SSteven Rostedt cpu_buffer_b->buffer = buffer_a; 43417a8e76a3SSteven Rostedt cpu_buffer_a->buffer = buffer_b; 43427a8e76a3SSteven Rostedt 434398277991SSteven Rostedt ret = 0; 434498277991SSteven Rostedt 434598277991SSteven Rostedt out_dec: 43467a8e76a3SSteven Rostedt atomic_dec(&cpu_buffer_a->record_disabled); 43477a8e76a3SSteven Rostedt atomic_dec(&cpu_buffer_b->record_disabled); 4348554f786eSSteven Rostedt out: 4349554f786eSSteven Rostedt return ret; 43507a8e76a3SSteven Rostedt } 4351c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); 435285bac32cSSteven Rostedt #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */ 43537a8e76a3SSteven Rostedt 43548789a9e7SSteven Rostedt /** 43558789a9e7SSteven Rostedt * ring_buffer_alloc_read_page - allocate a page to read from buffer 43568789a9e7SSteven Rostedt * @buffer: the buffer to allocate for. 4357d611851bSzhangwei(Jovi) * @cpu: the cpu buffer to allocate. 43588789a9e7SSteven Rostedt * 43598789a9e7SSteven Rostedt * This function is used in conjunction with ring_buffer_read_page. 43608789a9e7SSteven Rostedt * When reading a full page from the ring buffer, these functions 43618789a9e7SSteven Rostedt * can be used to speed up the process. The calling function should 43628789a9e7SSteven Rostedt * allocate a few pages first with this function. Then when it 43638789a9e7SSteven Rostedt * needs to get pages from the ring buffer, it passes the result 43648789a9e7SSteven Rostedt * of this function into ring_buffer_read_page, which will swap 43658789a9e7SSteven Rostedt * the page that was allocated, with the read page of the buffer. 43668789a9e7SSteven Rostedt * 43678789a9e7SSteven Rostedt * Returns: 43688789a9e7SSteven Rostedt * The page allocated, or NULL on error. 43698789a9e7SSteven Rostedt */ 43707ea59064SVaibhav Nagarnaik void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) 43718789a9e7SSteven Rostedt { 4372044fa782SSteven Rostedt struct buffer_data_page *bpage; 43737ea59064SVaibhav Nagarnaik struct page *page; 43748789a9e7SSteven Rostedt 4375d7ec4bfeSVaibhav Nagarnaik page = alloc_pages_node(cpu_to_node(cpu), 4376d7ec4bfeSVaibhav Nagarnaik GFP_KERNEL | __GFP_NORETRY, 0); 43777ea59064SVaibhav Nagarnaik if (!page) 43788789a9e7SSteven Rostedt return NULL; 43798789a9e7SSteven Rostedt 43807ea59064SVaibhav Nagarnaik bpage = page_address(page); 43818789a9e7SSteven Rostedt 4382ef7a4a16SSteven Rostedt rb_init_page(bpage); 4383ef7a4a16SSteven Rostedt 4384044fa782SSteven Rostedt return bpage; 43858789a9e7SSteven Rostedt } 4386d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); 43878789a9e7SSteven Rostedt 43888789a9e7SSteven Rostedt /** 43898789a9e7SSteven Rostedt * ring_buffer_free_read_page - free an allocated read page 43908789a9e7SSteven Rostedt * @buffer: the buffer the page was allocate for 43918789a9e7SSteven Rostedt * @data: the page to free 43928789a9e7SSteven Rostedt * 43938789a9e7SSteven Rostedt * Free a page allocated from ring_buffer_alloc_read_page. 43948789a9e7SSteven Rostedt */ 43958789a9e7SSteven Rostedt void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) 43968789a9e7SSteven Rostedt { 43978789a9e7SSteven Rostedt free_page((unsigned long)data); 43988789a9e7SSteven Rostedt } 4399d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); 44008789a9e7SSteven Rostedt 44018789a9e7SSteven Rostedt /** 44028789a9e7SSteven Rostedt * ring_buffer_read_page - extract a page from the ring buffer 44038789a9e7SSteven Rostedt * @buffer: buffer to extract from 44048789a9e7SSteven Rostedt * @data_page: the page to use allocated from ring_buffer_alloc_read_page 4405ef7a4a16SSteven Rostedt * @len: amount to extract 44068789a9e7SSteven Rostedt * @cpu: the cpu of the buffer to extract 44078789a9e7SSteven Rostedt * @full: should the extraction only happen when the page is full. 44088789a9e7SSteven Rostedt * 44098789a9e7SSteven Rostedt * This function will pull out a page from the ring buffer and consume it. 44108789a9e7SSteven Rostedt * @data_page must be the address of the variable that was returned 44118789a9e7SSteven Rostedt * from ring_buffer_alloc_read_page. This is because the page might be used 44128789a9e7SSteven Rostedt * to swap with a page in the ring buffer. 44138789a9e7SSteven Rostedt * 44148789a9e7SSteven Rostedt * for example: 4415d611851bSzhangwei(Jovi) * rpage = ring_buffer_alloc_read_page(buffer, cpu); 44168789a9e7SSteven Rostedt * if (!rpage) 44178789a9e7SSteven Rostedt * return error; 4418ef7a4a16SSteven Rostedt * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); 4419667d2412SLai Jiangshan * if (ret >= 0) 4420667d2412SLai Jiangshan * process_page(rpage, ret); 44218789a9e7SSteven Rostedt * 44228789a9e7SSteven Rostedt * When @full is set, the function will not return true unless 44238789a9e7SSteven Rostedt * the writer is off the reader page. 44248789a9e7SSteven Rostedt * 44258789a9e7SSteven Rostedt * Note: it is up to the calling functions to handle sleeps and wakeups. 44268789a9e7SSteven Rostedt * The ring buffer can be used anywhere in the kernel and can not 44278789a9e7SSteven Rostedt * blindly call wake_up. The layer that uses the ring buffer must be 44288789a9e7SSteven Rostedt * responsible for that. 44298789a9e7SSteven Rostedt * 44308789a9e7SSteven Rostedt * Returns: 4431667d2412SLai Jiangshan * >=0 if data has been transferred, returns the offset of consumed data. 4432667d2412SLai Jiangshan * <0 if no data has been transferred. 44338789a9e7SSteven Rostedt */ 44348789a9e7SSteven Rostedt int ring_buffer_read_page(struct ring_buffer *buffer, 4435ef7a4a16SSteven Rostedt void **data_page, size_t len, int cpu, int full) 44368789a9e7SSteven Rostedt { 44378789a9e7SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 44388789a9e7SSteven Rostedt struct ring_buffer_event *event; 4439044fa782SSteven Rostedt struct buffer_data_page *bpage; 4440ef7a4a16SSteven Rostedt struct buffer_page *reader; 4441ff0ff84aSSteven Rostedt unsigned long missed_events; 44428789a9e7SSteven Rostedt unsigned long flags; 4443ef7a4a16SSteven Rostedt unsigned int commit; 4444667d2412SLai Jiangshan unsigned int read; 44454f3640f8SSteven Rostedt u64 save_timestamp; 4446667d2412SLai Jiangshan int ret = -1; 44478789a9e7SSteven Rostedt 4448554f786eSSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4449554f786eSSteven Rostedt goto out; 4450554f786eSSteven Rostedt 4451474d32b6SSteven Rostedt /* 4452474d32b6SSteven Rostedt * If len is not big enough to hold the page header, then 4453474d32b6SSteven Rostedt * we can not copy anything. 4454474d32b6SSteven Rostedt */ 4455474d32b6SSteven Rostedt if (len <= BUF_PAGE_HDR_SIZE) 4456554f786eSSteven Rostedt goto out; 4457474d32b6SSteven Rostedt 4458474d32b6SSteven Rostedt len -= BUF_PAGE_HDR_SIZE; 4459474d32b6SSteven Rostedt 44608789a9e7SSteven Rostedt if (!data_page) 4461554f786eSSteven Rostedt goto out; 44628789a9e7SSteven Rostedt 4463044fa782SSteven Rostedt bpage = *data_page; 4464044fa782SSteven Rostedt if (!bpage) 4465554f786eSSteven Rostedt goto out; 44668789a9e7SSteven Rostedt 44675389f6faSThomas Gleixner raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 44688789a9e7SSteven Rostedt 4469ef7a4a16SSteven Rostedt reader = rb_get_reader_page(cpu_buffer); 4470ef7a4a16SSteven Rostedt if (!reader) 4471554f786eSSteven Rostedt goto out_unlock; 44728789a9e7SSteven Rostedt 4473ef7a4a16SSteven Rostedt event = rb_reader_event(cpu_buffer); 4474667d2412SLai Jiangshan 4475ef7a4a16SSteven Rostedt read = reader->read; 4476ef7a4a16SSteven Rostedt commit = rb_page_commit(reader); 4477ef7a4a16SSteven Rostedt 447866a8cb95SSteven Rostedt /* Check if any events were dropped */ 4479ff0ff84aSSteven Rostedt missed_events = cpu_buffer->lost_events; 448066a8cb95SSteven Rostedt 44818789a9e7SSteven Rostedt /* 4482474d32b6SSteven Rostedt * If this page has been partially read or 4483474d32b6SSteven Rostedt * if len is not big enough to read the rest of the page or 4484474d32b6SSteven Rostedt * a writer is still on the page, then 4485474d32b6SSteven Rostedt * we must copy the data from the page to the buffer. 4486474d32b6SSteven Rostedt * Otherwise, we can simply swap the page with the one passed in. 44878789a9e7SSteven Rostedt */ 4488474d32b6SSteven Rostedt if (read || (len < (commit - read)) || 4489ef7a4a16SSteven Rostedt cpu_buffer->reader_page == cpu_buffer->commit_page) { 4490667d2412SLai Jiangshan struct buffer_data_page *rpage = cpu_buffer->reader_page->page; 4491474d32b6SSteven Rostedt unsigned int rpos = read; 4492474d32b6SSteven Rostedt unsigned int pos = 0; 4493ef7a4a16SSteven Rostedt unsigned int size; 44948789a9e7SSteven Rostedt 44958789a9e7SSteven Rostedt if (full) 4496554f786eSSteven Rostedt goto out_unlock; 44978789a9e7SSteven Rostedt 4498ef7a4a16SSteven Rostedt if (len > (commit - read)) 4499ef7a4a16SSteven Rostedt len = (commit - read); 4500ef7a4a16SSteven Rostedt 450169d1b839SSteven Rostedt /* Always keep the time extend and data together */ 450269d1b839SSteven Rostedt size = rb_event_ts_length(event); 4503ef7a4a16SSteven Rostedt 4504ef7a4a16SSteven Rostedt if (len < size) 4505554f786eSSteven Rostedt goto out_unlock; 4506ef7a4a16SSteven Rostedt 45074f3640f8SSteven Rostedt /* save the current timestamp, since the user will need it */ 45084f3640f8SSteven Rostedt save_timestamp = cpu_buffer->read_stamp; 45094f3640f8SSteven Rostedt 4510ef7a4a16SSteven Rostedt /* Need to copy one event at a time */ 4511ef7a4a16SSteven Rostedt do { 4512e1e35927SDavid Sharp /* We need the size of one event, because 4513e1e35927SDavid Sharp * rb_advance_reader only advances by one event, 4514e1e35927SDavid Sharp * whereas rb_event_ts_length may include the size of 4515e1e35927SDavid Sharp * one or two events. 4516e1e35927SDavid Sharp * We have already ensured there's enough space if this 4517e1e35927SDavid Sharp * is a time extend. */ 4518e1e35927SDavid Sharp size = rb_event_length(event); 4519474d32b6SSteven Rostedt memcpy(bpage->data + pos, rpage->data + rpos, size); 4520ef7a4a16SSteven Rostedt 4521ef7a4a16SSteven Rostedt len -= size; 4522ef7a4a16SSteven Rostedt 4523ef7a4a16SSteven Rostedt rb_advance_reader(cpu_buffer); 4524474d32b6SSteven Rostedt rpos = reader->read; 4525474d32b6SSteven Rostedt pos += size; 4526ef7a4a16SSteven Rostedt 452718fab912SHuang Ying if (rpos >= commit) 452818fab912SHuang Ying break; 452918fab912SHuang Ying 4530ef7a4a16SSteven Rostedt event = rb_reader_event(cpu_buffer); 453169d1b839SSteven Rostedt /* Always keep the time extend and data together */ 453269d1b839SSteven Rostedt size = rb_event_ts_length(event); 4533e1e35927SDavid Sharp } while (len >= size); 4534667d2412SLai Jiangshan 4535667d2412SLai Jiangshan /* update bpage */ 4536ef7a4a16SSteven Rostedt local_set(&bpage->commit, pos); 45374f3640f8SSteven Rostedt bpage->time_stamp = save_timestamp; 4538ef7a4a16SSteven Rostedt 4539474d32b6SSteven Rostedt /* we copied everything to the beginning */ 4540474d32b6SSteven Rostedt read = 0; 45418789a9e7SSteven Rostedt } else { 4542afbab76aSSteven Rostedt /* update the entry counter */ 454377ae365eSSteven Rostedt cpu_buffer->read += rb_page_entries(reader); 4544c64e148aSVaibhav Nagarnaik cpu_buffer->read_bytes += BUF_PAGE_SIZE; 4545afbab76aSSteven Rostedt 45468789a9e7SSteven Rostedt /* swap the pages */ 4547044fa782SSteven Rostedt rb_init_page(bpage); 4548ef7a4a16SSteven Rostedt bpage = reader->page; 4549ef7a4a16SSteven Rostedt reader->page = *data_page; 4550ef7a4a16SSteven Rostedt local_set(&reader->write, 0); 4551778c55d4SSteven Rostedt local_set(&reader->entries, 0); 4552ef7a4a16SSteven Rostedt reader->read = 0; 4553044fa782SSteven Rostedt *data_page = bpage; 4554ff0ff84aSSteven Rostedt 4555ff0ff84aSSteven Rostedt /* 4556ff0ff84aSSteven Rostedt * Use the real_end for the data size, 4557ff0ff84aSSteven Rostedt * This gives us a chance to store the lost events 4558ff0ff84aSSteven Rostedt * on the page. 4559ff0ff84aSSteven Rostedt */ 4560ff0ff84aSSteven Rostedt if (reader->real_end) 4561ff0ff84aSSteven Rostedt local_set(&bpage->commit, reader->real_end); 4562ef7a4a16SSteven Rostedt } 4563ef7a4a16SSteven Rostedt ret = read; 4564ef7a4a16SSteven Rostedt 456566a8cb95SSteven Rostedt cpu_buffer->lost_events = 0; 45662711ca23SSteven Rostedt 45672711ca23SSteven Rostedt commit = local_read(&bpage->commit); 456866a8cb95SSteven Rostedt /* 456966a8cb95SSteven Rostedt * Set a flag in the commit field if we lost events 457066a8cb95SSteven Rostedt */ 4571ff0ff84aSSteven Rostedt if (missed_events) { 4572ff0ff84aSSteven Rostedt /* If there is room at the end of the page to save the 4573ff0ff84aSSteven Rostedt * missed events, then record it there. 4574ff0ff84aSSteven Rostedt */ 4575ff0ff84aSSteven Rostedt if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) { 4576ff0ff84aSSteven Rostedt memcpy(&bpage->data[commit], &missed_events, 4577ff0ff84aSSteven Rostedt sizeof(missed_events)); 4578ff0ff84aSSteven Rostedt local_add(RB_MISSED_STORED, &bpage->commit); 45792711ca23SSteven Rostedt commit += sizeof(missed_events); 4580ff0ff84aSSteven Rostedt } 458166a8cb95SSteven Rostedt local_add(RB_MISSED_EVENTS, &bpage->commit); 4582ff0ff84aSSteven Rostedt } 458366a8cb95SSteven Rostedt 45842711ca23SSteven Rostedt /* 45852711ca23SSteven Rostedt * This page may be off to user land. Zero it out here. 45862711ca23SSteven Rostedt */ 45872711ca23SSteven Rostedt if (commit < BUF_PAGE_SIZE) 45882711ca23SSteven Rostedt memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); 45892711ca23SSteven Rostedt 4590554f786eSSteven Rostedt out_unlock: 45915389f6faSThomas Gleixner raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 45928789a9e7SSteven Rostedt 4593554f786eSSteven Rostedt out: 45948789a9e7SSteven Rostedt return ret; 45958789a9e7SSteven Rostedt } 4596d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_read_page); 45978789a9e7SSteven Rostedt 459859222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU 459909c9e84dSFrederic Weisbecker static int rb_cpu_notify(struct notifier_block *self, 4600554f786eSSteven Rostedt unsigned long action, void *hcpu) 4601554f786eSSteven Rostedt { 4602554f786eSSteven Rostedt struct ring_buffer *buffer = 4603554f786eSSteven Rostedt container_of(self, struct ring_buffer, cpu_notify); 4604554f786eSSteven Rostedt long cpu = (long)hcpu; 4605438ced17SVaibhav Nagarnaik int cpu_i, nr_pages_same; 4606438ced17SVaibhav Nagarnaik unsigned int nr_pages; 4607554f786eSSteven Rostedt 4608554f786eSSteven Rostedt switch (action) { 4609554f786eSSteven Rostedt case CPU_UP_PREPARE: 4610554f786eSSteven Rostedt case CPU_UP_PREPARE_FROZEN: 46113f237a79SRusty Russell if (cpumask_test_cpu(cpu, buffer->cpumask)) 4612554f786eSSteven Rostedt return NOTIFY_OK; 4613554f786eSSteven Rostedt 4614438ced17SVaibhav Nagarnaik nr_pages = 0; 4615438ced17SVaibhav Nagarnaik nr_pages_same = 1; 4616438ced17SVaibhav Nagarnaik /* check if all cpu sizes are same */ 4617438ced17SVaibhav Nagarnaik for_each_buffer_cpu(buffer, cpu_i) { 4618438ced17SVaibhav Nagarnaik /* fill in the size from first enabled cpu */ 4619438ced17SVaibhav Nagarnaik if (nr_pages == 0) 4620438ced17SVaibhav Nagarnaik nr_pages = buffer->buffers[cpu_i]->nr_pages; 4621438ced17SVaibhav Nagarnaik if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { 4622438ced17SVaibhav Nagarnaik nr_pages_same = 0; 4623438ced17SVaibhav Nagarnaik break; 4624438ced17SVaibhav Nagarnaik } 4625438ced17SVaibhav Nagarnaik } 4626438ced17SVaibhav Nagarnaik /* allocate minimum pages, user can later expand it */ 4627438ced17SVaibhav Nagarnaik if (!nr_pages_same) 4628438ced17SVaibhav Nagarnaik nr_pages = 2; 4629554f786eSSteven Rostedt buffer->buffers[cpu] = 4630438ced17SVaibhav Nagarnaik rb_allocate_cpu_buffer(buffer, nr_pages, cpu); 4631554f786eSSteven Rostedt if (!buffer->buffers[cpu]) { 4632554f786eSSteven Rostedt WARN(1, "failed to allocate ring buffer on CPU %ld\n", 4633554f786eSSteven Rostedt cpu); 4634554f786eSSteven Rostedt return NOTIFY_OK; 4635554f786eSSteven Rostedt } 4636554f786eSSteven Rostedt smp_wmb(); 46373f237a79SRusty Russell cpumask_set_cpu(cpu, buffer->cpumask); 4638554f786eSSteven Rostedt break; 4639554f786eSSteven Rostedt case CPU_DOWN_PREPARE: 4640554f786eSSteven Rostedt case CPU_DOWN_PREPARE_FROZEN: 4641554f786eSSteven Rostedt /* 4642554f786eSSteven Rostedt * Do nothing. 4643554f786eSSteven Rostedt * If we were to free the buffer, then the user would 4644554f786eSSteven Rostedt * lose any trace that was in the buffer. 4645554f786eSSteven Rostedt */ 4646554f786eSSteven Rostedt break; 4647554f786eSSteven Rostedt default: 4648554f786eSSteven Rostedt break; 4649554f786eSSteven Rostedt } 4650554f786eSSteven Rostedt return NOTIFY_OK; 4651554f786eSSteven Rostedt } 4652554f786eSSteven Rostedt #endif 46536c43e554SSteven Rostedt (Red Hat) 46546c43e554SSteven Rostedt (Red Hat) #ifdef CONFIG_RING_BUFFER_STARTUP_TEST 46556c43e554SSteven Rostedt (Red Hat) /* 46566c43e554SSteven Rostedt (Red Hat) * This is a basic integrity check of the ring buffer. 46576c43e554SSteven Rostedt (Red Hat) * Late in the boot cycle this test will run when configured in. 46586c43e554SSteven Rostedt (Red Hat) * It will kick off a thread per CPU that will go into a loop 46596c43e554SSteven Rostedt (Red Hat) * writing to the per cpu ring buffer various sizes of data. 46606c43e554SSteven Rostedt (Red Hat) * Some of the data will be large items, some small. 46616c43e554SSteven Rostedt (Red Hat) * 46626c43e554SSteven Rostedt (Red Hat) * Another thread is created that goes into a spin, sending out 46636c43e554SSteven Rostedt (Red Hat) * IPIs to the other CPUs to also write into the ring buffer. 46646c43e554SSteven Rostedt (Red Hat) * this is to test the nesting ability of the buffer. 46656c43e554SSteven Rostedt (Red Hat) * 46666c43e554SSteven Rostedt (Red Hat) * Basic stats are recorded and reported. If something in the 46676c43e554SSteven Rostedt (Red Hat) * ring buffer should happen that's not expected, a big warning 46686c43e554SSteven Rostedt (Red Hat) * is displayed and all ring buffers are disabled. 46696c43e554SSteven Rostedt (Red Hat) */ 46706c43e554SSteven Rostedt (Red Hat) static struct task_struct *rb_threads[NR_CPUS] __initdata; 46716c43e554SSteven Rostedt (Red Hat) 46726c43e554SSteven Rostedt (Red Hat) struct rb_test_data { 46736c43e554SSteven Rostedt (Red Hat) struct ring_buffer *buffer; 46746c43e554SSteven Rostedt (Red Hat) unsigned long events; 46756c43e554SSteven Rostedt (Red Hat) unsigned long bytes_written; 46766c43e554SSteven Rostedt (Red Hat) unsigned long bytes_alloc; 46776c43e554SSteven Rostedt (Red Hat) unsigned long bytes_dropped; 46786c43e554SSteven Rostedt (Red Hat) unsigned long events_nested; 46796c43e554SSteven Rostedt (Red Hat) unsigned long bytes_written_nested; 46806c43e554SSteven Rostedt (Red Hat) unsigned long bytes_alloc_nested; 46816c43e554SSteven Rostedt (Red Hat) unsigned long bytes_dropped_nested; 46826c43e554SSteven Rostedt (Red Hat) int min_size_nested; 46836c43e554SSteven Rostedt (Red Hat) int max_size_nested; 46846c43e554SSteven Rostedt (Red Hat) int max_size; 46856c43e554SSteven Rostedt (Red Hat) int min_size; 46866c43e554SSteven Rostedt (Red Hat) int cpu; 46876c43e554SSteven Rostedt (Red Hat) int cnt; 46886c43e554SSteven Rostedt (Red Hat) }; 46896c43e554SSteven Rostedt (Red Hat) 46906c43e554SSteven Rostedt (Red Hat) static struct rb_test_data rb_data[NR_CPUS] __initdata; 46916c43e554SSteven Rostedt (Red Hat) 46926c43e554SSteven Rostedt (Red Hat) /* 1 meg per cpu */ 46936c43e554SSteven Rostedt (Red Hat) #define RB_TEST_BUFFER_SIZE 1048576 46946c43e554SSteven Rostedt (Red Hat) 46956c43e554SSteven Rostedt (Red Hat) static char rb_string[] __initdata = 46966c43e554SSteven Rostedt (Red Hat) "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\" 46976c43e554SSteven Rostedt (Red Hat) "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890" 46986c43e554SSteven Rostedt (Red Hat) "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv"; 46996c43e554SSteven Rostedt (Red Hat) 47006c43e554SSteven Rostedt (Red Hat) static bool rb_test_started __initdata; 47016c43e554SSteven Rostedt (Red Hat) 47026c43e554SSteven Rostedt (Red Hat) struct rb_item { 47036c43e554SSteven Rostedt (Red Hat) int size; 47046c43e554SSteven Rostedt (Red Hat) char str[]; 47056c43e554SSteven Rostedt (Red Hat) }; 47066c43e554SSteven Rostedt (Red Hat) 47076c43e554SSteven Rostedt (Red Hat) static __init int rb_write_something(struct rb_test_data *data, bool nested) 47086c43e554SSteven Rostedt (Red Hat) { 47096c43e554SSteven Rostedt (Red Hat) struct ring_buffer_event *event; 47106c43e554SSteven Rostedt (Red Hat) struct rb_item *item; 47116c43e554SSteven Rostedt (Red Hat) bool started; 47126c43e554SSteven Rostedt (Red Hat) int event_len; 47136c43e554SSteven Rostedt (Red Hat) int size; 47146c43e554SSteven Rostedt (Red Hat) int len; 47156c43e554SSteven Rostedt (Red Hat) int cnt; 47166c43e554SSteven Rostedt (Red Hat) 47176c43e554SSteven Rostedt (Red Hat) /* Have nested writes different that what is written */ 47186c43e554SSteven Rostedt (Red Hat) cnt = data->cnt + (nested ? 27 : 0); 47196c43e554SSteven Rostedt (Red Hat) 47206c43e554SSteven Rostedt (Red Hat) /* Multiply cnt by ~e, to make some unique increment */ 47216c43e554SSteven Rostedt (Red Hat) size = (data->cnt * 68 / 25) % (sizeof(rb_string) - 1); 47226c43e554SSteven Rostedt (Red Hat) 47236c43e554SSteven Rostedt (Red Hat) len = size + sizeof(struct rb_item); 47246c43e554SSteven Rostedt (Red Hat) 47256c43e554SSteven Rostedt (Red Hat) started = rb_test_started; 47266c43e554SSteven Rostedt (Red Hat) /* read rb_test_started before checking buffer enabled */ 47276c43e554SSteven Rostedt (Red Hat) smp_rmb(); 47286c43e554SSteven Rostedt (Red Hat) 47296c43e554SSteven Rostedt (Red Hat) event = ring_buffer_lock_reserve(data->buffer, len); 47306c43e554SSteven Rostedt (Red Hat) if (!event) { 47316c43e554SSteven Rostedt (Red Hat) /* Ignore dropped events before test starts. */ 47326c43e554SSteven Rostedt (Red Hat) if (started) { 47336c43e554SSteven Rostedt (Red Hat) if (nested) 47346c43e554SSteven Rostedt (Red Hat) data->bytes_dropped += len; 47356c43e554SSteven Rostedt (Red Hat) else 47366c43e554SSteven Rostedt (Red Hat) data->bytes_dropped_nested += len; 47376c43e554SSteven Rostedt (Red Hat) } 47386c43e554SSteven Rostedt (Red Hat) return len; 47396c43e554SSteven Rostedt (Red Hat) } 47406c43e554SSteven Rostedt (Red Hat) 47416c43e554SSteven Rostedt (Red Hat) event_len = ring_buffer_event_length(event); 47426c43e554SSteven Rostedt (Red Hat) 47436c43e554SSteven Rostedt (Red Hat) if (RB_WARN_ON(data->buffer, event_len < len)) 47446c43e554SSteven Rostedt (Red Hat) goto out; 47456c43e554SSteven Rostedt (Red Hat) 47466c43e554SSteven Rostedt (Red Hat) item = ring_buffer_event_data(event); 47476c43e554SSteven Rostedt (Red Hat) item->size = size; 47486c43e554SSteven Rostedt (Red Hat) memcpy(item->str, rb_string, size); 47496c43e554SSteven Rostedt (Red Hat) 47506c43e554SSteven Rostedt (Red Hat) if (nested) { 47516c43e554SSteven Rostedt (Red Hat) data->bytes_alloc_nested += event_len; 47526c43e554SSteven Rostedt (Red Hat) data->bytes_written_nested += len; 47536c43e554SSteven Rostedt (Red Hat) data->events_nested++; 47546c43e554SSteven Rostedt (Red Hat) if (!data->min_size_nested || len < data->min_size_nested) 47556c43e554SSteven Rostedt (Red Hat) data->min_size_nested = len; 47566c43e554SSteven Rostedt (Red Hat) if (len > data->max_size_nested) 47576c43e554SSteven Rostedt (Red Hat) data->max_size_nested = len; 47586c43e554SSteven Rostedt (Red Hat) } else { 47596c43e554SSteven Rostedt (Red Hat) data->bytes_alloc += event_len; 47606c43e554SSteven Rostedt (Red Hat) data->bytes_written += len; 47616c43e554SSteven Rostedt (Red Hat) data->events++; 47626c43e554SSteven Rostedt (Red Hat) if (!data->min_size || len < data->min_size) 47636c43e554SSteven Rostedt (Red Hat) data->max_size = len; 47646c43e554SSteven Rostedt (Red Hat) if (len > data->max_size) 47656c43e554SSteven Rostedt (Red Hat) data->max_size = len; 47666c43e554SSteven Rostedt (Red Hat) } 47676c43e554SSteven Rostedt (Red Hat) 47686c43e554SSteven Rostedt (Red Hat) out: 47696c43e554SSteven Rostedt (Red Hat) ring_buffer_unlock_commit(data->buffer, event); 47706c43e554SSteven Rostedt (Red Hat) 47716c43e554SSteven Rostedt (Red Hat) return 0; 47726c43e554SSteven Rostedt (Red Hat) } 47736c43e554SSteven Rostedt (Red Hat) 47746c43e554SSteven Rostedt (Red Hat) static __init int rb_test(void *arg) 47756c43e554SSteven Rostedt (Red Hat) { 47766c43e554SSteven Rostedt (Red Hat) struct rb_test_data *data = arg; 47776c43e554SSteven Rostedt (Red Hat) 47786c43e554SSteven Rostedt (Red Hat) while (!kthread_should_stop()) { 47796c43e554SSteven Rostedt (Red Hat) rb_write_something(data, false); 47806c43e554SSteven Rostedt (Red Hat) data->cnt++; 47816c43e554SSteven Rostedt (Red Hat) 47826c43e554SSteven Rostedt (Red Hat) set_current_state(TASK_INTERRUPTIBLE); 47836c43e554SSteven Rostedt (Red Hat) /* Now sleep between a min of 100-300us and a max of 1ms */ 47846c43e554SSteven Rostedt (Red Hat) usleep_range(((data->cnt % 3) + 1) * 100, 1000); 47856c43e554SSteven Rostedt (Red Hat) } 47866c43e554SSteven Rostedt (Red Hat) 47876c43e554SSteven Rostedt (Red Hat) return 0; 47886c43e554SSteven Rostedt (Red Hat) } 47896c43e554SSteven Rostedt (Red Hat) 47906c43e554SSteven Rostedt (Red Hat) static __init void rb_ipi(void *ignore) 47916c43e554SSteven Rostedt (Red Hat) { 47926c43e554SSteven Rostedt (Red Hat) struct rb_test_data *data; 47936c43e554SSteven Rostedt (Red Hat) int cpu = smp_processor_id(); 47946c43e554SSteven Rostedt (Red Hat) 47956c43e554SSteven Rostedt (Red Hat) data = &rb_data[cpu]; 47966c43e554SSteven Rostedt (Red Hat) rb_write_something(data, true); 47976c43e554SSteven Rostedt (Red Hat) } 47986c43e554SSteven Rostedt (Red Hat) 47996c43e554SSteven Rostedt (Red Hat) static __init int rb_hammer_test(void *arg) 48006c43e554SSteven Rostedt (Red Hat) { 48016c43e554SSteven Rostedt (Red Hat) while (!kthread_should_stop()) { 48026c43e554SSteven Rostedt (Red Hat) 48036c43e554SSteven Rostedt (Red Hat) /* Send an IPI to all cpus to write data! */ 48046c43e554SSteven Rostedt (Red Hat) smp_call_function(rb_ipi, NULL, 1); 48056c43e554SSteven Rostedt (Red Hat) /* No sleep, but for non preempt, let others run */ 48066c43e554SSteven Rostedt (Red Hat) schedule(); 48076c43e554SSteven Rostedt (Red Hat) } 48086c43e554SSteven Rostedt (Red Hat) 48096c43e554SSteven Rostedt (Red Hat) return 0; 48106c43e554SSteven Rostedt (Red Hat) } 48116c43e554SSteven Rostedt (Red Hat) 48126c43e554SSteven Rostedt (Red Hat) static __init int test_ringbuffer(void) 48136c43e554SSteven Rostedt (Red Hat) { 48146c43e554SSteven Rostedt (Red Hat) struct task_struct *rb_hammer; 48156c43e554SSteven Rostedt (Red Hat) struct ring_buffer *buffer; 48166c43e554SSteven Rostedt (Red Hat) int cpu; 48176c43e554SSteven Rostedt (Red Hat) int ret = 0; 48186c43e554SSteven Rostedt (Red Hat) 48196c43e554SSteven Rostedt (Red Hat) pr_info("Running ring buffer tests...\n"); 48206c43e554SSteven Rostedt (Red Hat) 48216c43e554SSteven Rostedt (Red Hat) buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE); 48226c43e554SSteven Rostedt (Red Hat) if (WARN_ON(!buffer)) 48236c43e554SSteven Rostedt (Red Hat) return 0; 48246c43e554SSteven Rostedt (Red Hat) 48256c43e554SSteven Rostedt (Red Hat) /* Disable buffer so that threads can't write to it yet */ 48266c43e554SSteven Rostedt (Red Hat) ring_buffer_record_off(buffer); 48276c43e554SSteven Rostedt (Red Hat) 48286c43e554SSteven Rostedt (Red Hat) for_each_online_cpu(cpu) { 48296c43e554SSteven Rostedt (Red Hat) rb_data[cpu].buffer = buffer; 48306c43e554SSteven Rostedt (Red Hat) rb_data[cpu].cpu = cpu; 48316c43e554SSteven Rostedt (Red Hat) rb_data[cpu].cnt = cpu; 48326c43e554SSteven Rostedt (Red Hat) rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu], 48336c43e554SSteven Rostedt (Red Hat) "rbtester/%d", cpu); 48346c43e554SSteven Rostedt (Red Hat) if (WARN_ON(!rb_threads[cpu])) { 48356c43e554SSteven Rostedt (Red Hat) pr_cont("FAILED\n"); 48366c43e554SSteven Rostedt (Red Hat) ret = -1; 48376c43e554SSteven Rostedt (Red Hat) goto out_free; 48386c43e554SSteven Rostedt (Red Hat) } 48396c43e554SSteven Rostedt (Red Hat) 48406c43e554SSteven Rostedt (Red Hat) kthread_bind(rb_threads[cpu], cpu); 48416c43e554SSteven Rostedt (Red Hat) wake_up_process(rb_threads[cpu]); 48426c43e554SSteven Rostedt (Red Hat) } 48436c43e554SSteven Rostedt (Red Hat) 48446c43e554SSteven Rostedt (Red Hat) /* Now create the rb hammer! */ 48456c43e554SSteven Rostedt (Red Hat) rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer"); 48466c43e554SSteven Rostedt (Red Hat) if (WARN_ON(!rb_hammer)) { 48476c43e554SSteven Rostedt (Red Hat) pr_cont("FAILED\n"); 48486c43e554SSteven Rostedt (Red Hat) ret = -1; 48496c43e554SSteven Rostedt (Red Hat) goto out_free; 48506c43e554SSteven Rostedt (Red Hat) } 48516c43e554SSteven Rostedt (Red Hat) 48526c43e554SSteven Rostedt (Red Hat) ring_buffer_record_on(buffer); 48536c43e554SSteven Rostedt (Red Hat) /* 48546c43e554SSteven Rostedt (Red Hat) * Show buffer is enabled before setting rb_test_started. 48556c43e554SSteven Rostedt (Red Hat) * Yes there's a small race window where events could be 48566c43e554SSteven Rostedt (Red Hat) * dropped and the thread wont catch it. But when a ring 48576c43e554SSteven Rostedt (Red Hat) * buffer gets enabled, there will always be some kind of 48586c43e554SSteven Rostedt (Red Hat) * delay before other CPUs see it. Thus, we don't care about 48596c43e554SSteven Rostedt (Red Hat) * those dropped events. We care about events dropped after 48606c43e554SSteven Rostedt (Red Hat) * the threads see that the buffer is active. 48616c43e554SSteven Rostedt (Red Hat) */ 48626c43e554SSteven Rostedt (Red Hat) smp_wmb(); 48636c43e554SSteven Rostedt (Red Hat) rb_test_started = true; 48646c43e554SSteven Rostedt (Red Hat) 48656c43e554SSteven Rostedt (Red Hat) set_current_state(TASK_INTERRUPTIBLE); 48666c43e554SSteven Rostedt (Red Hat) /* Just run for 10 seconds */; 48676c43e554SSteven Rostedt (Red Hat) schedule_timeout(10 * HZ); 48686c43e554SSteven Rostedt (Red Hat) 48696c43e554SSteven Rostedt (Red Hat) kthread_stop(rb_hammer); 48706c43e554SSteven Rostedt (Red Hat) 48716c43e554SSteven Rostedt (Red Hat) out_free: 48726c43e554SSteven Rostedt (Red Hat) for_each_online_cpu(cpu) { 48736c43e554SSteven Rostedt (Red Hat) if (!rb_threads[cpu]) 48746c43e554SSteven Rostedt (Red Hat) break; 48756c43e554SSteven Rostedt (Red Hat) kthread_stop(rb_threads[cpu]); 48766c43e554SSteven Rostedt (Red Hat) } 48776c43e554SSteven Rostedt (Red Hat) if (ret) { 48786c43e554SSteven Rostedt (Red Hat) ring_buffer_free(buffer); 48796c43e554SSteven Rostedt (Red Hat) return ret; 48806c43e554SSteven Rostedt (Red Hat) } 48816c43e554SSteven Rostedt (Red Hat) 48826c43e554SSteven Rostedt (Red Hat) /* Report! */ 48836c43e554SSteven Rostedt (Red Hat) pr_info("finished\n"); 48846c43e554SSteven Rostedt (Red Hat) for_each_online_cpu(cpu) { 48856c43e554SSteven Rostedt (Red Hat) struct ring_buffer_event *event; 48866c43e554SSteven Rostedt (Red Hat) struct rb_test_data *data = &rb_data[cpu]; 48876c43e554SSteven Rostedt (Red Hat) struct rb_item *item; 48886c43e554SSteven Rostedt (Red Hat) unsigned long total_events; 48896c43e554SSteven Rostedt (Red Hat) unsigned long total_dropped; 48906c43e554SSteven Rostedt (Red Hat) unsigned long total_written; 48916c43e554SSteven Rostedt (Red Hat) unsigned long total_alloc; 48926c43e554SSteven Rostedt (Red Hat) unsigned long total_read = 0; 48936c43e554SSteven Rostedt (Red Hat) unsigned long total_size = 0; 48946c43e554SSteven Rostedt (Red Hat) unsigned long total_len = 0; 48956c43e554SSteven Rostedt (Red Hat) unsigned long total_lost = 0; 48966c43e554SSteven Rostedt (Red Hat) unsigned long lost; 48976c43e554SSteven Rostedt (Red Hat) int big_event_size; 48986c43e554SSteven Rostedt (Red Hat) int small_event_size; 48996c43e554SSteven Rostedt (Red Hat) 49006c43e554SSteven Rostedt (Red Hat) ret = -1; 49016c43e554SSteven Rostedt (Red Hat) 49026c43e554SSteven Rostedt (Red Hat) total_events = data->events + data->events_nested; 49036c43e554SSteven Rostedt (Red Hat) total_written = data->bytes_written + data->bytes_written_nested; 49046c43e554SSteven Rostedt (Red Hat) total_alloc = data->bytes_alloc + data->bytes_alloc_nested; 49056c43e554SSteven Rostedt (Red Hat) total_dropped = data->bytes_dropped + data->bytes_dropped_nested; 49066c43e554SSteven Rostedt (Red Hat) 49076c43e554SSteven Rostedt (Red Hat) big_event_size = data->max_size + data->max_size_nested; 49086c43e554SSteven Rostedt (Red Hat) small_event_size = data->min_size + data->min_size_nested; 49096c43e554SSteven Rostedt (Red Hat) 49106c43e554SSteven Rostedt (Red Hat) pr_info("CPU %d:\n", cpu); 49116c43e554SSteven Rostedt (Red Hat) pr_info(" events: %ld\n", total_events); 49126c43e554SSteven Rostedt (Red Hat) pr_info(" dropped bytes: %ld\n", total_dropped); 49136c43e554SSteven Rostedt (Red Hat) pr_info(" alloced bytes: %ld\n", total_alloc); 49146c43e554SSteven Rostedt (Red Hat) pr_info(" written bytes: %ld\n", total_written); 49156c43e554SSteven Rostedt (Red Hat) pr_info(" biggest event: %d\n", big_event_size); 49166c43e554SSteven Rostedt (Red Hat) pr_info(" smallest event: %d\n", small_event_size); 49176c43e554SSteven Rostedt (Red Hat) 49186c43e554SSteven Rostedt (Red Hat) if (RB_WARN_ON(buffer, total_dropped)) 49196c43e554SSteven Rostedt (Red Hat) break; 49206c43e554SSteven Rostedt (Red Hat) 49216c43e554SSteven Rostedt (Red Hat) ret = 0; 49226c43e554SSteven Rostedt (Red Hat) 49236c43e554SSteven Rostedt (Red Hat) while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { 49246c43e554SSteven Rostedt (Red Hat) total_lost += lost; 49256c43e554SSteven Rostedt (Red Hat) item = ring_buffer_event_data(event); 49266c43e554SSteven Rostedt (Red Hat) total_len += ring_buffer_event_length(event); 49276c43e554SSteven Rostedt (Red Hat) total_size += item->size + sizeof(struct rb_item); 49286c43e554SSteven Rostedt (Red Hat) if (memcmp(&item->str[0], rb_string, item->size) != 0) { 49296c43e554SSteven Rostedt (Red Hat) pr_info("FAILED!\n"); 49306c43e554SSteven Rostedt (Red Hat) pr_info("buffer had: %.*s\n", item->size, item->str); 49316c43e554SSteven Rostedt (Red Hat) pr_info("expected: %.*s\n", item->size, rb_string); 49326c43e554SSteven Rostedt (Red Hat) RB_WARN_ON(buffer, 1); 49336c43e554SSteven Rostedt (Red Hat) ret = -1; 49346c43e554SSteven Rostedt (Red Hat) break; 49356c43e554SSteven Rostedt (Red Hat) } 49366c43e554SSteven Rostedt (Red Hat) total_read++; 49376c43e554SSteven Rostedt (Red Hat) } 49386c43e554SSteven Rostedt (Red Hat) if (ret) 49396c43e554SSteven Rostedt (Red Hat) break; 49406c43e554SSteven Rostedt (Red Hat) 49416c43e554SSteven Rostedt (Red Hat) ret = -1; 49426c43e554SSteven Rostedt (Red Hat) 49436c43e554SSteven Rostedt (Red Hat) pr_info(" read events: %ld\n", total_read); 49446c43e554SSteven Rostedt (Red Hat) pr_info(" lost events: %ld\n", total_lost); 49456c43e554SSteven Rostedt (Red Hat) pr_info(" total events: %ld\n", total_lost + total_read); 49466c43e554SSteven Rostedt (Red Hat) pr_info(" recorded len bytes: %ld\n", total_len); 49476c43e554SSteven Rostedt (Red Hat) pr_info(" recorded size bytes: %ld\n", total_size); 49486c43e554SSteven Rostedt (Red Hat) if (total_lost) 49496c43e554SSteven Rostedt (Red Hat) pr_info(" With dropped events, record len and size may not match\n" 49506c43e554SSteven Rostedt (Red Hat) " alloced and written from above\n"); 49516c43e554SSteven Rostedt (Red Hat) if (!total_lost) { 49526c43e554SSteven Rostedt (Red Hat) if (RB_WARN_ON(buffer, total_len != total_alloc || 49536c43e554SSteven Rostedt (Red Hat) total_size != total_written)) 49546c43e554SSteven Rostedt (Red Hat) break; 49556c43e554SSteven Rostedt (Red Hat) } 49566c43e554SSteven Rostedt (Red Hat) if (RB_WARN_ON(buffer, total_lost + total_read != total_events)) 49576c43e554SSteven Rostedt (Red Hat) break; 49586c43e554SSteven Rostedt (Red Hat) 49596c43e554SSteven Rostedt (Red Hat) ret = 0; 49606c43e554SSteven Rostedt (Red Hat) } 49616c43e554SSteven Rostedt (Red Hat) if (!ret) 49626c43e554SSteven Rostedt (Red Hat) pr_info("Ring buffer PASSED!\n"); 49636c43e554SSteven Rostedt (Red Hat) 49646c43e554SSteven Rostedt (Red Hat) ring_buffer_free(buffer); 49656c43e554SSteven Rostedt (Red Hat) return 0; 49666c43e554SSteven Rostedt (Red Hat) } 49676c43e554SSteven Rostedt (Red Hat) 49686c43e554SSteven Rostedt (Red Hat) late_initcall(test_ringbuffer); 49696c43e554SSteven Rostedt (Red Hat) #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */ 4970