/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <[email protected]>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include <asm/local.h>
#include "trace.h"

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "# compressed entry header\n");
	ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
	ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
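 *
 * As an illustrative sketch (simplified from the actual reserve path),
 * a write only proceeds when all three layers agree:
 *
 *	if ((ring_buffer_flags & RB_BUFFERS_ON) &&
 *	    !atomic_read(&buffer->record_disabled) &&
 *	    !atomic_read(&cpu_buffer->record_disabled))
 *		... the event may be reserved and written ...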
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
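 *
 * For example, an 8 byte time extend followed by a data event whose
 * rb_event_length() is 12 makes this return 20.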
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
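
/*
 * Illustrative sketch (not part of this file): how a consumer might use
 * the two accessors above on an event pulled out of the buffer.
 * process_payload() is a hypothetical callback, not a kernel API.
 *
 *	void example_consume(struct ring_buffer_event *event)
 *	{
 *		unsigned len = ring_buffer_event_length(event);
 *		void *data   = ring_buffer_event_data(event);
 *
 *		process_payload(data, len);
 *	}
 *
 * Both accessors skip a leading time extend, so the consumer always
 * sees the data event that follows it.
 */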

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
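 * For example, a page whose commit index is 100 reports
 * 100 + BUF_PAGE_HDR_SIZE bytes.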
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)sizeof(field.time_stamp),
			       (unsigned int)is_signed_type(u64));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit),
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       1,
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE,
			       (unsigned int)is_signed_type(char));

	return ret;
}

/*
 * If head_page == tail_page && head == tail, then the buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	struct ring_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	unsigned int			nr_pages;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	unsigned long			read;
	unsigned long			read_bytes;
	u64				write_stamp;
	u64				read_stamp;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	int				nr_pages_to_update;
	struct list_head		new_pages;	/* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;
};

struct ring_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, so they
 * only need to worry about interrupts. Reads, however, can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
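 *
 * Since struct buffer_page is cache line aligned, the low bits of its
 * address are always zero, which is what frees them up for flags. For
 * example, a next pointer that would normally read 0x...1000 is stored
 * as 0x...1001 while it points to the head page.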
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *    ^                           ^ |
 *    |          +-----+          | |
 *    +----------|  R  |----------+ |
 *               |     |<-----------+
 *               +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL


#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non-zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static int rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
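 *
 * E.g. a ->next value of 0x...1000 or 0x...1002 becomes 0x...1001:
 * the UPDATE bit is cleared and the HEAD bit is set.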
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(cpu_buffer, &page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}

/*
 * rb_tail_page_update - move the tail page forward
 *
 * Returns 1 if it moved the tail page, 0 if someone else did.
 */
static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page *tail_page,
			       struct buffer_page *next_page)
{
	struct buffer_page *old_tail;
	unsigned long old_entries;
	unsigned long old_write;
	int ret = 0;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
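	 *
	 * For example, with RB_WRITE_MASK == 0xfffff, a write field of
	 * (2 << 20) | 0x10 means two nested updaters have claimed the
	 * page and the write index on it is 0x10.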
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == cpu_buffer->tail_page) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit:
		 * it can only increment when a commit takes place. But that
		 * only happens in the outermost nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		old_tail = cmpxchg(&cpu_buffer->tail_page,
				   tail_page, next_page);

		if (old_tail == tail_page)
			ret = 1;
	}

	return ret;
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
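 *
 * Returns 0 if the pages are consistent, or -1 if a corrupted link is
 * found (RB_WARN_ON also disables further recording in that case).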
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	/* Reset the head page if it exists */
	if (cpu_buffer->head_page)
		rb_set_head_page(cpu_buffer);

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}

static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
{
	int i;
	struct buffer_page *bpage, *tmp;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		/*
		 * The __GFP_NORETRY flag makes sure that the allocation
		 * fails gracefully without invoking the OOM killer, so
		 * the system is not destabilized.
		 */
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL | __GFP_NORETRY,
				     cpu_to_node(cpu));
		if (!bpage)
			goto free_pages;

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);
	}

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}

	return -ENOMEM;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
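	 *
	 * So grab the first allocated page as the entry point, then
	 * unlink the temporary list head: list_del(&pages) simply
	 * re-links pages.next and pages.prev around it, closing the
	 * circle.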
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu, nr_pages;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
11653bf832ceSFrederic Weisbecker * In that off case, we need to allocate for all possible cpus. 11663bf832ceSFrederic Weisbecker */ 11673bf832ceSFrederic Weisbecker #ifdef CONFIG_HOTPLUG_CPU 1168554f786eSSteven Rostedt get_online_cpus(); 1169554f786eSSteven Rostedt cpumask_copy(buffer->cpumask, cpu_online_mask); 11703bf832ceSFrederic Weisbecker #else 11713bf832ceSFrederic Weisbecker cpumask_copy(buffer->cpumask, cpu_possible_mask); 11723bf832ceSFrederic Weisbecker #endif 11737a8e76a3SSteven Rostedt buffer->cpus = nr_cpu_ids; 11747a8e76a3SSteven Rostedt 11757a8e76a3SSteven Rostedt bsize = sizeof(void *) * nr_cpu_ids; 11767a8e76a3SSteven Rostedt buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), 11777a8e76a3SSteven Rostedt GFP_KERNEL); 11787a8e76a3SSteven Rostedt if (!buffer->buffers) 11799e01c1b7SRusty Russell goto fail_free_cpumask; 11807a8e76a3SSteven Rostedt 11817a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 11827a8e76a3SSteven Rostedt buffer->buffers[cpu] = 1183438ced17SVaibhav Nagarnaik rb_allocate_cpu_buffer(buffer, nr_pages, cpu); 11847a8e76a3SSteven Rostedt if (!buffer->buffers[cpu]) 11857a8e76a3SSteven Rostedt goto fail_free_buffers; 11867a8e76a3SSteven Rostedt } 11877a8e76a3SSteven Rostedt 118859222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU 1189554f786eSSteven Rostedt buffer->cpu_notify.notifier_call = rb_cpu_notify; 1190554f786eSSteven Rostedt buffer->cpu_notify.priority = 0; 1191554f786eSSteven Rostedt register_cpu_notifier(&buffer->cpu_notify); 1192554f786eSSteven Rostedt #endif 1193554f786eSSteven Rostedt 1194554f786eSSteven Rostedt put_online_cpus(); 11957a8e76a3SSteven Rostedt mutex_init(&buffer->mutex); 11967a8e76a3SSteven Rostedt 11977a8e76a3SSteven Rostedt return buffer; 11987a8e76a3SSteven Rostedt 11997a8e76a3SSteven Rostedt fail_free_buffers: 12007a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 12017a8e76a3SSteven Rostedt if (buffer->buffers[cpu]) 12027a8e76a3SSteven Rostedt rb_free_cpu_buffer(buffer->buffers[cpu]); 12037a8e76a3SSteven Rostedt } 12047a8e76a3SSteven Rostedt kfree(buffer->buffers); 12057a8e76a3SSteven Rostedt 12069e01c1b7SRusty Russell fail_free_cpumask: 12079e01c1b7SRusty Russell free_cpumask_var(buffer->cpumask); 1208554f786eSSteven Rostedt put_online_cpus(); 12099e01c1b7SRusty Russell 12107a8e76a3SSteven Rostedt fail_free_buffer: 12117a8e76a3SSteven Rostedt kfree(buffer); 12127a8e76a3SSteven Rostedt return NULL; 12137a8e76a3SSteven Rostedt } 12141f8a6a10SPeter Zijlstra EXPORT_SYMBOL_GPL(__ring_buffer_alloc); 12157a8e76a3SSteven Rostedt 12167a8e76a3SSteven Rostedt /** 12177a8e76a3SSteven Rostedt * ring_buffer_free - free a ring buffer. 12187a8e76a3SSteven Rostedt * @buffer: the buffer to free. 
12197a8e76a3SSteven Rostedt */ 12207a8e76a3SSteven Rostedt void 12217a8e76a3SSteven Rostedt ring_buffer_free(struct ring_buffer *buffer) 12227a8e76a3SSteven Rostedt { 12237a8e76a3SSteven Rostedt int cpu; 12247a8e76a3SSteven Rostedt 1225554f786eSSteven Rostedt get_online_cpus(); 1226554f786eSSteven Rostedt 122759222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU 1228554f786eSSteven Rostedt unregister_cpu_notifier(&buffer->cpu_notify); 1229554f786eSSteven Rostedt #endif 1230554f786eSSteven Rostedt 12317a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) 12327a8e76a3SSteven Rostedt rb_free_cpu_buffer(buffer->buffers[cpu]); 12337a8e76a3SSteven Rostedt 1234554f786eSSteven Rostedt put_online_cpus(); 1235554f786eSSteven Rostedt 1236bd3f0221SEric Dumazet kfree(buffer->buffers); 12379e01c1b7SRusty Russell free_cpumask_var(buffer->cpumask); 12389e01c1b7SRusty Russell 12397a8e76a3SSteven Rostedt kfree(buffer); 12407a8e76a3SSteven Rostedt } 1241c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_free); 12427a8e76a3SSteven Rostedt 124337886f6aSSteven Rostedt void ring_buffer_set_clock(struct ring_buffer *buffer, 124437886f6aSSteven Rostedt u64 (*clock)(void)) 124537886f6aSSteven Rostedt { 124637886f6aSSteven Rostedt buffer->clock = clock; 124737886f6aSSteven Rostedt } 124837886f6aSSteven Rostedt 12497a8e76a3SSteven Rostedt static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); 12507a8e76a3SSteven Rostedt 125183f40318SVaibhav Nagarnaik static inline unsigned long rb_page_entries(struct buffer_page *bpage) 12527a8e76a3SSteven Rostedt { 125383f40318SVaibhav Nagarnaik return local_read(&bpage->entries) & RB_WRITE_MASK; 125483f40318SVaibhav Nagarnaik } 125583f40318SVaibhav Nagarnaik 125683f40318SVaibhav Nagarnaik static inline unsigned long rb_page_write(struct buffer_page *bpage) 125783f40318SVaibhav Nagarnaik { 125883f40318SVaibhav Nagarnaik return local_read(&bpage->write) & RB_WRITE_MASK; 125983f40318SVaibhav Nagarnaik } 126083f40318SVaibhav Nagarnaik 12615040b4b7SVaibhav Nagarnaik static int 126283f40318SVaibhav Nagarnaik rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages) 126383f40318SVaibhav Nagarnaik { 126483f40318SVaibhav Nagarnaik struct list_head *tail_page, *to_remove, *next_page; 126583f40318SVaibhav Nagarnaik struct buffer_page *to_remove_page, *tmp_iter_page; 126683f40318SVaibhav Nagarnaik struct buffer_page *last_page, *first_page; 126783f40318SVaibhav Nagarnaik unsigned int nr_removed; 126883f40318SVaibhav Nagarnaik unsigned long head_bit; 126983f40318SVaibhav Nagarnaik int page_entries; 127083f40318SVaibhav Nagarnaik 127183f40318SVaibhav Nagarnaik head_bit = 0; 12727a8e76a3SSteven Rostedt 12735389f6faSThomas Gleixner raw_spin_lock_irq(&cpu_buffer->reader_lock); 127483f40318SVaibhav Nagarnaik atomic_inc(&cpu_buffer->record_disabled); 127583f40318SVaibhav Nagarnaik /* 127683f40318SVaibhav Nagarnaik * We don't race with the readers since we have acquired the reader 127783f40318SVaibhav Nagarnaik * lock. We also don't race with writers after disabling recording. 127883f40318SVaibhav Nagarnaik * This makes it easy to figure out the first and the last page to be 127983f40318SVaibhav Nagarnaik * removed from the list. We unlink all the pages in between including 128083f40318SVaibhav Nagarnaik * the first and last pages. This is done in a busy loop so that we 128183f40318SVaibhav Nagarnaik * lose the least number of traces. 128283f40318SVaibhav Nagarnaik * The pages are freed after we restart recording and unlock readers. 
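 *
 * Sketch of the ordering used here (illustration only):
 *
 *	raw_spin_lock_irq(&cpu_buffer->reader_lock);
 *	atomic_inc(&cpu_buffer->record_disabled);
 *	... unlink first_page through last_page from the ring ...
 *	atomic_dec(&cpu_buffer->record_disabled);
 *	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
 *	... then free the unlinked pages, outside both the lock
 *	    and the recording pause ...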
128383f40318SVaibhav Nagarnaik */ 128483f40318SVaibhav Nagarnaik tail_page = &cpu_buffer->tail_page->list; 128577ae365eSSteven Rostedt 128683f40318SVaibhav Nagarnaik /* 128783f40318SVaibhav Nagarnaik * tail page might be on reader page, we remove the next page 128883f40318SVaibhav Nagarnaik * from the ring buffer 128983f40318SVaibhav Nagarnaik */ 129083f40318SVaibhav Nagarnaik if (cpu_buffer->tail_page == cpu_buffer->reader_page) 129183f40318SVaibhav Nagarnaik tail_page = rb_list_head(tail_page->next); 129283f40318SVaibhav Nagarnaik to_remove = tail_page; 129383f40318SVaibhav Nagarnaik 129483f40318SVaibhav Nagarnaik /* start of pages to remove */ 129583f40318SVaibhav Nagarnaik first_page = list_entry(rb_list_head(to_remove->next), 129683f40318SVaibhav Nagarnaik struct buffer_page, list); 129783f40318SVaibhav Nagarnaik 129883f40318SVaibhav Nagarnaik for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) { 129983f40318SVaibhav Nagarnaik to_remove = rb_list_head(to_remove)->next; 130083f40318SVaibhav Nagarnaik head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD; 13017a8e76a3SSteven Rostedt } 13027a8e76a3SSteven Rostedt 130383f40318SVaibhav Nagarnaik next_page = rb_list_head(to_remove)->next; 13047a8e76a3SSteven Rostedt 130583f40318SVaibhav Nagarnaik /* 130683f40318SVaibhav Nagarnaik * Now we remove all pages between tail_page and next_page. 130783f40318SVaibhav Nagarnaik * Make sure that we have head_bit value preserved for the 130883f40318SVaibhav Nagarnaik * next page 130983f40318SVaibhav Nagarnaik */ 131083f40318SVaibhav Nagarnaik tail_page->next = (struct list_head *)((unsigned long)next_page | 131183f40318SVaibhav Nagarnaik head_bit); 131283f40318SVaibhav Nagarnaik next_page = rb_list_head(next_page); 131383f40318SVaibhav Nagarnaik next_page->prev = tail_page; 131483f40318SVaibhav Nagarnaik 131583f40318SVaibhav Nagarnaik /* make sure pages points to a valid page in the ring buffer */ 131683f40318SVaibhav Nagarnaik cpu_buffer->pages = next_page; 131783f40318SVaibhav Nagarnaik 131883f40318SVaibhav Nagarnaik /* update head page */ 131983f40318SVaibhav Nagarnaik if (head_bit) 132083f40318SVaibhav Nagarnaik cpu_buffer->head_page = list_entry(next_page, 132183f40318SVaibhav Nagarnaik struct buffer_page, list); 132283f40318SVaibhav Nagarnaik 132383f40318SVaibhav Nagarnaik /* 132483f40318SVaibhav Nagarnaik * change read pointer to make sure any read iterators reset 132583f40318SVaibhav Nagarnaik * themselves 132683f40318SVaibhav Nagarnaik */ 132783f40318SVaibhav Nagarnaik cpu_buffer->read = 0; 132883f40318SVaibhav Nagarnaik 132983f40318SVaibhav Nagarnaik /* pages are removed, resume tracing and then free the pages */ 133083f40318SVaibhav Nagarnaik atomic_dec(&cpu_buffer->record_disabled); 13315389f6faSThomas Gleixner raw_spin_unlock_irq(&cpu_buffer->reader_lock); 133283f40318SVaibhav Nagarnaik 133383f40318SVaibhav Nagarnaik RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); 133483f40318SVaibhav Nagarnaik 133583f40318SVaibhav Nagarnaik /* last buffer page to remove */ 133683f40318SVaibhav Nagarnaik last_page = list_entry(rb_list_head(to_remove), struct buffer_page, 133783f40318SVaibhav Nagarnaik list); 133883f40318SVaibhav Nagarnaik tmp_iter_page = first_page; 133983f40318SVaibhav Nagarnaik 134083f40318SVaibhav Nagarnaik do { 134183f40318SVaibhav Nagarnaik to_remove_page = tmp_iter_page; 134283f40318SVaibhav Nagarnaik rb_inc_page(cpu_buffer, &tmp_iter_page); 134383f40318SVaibhav Nagarnaik 134483f40318SVaibhav Nagarnaik /* update the counters */ 134583f40318SVaibhav Nagarnaik page_entries = 
rb_page_entries(to_remove_page); 134683f40318SVaibhav Nagarnaik if (page_entries) { 134783f40318SVaibhav Nagarnaik /* 134883f40318SVaibhav Nagarnaik * If something was added to this page, it was full 134983f40318SVaibhav Nagarnaik * since it is not the tail page. So we deduct the 135083f40318SVaibhav Nagarnaik * bytes consumed in ring buffer from here. 135148fdc72fSVaibhav Nagarnaik * Increment overrun to account for the lost events. 135283f40318SVaibhav Nagarnaik */ 135348fdc72fSVaibhav Nagarnaik local_add(page_entries, &cpu_buffer->overrun); 135483f40318SVaibhav Nagarnaik local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 135583f40318SVaibhav Nagarnaik } 135683f40318SVaibhav Nagarnaik 135783f40318SVaibhav Nagarnaik /* 135883f40318SVaibhav Nagarnaik * We have already removed references to this list item, just 135983f40318SVaibhav Nagarnaik * free up the buffer_page and its page 136083f40318SVaibhav Nagarnaik */ 136183f40318SVaibhav Nagarnaik free_buffer_page(to_remove_page); 136283f40318SVaibhav Nagarnaik nr_removed--; 136383f40318SVaibhav Nagarnaik 136483f40318SVaibhav Nagarnaik } while (to_remove_page != last_page); 136583f40318SVaibhav Nagarnaik 136683f40318SVaibhav Nagarnaik RB_WARN_ON(cpu_buffer, nr_removed); 13675040b4b7SVaibhav Nagarnaik 13685040b4b7SVaibhav Nagarnaik return nr_removed == 0; 13697a8e76a3SSteven Rostedt } 13707a8e76a3SSteven Rostedt 13715040b4b7SVaibhav Nagarnaik static int 13725040b4b7SVaibhav Nagarnaik rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) 13737a8e76a3SSteven Rostedt { 13745040b4b7SVaibhav Nagarnaik struct list_head *pages = &cpu_buffer->new_pages; 13755040b4b7SVaibhav Nagarnaik int retries, success; 13767a8e76a3SSteven Rostedt 13775389f6faSThomas Gleixner raw_spin_lock_irq(&cpu_buffer->reader_lock); 13785040b4b7SVaibhav Nagarnaik /* 13795040b4b7SVaibhav Nagarnaik * We are holding the reader lock, so the reader page won't be swapped 13805040b4b7SVaibhav Nagarnaik * in the ring buffer. Now we are racing with the writer trying to 13815040b4b7SVaibhav Nagarnaik * move head page and the tail page. 13825040b4b7SVaibhav Nagarnaik * We are going to adapt the reader page update process where: 13835040b4b7SVaibhav Nagarnaik * 1. We first splice the start and end of list of new pages between 13845040b4b7SVaibhav Nagarnaik * the head page and its previous page. 13855040b4b7SVaibhav Nagarnaik * 2. We cmpxchg the prev_page->next to point from head page to the 13865040b4b7SVaibhav Nagarnaik * start of new pages list. 13875040b4b7SVaibhav Nagarnaik * 3. Finally, we update the head->prev to the end of new list. 13885040b4b7SVaibhav Nagarnaik * 13895040b4b7SVaibhav Nagarnaik * We will try this process 10 times, to make sure that we don't keep 13905040b4b7SVaibhav Nagarnaik * spinning. 
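 *
 * In list_head terms the splice looks like this (sketch only; H is
 * the head page, P its prev, F and L the first and last new pages):
 *
 *	step 1:  L->next = H | RB_PAGE_HEAD;  F->prev = P;
 *	step 2:  cmpxchg(&P->next, H | RB_PAGE_HEAD, F)
 *	         succeeds only if no writer moved H underneath us
 *	step 3:  H->prev = L;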
13915040b4b7SVaibhav Nagarnaik */
13925040b4b7SVaibhav Nagarnaik retries = 10;
13935040b4b7SVaibhav Nagarnaik success = 0;
13945040b4b7SVaibhav Nagarnaik while (retries--) {
13955040b4b7SVaibhav Nagarnaik struct list_head *head_page, *prev_page, *r;
13965040b4b7SVaibhav Nagarnaik struct list_head *last_page, *first_page;
13975040b4b7SVaibhav Nagarnaik struct list_head *head_page_with_bit;
139877ae365eSSteven Rostedt
13995040b4b7SVaibhav Nagarnaik head_page = &rb_set_head_page(cpu_buffer)->list;
14005040b4b7SVaibhav Nagarnaik prev_page = head_page->prev;
14015040b4b7SVaibhav Nagarnaik
14025040b4b7SVaibhav Nagarnaik first_page = pages->next;
14035040b4b7SVaibhav Nagarnaik last_page = pages->prev;
14045040b4b7SVaibhav Nagarnaik
14055040b4b7SVaibhav Nagarnaik head_page_with_bit = (struct list_head *)
14065040b4b7SVaibhav Nagarnaik ((unsigned long)head_page | RB_PAGE_HEAD);
14075040b4b7SVaibhav Nagarnaik
14085040b4b7SVaibhav Nagarnaik last_page->next = head_page_with_bit;
14095040b4b7SVaibhav Nagarnaik first_page->prev = prev_page;
14105040b4b7SVaibhav Nagarnaik
14115040b4b7SVaibhav Nagarnaik r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
14125040b4b7SVaibhav Nagarnaik
14135040b4b7SVaibhav Nagarnaik if (r == head_page_with_bit) {
14145040b4b7SVaibhav Nagarnaik /*
14155040b4b7SVaibhav Nagarnaik * yay, we replaced the page pointer to our new list,
14165040b4b7SVaibhav Nagarnaik * now we just have to update the head page's prev
14175040b4b7SVaibhav Nagarnaik * pointer to point to the end of the list
14185040b4b7SVaibhav Nagarnaik */
14195040b4b7SVaibhav Nagarnaik head_page->prev = last_page;
14205040b4b7SVaibhav Nagarnaik success = 1;
14215040b4b7SVaibhav Nagarnaik break;
14227a8e76a3SSteven Rostedt }
14235040b4b7SVaibhav Nagarnaik }
14247a8e76a3SSteven Rostedt
14255040b4b7SVaibhav Nagarnaik if (success)
14265040b4b7SVaibhav Nagarnaik INIT_LIST_HEAD(pages);
14275040b4b7SVaibhav Nagarnaik /*
14285040b4b7SVaibhav Nagarnaik * If we weren't successful in adding in new pages, warn and stop
14295040b4b7SVaibhav Nagarnaik * tracing
14305040b4b7SVaibhav Nagarnaik */
14315040b4b7SVaibhav Nagarnaik RB_WARN_ON(cpu_buffer, !success);
14325389f6faSThomas Gleixner raw_spin_unlock_irq(&cpu_buffer->reader_lock);
14335040b4b7SVaibhav Nagarnaik
14345040b4b7SVaibhav Nagarnaik /* free pages if they weren't inserted */
14355040b4b7SVaibhav Nagarnaik if (!success) {
14365040b4b7SVaibhav Nagarnaik struct buffer_page *bpage, *tmp;
14375040b4b7SVaibhav Nagarnaik list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
14385040b4b7SVaibhav Nagarnaik list) {
14395040b4b7SVaibhav Nagarnaik list_del_init(&bpage->list);
14405040b4b7SVaibhav Nagarnaik free_buffer_page(bpage);
14415040b4b7SVaibhav Nagarnaik }
14425040b4b7SVaibhav Nagarnaik }
14435040b4b7SVaibhav Nagarnaik return success;
14447a8e76a3SSteven Rostedt }
14457a8e76a3SSteven Rostedt
144683f40318SVaibhav Nagarnaik static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1447438ced17SVaibhav Nagarnaik {
14485040b4b7SVaibhav Nagarnaik int success;
144983f40318SVaibhav Nagarnaik
14505040b4b7SVaibhav Nagarnaik if (cpu_buffer->nr_pages_to_update > 0)
14515040b4b7SVaibhav Nagarnaik success = rb_insert_pages(cpu_buffer);
14525040b4b7SVaibhav Nagarnaik else
14535040b4b7SVaibhav Nagarnaik success = rb_remove_pages(cpu_buffer,
14545040b4b7SVaibhav Nagarnaik -cpu_buffer->nr_pages_to_update);
14555040b4b7SVaibhav Nagarnaik
14565040b4b7SVaibhav Nagarnaik if (success)
1457438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
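	/*
	 * Illustration (not in the original): nr_pages_to_update carries
	 * the direction of the resize in its sign, e.g.
	 *
	 *	nr_pages = 8, nr_pages_to_update = +3  ->  grow to 11
	 *	nr_pages = 8, nr_pages_to_update = -3  ->  shrink to 5
	 *
	 * which is why rb_remove_pages() above is handed the negated,
	 * i.e. positive, count.
	 */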
145883f40318SVaibhav Nagarnaik } 145983f40318SVaibhav Nagarnaik 146083f40318SVaibhav Nagarnaik static void update_pages_handler(struct work_struct *work) 146183f40318SVaibhav Nagarnaik { 146283f40318SVaibhav Nagarnaik struct ring_buffer_per_cpu *cpu_buffer = container_of(work, 146383f40318SVaibhav Nagarnaik struct ring_buffer_per_cpu, update_pages_work); 146483f40318SVaibhav Nagarnaik rb_update_pages(cpu_buffer); 146505fdd70dSVaibhav Nagarnaik complete(&cpu_buffer->update_done); 1466438ced17SVaibhav Nagarnaik } 1467438ced17SVaibhav Nagarnaik 14687a8e76a3SSteven Rostedt /** 14697a8e76a3SSteven Rostedt * ring_buffer_resize - resize the ring buffer 14707a8e76a3SSteven Rostedt * @buffer: the buffer to resize. 14717a8e76a3SSteven Rostedt * @size: the new size. 14727a8e76a3SSteven Rostedt * 14737a8e76a3SSteven Rostedt * Minimum size is 2 * BUF_PAGE_SIZE. 14747a8e76a3SSteven Rostedt * 147583f40318SVaibhav Nagarnaik * Returns 0 on success and < 0 on failure. 14767a8e76a3SSteven Rostedt */ 1477438ced17SVaibhav Nagarnaik int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, 1478438ced17SVaibhav Nagarnaik int cpu_id) 14797a8e76a3SSteven Rostedt { 14807a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 1481438ced17SVaibhav Nagarnaik unsigned nr_pages; 148283f40318SVaibhav Nagarnaik int cpu, err = 0; 14837a8e76a3SSteven Rostedt 1484ee51a1deSIngo Molnar /* 1485ee51a1deSIngo Molnar * Always succeed at resizing a non-existent buffer: 1486ee51a1deSIngo Molnar */ 1487ee51a1deSIngo Molnar if (!buffer) 1488ee51a1deSIngo Molnar return size; 1489ee51a1deSIngo Molnar 14906a31e1f1SSteven Rostedt /* Make sure the requested buffer exists */ 14916a31e1f1SSteven Rostedt if (cpu_id != RING_BUFFER_ALL_CPUS && 14926a31e1f1SSteven Rostedt !cpumask_test_cpu(cpu_id, buffer->cpumask)) 14936a31e1f1SSteven Rostedt return size; 14946a31e1f1SSteven Rostedt 14957a8e76a3SSteven Rostedt size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 14967a8e76a3SSteven Rostedt size *= BUF_PAGE_SIZE; 14977a8e76a3SSteven Rostedt 14987a8e76a3SSteven Rostedt /* we need a minimum of two pages */ 14997a8e76a3SSteven Rostedt if (size < BUF_PAGE_SIZE * 2) 15007a8e76a3SSteven Rostedt size = BUF_PAGE_SIZE * 2; 15017a8e76a3SSteven Rostedt 15027a8e76a3SSteven Rostedt nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 15037a8e76a3SSteven Rostedt 150483f40318SVaibhav Nagarnaik /* 150583f40318SVaibhav Nagarnaik * Don't succeed if resizing is disabled, as a reader might be 150683f40318SVaibhav Nagarnaik * manipulating the ring buffer and is expecting a sane state while 150783f40318SVaibhav Nagarnaik * this is true. 
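 *
 * (Worked example, illustration only: assuming a 4080-byte
 * BUF_PAGE_SIZE, a request of size = 10000 rounds up to
 * DIV_ROUND_UP(10000, 4080) = 3 pages, an effective 12240 bytes;
 * anything below 2 * BUF_PAGE_SIZE is first raised to the two-page
 * minimum.)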
150883f40318SVaibhav Nagarnaik */ 150983f40318SVaibhav Nagarnaik if (atomic_read(&buffer->resize_disabled)) 151083f40318SVaibhav Nagarnaik return -EBUSY; 151183f40318SVaibhav Nagarnaik 151283f40318SVaibhav Nagarnaik /* prevent another thread from changing buffer sizes */ 151383f40318SVaibhav Nagarnaik mutex_lock(&buffer->mutex); 151483f40318SVaibhav Nagarnaik 1515438ced17SVaibhav Nagarnaik if (cpu_id == RING_BUFFER_ALL_CPUS) { 1516438ced17SVaibhav Nagarnaik /* calculate the pages to update */ 15177a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 15187a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 1519438ced17SVaibhav Nagarnaik 1520438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = nr_pages - 1521438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages; 1522438ced17SVaibhav Nagarnaik /* 1523438ced17SVaibhav Nagarnaik * nothing more to do for removing pages or no update 1524438ced17SVaibhav Nagarnaik */ 1525438ced17SVaibhav Nagarnaik if (cpu_buffer->nr_pages_to_update <= 0) 1526438ced17SVaibhav Nagarnaik continue; 1527438ced17SVaibhav Nagarnaik /* 1528438ced17SVaibhav Nagarnaik * to add pages, make sure all new pages can be 1529438ced17SVaibhav Nagarnaik * allocated without receiving ENOMEM 1530438ced17SVaibhav Nagarnaik */ 1531438ced17SVaibhav Nagarnaik INIT_LIST_HEAD(&cpu_buffer->new_pages); 1532438ced17SVaibhav Nagarnaik if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update, 153383f40318SVaibhav Nagarnaik &cpu_buffer->new_pages, cpu)) { 1534438ced17SVaibhav Nagarnaik /* not enough memory for new pages */ 153583f40318SVaibhav Nagarnaik err = -ENOMEM; 153683f40318SVaibhav Nagarnaik goto out_err; 153783f40318SVaibhav Nagarnaik } 153883f40318SVaibhav Nagarnaik } 153983f40318SVaibhav Nagarnaik 154083f40318SVaibhav Nagarnaik get_online_cpus(); 154183f40318SVaibhav Nagarnaik /* 154283f40318SVaibhav Nagarnaik * Fire off all the required work handlers 154305fdd70dSVaibhav Nagarnaik * We can't schedule on offline CPUs, but it's not necessary 154483f40318SVaibhav Nagarnaik * since we can change their buffer sizes without any race. 
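 *
 * The dispatch pattern, as a sketch of the loop below:
 *
 *	if (cpu_online(cpu))
 *		schedule_work_on(cpu, &cpu_buffer->update_pages_work);
 *	else
 *		rb_update_pages(cpu_buffer);    update directly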
154583f40318SVaibhav Nagarnaik */
154683f40318SVaibhav Nagarnaik for_each_buffer_cpu(buffer, cpu) {
154783f40318SVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu];
154805fdd70dSVaibhav Nagarnaik if (!cpu_buffer->nr_pages_to_update)
154983f40318SVaibhav Nagarnaik continue;
155083f40318SVaibhav Nagarnaik
155105fdd70dSVaibhav Nagarnaik if (cpu_online(cpu))
155205fdd70dSVaibhav Nagarnaik schedule_work_on(cpu,
155305fdd70dSVaibhav Nagarnaik &cpu_buffer->update_pages_work);
155405fdd70dSVaibhav Nagarnaik else
155583f40318SVaibhav Nagarnaik rb_update_pages(cpu_buffer);
15567a8e76a3SSteven Rostedt }
1557438ced17SVaibhav Nagarnaik
1558438ced17SVaibhav Nagarnaik /* wait for all the updates to complete */
1559438ced17SVaibhav Nagarnaik for_each_buffer_cpu(buffer, cpu) {
1560438ced17SVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu];
156105fdd70dSVaibhav Nagarnaik if (!cpu_buffer->nr_pages_to_update)
156283f40318SVaibhav Nagarnaik continue;
156383f40318SVaibhav Nagarnaik
156405fdd70dSVaibhav Nagarnaik if (cpu_online(cpu))
156505fdd70dSVaibhav Nagarnaik wait_for_completion(&cpu_buffer->update_done);
156683f40318SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = 0;
1567438ced17SVaibhav Nagarnaik }
156883f40318SVaibhav Nagarnaik
156983f40318SVaibhav Nagarnaik put_online_cpus();
1570438ced17SVaibhav Nagarnaik } else {
15718e49f418SVaibhav Nagarnaik /* Make sure this CPU has been initialized */
15728e49f418SVaibhav Nagarnaik if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
15738e49f418SVaibhav Nagarnaik goto out;
15748e49f418SVaibhav Nagarnaik
1575438ced17SVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu_id];
157683f40318SVaibhav Nagarnaik
1577438ced17SVaibhav Nagarnaik if (nr_pages == cpu_buffer->nr_pages)
15787a8e76a3SSteven Rostedt goto out;
1579438ced17SVaibhav Nagarnaik
1580438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = nr_pages -
1581438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages;
1582438ced17SVaibhav Nagarnaik
1583438ced17SVaibhav Nagarnaik INIT_LIST_HEAD(&cpu_buffer->new_pages);
1584438ced17SVaibhav Nagarnaik if (cpu_buffer->nr_pages_to_update > 0 &&
1585438ced17SVaibhav Nagarnaik __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
158683f40318SVaibhav Nagarnaik &cpu_buffer->new_pages, cpu_id)) {
158783f40318SVaibhav Nagarnaik err = -ENOMEM;
158883f40318SVaibhav Nagarnaik goto out_err;
158983f40318SVaibhav Nagarnaik }
1590438ced17SVaibhav Nagarnaik
159183f40318SVaibhav Nagarnaik get_online_cpus();
159283f40318SVaibhav Nagarnaik
159383f40318SVaibhav Nagarnaik if (cpu_online(cpu_id)) {
159483f40318SVaibhav Nagarnaik schedule_work_on(cpu_id,
159583f40318SVaibhav Nagarnaik &cpu_buffer->update_pages_work);
159605fdd70dSVaibhav Nagarnaik wait_for_completion(&cpu_buffer->update_done);
159783f40318SVaibhav Nagarnaik } else
159883f40318SVaibhav Nagarnaik rb_update_pages(cpu_buffer);
159983f40318SVaibhav Nagarnaik
160083f40318SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = 0;
160105fdd70dSVaibhav Nagarnaik put_online_cpus();
16027a8e76a3SSteven Rostedt }
16037a8e76a3SSteven Rostedt
16047a8e76a3SSteven Rostedt out:
1605659f451fSSteven Rostedt /*
1606659f451fSSteven Rostedt * The ring buffer resize can happen with the ring buffer
1607659f451fSSteven Rostedt * enabled, so that the update disturbs the tracing as little
1608659f451fSSteven Rostedt * as possible. But if the buffer is disabled, we do not need
1609659f451fSSteven Rostedt * to worry about that, and we can take the time to verify
1610659f451fSSteven Rostedt * that the buffer is not corrupt.
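 *
 * Sketch of that verification (mirrors the code that follows):
 *
 *	if (atomic_read(&buffer->record_disabled)) {
 *		atomic_inc(&buffer->record_disabled);
 *		synchronize_sched();        wait out in-flight writers
 *		rb_check_pages() on every per-cpu buffer
 *		atomic_dec(&buffer->record_disabled);
 *	}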
1611659f451fSSteven Rostedt */ 1612659f451fSSteven Rostedt if (atomic_read(&buffer->record_disabled)) { 1613659f451fSSteven Rostedt atomic_inc(&buffer->record_disabled); 1614659f451fSSteven Rostedt /* 1615659f451fSSteven Rostedt * Even though the buffer was disabled, we must make sure 1616659f451fSSteven Rostedt * that it is truly disabled before calling rb_check_pages. 1617659f451fSSteven Rostedt * There could have been a race between checking 1618659f451fSSteven Rostedt * record_disable and incrementing it. 1619659f451fSSteven Rostedt */ 1620659f451fSSteven Rostedt synchronize_sched(); 1621659f451fSSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 1622659f451fSSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 1623659f451fSSteven Rostedt rb_check_pages(cpu_buffer); 1624659f451fSSteven Rostedt } 1625659f451fSSteven Rostedt atomic_dec(&buffer->record_disabled); 1626659f451fSSteven Rostedt } 1627659f451fSSteven Rostedt 16287a8e76a3SSteven Rostedt mutex_unlock(&buffer->mutex); 16297a8e76a3SSteven Rostedt return size; 16307a8e76a3SSteven Rostedt 163183f40318SVaibhav Nagarnaik out_err: 1632438ced17SVaibhav Nagarnaik for_each_buffer_cpu(buffer, cpu) { 1633438ced17SVaibhav Nagarnaik struct buffer_page *bpage, *tmp; 163483f40318SVaibhav Nagarnaik 1635438ced17SVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu]; 1636438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = 0; 163783f40318SVaibhav Nagarnaik 1638438ced17SVaibhav Nagarnaik if (list_empty(&cpu_buffer->new_pages)) 1639438ced17SVaibhav Nagarnaik continue; 164083f40318SVaibhav Nagarnaik 1641438ced17SVaibhav Nagarnaik list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 1642438ced17SVaibhav Nagarnaik list) { 1643044fa782SSteven Rostedt list_del_init(&bpage->list); 1644044fa782SSteven Rostedt free_buffer_page(bpage); 16457a8e76a3SSteven Rostedt } 1646438ced17SVaibhav Nagarnaik } 1647641d2f63SVegard Nossum mutex_unlock(&buffer->mutex); 164883f40318SVaibhav Nagarnaik return err; 16497a8e76a3SSteven Rostedt } 1650c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_resize); 16517a8e76a3SSteven Rostedt 1652750912faSDavid Sharp void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val) 1653750912faSDavid Sharp { 1654750912faSDavid Sharp mutex_lock(&buffer->mutex); 1655750912faSDavid Sharp if (val) 1656750912faSDavid Sharp buffer->flags |= RB_FL_OVERWRITE; 1657750912faSDavid Sharp else 1658750912faSDavid Sharp buffer->flags &= ~RB_FL_OVERWRITE; 1659750912faSDavid Sharp mutex_unlock(&buffer->mutex); 1660750912faSDavid Sharp } 1661750912faSDavid Sharp EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); 1662750912faSDavid Sharp 16638789a9e7SSteven Rostedt static inline void * 1664044fa782SSteven Rostedt __rb_data_page_index(struct buffer_data_page *bpage, unsigned index) 16658789a9e7SSteven Rostedt { 1666044fa782SSteven Rostedt return bpage->data + index; 16678789a9e7SSteven Rostedt } 16688789a9e7SSteven Rostedt 1669044fa782SSteven Rostedt static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 16707a8e76a3SSteven Rostedt { 1671044fa782SSteven Rostedt return bpage->page->data + index; 16727a8e76a3SSteven Rostedt } 16737a8e76a3SSteven Rostedt 16747a8e76a3SSteven Rostedt static inline struct ring_buffer_event * 1675d769041fSSteven Rostedt rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) 16767a8e76a3SSteven Rostedt { 16776f807acdSSteven Rostedt return __rb_page_index(cpu_buffer->reader_page, 16786f807acdSSteven Rostedt cpu_buffer->reader_page->read); 16796f807acdSSteven Rostedt } 16806f807acdSSteven 
Rostedt 16816f807acdSSteven Rostedt static inline struct ring_buffer_event * 16827a8e76a3SSteven Rostedt rb_iter_head_event(struct ring_buffer_iter *iter) 16837a8e76a3SSteven Rostedt { 16846f807acdSSteven Rostedt return __rb_page_index(iter->head_page, iter->head); 16857a8e76a3SSteven Rostedt } 16867a8e76a3SSteven Rostedt 1687bf41a158SSteven Rostedt static inline unsigned rb_page_commit(struct buffer_page *bpage) 1688bf41a158SSteven Rostedt { 1689abc9b56dSSteven Rostedt return local_read(&bpage->page->commit); 1690bf41a158SSteven Rostedt } 1691bf41a158SSteven Rostedt 169225985edcSLucas De Marchi /* Size is determined by what has been committed */ 1693bf41a158SSteven Rostedt static inline unsigned rb_page_size(struct buffer_page *bpage) 1694bf41a158SSteven Rostedt { 1695bf41a158SSteven Rostedt return rb_page_commit(bpage); 1696bf41a158SSteven Rostedt } 1697bf41a158SSteven Rostedt 1698bf41a158SSteven Rostedt static inline unsigned 1699bf41a158SSteven Rostedt rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) 1700bf41a158SSteven Rostedt { 1701bf41a158SSteven Rostedt return rb_page_commit(cpu_buffer->commit_page); 1702bf41a158SSteven Rostedt } 1703bf41a158SSteven Rostedt 1704bf41a158SSteven Rostedt static inline unsigned 1705bf41a158SSteven Rostedt rb_event_index(struct ring_buffer_event *event) 17067a8e76a3SSteven Rostedt { 1707bf41a158SSteven Rostedt unsigned long addr = (unsigned long)event; 1708bf41a158SSteven Rostedt 170922f470f8SSteven Rostedt return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; 17107a8e76a3SSteven Rostedt } 17117a8e76a3SSteven Rostedt 17120f0c85fcSSteven Rostedt static inline int 1713fa743953SSteven Rostedt rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, 1714bf41a158SSteven Rostedt struct ring_buffer_event *event) 17157a8e76a3SSteven Rostedt { 1716bf41a158SSteven Rostedt unsigned long addr = (unsigned long)event; 1717bf41a158SSteven Rostedt unsigned long index; 1718bf41a158SSteven Rostedt 1719bf41a158SSteven Rostedt index = rb_event_index(event); 1720bf41a158SSteven Rostedt addr &= PAGE_MASK; 1721bf41a158SSteven Rostedt 1722bf41a158SSteven Rostedt return cpu_buffer->commit_page->page == (void *)addr && 1723bf41a158SSteven Rostedt rb_commit_index(cpu_buffer) == index; 1724bf41a158SSteven Rostedt } 1725bf41a158SSteven Rostedt 172634a148bfSAndrew Morton static void 1727bf41a158SSteven Rostedt rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) 1728bf41a158SSteven Rostedt { 172977ae365eSSteven Rostedt unsigned long max_count; 173077ae365eSSteven Rostedt 1731bf41a158SSteven Rostedt /* 1732bf41a158SSteven Rostedt * We only race with interrupts and NMIs on this CPU. 1733bf41a158SSteven Rostedt * If we own the commit event, then we can commit 1734bf41a158SSteven Rostedt * all others that interrupted us, since the interruptions 1735bf41a158SSteven Rostedt * are in stack format (they finish before they come 1736bf41a158SSteven Rostedt * back to us). This allows us to do a simple loop to 1737bf41a158SSteven Rostedt * assign the commit to the tail. 
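 *
 * Conceptually (sketch only), the commit page chases the tail page,
 * publishing each page's final write index as its commit on the way:
 *
 *	while (commit_page != tail_page) {
 *		commit_page->page->commit = rb_page_write(commit_page);
 *		advance commit_page, refresh write_stamp;
 *	}
 *	then publish the in-progress index on the shared last page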
1738bf41a158SSteven Rostedt */ 1739a8ccf1d6SSteven Rostedt again: 1740438ced17SVaibhav Nagarnaik max_count = cpu_buffer->nr_pages * 100; 174177ae365eSSteven Rostedt 1742bf41a158SSteven Rostedt while (cpu_buffer->commit_page != cpu_buffer->tail_page) { 174377ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer, !(--max_count))) 174477ae365eSSteven Rostedt return; 174577ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 174677ae365eSSteven Rostedt rb_is_reader_page(cpu_buffer->tail_page))) 174777ae365eSSteven Rostedt return; 174877ae365eSSteven Rostedt local_set(&cpu_buffer->commit_page->page->commit, 174977ae365eSSteven Rostedt rb_page_write(cpu_buffer->commit_page)); 1750bf41a158SSteven Rostedt rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); 1751abc9b56dSSteven Rostedt cpu_buffer->write_stamp = 1752abc9b56dSSteven Rostedt cpu_buffer->commit_page->page->time_stamp; 1753bf41a158SSteven Rostedt /* add barrier to keep gcc from optimizing too much */ 1754bf41a158SSteven Rostedt barrier(); 1755bf41a158SSteven Rostedt } 1756bf41a158SSteven Rostedt while (rb_commit_index(cpu_buffer) != 1757bf41a158SSteven Rostedt rb_page_write(cpu_buffer->commit_page)) { 175877ae365eSSteven Rostedt 175977ae365eSSteven Rostedt local_set(&cpu_buffer->commit_page->page->commit, 176077ae365eSSteven Rostedt rb_page_write(cpu_buffer->commit_page)); 176177ae365eSSteven Rostedt RB_WARN_ON(cpu_buffer, 176277ae365eSSteven Rostedt local_read(&cpu_buffer->commit_page->page->commit) & 176377ae365eSSteven Rostedt ~RB_WRITE_MASK); 1764bf41a158SSteven Rostedt barrier(); 1765bf41a158SSteven Rostedt } 1766a8ccf1d6SSteven Rostedt 1767a8ccf1d6SSteven Rostedt /* again, keep gcc from optimizing */ 1768a8ccf1d6SSteven Rostedt barrier(); 1769a8ccf1d6SSteven Rostedt 1770a8ccf1d6SSteven Rostedt /* 1771a8ccf1d6SSteven Rostedt * If an interrupt came in just after the first while loop 1772a8ccf1d6SSteven Rostedt * and pushed the tail page forward, we will be left with 1773a8ccf1d6SSteven Rostedt * a dangling commit that will never go forward. 1774a8ccf1d6SSteven Rostedt */ 1775a8ccf1d6SSteven Rostedt if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page)) 1776a8ccf1d6SSteven Rostedt goto again; 17777a8e76a3SSteven Rostedt } 17787a8e76a3SSteven Rostedt 1779d769041fSSteven Rostedt static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 17807a8e76a3SSteven Rostedt { 1781abc9b56dSSteven Rostedt cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; 17826f807acdSSteven Rostedt cpu_buffer->reader_page->read = 0; 1783d769041fSSteven Rostedt } 1784d769041fSSteven Rostedt 178534a148bfSAndrew Morton static void rb_inc_iter(struct ring_buffer_iter *iter) 1786d769041fSSteven Rostedt { 1787d769041fSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 1788d769041fSSteven Rostedt 1789d769041fSSteven Rostedt /* 1790d769041fSSteven Rostedt * The iterator could be on the reader page (it starts there). 1791d769041fSSteven Rostedt * But the head could have moved, since the reader was 1792d769041fSSteven Rostedt * found. Check for this case and assign the iterator 1793d769041fSSteven Rostedt * to the head page instead of next. 
1794d769041fSSteven Rostedt */
1795d769041fSSteven Rostedt if (iter->head_page == cpu_buffer->reader_page)
179677ae365eSSteven Rostedt iter->head_page = rb_set_head_page(cpu_buffer);
1797d769041fSSteven Rostedt else
1798d769041fSSteven Rostedt rb_inc_page(cpu_buffer, &iter->head_page);
1799d769041fSSteven Rostedt
1800abc9b56dSSteven Rostedt iter->read_stamp = iter->head_page->page->time_stamp;
18017a8e76a3SSteven Rostedt iter->head = 0;
18027a8e76a3SSteven Rostedt }
18037a8e76a3SSteven Rostedt
180469d1b839SSteven Rostedt /* Slow path, do not inline */
180569d1b839SSteven Rostedt static noinline struct ring_buffer_event *
180669d1b839SSteven Rostedt rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
180769d1b839SSteven Rostedt {
180869d1b839SSteven Rostedt event->type_len = RINGBUF_TYPE_TIME_EXTEND;
180969d1b839SSteven Rostedt
181069d1b839SSteven Rostedt /* Not the first event on the page? */
181169d1b839SSteven Rostedt if (rb_event_index(event)) {
181269d1b839SSteven Rostedt event->time_delta = delta & TS_MASK;
181369d1b839SSteven Rostedt event->array[0] = delta >> TS_SHIFT;
181469d1b839SSteven Rostedt } else {
181569d1b839SSteven Rostedt /* nope, just zero it */
181669d1b839SSteven Rostedt event->time_delta = 0;
181769d1b839SSteven Rostedt event->array[0] = 0;
181869d1b839SSteven Rostedt }
181969d1b839SSteven Rostedt
182069d1b839SSteven Rostedt return skip_time_extend(event);
182169d1b839SSteven Rostedt }
182269d1b839SSteven Rostedt
18237a8e76a3SSteven Rostedt /**
182401e3e710SDavid Sharp * rb_update_event - update event type and data
18257a8e76a3SSteven Rostedt * @cpu_buffer: the per cpu buffer the event belongs to
18267a8e76a3SSteven Rostedt * @event: the event to update
18277a8e76a3SSteven Rostedt * @length: the size of the event field in the ring buffer
18287a8e76a3SSteven Rostedt *
18297a8e76a3SSteven Rostedt * Update the type and data fields of the event. The length
18307a8e76a3SSteven Rostedt * is the actual size that is written to the ring buffer,
18317a8e76a3SSteven Rostedt * and with this, we can determine what to place into the
18327a8e76a3SSteven Rostedt * data field.
18337a8e76a3SSteven Rostedt */
183434a148bfSAndrew Morton static void
183569d1b839SSteven Rostedt rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
183669d1b839SSteven Rostedt struct ring_buffer_event *event, unsigned length,
183769d1b839SSteven Rostedt int add_timestamp, u64 delta)
18387a8e76a3SSteven Rostedt {
183969d1b839SSteven Rostedt /* Only a commit updates the timestamp */
184069d1b839SSteven Rostedt if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
184169d1b839SSteven Rostedt delta = 0;
18427a8e76a3SSteven Rostedt
184369d1b839SSteven Rostedt /*
184469d1b839SSteven Rostedt * If we need to add a timestamp, then we
184569d1b839SSteven Rostedt * add it to the start of the reserved space.
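 *
 * (Worked example, illustration only, assuming TS_SHIFT is the
 * 27-bit time_delta boundary: a delta of 0x12345678 does not fit
 * in 27 bits, so rb_add_time_stamp() above splits it as
 *
 *	event->time_delta = 0x12345678 & TS_MASK;      low 27 bits
 *	event->array[0]   = 0x12345678 >> TS_SHIFT;    upper bits
 *
 * and the data event then follows the TIME_EXTEND event.)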
184669d1b839SSteven Rostedt */ 184769d1b839SSteven Rostedt if (unlikely(add_timestamp)) { 184869d1b839SSteven Rostedt event = rb_add_time_stamp(event, delta); 184969d1b839SSteven Rostedt length -= RB_LEN_TIME_EXTEND; 185069d1b839SSteven Rostedt delta = 0; 18517a8e76a3SSteven Rostedt } 185269d1b839SSteven Rostedt 185369d1b839SSteven Rostedt event->time_delta = delta; 185469d1b839SSteven Rostedt length -= RB_EVNT_HDR_SIZE; 185569d1b839SSteven Rostedt if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { 185669d1b839SSteven Rostedt event->type_len = 0; 185769d1b839SSteven Rostedt event->array[0] = length; 185869d1b839SSteven Rostedt } else 185969d1b839SSteven Rostedt event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); 18607a8e76a3SSteven Rostedt } 18617a8e76a3SSteven Rostedt 186277ae365eSSteven Rostedt /* 186377ae365eSSteven Rostedt * rb_handle_head_page - writer hit the head page 186477ae365eSSteven Rostedt * 186577ae365eSSteven Rostedt * Returns: +1 to retry page 186677ae365eSSteven Rostedt * 0 to continue 186777ae365eSSteven Rostedt * -1 on error 186877ae365eSSteven Rostedt */ 186977ae365eSSteven Rostedt static int 187077ae365eSSteven Rostedt rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, 187177ae365eSSteven Rostedt struct buffer_page *tail_page, 187277ae365eSSteven Rostedt struct buffer_page *next_page) 187377ae365eSSteven Rostedt { 187477ae365eSSteven Rostedt struct buffer_page *new_head; 187577ae365eSSteven Rostedt int entries; 187677ae365eSSteven Rostedt int type; 187777ae365eSSteven Rostedt int ret; 187877ae365eSSteven Rostedt 187977ae365eSSteven Rostedt entries = rb_page_entries(next_page); 188077ae365eSSteven Rostedt 188177ae365eSSteven Rostedt /* 188277ae365eSSteven Rostedt * The hard part is here. We need to move the head 188377ae365eSSteven Rostedt * forward, and protect against both readers on 188477ae365eSSteven Rostedt * other CPUs and writers coming in via interrupts. 188577ae365eSSteven Rostedt */ 188677ae365eSSteven Rostedt type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, 188777ae365eSSteven Rostedt RB_PAGE_HEAD); 188877ae365eSSteven Rostedt 188977ae365eSSteven Rostedt /* 189077ae365eSSteven Rostedt * type can be one of four: 189177ae365eSSteven Rostedt * NORMAL - an interrupt already moved it for us 189277ae365eSSteven Rostedt * HEAD - we are the first to get here. 189377ae365eSSteven Rostedt * UPDATE - we are the interrupt interrupting 189477ae365eSSteven Rostedt * a current move. 189577ae365eSSteven Rostedt * MOVED - a reader on another CPU moved the next 189677ae365eSSteven Rostedt * pointer to its reader page. Give up 189777ae365eSSteven Rostedt * and try again. 189877ae365eSSteven Rostedt */ 189977ae365eSSteven Rostedt 190077ae365eSSteven Rostedt switch (type) { 190177ae365eSSteven Rostedt case RB_PAGE_HEAD: 190277ae365eSSteven Rostedt /* 190377ae365eSSteven Rostedt * We changed the head to UPDATE, thus 190477ae365eSSteven Rostedt * it is our responsibility to update 190577ae365eSSteven Rostedt * the counters. 190677ae365eSSteven Rostedt */ 190777ae365eSSteven Rostedt local_add(entries, &cpu_buffer->overrun); 1908c64e148aSVaibhav Nagarnaik local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 190977ae365eSSteven Rostedt 191077ae365eSSteven Rostedt /* 191177ae365eSSteven Rostedt * The entries will be zeroed out when we move the 191277ae365eSSteven Rostedt * tail page. 
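 *
 * (Illustration only: the HEAD and UPDATE states are flag bits
 * carried in the low bits of the ->next pointer that leads to the
 * head page, e.g.
 *
 *	prev->next == (struct list_head *)
 *			((unsigned long)head_page | RB_PAGE_HEAD)
 *
 * so rb_head_page_set_update() above is assumed to flip that tag
 * atomically, which is how a raced attempt reports NORMAL or MOVED.)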
191377ae365eSSteven Rostedt */
191477ae365eSSteven Rostedt
191577ae365eSSteven Rostedt /* still more to do */
191677ae365eSSteven Rostedt break;
191777ae365eSSteven Rostedt
191877ae365eSSteven Rostedt case RB_PAGE_UPDATE:
191977ae365eSSteven Rostedt /*
192077ae365eSSteven Rostedt * This is an interrupt that interrupted the
192177ae365eSSteven Rostedt * previous update. Still more to do.
192277ae365eSSteven Rostedt */
192377ae365eSSteven Rostedt break;
192477ae365eSSteven Rostedt case RB_PAGE_NORMAL:
192577ae365eSSteven Rostedt /*
192677ae365eSSteven Rostedt * An interrupt came in before the update
192777ae365eSSteven Rostedt * and processed this for us.
192877ae365eSSteven Rostedt * Nothing left to do.
192977ae365eSSteven Rostedt */
193077ae365eSSteven Rostedt return 1;
193177ae365eSSteven Rostedt case RB_PAGE_MOVED:
193277ae365eSSteven Rostedt /*
193377ae365eSSteven Rostedt * The reader is on another CPU and just did
193477ae365eSSteven Rostedt * a swap with our next_page.
193577ae365eSSteven Rostedt * Try again.
193677ae365eSSteven Rostedt */
193777ae365eSSteven Rostedt return 1;
193877ae365eSSteven Rostedt default:
193977ae365eSSteven Rostedt RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
194077ae365eSSteven Rostedt return -1;
194177ae365eSSteven Rostedt }
194277ae365eSSteven Rostedt
194377ae365eSSteven Rostedt /*
194477ae365eSSteven Rostedt * Now that we are here, the old head pointer is
194577ae365eSSteven Rostedt * set to UPDATE. This will keep the reader from
194677ae365eSSteven Rostedt * swapping the head page with the reader page.
194777ae365eSSteven Rostedt * The reader (on another CPU) will spin till
194877ae365eSSteven Rostedt * we are finished.
194977ae365eSSteven Rostedt *
195077ae365eSSteven Rostedt * We just need to protect against interrupts
195177ae365eSSteven Rostedt * doing the job. We will set the next pointer
195277ae365eSSteven Rostedt * to HEAD. After that, we set the old pointer
195377ae365eSSteven Rostedt * to NORMAL, but only if it was HEAD before;
195477ae365eSSteven Rostedt * otherwise we are an interrupt, and only
195577ae365eSSteven Rostedt * want the outermost commit to reset it.
195677ae365eSSteven Rostedt */
195777ae365eSSteven Rostedt new_head = next_page;
195877ae365eSSteven Rostedt rb_inc_page(cpu_buffer, &new_head);
195977ae365eSSteven Rostedt
196077ae365eSSteven Rostedt ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
196177ae365eSSteven Rostedt RB_PAGE_NORMAL);
196277ae365eSSteven Rostedt
196377ae365eSSteven Rostedt /*
196477ae365eSSteven Rostedt * Valid returns are:
196577ae365eSSteven Rostedt * HEAD - an interrupt came in and already set it.
196677ae365eSSteven Rostedt * NORMAL - One of two things:
196777ae365eSSteven Rostedt * 1) We really set it.
196877ae365eSSteven Rostedt * 2) A bunch of interrupts came in and moved
196977ae365eSSteven Rostedt * the page forward again.
197077ae365eSSteven Rostedt */
197177ae365eSSteven Rostedt switch (ret) {
197277ae365eSSteven Rostedt case RB_PAGE_HEAD:
197377ae365eSSteven Rostedt case RB_PAGE_NORMAL:
197477ae365eSSteven Rostedt /* OK */
197577ae365eSSteven Rostedt break;
197677ae365eSSteven Rostedt default:
197777ae365eSSteven Rostedt RB_WARN_ON(cpu_buffer, 1);
197877ae365eSSteven Rostedt return -1;
197977ae365eSSteven Rostedt }
198077ae365eSSteven Rostedt
198177ae365eSSteven Rostedt /*
198277ae365eSSteven Rostedt * It is possible that an interrupt came in,
198377ae365eSSteven Rostedt * set the head up, then more interrupts came in
198477ae365eSSteven Rostedt * and moved it again.
When we get back here,
198577ae365eSSteven Rostedt * the page would have been set to NORMAL but we
198677ae365eSSteven Rostedt * just set it back to HEAD.
198777ae365eSSteven Rostedt *
198877ae365eSSteven Rostedt * How do you detect this? Well, if that happened
198977ae365eSSteven Rostedt * the tail page would have moved.
199077ae365eSSteven Rostedt */
199177ae365eSSteven Rostedt if (ret == RB_PAGE_NORMAL) {
199277ae365eSSteven Rostedt /*
199377ae365eSSteven Rostedt * If the tail had moved past next, then we need
199477ae365eSSteven Rostedt * to reset the pointer.
199577ae365eSSteven Rostedt */
199677ae365eSSteven Rostedt if (cpu_buffer->tail_page != tail_page &&
199777ae365eSSteven Rostedt cpu_buffer->tail_page != next_page)
199877ae365eSSteven Rostedt rb_head_page_set_normal(cpu_buffer, new_head,
199977ae365eSSteven Rostedt next_page,
200077ae365eSSteven Rostedt RB_PAGE_HEAD);
200177ae365eSSteven Rostedt }
200277ae365eSSteven Rostedt
200377ae365eSSteven Rostedt /*
200477ae365eSSteven Rostedt * If this was the outermost commit (the one that
200577ae365eSSteven Rostedt * changed the original pointer from HEAD to UPDATE),
200677ae365eSSteven Rostedt * then it is up to us to reset it to NORMAL.
200777ae365eSSteven Rostedt */
200877ae365eSSteven Rostedt if (type == RB_PAGE_HEAD) {
200977ae365eSSteven Rostedt ret = rb_head_page_set_normal(cpu_buffer, next_page,
201077ae365eSSteven Rostedt tail_page,
201177ae365eSSteven Rostedt RB_PAGE_UPDATE);
201277ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer,
201377ae365eSSteven Rostedt ret != RB_PAGE_UPDATE))
201477ae365eSSteven Rostedt return -1;
201577ae365eSSteven Rostedt }
201677ae365eSSteven Rostedt
201777ae365eSSteven Rostedt return 0;
201877ae365eSSteven Rostedt }
201977ae365eSSteven Rostedt
202034a148bfSAndrew Morton static unsigned rb_calculate_event_length(unsigned length)
20217a8e76a3SSteven Rostedt {
20227a8e76a3SSteven Rostedt struct ring_buffer_event event; /* Used only for sizeof array */
20237a8e76a3SSteven Rostedt
20247a8e76a3SSteven Rostedt /* zero length can cause confusions */
20257a8e76a3SSteven Rostedt if (!length)
20267a8e76a3SSteven Rostedt length = 1;
20277a8e76a3SSteven Rostedt
20282271048dSSteven Rostedt if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
20297a8e76a3SSteven Rostedt length += sizeof(event.array[0]);
20307a8e76a3SSteven Rostedt
20317a8e76a3SSteven Rostedt length += RB_EVNT_HDR_SIZE;
20322271048dSSteven Rostedt length = ALIGN(length, RB_ARCH_ALIGNMENT);
20337a8e76a3SSteven Rostedt
20347a8e76a3SSteven Rostedt return length;
20357a8e76a3SSteven Rostedt }
20367a8e76a3SSteven Rostedt
2037c7b09308SSteven Rostedt static inline void
2038c7b09308SSteven Rostedt rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2039c7b09308SSteven Rostedt struct buffer_page *tail_page,
2040c7b09308SSteven Rostedt unsigned long tail, unsigned long length)
2041c7b09308SSteven Rostedt {
2042c7b09308SSteven Rostedt struct ring_buffer_event *event;
2043c7b09308SSteven Rostedt
2044c7b09308SSteven Rostedt /*
2045c7b09308SSteven Rostedt * Only the event that crossed the page boundary
2046c7b09308SSteven Rostedt * must fill the old tail_page with padding.
2047c7b09308SSteven Rostedt */
2048c7b09308SSteven Rostedt if (tail >= BUF_PAGE_SIZE) {
2049b3230c8bSSteven Rostedt /*
2050b3230c8bSSteven Rostedt * If the page was filled, then we still need
2051b3230c8bSSteven Rostedt * to update the real_end. Reset it to zero
2052b3230c8bSSteven Rostedt * and the reader will ignore it.
2053b3230c8bSSteven Rostedt */ 2054b3230c8bSSteven Rostedt if (tail == BUF_PAGE_SIZE) 2055b3230c8bSSteven Rostedt tail_page->real_end = 0; 2056b3230c8bSSteven Rostedt 2057c7b09308SSteven Rostedt local_sub(length, &tail_page->write); 2058c7b09308SSteven Rostedt return; 2059c7b09308SSteven Rostedt } 2060c7b09308SSteven Rostedt 2061c7b09308SSteven Rostedt event = __rb_page_index(tail_page, tail); 2062b0b7065bSLinus Torvalds kmemcheck_annotate_bitfield(event, bitfield); 2063c7b09308SSteven Rostedt 2064c64e148aSVaibhav Nagarnaik /* account for padding bytes */ 2065c64e148aSVaibhav Nagarnaik local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); 2066c64e148aSVaibhav Nagarnaik 2067c7b09308SSteven Rostedt /* 2068ff0ff84aSSteven Rostedt * Save the original length to the meta data. 2069ff0ff84aSSteven Rostedt * This will be used by the reader to add lost event 2070ff0ff84aSSteven Rostedt * counter. 2071ff0ff84aSSteven Rostedt */ 2072ff0ff84aSSteven Rostedt tail_page->real_end = tail; 2073ff0ff84aSSteven Rostedt 2074ff0ff84aSSteven Rostedt /* 2075c7b09308SSteven Rostedt * If this event is bigger than the minimum size, then 2076c7b09308SSteven Rostedt * we need to be careful that we don't subtract the 2077c7b09308SSteven Rostedt * write counter enough to allow another writer to slip 2078c7b09308SSteven Rostedt * in on this page. 2079c7b09308SSteven Rostedt * We put in a discarded commit instead, to make sure 2080c7b09308SSteven Rostedt * that this space is not used again. 2081c7b09308SSteven Rostedt * 2082c7b09308SSteven Rostedt * If we are less than the minimum size, we don't need to 2083c7b09308SSteven Rostedt * worry about it. 2084c7b09308SSteven Rostedt */ 2085c7b09308SSteven Rostedt if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { 2086c7b09308SSteven Rostedt /* No room for any events */ 2087c7b09308SSteven Rostedt 2088c7b09308SSteven Rostedt /* Mark the rest of the page with padding */ 2089c7b09308SSteven Rostedt rb_event_set_padding(event); 2090c7b09308SSteven Rostedt 2091c7b09308SSteven Rostedt /* Set the write back to the previous setting */ 2092c7b09308SSteven Rostedt local_sub(length, &tail_page->write); 2093c7b09308SSteven Rostedt return; 2094c7b09308SSteven Rostedt } 2095c7b09308SSteven Rostedt 2096c7b09308SSteven Rostedt /* Put in a discarded event */ 2097c7b09308SSteven Rostedt event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; 2098c7b09308SSteven Rostedt event->type_len = RINGBUF_TYPE_PADDING; 2099c7b09308SSteven Rostedt /* time delta must be non zero */ 2100c7b09308SSteven Rostedt event->time_delta = 1; 2101c7b09308SSteven Rostedt 2102c7b09308SSteven Rostedt /* Set write to end of buffer */ 2103c7b09308SSteven Rostedt length = (tail + length) - BUF_PAGE_SIZE; 2104c7b09308SSteven Rostedt local_sub(length, &tail_page->write); 2105c7b09308SSteven Rostedt } 21066634ff26SSteven Rostedt 2107747e94aeSSteven Rostedt /* 2108747e94aeSSteven Rostedt * This is the slow path, force gcc not to inline it. 
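 *
 * (Worked example for rb_reset_tail() above, illustration only,
 * assuming a 4080-byte BUF_PAGE_SIZE: with tail = 4000 and
 * length = 96 the event crosses the page end, so the 80 trailing
 * bytes become one padding event:
 *
 *	event->array[0]   = (4080 - 4000) - RB_EVNT_HDR_SIZE;
 *	event->type_len   = RINGBUF_TYPE_PADDING;
 *	event->time_delta = 1;
 *
 * and write is pulled back by the 16 bytes that spilled over.)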
2109747e94aeSSteven Rostedt */ 2110747e94aeSSteven Rostedt static noinline struct ring_buffer_event * 21116634ff26SSteven Rostedt rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 21126634ff26SSteven Rostedt unsigned long length, unsigned long tail, 2113e8bc43e8SSteven Rostedt struct buffer_page *tail_page, u64 ts) 21147a8e76a3SSteven Rostedt { 21155a50e33cSSteven Rostedt struct buffer_page *commit_page = cpu_buffer->commit_page; 21167a8e76a3SSteven Rostedt struct ring_buffer *buffer = cpu_buffer->buffer; 211777ae365eSSteven Rostedt struct buffer_page *next_page; 211877ae365eSSteven Rostedt int ret; 2119aa20ae84SSteven Rostedt 2120aa20ae84SSteven Rostedt next_page = tail_page; 21217a8e76a3SSteven Rostedt 21227a8e76a3SSteven Rostedt rb_inc_page(cpu_buffer, &next_page); 21237a8e76a3SSteven Rostedt 2124bf41a158SSteven Rostedt /* 2125bf41a158SSteven Rostedt * If for some reason, we had an interrupt storm that made 2126bf41a158SSteven Rostedt * it all the way around the buffer, bail, and warn 2127bf41a158SSteven Rostedt * about it. 2128bf41a158SSteven Rostedt */ 212998db8df7SSteven Rostedt if (unlikely(next_page == commit_page)) { 213077ae365eSSteven Rostedt local_inc(&cpu_buffer->commit_overrun); 213145141d46SSteven Rostedt goto out_reset; 2132bf41a158SSteven Rostedt } 2133d769041fSSteven Rostedt 2134bf41a158SSteven Rostedt /* 213577ae365eSSteven Rostedt * This is where the fun begins! 213677ae365eSSteven Rostedt * 213777ae365eSSteven Rostedt * We are fighting against races between a reader that 213877ae365eSSteven Rostedt * could be on another CPU trying to swap its reader 213977ae365eSSteven Rostedt * page with the buffer head. 214077ae365eSSteven Rostedt * 214177ae365eSSteven Rostedt * We are also fighting against interrupts coming in and 214277ae365eSSteven Rostedt * moving the head or tail on us as well. 214377ae365eSSteven Rostedt * 214477ae365eSSteven Rostedt * If the next page is the head page then we have filled 214577ae365eSSteven Rostedt * the buffer, unless the commit page is still on the 214677ae365eSSteven Rostedt * reader page. 2147bf41a158SSteven Rostedt */ 214877ae365eSSteven Rostedt if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) { 2149bf41a158SSteven Rostedt 215077ae365eSSteven Rostedt /* 215177ae365eSSteven Rostedt * If the commit is not on the reader page, then 215277ae365eSSteven Rostedt * move the header page. 215377ae365eSSteven Rostedt */ 215477ae365eSSteven Rostedt if (!rb_is_reader_page(cpu_buffer->commit_page)) { 215577ae365eSSteven Rostedt /* 215677ae365eSSteven Rostedt * If we are not in overwrite mode, 215777ae365eSSteven Rostedt * this is easy, just stop here. 215877ae365eSSteven Rostedt */ 2159884bfe89SSlava Pestov if (!(buffer->flags & RB_FL_OVERWRITE)) { 2160884bfe89SSlava Pestov local_inc(&cpu_buffer->dropped_events); 216177ae365eSSteven Rostedt goto out_reset; 2162884bfe89SSlava Pestov } 216377ae365eSSteven Rostedt 216477ae365eSSteven Rostedt ret = rb_handle_head_page(cpu_buffer, 216577ae365eSSteven Rostedt tail_page, 216677ae365eSSteven Rostedt next_page); 216777ae365eSSteven Rostedt if (ret < 0) 216877ae365eSSteven Rostedt goto out_reset; 216977ae365eSSteven Rostedt if (ret) 217077ae365eSSteven Rostedt goto out_again; 217177ae365eSSteven Rostedt } else { 217277ae365eSSteven Rostedt /* 217377ae365eSSteven Rostedt * We need to be careful here too. The 217477ae365eSSteven Rostedt * commit page could still be on the reader 217577ae365eSSteven Rostedt * page. 
We could have a small buffer, and 217677ae365eSSteven Rostedt * have filled up the buffer with events 217777ae365eSSteven Rostedt * from interrupts and such, and wrapped. 217877ae365eSSteven Rostedt * 217977ae365eSSteven Rostedt * Note, if the tail page is also on the 218077ae365eSSteven Rostedt * reader_page, we let it move out. 218177ae365eSSteven Rostedt */ 218277ae365eSSteven Rostedt if (unlikely((cpu_buffer->commit_page != 218377ae365eSSteven Rostedt cpu_buffer->tail_page) && 218477ae365eSSteven Rostedt (cpu_buffer->commit_page == 218577ae365eSSteven Rostedt cpu_buffer->reader_page))) { 218677ae365eSSteven Rostedt local_inc(&cpu_buffer->commit_overrun); 218777ae365eSSteven Rostedt goto out_reset; 218877ae365eSSteven Rostedt } 218977ae365eSSteven Rostedt } 2190bf41a158SSteven Rostedt } 2191bf41a158SSteven Rostedt 219277ae365eSSteven Rostedt ret = rb_tail_page_update(cpu_buffer, tail_page, next_page); 219377ae365eSSteven Rostedt if (ret) { 219477ae365eSSteven Rostedt /* 219577ae365eSSteven Rostedt * Nested commits always have zero deltas, so 219677ae365eSSteven Rostedt * just reread the time stamp 219777ae365eSSteven Rostedt */ 2198e8bc43e8SSteven Rostedt ts = rb_time_stamp(buffer); 2199e8bc43e8SSteven Rostedt next_page->page->time_stamp = ts; 220077ae365eSSteven Rostedt } 22017a8e76a3SSteven Rostedt 220277ae365eSSteven Rostedt out_again: 220377ae365eSSteven Rostedt 220477ae365eSSteven Rostedt rb_reset_tail(cpu_buffer, tail_page, tail, length); 2205bf41a158SSteven Rostedt 2206bf41a158SSteven Rostedt /* fail and let the caller try again */ 2207bf41a158SSteven Rostedt return ERR_PTR(-EAGAIN); 2208bf41a158SSteven Rostedt 220945141d46SSteven Rostedt out_reset: 22106f3b3440SLai Jiangshan /* reset write */ 2211c7b09308SSteven Rostedt rb_reset_tail(cpu_buffer, tail_page, tail, length); 22126f3b3440SLai Jiangshan 2213bf41a158SSteven Rostedt return NULL; 22147a8e76a3SSteven Rostedt } 22157a8e76a3SSteven Rostedt 22166634ff26SSteven Rostedt static struct ring_buffer_event * 22176634ff26SSteven Rostedt __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 221869d1b839SSteven Rostedt unsigned long length, u64 ts, 221969d1b839SSteven Rostedt u64 delta, int add_timestamp) 22206634ff26SSteven Rostedt { 22215a50e33cSSteven Rostedt struct buffer_page *tail_page; 22226634ff26SSteven Rostedt struct ring_buffer_event *event; 22236634ff26SSteven Rostedt unsigned long tail, write; 22246634ff26SSteven Rostedt 222569d1b839SSteven Rostedt /* 222669d1b839SSteven Rostedt * If the time delta since the last event is too big to 222769d1b839SSteven Rostedt * hold in the time field of the event, then we append a 222869d1b839SSteven Rostedt * TIME EXTEND event ahead of the data event.
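 * The extend is accounted for here by growing the reserved length
 * by RB_LEN_TIME_EXTEND, so the single tail reservation below
 * covers both the TIME EXTEND event and the data event.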
222969d1b839SSteven Rostedt */ 223069d1b839SSteven Rostedt if (unlikely(add_timestamp)) 223169d1b839SSteven Rostedt length += RB_LEN_TIME_EXTEND; 223269d1b839SSteven Rostedt 22336634ff26SSteven Rostedt tail_page = cpu_buffer->tail_page; 22346634ff26SSteven Rostedt write = local_add_return(length, &tail_page->write); 223577ae365eSSteven Rostedt 223677ae365eSSteven Rostedt /* set write to only the index of the write */ 223777ae365eSSteven Rostedt write &= RB_WRITE_MASK; 22386634ff26SSteven Rostedt tail = write - length; 22396634ff26SSteven Rostedt 22406634ff26SSteven Rostedt /* See if we shot past the end of this buffer page */ 2241747e94aeSSteven Rostedt if (unlikely(write > BUF_PAGE_SIZE)) 22426634ff26SSteven Rostedt return rb_move_tail(cpu_buffer, length, tail, 22435a50e33cSSteven Rostedt tail_page, ts); 22446634ff26SSteven Rostedt 22456634ff26SSteven Rostedt /* We reserved something on the buffer */ 22466634ff26SSteven Rostedt 22476634ff26SSteven Rostedt event = __rb_page_index(tail_page, tail); 22481744a21dSVegard Nossum kmemcheck_annotate_bitfield(event, bitfield); 224969d1b839SSteven Rostedt rb_update_event(cpu_buffer, event, length, add_timestamp, delta); 22506634ff26SSteven Rostedt 22516634ff26SSteven Rostedt local_inc(&tail_page->entries); 22526634ff26SSteven Rostedt 22536634ff26SSteven Rostedt /* 2254fa743953SSteven Rostedt * If this is the first commit on the page, then update 2255fa743953SSteven Rostedt * its timestamp. 22566634ff26SSteven Rostedt */ 2257fa743953SSteven Rostedt if (!tail) 2258e8bc43e8SSteven Rostedt tail_page->page->time_stamp = ts; 22596634ff26SSteven Rostedt 2260c64e148aSVaibhav Nagarnaik /* account for these added bytes */ 2261c64e148aSVaibhav Nagarnaik local_add(length, &cpu_buffer->entries_bytes); 2262c64e148aSVaibhav Nagarnaik 22636634ff26SSteven Rostedt return event; 22646634ff26SSteven Rostedt } 22656634ff26SSteven Rostedt 2266edd813bfSSteven Rostedt static inline int 2267edd813bfSSteven Rostedt rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, 2268edd813bfSSteven Rostedt struct ring_buffer_event *event) 2269edd813bfSSteven Rostedt { 2270edd813bfSSteven Rostedt unsigned long new_index, old_index; 2271edd813bfSSteven Rostedt struct buffer_page *bpage; 2272edd813bfSSteven Rostedt unsigned long index; 2273edd813bfSSteven Rostedt unsigned long addr; 2274edd813bfSSteven Rostedt 2275edd813bfSSteven Rostedt new_index = rb_event_index(event); 227669d1b839SSteven Rostedt old_index = new_index + rb_event_ts_length(event); 2277edd813bfSSteven Rostedt addr = (unsigned long)event; 2278edd813bfSSteven Rostedt addr &= PAGE_MASK; 2279edd813bfSSteven Rostedt 2280edd813bfSSteven Rostedt bpage = cpu_buffer->tail_page; 2281edd813bfSSteven Rostedt 2282edd813bfSSteven Rostedt if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { 228377ae365eSSteven Rostedt unsigned long write_mask = 228477ae365eSSteven Rostedt local_read(&bpage->write) & ~RB_WRITE_MASK; 2285c64e148aSVaibhav Nagarnaik unsigned long event_length = rb_event_length(event); 2286edd813bfSSteven Rostedt /* 2287edd813bfSSteven Rostedt * This is on the tail page. It is possible that 2288edd813bfSSteven Rostedt * a write could come in and move the tail page 2289edd813bfSSteven Rostedt * and write to the next page. That is fine 2290edd813bfSSteven Rostedt * because we just shorten what is on this page.
2291edd813bfSSteven Rostedt */ 229277ae365eSSteven Rostedt old_index += write_mask; 229377ae365eSSteven Rostedt new_index += write_mask; 2294edd813bfSSteven Rostedt index = local_cmpxchg(&bpage->write, old_index, new_index); 2295c64e148aSVaibhav Nagarnaik if (index == old_index) { 2296c64e148aSVaibhav Nagarnaik /* update counters */ 2297c64e148aSVaibhav Nagarnaik local_sub(event_length, &cpu_buffer->entries_bytes); 2298edd813bfSSteven Rostedt return 1; 2299edd813bfSSteven Rostedt } 2300c64e148aSVaibhav Nagarnaik } 2301edd813bfSSteven Rostedt 2302edd813bfSSteven Rostedt /* could not discard */ 2303edd813bfSSteven Rostedt return 0; 2304edd813bfSSteven Rostedt } 2305edd813bfSSteven Rostedt 2306fa743953SSteven Rostedt static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) 2307fa743953SSteven Rostedt { 2308fa743953SSteven Rostedt local_inc(&cpu_buffer->committing); 2309fa743953SSteven Rostedt local_inc(&cpu_buffer->commits); 2310fa743953SSteven Rostedt } 2311fa743953SSteven Rostedt 2312d9abde21SSteven Rostedt static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) 2313fa743953SSteven Rostedt { 2314fa743953SSteven Rostedt unsigned long commits; 2315fa743953SSteven Rostedt 2316fa743953SSteven Rostedt if (RB_WARN_ON(cpu_buffer, 2317fa743953SSteven Rostedt !local_read(&cpu_buffer->committing))) 2318fa743953SSteven Rostedt return; 2319fa743953SSteven Rostedt 2320fa743953SSteven Rostedt again: 2321fa743953SSteven Rostedt commits = local_read(&cpu_buffer->commits); 2322fa743953SSteven Rostedt /* synchronize with interrupts */ 2323fa743953SSteven Rostedt barrier(); 2324fa743953SSteven Rostedt if (local_read(&cpu_buffer->committing) == 1) 2325fa743953SSteven Rostedt rb_set_commit_to_write(cpu_buffer); 2326fa743953SSteven Rostedt 2327fa743953SSteven Rostedt local_dec(&cpu_buffer->committing); 2328fa743953SSteven Rostedt 2329fa743953SSteven Rostedt /* synchronize with interrupts */ 2330fa743953SSteven Rostedt barrier(); 2331fa743953SSteven Rostedt 2332fa743953SSteven Rostedt /* 2333fa743953SSteven Rostedt * Need to account for interrupts coming in between the 2334fa743953SSteven Rostedt * updating of the commit page and the clearing of the 2335fa743953SSteven Rostedt * committing counter. 2336fa743953SSteven Rostedt */ 2337fa743953SSteven Rostedt if (unlikely(local_read(&cpu_buffer->commits) != commits) && 2338fa743953SSteven Rostedt !local_read(&cpu_buffer->committing)) { 2339fa743953SSteven Rostedt local_inc(&cpu_buffer->committing); 2340fa743953SSteven Rostedt goto again; 2341fa743953SSteven Rostedt } 2342fa743953SSteven Rostedt } 2343fa743953SSteven Rostedt 23447a8e76a3SSteven Rostedt static struct ring_buffer_event * 234562f0b3ebSSteven Rostedt rb_reserve_next_event(struct ring_buffer *buffer, 234662f0b3ebSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer, 23471cd8d735SSteven Rostedt unsigned long length) 23487a8e76a3SSteven Rostedt { 23497a8e76a3SSteven Rostedt struct ring_buffer_event *event; 235069d1b839SSteven Rostedt u64 ts, delta; 2351818e3dd3SSteven Rostedt int nr_loops = 0; 235269d1b839SSteven Rostedt int add_timestamp; 2353140ff891SSteven Rostedt u64 diff; 23547a8e76a3SSteven Rostedt 2355fa743953SSteven Rostedt rb_start_commit(cpu_buffer); 2356fa743953SSteven Rostedt 235785bac32cSSteven Rostedt #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 235862f0b3ebSSteven Rostedt /* 235962f0b3ebSSteven Rostedt * Due to the ability to swap a cpu buffer from a buffer 236062f0b3ebSSteven Rostedt * it is possible it was swapped before we committed. 
236162f0b3ebSSteven Rostedt * (committing stops a swap). We check for it here and 236262f0b3ebSSteven Rostedt * if it happened, we have to fail the write. 236362f0b3ebSSteven Rostedt */ 236462f0b3ebSSteven Rostedt barrier(); 236562f0b3ebSSteven Rostedt if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) { 236662f0b3ebSSteven Rostedt local_dec(&cpu_buffer->committing); 236762f0b3ebSSteven Rostedt local_dec(&cpu_buffer->commits); 236862f0b3ebSSteven Rostedt return NULL; 236962f0b3ebSSteven Rostedt } 237085bac32cSSteven Rostedt #endif 237162f0b3ebSSteven Rostedt 2372be957c44SSteven Rostedt length = rb_calculate_event_length(length); 2373bf41a158SSteven Rostedt again: 237469d1b839SSteven Rostedt add_timestamp = 0; 237569d1b839SSteven Rostedt delta = 0; 237669d1b839SSteven Rostedt 2377818e3dd3SSteven Rostedt /* 2378818e3dd3SSteven Rostedt * We allow for interrupts to reenter here and do a trace. 2379818e3dd3SSteven Rostedt * If one does, it will cause this original code to loop 2380818e3dd3SSteven Rostedt * back here. Even with heavy interrupts happening, this 2381818e3dd3SSteven Rostedt * should only happen a few times in a row. If this happens 2382818e3dd3SSteven Rostedt * 1000 times in a row, there must be either an interrupt 2383818e3dd3SSteven Rostedt * storm or we have something buggy. 2384818e3dd3SSteven Rostedt * Bail! 2385818e3dd3SSteven Rostedt */ 23863e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 2387fa743953SSteven Rostedt goto out_fail; 2388818e3dd3SSteven Rostedt 23896d3f1e12SJiri Olsa ts = rb_time_stamp(cpu_buffer->buffer); 2390168b6b1dSSteven Rostedt diff = ts - cpu_buffer->write_stamp; 23917a8e76a3SSteven Rostedt 2392168b6b1dSSteven Rostedt /* make sure this diff is calculated here */ 2393bf41a158SSteven Rostedt barrier(); 23947a8e76a3SSteven Rostedt 2395bf41a158SSteven Rostedt /* Did the write stamp get updated already? */ 2396140ff891SSteven Rostedt if (likely(ts >= cpu_buffer->write_stamp)) { 2397168b6b1dSSteven Rostedt delta = diff; 2398168b6b1dSSteven Rostedt if (unlikely(test_time_stamp(delta))) { 239931274d72SJiri Olsa int local_clock_stable = 1; 240031274d72SJiri Olsa #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 240131274d72SJiri Olsa local_clock_stable = sched_clock_stable; 240231274d72SJiri Olsa #endif 240369d1b839SSteven Rostedt WARN_ONCE(delta > (1ULL << 59), 240431274d72SJiri Olsa KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s", 240569d1b839SSteven Rostedt (unsigned long long)delta, 240669d1b839SSteven Rostedt (unsigned long long)ts, 240731274d72SJiri Olsa (unsigned long long)cpu_buffer->write_stamp, 240831274d72SJiri Olsa local_clock_stable ? 
"" : 240931274d72SJiri Olsa "If you just came from a suspend/resume,\n" 241031274d72SJiri Olsa "please switch to the trace global clock:\n" 241131274d72SJiri Olsa " echo global > /sys/kernel/debug/tracing/trace_clock\n"); 241269d1b839SSteven Rostedt add_timestamp = 1; 24137a8e76a3SSteven Rostedt } 2414168b6b1dSSteven Rostedt } 24157a8e76a3SSteven Rostedt 241669d1b839SSteven Rostedt event = __rb_reserve_next(cpu_buffer, length, ts, 241769d1b839SSteven Rostedt delta, add_timestamp); 2418168b6b1dSSteven Rostedt if (unlikely(PTR_ERR(event) == -EAGAIN)) 2419bf41a158SSteven Rostedt goto again; 24207a8e76a3SSteven Rostedt 2421fa743953SSteven Rostedt if (!event) 2422fa743953SSteven Rostedt goto out_fail; 2423bf41a158SSteven Rostedt 24247a8e76a3SSteven Rostedt return event; 2425fa743953SSteven Rostedt 2426fa743953SSteven Rostedt out_fail: 2427fa743953SSteven Rostedt rb_end_commit(cpu_buffer); 2428fa743953SSteven Rostedt return NULL; 24297a8e76a3SSteven Rostedt } 24307a8e76a3SSteven Rostedt 24311155de47SPaul Mundt #ifdef CONFIG_TRACING 24321155de47SPaul Mundt 2433aa18efb2SSteven Rostedt #define TRACE_RECURSIVE_DEPTH 16 2434261842b7SSteven Rostedt 2435d9abde21SSteven Rostedt /* Keep this code out of the fast path cache */ 2436d9abde21SSteven Rostedt static noinline void trace_recursive_fail(void) 2437261842b7SSteven Rostedt { 2438261842b7SSteven Rostedt /* Disable all tracing before we do anything else */ 2439261842b7SSteven Rostedt tracing_off_permanent(); 2440e057a5e5SFrederic Weisbecker 24417d7d2b80SSteven Rostedt printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:" 2442e057a5e5SFrederic Weisbecker "HC[%lu]:SC[%lu]:NMI[%lu]\n", 2443b1cff0adSSteven Rostedt trace_recursion_buffer(), 2444e057a5e5SFrederic Weisbecker hardirq_count() >> HARDIRQ_SHIFT, 2445e057a5e5SFrederic Weisbecker softirq_count() >> SOFTIRQ_SHIFT, 2446e057a5e5SFrederic Weisbecker in_nmi()); 2447e057a5e5SFrederic Weisbecker 2448261842b7SSteven Rostedt WARN_ON_ONCE(1); 2449d9abde21SSteven Rostedt } 2450d9abde21SSteven Rostedt 2451d9abde21SSteven Rostedt static inline int trace_recursive_lock(void) 2452d9abde21SSteven Rostedt { 2453b1cff0adSSteven Rostedt trace_recursion_inc(); 2454d9abde21SSteven Rostedt 2455b1cff0adSSteven Rostedt if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH)) 2456d9abde21SSteven Rostedt return 0; 2457d9abde21SSteven Rostedt 2458d9abde21SSteven Rostedt trace_recursive_fail(); 2459d9abde21SSteven Rostedt 2460261842b7SSteven Rostedt return -1; 2461261842b7SSteven Rostedt } 2462261842b7SSteven Rostedt 2463d9abde21SSteven Rostedt static inline void trace_recursive_unlock(void) 2464261842b7SSteven Rostedt { 2465b1cff0adSSteven Rostedt WARN_ON_ONCE(!trace_recursion_buffer()); 2466261842b7SSteven Rostedt 2467b1cff0adSSteven Rostedt trace_recursion_dec(); 2468261842b7SSteven Rostedt } 2469261842b7SSteven Rostedt 24701155de47SPaul Mundt #else 24711155de47SPaul Mundt 24721155de47SPaul Mundt #define trace_recursive_lock() (0) 24731155de47SPaul Mundt #define trace_recursive_unlock() do { } while (0) 24741155de47SPaul Mundt 24751155de47SPaul Mundt #endif 24761155de47SPaul Mundt 24777a8e76a3SSteven Rostedt /** 24787a8e76a3SSteven Rostedt * ring_buffer_lock_reserve - reserve a part of the buffer 24797a8e76a3SSteven Rostedt * @buffer: the ring buffer to reserve from 24807a8e76a3SSteven Rostedt * @length: the length of the data to reserve (excluding event header) 24817a8e76a3SSteven Rostedt * 24827a8e76a3SSteven Rostedt * Returns a reseverd event on the ring buffer to copy directly to. 
24837a8e76a3SSteven Rostedt * The user of this interface will need to get the body to write into 24847a8e76a3SSteven Rostedt * and can use the ring_buffer_event_data() interface. 24857a8e76a3SSteven Rostedt * 24867a8e76a3SSteven Rostedt * The length is the length of the data needed, not the event length 24877a8e76a3SSteven Rostedt * which also includes the event header. 24887a8e76a3SSteven Rostedt * 24897a8e76a3SSteven Rostedt * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. 24907a8e76a3SSteven Rostedt * If NULL is returned, then nothing has been allocated or locked. 24917a8e76a3SSteven Rostedt */ 24927a8e76a3SSteven Rostedt struct ring_buffer_event * 24930a987751SArnaldo Carvalho de Melo ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) 24947a8e76a3SSteven Rostedt { 24957a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 24967a8e76a3SSteven Rostedt struct ring_buffer_event *event; 24975168ae50SSteven Rostedt int cpu; 24987a8e76a3SSteven Rostedt 2499033601a3SSteven Rostedt if (ring_buffer_flags != RB_BUFFERS_ON) 2500a3583244SSteven Rostedt return NULL; 2501a3583244SSteven Rostedt 2502bf41a158SSteven Rostedt /* If we are tracing schedule, we don't want to recurse */ 25035168ae50SSteven Rostedt preempt_disable_notrace(); 2504bf41a158SSteven Rostedt 250552fbe9cdSLai Jiangshan if (atomic_read(&buffer->record_disabled)) 250652fbe9cdSLai Jiangshan goto out_nocheck; 250752fbe9cdSLai Jiangshan 2508261842b7SSteven Rostedt if (trace_recursive_lock()) 2509261842b7SSteven Rostedt goto out_nocheck; 2510261842b7SSteven Rostedt 25117a8e76a3SSteven Rostedt cpu = raw_smp_processor_id(); 25127a8e76a3SSteven Rostedt 25139e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2514d769041fSSteven Rostedt goto out; 25157a8e76a3SSteven Rostedt 25167a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 25177a8e76a3SSteven Rostedt 25187a8e76a3SSteven Rostedt if (atomic_read(&cpu_buffer->record_disabled)) 2519d769041fSSteven Rostedt goto out; 25207a8e76a3SSteven Rostedt 2521be957c44SSteven Rostedt if (length > BUF_MAX_DATA_SIZE) 2522bf41a158SSteven Rostedt goto out; 25237a8e76a3SSteven Rostedt 252462f0b3ebSSteven Rostedt event = rb_reserve_next_event(buffer, cpu_buffer, length); 25257a8e76a3SSteven Rostedt if (!event) 2526d769041fSSteven Rostedt goto out; 25277a8e76a3SSteven Rostedt 25287a8e76a3SSteven Rostedt return event; 25297a8e76a3SSteven Rostedt 2530d769041fSSteven Rostedt out: 2531261842b7SSteven Rostedt trace_recursive_unlock(); 2532261842b7SSteven Rostedt 2533261842b7SSteven Rostedt out_nocheck: 25345168ae50SSteven Rostedt preempt_enable_notrace(); 25357a8e76a3SSteven Rostedt return NULL; 25367a8e76a3SSteven Rostedt } 2537c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); 25387a8e76a3SSteven Rostedt 2539a1863c21SSteven Rostedt static void 2540a1863c21SSteven Rostedt rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer, 25417a8e76a3SSteven Rostedt struct ring_buffer_event *event) 25427a8e76a3SSteven Rostedt { 254369d1b839SSteven Rostedt u64 delta; 254469d1b839SSteven Rostedt 2545fa743953SSteven Rostedt /* 2546fa743953SSteven Rostedt * The event first in the commit queue updates the 2547fa743953SSteven Rostedt * time stamp. 
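 * Three cases mirror the code below: the first event on a page
 * inherits the page timestamp, a TIME_EXTEND event contributes
 * (array[0] << TS_SHIFT) plus its time_delta, and any other event
 * simply adds its time_delta.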
2548fa743953SSteven Rostedt */ 254969d1b839SSteven Rostedt if (rb_event_is_commit(cpu_buffer, event)) { 255069d1b839SSteven Rostedt /* 255169d1b839SSteven Rostedt * A commit event that is first on a page 255269d1b839SSteven Rostedt * updates the write timestamp with the page stamp 255369d1b839SSteven Rostedt */ 255469d1b839SSteven Rostedt if (!rb_event_index(event)) 255569d1b839SSteven Rostedt cpu_buffer->write_stamp = 255669d1b839SSteven Rostedt cpu_buffer->commit_page->page->time_stamp; 255769d1b839SSteven Rostedt else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) { 255869d1b839SSteven Rostedt delta = event->array[0]; 255969d1b839SSteven Rostedt delta <<= TS_SHIFT; 256069d1b839SSteven Rostedt delta += event->time_delta; 256169d1b839SSteven Rostedt cpu_buffer->write_stamp += delta; 256269d1b839SSteven Rostedt } else 2563bf41a158SSteven Rostedt cpu_buffer->write_stamp += event->time_delta; 2564a1863c21SSteven Rostedt } 256569d1b839SSteven Rostedt } 2566bf41a158SSteven Rostedt 2567a1863c21SSteven Rostedt static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, 2568a1863c21SSteven Rostedt struct ring_buffer_event *event) 2569a1863c21SSteven Rostedt { 2570a1863c21SSteven Rostedt local_inc(&cpu_buffer->entries); 2571a1863c21SSteven Rostedt rb_update_write_stamp(cpu_buffer, event); 2572fa743953SSteven Rostedt rb_end_commit(cpu_buffer); 25737a8e76a3SSteven Rostedt } 25747a8e76a3SSteven Rostedt 25757a8e76a3SSteven Rostedt /** 25767a8e76a3SSteven Rostedt * ring_buffer_unlock_commit - commit a reserved event 25777a8e76a3SSteven Rostedt * @buffer: The buffer to commit to 25787a8e76a3SSteven Rostedt * @event: The event pointer to commit. 25797a8e76a3SSteven Rostedt * 25807a8e76a3SSteven Rostedt * This commits the data to the ring buffer, and releases any locks held. 25817a8e76a3SSteven Rostedt * 25827a8e76a3SSteven Rostedt * Must be paired with ring_buffer_lock_reserve.
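 *
 * An illustrative sketch of the full reserve/commit cycle (the
 * payload struct here is hypothetical, not part of this file):
 *
 *	struct my_payload *p;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*p));
 *	if (event) {
 *		p = ring_buffer_event_data(event);
 *		p->value = 42;
 *		ring_buffer_unlock_commit(buffer, event);
 *	}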
25837a8e76a3SSteven Rostedt */ 25847a8e76a3SSteven Rostedt int ring_buffer_unlock_commit(struct ring_buffer *buffer, 25850a987751SArnaldo Carvalho de Melo struct ring_buffer_event *event) 25867a8e76a3SSteven Rostedt { 25877a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 25887a8e76a3SSteven Rostedt int cpu = raw_smp_processor_id(); 25897a8e76a3SSteven Rostedt 25907a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 25917a8e76a3SSteven Rostedt 25927a8e76a3SSteven Rostedt rb_commit(cpu_buffer, event); 25937a8e76a3SSteven Rostedt 2594261842b7SSteven Rostedt trace_recursive_unlock(); 2595261842b7SSteven Rostedt 25965168ae50SSteven Rostedt preempt_enable_notrace(); 25977a8e76a3SSteven Rostedt 25987a8e76a3SSteven Rostedt return 0; 25997a8e76a3SSteven Rostedt } 2600c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); 26017a8e76a3SSteven Rostedt 2602f3b9aae1SFrederic Weisbecker static inline void rb_event_discard(struct ring_buffer_event *event) 2603f3b9aae1SFrederic Weisbecker { 260469d1b839SSteven Rostedt if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) 260569d1b839SSteven Rostedt event = skip_time_extend(event); 260669d1b839SSteven Rostedt 2607334d4169SLai Jiangshan /* array[0] holds the actual length for the discarded event */ 2608334d4169SLai Jiangshan event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; 2609334d4169SLai Jiangshan event->type_len = RINGBUF_TYPE_PADDING; 2610f3b9aae1SFrederic Weisbecker /* time delta must be non zero */ 2611f3b9aae1SFrederic Weisbecker if (!event->time_delta) 2612f3b9aae1SFrederic Weisbecker event->time_delta = 1; 2613f3b9aae1SFrederic Weisbecker } 2614f3b9aae1SFrederic Weisbecker 2615a1863c21SSteven Rostedt /* 2616a1863c21SSteven Rostedt * Decrement the entries to the page that an event is on. 2617a1863c21SSteven Rostedt * The event does not even need to exist, only the pointer 2618a1863c21SSteven Rostedt * to the page it is on. This may only be called before the commit 2619a1863c21SSteven Rostedt * takes place. 2620a1863c21SSteven Rostedt */ 2621a1863c21SSteven Rostedt static inline void 2622a1863c21SSteven Rostedt rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, 2623a1863c21SSteven Rostedt struct ring_buffer_event *event) 2624a1863c21SSteven Rostedt { 2625a1863c21SSteven Rostedt unsigned long addr = (unsigned long)event; 2626a1863c21SSteven Rostedt struct buffer_page *bpage = cpu_buffer->commit_page; 2627a1863c21SSteven Rostedt struct buffer_page *start; 2628a1863c21SSteven Rostedt 2629a1863c21SSteven Rostedt addr &= PAGE_MASK; 2630a1863c21SSteven Rostedt 2631a1863c21SSteven Rostedt /* Do the likely case first */ 2632a1863c21SSteven Rostedt if (likely(bpage->page == (void *)addr)) { 2633a1863c21SSteven Rostedt local_dec(&bpage->entries); 2634a1863c21SSteven Rostedt return; 2635a1863c21SSteven Rostedt } 2636a1863c21SSteven Rostedt 2637a1863c21SSteven Rostedt /* 2638a1863c21SSteven Rostedt * Because the commit page may be on the reader page we 2639a1863c21SSteven Rostedt * start with the next page and check the end loop there. 
2640a1863c21SSteven Rostedt */ 2641a1863c21SSteven Rostedt rb_inc_page(cpu_buffer, &bpage); 2642a1863c21SSteven Rostedt start = bpage; 2643a1863c21SSteven Rostedt do { 2644a1863c21SSteven Rostedt if (bpage->page == (void *)addr) { 2645a1863c21SSteven Rostedt local_dec(&bpage->entries); 2646a1863c21SSteven Rostedt return; 2647a1863c21SSteven Rostedt } 2648a1863c21SSteven Rostedt rb_inc_page(cpu_buffer, &bpage); 2649a1863c21SSteven Rostedt } while (bpage != start); 2650a1863c21SSteven Rostedt 2651a1863c21SSteven Rostedt /* commit not part of this buffer?? */ 2652a1863c21SSteven Rostedt RB_WARN_ON(cpu_buffer, 1); 2653a1863c21SSteven Rostedt } 2654a1863c21SSteven Rostedt 26557a8e76a3SSteven Rostedt /** 2656fa1b47ddSSteven Rostedt * ring_buffer_discard_commit - discard an event that has not been committed 2657fa1b47ddSSteven Rostedt * @buffer: the ring buffer 2658fa1b47ddSSteven Rostedt * @event: non-committed event to discard 2659fa1b47ddSSteven Rostedt * 2660dc892f73SSteven Rostedt * Sometimes an event that is in the ring buffer needs to be ignored. 2661dc892f73SSteven Rostedt * This function lets the user discard an event in the ring buffer 2662dc892f73SSteven Rostedt * and then that event will not be read later. 2663dc892f73SSteven Rostedt * 2664dc892f73SSteven Rostedt * This function only works if it is called before the item has been 2665dc892f73SSteven Rostedt * committed. It will try to free the event from the ring buffer 2666fa1b47ddSSteven Rostedt * if another event has not been added behind it. 2667fa1b47ddSSteven Rostedt * 2668fa1b47ddSSteven Rostedt * If another event has been added behind it, it will set the event 2669fa1b47ddSSteven Rostedt * up as discarded, and perform the commit. 2670fa1b47ddSSteven Rostedt * 2671fa1b47ddSSteven Rostedt * If this function is called, do not call ring_buffer_unlock_commit on 2672fa1b47ddSSteven Rostedt * the event. 2673fa1b47ddSSteven Rostedt */ 2674fa1b47ddSSteven Rostedt void ring_buffer_discard_commit(struct ring_buffer *buffer, 2675fa1b47ddSSteven Rostedt struct ring_buffer_event *event) 2676fa1b47ddSSteven Rostedt { 2677fa1b47ddSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 2678fa1b47ddSSteven Rostedt int cpu; 2679fa1b47ddSSteven Rostedt 2680fa1b47ddSSteven Rostedt /* The event is discarded regardless */ 2681f3b9aae1SFrederic Weisbecker rb_event_discard(event); 2682fa1b47ddSSteven Rostedt 2683fa743953SSteven Rostedt cpu = smp_processor_id(); 2684fa743953SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 2685fa743953SSteven Rostedt 2686fa1b47ddSSteven Rostedt /* 2687fa1b47ddSSteven Rostedt * This must only be called if the event has not been 2688fa1b47ddSSteven Rostedt * committed yet. Thus we can assume that preemption 2689fa1b47ddSSteven Rostedt * is still disabled. 2690fa1b47ddSSteven Rostedt */ 2691fa743953SSteven Rostedt RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 2692fa1b47ddSSteven Rostedt 2693a1863c21SSteven Rostedt rb_decrement_entry(cpu_buffer, event); 26940f2541d2SSteven Rostedt if (rb_try_to_discard(cpu_buffer, event)) 2695fa1b47ddSSteven Rostedt goto out; 2696fa1b47ddSSteven Rostedt 2697fa1b47ddSSteven Rostedt /* 2698fa1b47ddSSteven Rostedt * The commit is still visible by the reader, so we 2699a1863c21SSteven Rostedt * must still update the timestamp.
2700fa1b47ddSSteven Rostedt */ 2701a1863c21SSteven Rostedt rb_update_write_stamp(cpu_buffer, event); 2702fa1b47ddSSteven Rostedt out: 2703fa743953SSteven Rostedt rb_end_commit(cpu_buffer); 2704fa1b47ddSSteven Rostedt 2705f3b9aae1SFrederic Weisbecker trace_recursive_unlock(); 2706f3b9aae1SFrederic Weisbecker 27075168ae50SSteven Rostedt preempt_enable_notrace(); 2708fa1b47ddSSteven Rostedt 2709fa1b47ddSSteven Rostedt } 2710fa1b47ddSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); 2711fa1b47ddSSteven Rostedt 2712fa1b47ddSSteven Rostedt /** 27137a8e76a3SSteven Rostedt * ring_buffer_write - write data to the buffer without reserving 27147a8e76a3SSteven Rostedt * @buffer: The ring buffer to write to. 27157a8e76a3SSteven Rostedt * @length: The length of the data being written (excluding the event header) 27167a8e76a3SSteven Rostedt * @data: The data to write to the buffer. 27177a8e76a3SSteven Rostedt * 27187a8e76a3SSteven Rostedt * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as 27197a8e76a3SSteven Rostedt * one function. If you already have the data to write to the buffer, it 27207a8e76a3SSteven Rostedt * may be easier to simply call this function. 27217a8e76a3SSteven Rostedt * 27227a8e76a3SSteven Rostedt * Note, like ring_buffer_lock_reserve, the length is the length of the data 27237a8e76a3SSteven Rostedt * and not the length of the event which would hold the header. 27247a8e76a3SSteven Rostedt */ 27257a8e76a3SSteven Rostedt int ring_buffer_write(struct ring_buffer *buffer, 27267a8e76a3SSteven Rostedt unsigned long length, 27277a8e76a3SSteven Rostedt void *data) 27287a8e76a3SSteven Rostedt { 27297a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 27307a8e76a3SSteven Rostedt struct ring_buffer_event *event; 27317a8e76a3SSteven Rostedt void *body; 27327a8e76a3SSteven Rostedt int ret = -EBUSY; 27335168ae50SSteven Rostedt int cpu; 27347a8e76a3SSteven Rostedt 2735033601a3SSteven Rostedt if (ring_buffer_flags != RB_BUFFERS_ON) 2736a3583244SSteven Rostedt return -EBUSY; 2737a3583244SSteven Rostedt 27385168ae50SSteven Rostedt preempt_disable_notrace(); 2739bf41a158SSteven Rostedt 274052fbe9cdSLai Jiangshan if (atomic_read(&buffer->record_disabled)) 274152fbe9cdSLai Jiangshan goto out; 274252fbe9cdSLai Jiangshan 27437a8e76a3SSteven Rostedt cpu = raw_smp_processor_id(); 27447a8e76a3SSteven Rostedt 27459e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2746d769041fSSteven Rostedt goto out; 27477a8e76a3SSteven Rostedt 27487a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 27497a8e76a3SSteven Rostedt 27507a8e76a3SSteven Rostedt if (atomic_read(&cpu_buffer->record_disabled)) 27517a8e76a3SSteven Rostedt goto out; 27527a8e76a3SSteven Rostedt 2753be957c44SSteven Rostedt if (length > BUF_MAX_DATA_SIZE) 2754be957c44SSteven Rostedt goto out; 2755be957c44SSteven Rostedt 275662f0b3ebSSteven Rostedt event = rb_reserve_next_event(buffer, cpu_buffer, length); 27577a8e76a3SSteven Rostedt if (!event) 27587a8e76a3SSteven Rostedt goto out; 27597a8e76a3SSteven Rostedt 27607a8e76a3SSteven Rostedt body = rb_event_data(event); 27617a8e76a3SSteven Rostedt 27627a8e76a3SSteven Rostedt memcpy(body, data, length); 27637a8e76a3SSteven Rostedt 27647a8e76a3SSteven Rostedt rb_commit(cpu_buffer, event); 27657a8e76a3SSteven Rostedt 27667a8e76a3SSteven Rostedt ret = 0; 27677a8e76a3SSteven Rostedt out: 27685168ae50SSteven Rostedt preempt_enable_notrace(); 27697a8e76a3SSteven Rostedt 27707a8e76a3SSteven Rostedt return ret; 27717a8e76a3SSteven Rostedt } 
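/*
 * Illustrative sketch (caller side, not part of this file): writing a
 * blob in one shot instead of a reserve/commit pair. A non-zero return
 * means the write failed (recording disabled or no space could be
 * reserved):
 *
 *	u32 val = 42;
 *
 *	if (ring_buffer_write(buffer, sizeof(val), &val))
 *		return;
 */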
2772c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_write); 27737a8e76a3SSteven Rostedt 277434a148bfSAndrew Morton static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 2775bf41a158SSteven Rostedt { 2776bf41a158SSteven Rostedt struct buffer_page *reader = cpu_buffer->reader_page; 277777ae365eSSteven Rostedt struct buffer_page *head = rb_set_head_page(cpu_buffer); 2778bf41a158SSteven Rostedt struct buffer_page *commit = cpu_buffer->commit_page; 2779bf41a158SSteven Rostedt 278077ae365eSSteven Rostedt /* In case of error, head will be NULL */ 278177ae365eSSteven Rostedt if (unlikely(!head)) 278277ae365eSSteven Rostedt return 1; 278377ae365eSSteven Rostedt 2784bf41a158SSteven Rostedt return reader->read == rb_page_commit(reader) && 2785bf41a158SSteven Rostedt (commit == reader || 2786bf41a158SSteven Rostedt (commit == head && 2787bf41a158SSteven Rostedt head->read == rb_page_commit(commit))); 2788bf41a158SSteven Rostedt } 2789bf41a158SSteven Rostedt 27907a8e76a3SSteven Rostedt /** 27917a8e76a3SSteven Rostedt * ring_buffer_record_disable - stop all writes into the buffer 27927a8e76a3SSteven Rostedt * @buffer: The ring buffer to stop writes to. 27937a8e76a3SSteven Rostedt * 27947a8e76a3SSteven Rostedt * This prevents all writes to the buffer. Any attempt to write 27957a8e76a3SSteven Rostedt * to the buffer after this will fail and return NULL. 27967a8e76a3SSteven Rostedt * 27977a8e76a3SSteven Rostedt * The caller should call synchronize_sched() after this. 27987a8e76a3SSteven Rostedt */ 27997a8e76a3SSteven Rostedt void ring_buffer_record_disable(struct ring_buffer *buffer) 28007a8e76a3SSteven Rostedt { 28017a8e76a3SSteven Rostedt atomic_inc(&buffer->record_disabled); 28027a8e76a3SSteven Rostedt } 2803c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable); 28047a8e76a3SSteven Rostedt 28057a8e76a3SSteven Rostedt /** 28067a8e76a3SSteven Rostedt * ring_buffer_record_enable - enable writes to the buffer 28077a8e76a3SSteven Rostedt * @buffer: The ring buffer to enable writes 28087a8e76a3SSteven Rostedt * 28097a8e76a3SSteven Rostedt * Note, multiple disables will need the same number of enables 2810c41b20e7SAdam Buchbinder * to truly enable the writing (much like preempt_disable). 28117a8e76a3SSteven Rostedt */ 28127a8e76a3SSteven Rostedt void ring_buffer_record_enable(struct ring_buffer *buffer) 28137a8e76a3SSteven Rostedt { 28147a8e76a3SSteven Rostedt atomic_dec(&buffer->record_disabled); 28157a8e76a3SSteven Rostedt } 2816c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable); 28177a8e76a3SSteven Rostedt 28187a8e76a3SSteven Rostedt /** 2819499e5470SSteven Rostedt * ring_buffer_record_off - stop all writes into the buffer 2820499e5470SSteven Rostedt * @buffer: The ring buffer to stop writes to. 2821499e5470SSteven Rostedt * 2822499e5470SSteven Rostedt * This prevents all writes to the buffer. Any attempt to write 2823499e5470SSteven Rostedt * to the buffer after this will fail and return NULL. 2824499e5470SSteven Rostedt * 2825499e5470SSteven Rostedt * This is different than ring_buffer_record_disable() as 282687abb3b1SWang Tianhong * it works like an on/off switch, whereas the disable() version 2827499e5470SSteven Rostedt * must be paired with an enable().
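 *
 * A minimal sketch of the intended pairing (illustrative only):
 *
 *	ring_buffer_record_off(buffer);
 *	... writes fail here no matter how many times off() was called ...
 *	ring_buffer_record_on(buffer);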
2828499e5470SSteven Rostedt */ 2829499e5470SSteven Rostedt void ring_buffer_record_off(struct ring_buffer *buffer) 2830499e5470SSteven Rostedt { 2831499e5470SSteven Rostedt unsigned int rd; 2832499e5470SSteven Rostedt unsigned int new_rd; 2833499e5470SSteven Rostedt 2834499e5470SSteven Rostedt do { 2835499e5470SSteven Rostedt rd = atomic_read(&buffer->record_disabled); 2836499e5470SSteven Rostedt new_rd = rd | RB_BUFFER_OFF; 2837499e5470SSteven Rostedt } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); 2838499e5470SSteven Rostedt } 2839499e5470SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_record_off); 2840499e5470SSteven Rostedt 2841499e5470SSteven Rostedt /** 2842499e5470SSteven Rostedt * ring_buffer_record_on - restart writes into the buffer 2843499e5470SSteven Rostedt * @buffer: The ring buffer to start writes to. 2844499e5470SSteven Rostedt * 2845499e5470SSteven Rostedt * This enables all writes to the buffer that were disabled by 2846499e5470SSteven Rostedt * ring_buffer_record_off(). 2847499e5470SSteven Rostedt * 2848499e5470SSteven Rostedt * This is different than ring_buffer_record_enable() as 284987abb3b1SWang Tianhong * it works like an on/off switch, whereas the enable() version 2850499e5470SSteven Rostedt * must be paired with a disable(). 2851499e5470SSteven Rostedt */ 2852499e5470SSteven Rostedt void ring_buffer_record_on(struct ring_buffer *buffer) 2853499e5470SSteven Rostedt { 2854499e5470SSteven Rostedt unsigned int rd; 2855499e5470SSteven Rostedt unsigned int new_rd; 2856499e5470SSteven Rostedt 2857499e5470SSteven Rostedt do { 2858499e5470SSteven Rostedt rd = atomic_read(&buffer->record_disabled); 2859499e5470SSteven Rostedt new_rd = rd & ~RB_BUFFER_OFF; 2860499e5470SSteven Rostedt } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); 2861499e5470SSteven Rostedt } 2862499e5470SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_record_on); 2863499e5470SSteven Rostedt 2864499e5470SSteven Rostedt /** 2865499e5470SSteven Rostedt * ring_buffer_record_is_on - return true if the ring buffer can write 2866499e5470SSteven Rostedt * @buffer: The ring buffer to see if write is enabled 2867499e5470SSteven Rostedt * 2868499e5470SSteven Rostedt * Returns true if the ring buffer is in a state that it accepts writes. 2869499e5470SSteven Rostedt */ 2870499e5470SSteven Rostedt int ring_buffer_record_is_on(struct ring_buffer *buffer) 2871499e5470SSteven Rostedt { 2872499e5470SSteven Rostedt return !atomic_read(&buffer->record_disabled); 2873499e5470SSteven Rostedt } 2874499e5470SSteven Rostedt 2875499e5470SSteven Rostedt /** 28767a8e76a3SSteven Rostedt * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 28777a8e76a3SSteven Rostedt * @buffer: The ring buffer to stop writes to. 28787a8e76a3SSteven Rostedt * @cpu: The CPU buffer to stop 28797a8e76a3SSteven Rostedt * 28807a8e76a3SSteven Rostedt * This prevents all writes to the buffer. Any attempt to write 28817a8e76a3SSteven Rostedt * to the buffer after this will fail and return NULL. 28827a8e76a3SSteven Rostedt * 28837a8e76a3SSteven Rostedt * The caller should call synchronize_sched() after this.
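 *
 * A typical quiescing sequence (illustrative sketch):
 *
 *	ring_buffer_record_disable_cpu(buffer, cpu);
 *	synchronize_sched();
 *	... in-flight writers on @cpu have now finished ...
 *	ring_buffer_record_enable_cpu(buffer, cpu);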
28847a8e76a3SSteven Rostedt */ 28857a8e76a3SSteven Rostedt void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) 28867a8e76a3SSteven Rostedt { 28877a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 28887a8e76a3SSteven Rostedt 28899e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 28908aabee57SSteven Rostedt return; 28917a8e76a3SSteven Rostedt 28927a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 28937a8e76a3SSteven Rostedt atomic_inc(&cpu_buffer->record_disabled); 28947a8e76a3SSteven Rostedt } 2895c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); 28967a8e76a3SSteven Rostedt 28977a8e76a3SSteven Rostedt /** 28987a8e76a3SSteven Rostedt * ring_buffer_record_enable_cpu - enable writes to the buffer 28997a8e76a3SSteven Rostedt * @buffer: The ring buffer to enable writes 29007a8e76a3SSteven Rostedt * @cpu: The CPU to enable. 29017a8e76a3SSteven Rostedt * 29027a8e76a3SSteven Rostedt * Note, multiple disables will need the same number of enables 2903c41b20e7SAdam Buchbinder * to truly enable the writing (much like preempt_disable). 29047a8e76a3SSteven Rostedt */ 29057a8e76a3SSteven Rostedt void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) 29067a8e76a3SSteven Rostedt { 29077a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 29087a8e76a3SSteven Rostedt 29099e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 29108aabee57SSteven Rostedt return; 29117a8e76a3SSteven Rostedt 29127a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 29137a8e76a3SSteven Rostedt atomic_dec(&cpu_buffer->record_disabled); 29147a8e76a3SSteven Rostedt } 2915c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); 29167a8e76a3SSteven Rostedt 2917f6195aa0SSteven Rostedt /* 2918f6195aa0SSteven Rostedt * The total entries in the ring buffer is the running counter 2919f6195aa0SSteven Rostedt * of entries entered into the ring buffer, minus the sum of 2920f6195aa0SSteven Rostedt * the entries read from the ring buffer and the number of 2921f6195aa0SSteven Rostedt * entries that were overwritten. 2922f6195aa0SSteven Rostedt */ 2923f6195aa0SSteven Rostedt static inline unsigned long 2924f6195aa0SSteven Rostedt rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) 2925f6195aa0SSteven Rostedt { 2926f6195aa0SSteven Rostedt return local_read(&cpu_buffer->entries) - 2927f6195aa0SSteven Rostedt (local_read(&cpu_buffer->overrun) + cpu_buffer->read); 2928f6195aa0SSteven Rostedt } 2929f6195aa0SSteven Rostedt 29307a8e76a3SSteven Rostedt /** 2931c64e148aSVaibhav Nagarnaik * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer 2932c64e148aSVaibhav Nagarnaik * @buffer: The ring buffer 2933c64e148aSVaibhav Nagarnaik * @cpu: The per CPU buffer to read from. 
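 *
 * Returns the timestamp of the page holding the oldest unread event,
 * or 0 if @cpu is not part of the buffer's cpumask.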
2934c64e148aSVaibhav Nagarnaik */ 2935*50ecf2c3SYoshihiro YUNOMAE u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu) 2936c64e148aSVaibhav Nagarnaik { 2937c64e148aSVaibhav Nagarnaik unsigned long flags; 2938c64e148aSVaibhav Nagarnaik struct ring_buffer_per_cpu *cpu_buffer; 2939c64e148aSVaibhav Nagarnaik struct buffer_page *bpage; 2940*50ecf2c3SYoshihiro YUNOMAE u64 ret; 2941c64e148aSVaibhav Nagarnaik 2942c64e148aSVaibhav Nagarnaik if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2943c64e148aSVaibhav Nagarnaik return 0; 2944c64e148aSVaibhav Nagarnaik 2945c64e148aSVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu]; 29467115e3fcSLinus Torvalds raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2947c64e148aSVaibhav Nagarnaik /* 2948c64e148aSVaibhav Nagarnaik * if the tail is on reader_page, oldest time stamp is on the reader 2949c64e148aSVaibhav Nagarnaik * page 2950c64e148aSVaibhav Nagarnaik */ 2951c64e148aSVaibhav Nagarnaik if (cpu_buffer->tail_page == cpu_buffer->reader_page) 2952c64e148aSVaibhav Nagarnaik bpage = cpu_buffer->reader_page; 2953c64e148aSVaibhav Nagarnaik else 2954c64e148aSVaibhav Nagarnaik bpage = rb_set_head_page(cpu_buffer); 2955c64e148aSVaibhav Nagarnaik ret = bpage->page->time_stamp; 29567115e3fcSLinus Torvalds raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2957c64e148aSVaibhav Nagarnaik 2958c64e148aSVaibhav Nagarnaik return ret; 2959c64e148aSVaibhav Nagarnaik } 2960c64e148aSVaibhav Nagarnaik EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); 2961c64e148aSVaibhav Nagarnaik 2962c64e148aSVaibhav Nagarnaik /** 2963c64e148aSVaibhav Nagarnaik * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer 2964c64e148aSVaibhav Nagarnaik * @buffer: The ring buffer 2965c64e148aSVaibhav Nagarnaik * @cpu: The per CPU buffer to read from. 2966c64e148aSVaibhav Nagarnaik */ 2967c64e148aSVaibhav Nagarnaik unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu) 2968c64e148aSVaibhav Nagarnaik { 2969c64e148aSVaibhav Nagarnaik struct ring_buffer_per_cpu *cpu_buffer; 2970c64e148aSVaibhav Nagarnaik unsigned long ret; 2971c64e148aSVaibhav Nagarnaik 2972c64e148aSVaibhav Nagarnaik if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2973c64e148aSVaibhav Nagarnaik return 0; 2974c64e148aSVaibhav Nagarnaik 2975c64e148aSVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu]; 2976c64e148aSVaibhav Nagarnaik ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; 2977c64e148aSVaibhav Nagarnaik 2978c64e148aSVaibhav Nagarnaik return ret; 2979c64e148aSVaibhav Nagarnaik } 2980c64e148aSVaibhav Nagarnaik EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); 2981c64e148aSVaibhav Nagarnaik 2982c64e148aSVaibhav Nagarnaik /** 29837a8e76a3SSteven Rostedt * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 29847a8e76a3SSteven Rostedt * @buffer: The ring buffer 29857a8e76a3SSteven Rostedt * @cpu: The per CPU buffer to get the entries from. 
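 *
 * Returns the number of entries still readable on @cpu: the running
 * count of entries written minus those overwritten or already read
 * (see rb_num_of_entries() above).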
29867a8e76a3SSteven Rostedt */ 29877a8e76a3SSteven Rostedt unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) 29887a8e76a3SSteven Rostedt { 29897a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 29907a8e76a3SSteven Rostedt 29919e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 29928aabee57SSteven Rostedt return 0; 29937a8e76a3SSteven Rostedt 29947a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 2995554f786eSSteven Rostedt 2996f6195aa0SSteven Rostedt return rb_num_of_entries(cpu_buffer); 29977a8e76a3SSteven Rostedt } 2998c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); 29997a8e76a3SSteven Rostedt 30007a8e76a3SSteven Rostedt /** 3001884bfe89SSlava Pestov * ring_buffer_overrun_cpu - get the number of overruns caused by the ring 3002884bfe89SSlava Pestov * buffer wrapping around (only if RB_FL_OVERWRITE is on). 30037a8e76a3SSteven Rostedt * @buffer: The ring buffer 30047a8e76a3SSteven Rostedt * @cpu: The per CPU buffer to get the number of overruns from 30057a8e76a3SSteven Rostedt */ 30067a8e76a3SSteven Rostedt unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) 30077a8e76a3SSteven Rostedt { 30087a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 30098aabee57SSteven Rostedt unsigned long ret; 30107a8e76a3SSteven Rostedt 30119e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 30128aabee57SSteven Rostedt return 0; 30137a8e76a3SSteven Rostedt 30147a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 301577ae365eSSteven Rostedt ret = local_read(&cpu_buffer->overrun); 3016554f786eSSteven Rostedt 3017554f786eSSteven Rostedt return ret; 30187a8e76a3SSteven Rostedt } 3019c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); 30207a8e76a3SSteven Rostedt 30217a8e76a3SSteven Rostedt /** 3022884bfe89SSlava Pestov * ring_buffer_commit_overrun_cpu - get the number of overruns caused by 3023884bfe89SSlava Pestov * commits failing due to the buffer wrapping around while there are uncommitted 3024884bfe89SSlava Pestov * events, such as during an interrupt storm. 3025f0d2c681SSteven Rostedt * @buffer: The ring buffer 3026f0d2c681SSteven Rostedt * @cpu: The per CPU buffer to get the number of overruns from 3027f0d2c681SSteven Rostedt */ 3028f0d2c681SSteven Rostedt unsigned long 3029f0d2c681SSteven Rostedt ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu) 3030f0d2c681SSteven Rostedt { 3031f0d2c681SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 3032f0d2c681SSteven Rostedt unsigned long ret; 3033f0d2c681SSteven Rostedt 3034f0d2c681SSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3035f0d2c681SSteven Rostedt return 0; 3036f0d2c681SSteven Rostedt 3037f0d2c681SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 303877ae365eSSteven Rostedt ret = local_read(&cpu_buffer->commit_overrun); 3039f0d2c681SSteven Rostedt 3040f0d2c681SSteven Rostedt return ret; 3041f0d2c681SSteven Rostedt } 3042f0d2c681SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); 3043f0d2c681SSteven Rostedt 3044f0d2c681SSteven Rostedt /** 3045884bfe89SSlava Pestov * ring_buffer_dropped_events_cpu - get the number of dropped events caused by 3046884bfe89SSlava Pestov * the ring buffer filling up (only if RB_FL_OVERWRITE is off). 
3047884bfe89SSlava Pestov * @buffer: The ring buffer 3048884bfe89SSlava Pestov * @cpu: The per CPU buffer to get the number of overruns from 3049884bfe89SSlava Pestov */ 3050884bfe89SSlava Pestov unsigned long 3051884bfe89SSlava Pestov ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu) 3052884bfe89SSlava Pestov { 3053884bfe89SSlava Pestov struct ring_buffer_per_cpu *cpu_buffer; 3054884bfe89SSlava Pestov unsigned long ret; 3055884bfe89SSlava Pestov 3056884bfe89SSlava Pestov if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3057884bfe89SSlava Pestov return 0; 3058884bfe89SSlava Pestov 3059884bfe89SSlava Pestov cpu_buffer = buffer->buffers[cpu]; 3060884bfe89SSlava Pestov ret = local_read(&cpu_buffer->dropped_events); 3061884bfe89SSlava Pestov 3062884bfe89SSlava Pestov return ret; 3063884bfe89SSlava Pestov } 3064884bfe89SSlava Pestov EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); 3065884bfe89SSlava Pestov 3066884bfe89SSlava Pestov /** 30677a8e76a3SSteven Rostedt * ring_buffer_entries - get the number of entries in a buffer 30687a8e76a3SSteven Rostedt * @buffer: The ring buffer 30697a8e76a3SSteven Rostedt * 30707a8e76a3SSteven Rostedt * Returns the total number of entries in the ring buffer 30717a8e76a3SSteven Rostedt * (all CPU entries) 30727a8e76a3SSteven Rostedt */ 30737a8e76a3SSteven Rostedt unsigned long ring_buffer_entries(struct ring_buffer *buffer) 30747a8e76a3SSteven Rostedt { 30757a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 30767a8e76a3SSteven Rostedt unsigned long entries = 0; 30777a8e76a3SSteven Rostedt int cpu; 30787a8e76a3SSteven Rostedt 30797a8e76a3SSteven Rostedt /* if you care about this being correct, lock the buffer */ 30807a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 30817a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 3082f6195aa0SSteven Rostedt entries += rb_num_of_entries(cpu_buffer); 30837a8e76a3SSteven Rostedt } 30847a8e76a3SSteven Rostedt 30857a8e76a3SSteven Rostedt return entries; 30867a8e76a3SSteven Rostedt } 3087c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries); 30887a8e76a3SSteven Rostedt 30897a8e76a3SSteven Rostedt /** 309067b394f7SJiri Olsa * ring_buffer_overruns - get the number of overruns in buffer 30917a8e76a3SSteven Rostedt * @buffer: The ring buffer 30927a8e76a3SSteven Rostedt * 30937a8e76a3SSteven Rostedt * Returns the total number of overruns in the ring buffer 30947a8e76a3SSteven Rostedt * (all CPU entries) 30957a8e76a3SSteven Rostedt */ 30967a8e76a3SSteven Rostedt unsigned long ring_buffer_overruns(struct ring_buffer *buffer) 30977a8e76a3SSteven Rostedt { 30987a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 30997a8e76a3SSteven Rostedt unsigned long overruns = 0; 31007a8e76a3SSteven Rostedt int cpu; 31017a8e76a3SSteven Rostedt 31027a8e76a3SSteven Rostedt /* if you care about this being correct, lock the buffer */ 31037a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 31047a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 310577ae365eSSteven Rostedt overruns += local_read(&cpu_buffer->overrun); 31067a8e76a3SSteven Rostedt } 31077a8e76a3SSteven Rostedt 31087a8e76a3SSteven Rostedt return overruns; 31097a8e76a3SSteven Rostedt } 3110c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overruns); 31117a8e76a3SSteven Rostedt 3112642edba5SSteven Rostedt static void rb_iter_reset(struct ring_buffer_iter *iter) 31137a8e76a3SSteven Rostedt { 31147a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 31157a8e76a3SSteven Rostedt 
3116d769041fSSteven Rostedt /* Iterator usage is expected to have record disabled */ 3117d769041fSSteven Rostedt if (list_empty(&cpu_buffer->reader_page->list)) { 311877ae365eSSteven Rostedt iter->head_page = rb_set_head_page(cpu_buffer); 311977ae365eSSteven Rostedt if (unlikely(!iter->head_page)) 312077ae365eSSteven Rostedt return; 312177ae365eSSteven Rostedt iter->head = iter->head_page->read; 3122d769041fSSteven Rostedt } else { 3123d769041fSSteven Rostedt iter->head_page = cpu_buffer->reader_page; 31246f807acdSSteven Rostedt iter->head = cpu_buffer->reader_page->read; 3125d769041fSSteven Rostedt } 3126d769041fSSteven Rostedt if (iter->head) 3127d769041fSSteven Rostedt iter->read_stamp = cpu_buffer->read_stamp; 3128d769041fSSteven Rostedt else 3129abc9b56dSSteven Rostedt iter->read_stamp = iter->head_page->page->time_stamp; 3130492a74f4SSteven Rostedt iter->cache_reader_page = cpu_buffer->reader_page; 3131492a74f4SSteven Rostedt iter->cache_read = cpu_buffer->read; 3132642edba5SSteven Rostedt } 3133f83c9d0fSSteven Rostedt 3134642edba5SSteven Rostedt /** 3135642edba5SSteven Rostedt * ring_buffer_iter_reset - reset an iterator 3136642edba5SSteven Rostedt * @iter: The iterator to reset 3137642edba5SSteven Rostedt * 3138642edba5SSteven Rostedt * Resets the iterator, so that it will start from the beginning 3139642edba5SSteven Rostedt * again. 3140642edba5SSteven Rostedt */ 3141642edba5SSteven Rostedt void ring_buffer_iter_reset(struct ring_buffer_iter *iter) 3142642edba5SSteven Rostedt { 3143554f786eSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 3144642edba5SSteven Rostedt unsigned long flags; 3145642edba5SSteven Rostedt 3146554f786eSSteven Rostedt if (!iter) 3147554f786eSSteven Rostedt return; 3148554f786eSSteven Rostedt 3149554f786eSSteven Rostedt cpu_buffer = iter->cpu_buffer; 3150554f786eSSteven Rostedt 31515389f6faSThomas Gleixner raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3152642edba5SSteven Rostedt rb_iter_reset(iter); 31535389f6faSThomas Gleixner raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 31547a8e76a3SSteven Rostedt } 3155c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); 31567a8e76a3SSteven Rostedt 31577a8e76a3SSteven Rostedt /** 31587a8e76a3SSteven Rostedt * ring_buffer_iter_empty - check if an iterator has no more to read 31597a8e76a3SSteven Rostedt * @iter: The iterator to check 31607a8e76a3SSteven Rostedt */ 31617a8e76a3SSteven Rostedt int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 31627a8e76a3SSteven Rostedt { 31637a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 31647a8e76a3SSteven Rostedt 31657a8e76a3SSteven Rostedt cpu_buffer = iter->cpu_buffer; 31667a8e76a3SSteven Rostedt 3167bf41a158SSteven Rostedt return iter->head_page == cpu_buffer->commit_page && 3168bf41a158SSteven Rostedt iter->head == rb_commit_index(cpu_buffer); 31697a8e76a3SSteven Rostedt } 3170c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 31717a8e76a3SSteven Rostedt 31727a8e76a3SSteven Rostedt static void 31737a8e76a3SSteven Rostedt rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 31747a8e76a3SSteven Rostedt struct ring_buffer_event *event) 31757a8e76a3SSteven Rostedt { 31767a8e76a3SSteven Rostedt u64 delta; 31777a8e76a3SSteven Rostedt 3178334d4169SLai Jiangshan switch (event->type_len) { 31797a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 31807a8e76a3SSteven Rostedt return; 31817a8e76a3SSteven Rostedt 31827a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 31837a8e76a3SSteven Rostedt delta = 
event->array[0]; 31847a8e76a3SSteven Rostedt delta <<= TS_SHIFT; 31857a8e76a3SSteven Rostedt delta += event->time_delta; 31867a8e76a3SSteven Rostedt cpu_buffer->read_stamp += delta; 31877a8e76a3SSteven Rostedt return; 31887a8e76a3SSteven Rostedt 31897a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 31907a8e76a3SSteven Rostedt /* FIXME: not implemented */ 31917a8e76a3SSteven Rostedt return; 31927a8e76a3SSteven Rostedt 31937a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 31947a8e76a3SSteven Rostedt cpu_buffer->read_stamp += event->time_delta; 31957a8e76a3SSteven Rostedt return; 31967a8e76a3SSteven Rostedt 31977a8e76a3SSteven Rostedt default: 31987a8e76a3SSteven Rostedt BUG(); 31997a8e76a3SSteven Rostedt } 32007a8e76a3SSteven Rostedt return; 32017a8e76a3SSteven Rostedt } 32027a8e76a3SSteven Rostedt 32037a8e76a3SSteven Rostedt static void 32047a8e76a3SSteven Rostedt rb_update_iter_read_stamp(struct ring_buffer_iter *iter, 32057a8e76a3SSteven Rostedt struct ring_buffer_event *event) 32067a8e76a3SSteven Rostedt { 32077a8e76a3SSteven Rostedt u64 delta; 32087a8e76a3SSteven Rostedt 3209334d4169SLai Jiangshan switch (event->type_len) { 32107a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 32117a8e76a3SSteven Rostedt return; 32127a8e76a3SSteven Rostedt 32137a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 32147a8e76a3SSteven Rostedt delta = event->array[0]; 32157a8e76a3SSteven Rostedt delta <<= TS_SHIFT; 32167a8e76a3SSteven Rostedt delta += event->time_delta; 32177a8e76a3SSteven Rostedt iter->read_stamp += delta; 32187a8e76a3SSteven Rostedt return; 32197a8e76a3SSteven Rostedt 32207a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 32217a8e76a3SSteven Rostedt /* FIXME: not implemented */ 32227a8e76a3SSteven Rostedt return; 32237a8e76a3SSteven Rostedt 32247a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 32257a8e76a3SSteven Rostedt iter->read_stamp += event->time_delta; 32267a8e76a3SSteven Rostedt return; 32277a8e76a3SSteven Rostedt 32287a8e76a3SSteven Rostedt default: 32297a8e76a3SSteven Rostedt BUG(); 32307a8e76a3SSteven Rostedt } 32317a8e76a3SSteven Rostedt return; 32327a8e76a3SSteven Rostedt } 32337a8e76a3SSteven Rostedt 3234d769041fSSteven Rostedt static struct buffer_page * 3235d769041fSSteven Rostedt rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 32367a8e76a3SSteven Rostedt { 3237d769041fSSteven Rostedt struct buffer_page *reader = NULL; 323866a8cb95SSteven Rostedt unsigned long overwrite; 3239d769041fSSteven Rostedt unsigned long flags; 3240818e3dd3SSteven Rostedt int nr_loops = 0; 324177ae365eSSteven Rostedt int ret; 3242d769041fSSteven Rostedt 32433e03fb7fSSteven Rostedt local_irq_save(flags); 32440199c4e6SThomas Gleixner arch_spin_lock(&cpu_buffer->lock); 3245d769041fSSteven Rostedt 3246d769041fSSteven Rostedt again: 3247818e3dd3SSteven Rostedt /* 3248818e3dd3SSteven Rostedt * This should normally only loop twice. But because the 3249818e3dd3SSteven Rostedt * start of the reader inserts an empty page, it causes 3250818e3dd3SSteven Rostedt * a case where we will loop three times. There should be no 3251818e3dd3SSteven Rostedt * reason to loop four times (that I know of). 
3252818e3dd3SSteven Rostedt */ 32533e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { 3254818e3dd3SSteven Rostedt reader = NULL; 3255818e3dd3SSteven Rostedt goto out; 3256818e3dd3SSteven Rostedt } 3257818e3dd3SSteven Rostedt 3258d769041fSSteven Rostedt reader = cpu_buffer->reader_page; 3259d769041fSSteven Rostedt 3260d769041fSSteven Rostedt /* If there's more to read, return this page */ 3261bf41a158SSteven Rostedt if (cpu_buffer->reader_page->read < rb_page_size(reader)) 3262d769041fSSteven Rostedt goto out; 3263d769041fSSteven Rostedt 3264d769041fSSteven Rostedt /* Never should we have an index greater than the size */ 32653e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 32663e89c7bbSSteven Rostedt cpu_buffer->reader_page->read > rb_page_size(reader))) 32673e89c7bbSSteven Rostedt goto out; 3268d769041fSSteven Rostedt 3269d769041fSSteven Rostedt /* check if we caught up to the tail */ 3270d769041fSSteven Rostedt reader = NULL; 3271bf41a158SSteven Rostedt if (cpu_buffer->commit_page == cpu_buffer->reader_page) 3272d769041fSSteven Rostedt goto out; 32737a8e76a3SSteven Rostedt 3274a5fb8331SSteven Rostedt /* Don't bother swapping if the ring buffer is empty */ 3275a5fb8331SSteven Rostedt if (rb_num_of_entries(cpu_buffer) == 0) 3276a5fb8331SSteven Rostedt goto out; 3277a5fb8331SSteven Rostedt 32787a8e76a3SSteven Rostedt /* 3279d769041fSSteven Rostedt * Reset the reader page to size zero. 32807a8e76a3SSteven Rostedt */ 328177ae365eSSteven Rostedt local_set(&cpu_buffer->reader_page->write, 0); 328277ae365eSSteven Rostedt local_set(&cpu_buffer->reader_page->entries, 0); 328377ae365eSSteven Rostedt local_set(&cpu_buffer->reader_page->page->commit, 0); 3284ff0ff84aSSteven Rostedt cpu_buffer->reader_page->real_end = 0; 3285d769041fSSteven Rostedt 328677ae365eSSteven Rostedt spin: 328777ae365eSSteven Rostedt /* 328877ae365eSSteven Rostedt * Splice the empty reader page into the list around the head. 328977ae365eSSteven Rostedt */ 329077ae365eSSteven Rostedt reader = rb_set_head_page(cpu_buffer); 32910e1ff5d7SSteven Rostedt cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); 3292d769041fSSteven Rostedt cpu_buffer->reader_page->list.prev = reader->list.prev; 3293bf41a158SSteven Rostedt 32943adc54faSSteven Rostedt /* 32953adc54faSSteven Rostedt * cpu_buffer->pages just needs to point to the buffer, it 32963adc54faSSteven Rostedt * has no specific buffer page to point to. Let's move it out 329725985edcSLucas De Marchi * of our way so we don't accidentally swap it. 32983adc54faSSteven Rostedt */ 32993adc54faSSteven Rostedt cpu_buffer->pages = reader->list.prev; 33003adc54faSSteven Rostedt 330177ae365eSSteven Rostedt /* The reader page will be pointing to the new head */ 330277ae365eSSteven Rostedt rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list); 3303d769041fSSteven Rostedt 3304d769041fSSteven Rostedt /* 330566a8cb95SSteven Rostedt * We want to make sure we read the overruns after we set up our 330666a8cb95SSteven Rostedt * pointers to the next object. The writer side does a 330766a8cb95SSteven Rostedt * cmpxchg to cross pages which acts as the mb on the writer 330866a8cb95SSteven Rostedt * side. Note, the reader will constantly fail the swap 330966a8cb95SSteven Rostedt * while the writer is updating the pointers, so this 331066a8cb95SSteven Rostedt * guarantees that the overwrite recorded here is the one we 331166a8cb95SSteven Rostedt * want to compare with the last_overrun.
331266a8cb95SSteven Rostedt 	 */
331366a8cb95SSteven Rostedt 	smp_mb();
331466a8cb95SSteven Rostedt 	overwrite = local_read(&(cpu_buffer->overrun));
331566a8cb95SSteven Rostedt
331666a8cb95SSteven Rostedt 	/*
331777ae365eSSteven Rostedt 	 * Here's the tricky part.
331877ae365eSSteven Rostedt 	 *
331977ae365eSSteven Rostedt 	 * We need to move the pointer past the header page.
332077ae365eSSteven Rostedt 	 * But we can only do that if a writer is not currently
332177ae365eSSteven Rostedt 	 * moving it. The page before the header page has the
332277ae365eSSteven Rostedt 	 * flag bit '1' set if it is pointing to the page we want.
332377ae365eSSteven Rostedt 	 * But if the writer is in the process of moving it
332477ae365eSSteven Rostedt 	 * then it will be '2' or already moved '0'.
3325d769041fSSteven Rostedt 	 */
3326d769041fSSteven Rostedt
332777ae365eSSteven Rostedt 	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
332877ae365eSSteven Rostedt
332977ae365eSSteven Rostedt 	/*
333077ae365eSSteven Rostedt 	 * If we did not convert it, then we must try again.
333177ae365eSSteven Rostedt 	 */
333277ae365eSSteven Rostedt 	if (!ret)
333377ae365eSSteven Rostedt 		goto spin;
333477ae365eSSteven Rostedt
333577ae365eSSteven Rostedt 	/*
333677ae365eSSteven Rostedt 	 * Yeah! We succeeded in replacing the page.
333777ae365eSSteven Rostedt 	 *
333877ae365eSSteven Rostedt 	 * Now make the new head point back to the reader page.
333977ae365eSSteven Rostedt 	 */
33405ded3dc6SDavid Sharp 	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
33417a8e76a3SSteven Rostedt 	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3342d769041fSSteven Rostedt
3343d769041fSSteven Rostedt 	/* Finally update the reader page to the new head */
3344d769041fSSteven Rostedt 	cpu_buffer->reader_page = reader;
3345d769041fSSteven Rostedt 	rb_reset_reader_page(cpu_buffer);
3346d769041fSSteven Rostedt
334766a8cb95SSteven Rostedt 	if (overwrite != cpu_buffer->last_overrun) {
334866a8cb95SSteven Rostedt 		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
334966a8cb95SSteven Rostedt 		cpu_buffer->last_overrun = overwrite;
335066a8cb95SSteven Rostedt 	}
335166a8cb95SSteven Rostedt
3352d769041fSSteven Rostedt 	goto again;
3353d769041fSSteven Rostedt
3354d769041fSSteven Rostedt  out:
33550199c4e6SThomas Gleixner 	arch_spin_unlock(&cpu_buffer->lock);
33563e03fb7fSSteven Rostedt 	local_irq_restore(flags);
3357d769041fSSteven Rostedt
3358d769041fSSteven Rostedt 	return reader;
33597a8e76a3SSteven Rostedt }
33607a8e76a3SSteven Rostedt
3361d769041fSSteven Rostedt static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3362d769041fSSteven Rostedt {
3363d769041fSSteven Rostedt 	struct ring_buffer_event *event;
3364d769041fSSteven Rostedt 	struct buffer_page *reader;
3365d769041fSSteven Rostedt 	unsigned length;
3366d769041fSSteven Rostedt
3367d769041fSSteven Rostedt 	reader = rb_get_reader_page(cpu_buffer);
3368d769041fSSteven Rostedt
3369d769041fSSteven Rostedt 	/* This function should not be called when buffer is empty */
33703e89c7bbSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, !reader))
33713e89c7bbSSteven Rostedt 		return;
3372d769041fSSteven Rostedt
3373d769041fSSteven Rostedt 	event = rb_reader_event(cpu_buffer);
33747a8e76a3SSteven Rostedt
3375a1863c21SSteven Rostedt 	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3376e4906effSSteven Rostedt 		cpu_buffer->read++;
33777a8e76a3SSteven Rostedt
33787a8e76a3SSteven Rostedt 	rb_update_read_stamp(cpu_buffer, event);
33797a8e76a3SSteven Rostedt
3380d769041fSSteven Rostedt 	length =
rb_event_length(event); 33816f807acdSSteven Rostedt cpu_buffer->reader_page->read += length; 33827a8e76a3SSteven Rostedt } 33837a8e76a3SSteven Rostedt 33847a8e76a3SSteven Rostedt static void rb_advance_iter(struct ring_buffer_iter *iter) 33857a8e76a3SSteven Rostedt { 33867a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 33877a8e76a3SSteven Rostedt struct ring_buffer_event *event; 33887a8e76a3SSteven Rostedt unsigned length; 33897a8e76a3SSteven Rostedt 33907a8e76a3SSteven Rostedt cpu_buffer = iter->cpu_buffer; 33917a8e76a3SSteven Rostedt 33927a8e76a3SSteven Rostedt /* 33937a8e76a3SSteven Rostedt * Check if we are at the end of the buffer. 33947a8e76a3SSteven Rostedt */ 3395bf41a158SSteven Rostedt if (iter->head >= rb_page_size(iter->head_page)) { 3396ea05b57cSSteven Rostedt /* discarded commits can make the page empty */ 3397ea05b57cSSteven Rostedt if (iter->head_page == cpu_buffer->commit_page) 33983e89c7bbSSteven Rostedt return; 3399d769041fSSteven Rostedt rb_inc_iter(iter); 34007a8e76a3SSteven Rostedt return; 34017a8e76a3SSteven Rostedt } 34027a8e76a3SSteven Rostedt 34037a8e76a3SSteven Rostedt event = rb_iter_head_event(iter); 34047a8e76a3SSteven Rostedt 34057a8e76a3SSteven Rostedt length = rb_event_length(event); 34067a8e76a3SSteven Rostedt 34077a8e76a3SSteven Rostedt /* 34087a8e76a3SSteven Rostedt * This should not be called to advance the header if we are 34097a8e76a3SSteven Rostedt * at the tail of the buffer. 34107a8e76a3SSteven Rostedt */ 34113e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 3412f536aafcSSteven Rostedt (iter->head_page == cpu_buffer->commit_page) && 34133e89c7bbSSteven Rostedt (iter->head + length > rb_commit_index(cpu_buffer)))) 34143e89c7bbSSteven Rostedt return; 34157a8e76a3SSteven Rostedt 34167a8e76a3SSteven Rostedt rb_update_iter_read_stamp(iter, event); 34177a8e76a3SSteven Rostedt 34187a8e76a3SSteven Rostedt iter->head += length; 34197a8e76a3SSteven Rostedt 34207a8e76a3SSteven Rostedt /* check for end of page padding */ 3421bf41a158SSteven Rostedt if ((iter->head >= rb_page_size(iter->head_page)) && 3422bf41a158SSteven Rostedt (iter->head_page != cpu_buffer->commit_page)) 34237a8e76a3SSteven Rostedt rb_advance_iter(iter); 34247a8e76a3SSteven Rostedt } 34257a8e76a3SSteven Rostedt 342666a8cb95SSteven Rostedt static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) 342766a8cb95SSteven Rostedt { 342866a8cb95SSteven Rostedt return cpu_buffer->lost_events; 342966a8cb95SSteven Rostedt } 343066a8cb95SSteven Rostedt 3431f83c9d0fSSteven Rostedt static struct ring_buffer_event * 343266a8cb95SSteven Rostedt rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, 343366a8cb95SSteven Rostedt unsigned long *lost_events) 34347a8e76a3SSteven Rostedt { 34357a8e76a3SSteven Rostedt struct ring_buffer_event *event; 3436d769041fSSteven Rostedt struct buffer_page *reader; 3437818e3dd3SSteven Rostedt int nr_loops = 0; 34387a8e76a3SSteven Rostedt 34397a8e76a3SSteven Rostedt again: 3440818e3dd3SSteven Rostedt /* 344169d1b839SSteven Rostedt * We repeat when a time extend is encountered. 344269d1b839SSteven Rostedt * Since the time extend is always attached to a data event, 344369d1b839SSteven Rostedt * we should never loop more than once. 344469d1b839SSteven Rostedt * (We never hit the following condition more than twice). 
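	 * (A time extend holds the high bits of an oversized delta: the
	 *  full delta is (event->array[0] << TS_SHIFT) + event->time_delta,
	 *  exactly as rb_update_read_stamp() reconstructs it above.)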
3445818e3dd3SSteven Rostedt */ 344669d1b839SSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 3447818e3dd3SSteven Rostedt return NULL; 3448818e3dd3SSteven Rostedt 3449d769041fSSteven Rostedt reader = rb_get_reader_page(cpu_buffer); 3450d769041fSSteven Rostedt if (!reader) 34517a8e76a3SSteven Rostedt return NULL; 34527a8e76a3SSteven Rostedt 3453d769041fSSteven Rostedt event = rb_reader_event(cpu_buffer); 34547a8e76a3SSteven Rostedt 3455334d4169SLai Jiangshan switch (event->type_len) { 34567a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 34572d622719STom Zanussi if (rb_null_event(event)) 3458bf41a158SSteven Rostedt RB_WARN_ON(cpu_buffer, 1); 34592d622719STom Zanussi /* 34602d622719STom Zanussi * Because the writer could be discarding every 34612d622719STom Zanussi * event it creates (which would probably be bad) 34622d622719STom Zanussi * if we were to go back to "again" then we may never 34632d622719STom Zanussi * catch up, and will trigger the warn on, or lock 34642d622719STom Zanussi * the box. Return the padding, and we will release 34652d622719STom Zanussi * the current locks, and try again. 34662d622719STom Zanussi */ 34672d622719STom Zanussi return event; 34687a8e76a3SSteven Rostedt 34697a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 34707a8e76a3SSteven Rostedt /* Internal data, OK to advance */ 3471d769041fSSteven Rostedt rb_advance_reader(cpu_buffer); 34727a8e76a3SSteven Rostedt goto again; 34737a8e76a3SSteven Rostedt 34747a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 34757a8e76a3SSteven Rostedt /* FIXME: not implemented */ 3476d769041fSSteven Rostedt rb_advance_reader(cpu_buffer); 34777a8e76a3SSteven Rostedt goto again; 34787a8e76a3SSteven Rostedt 34797a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 34807a8e76a3SSteven Rostedt if (ts) { 34817a8e76a3SSteven Rostedt *ts = cpu_buffer->read_stamp + event->time_delta; 3482d8eeb2d3SRobert Richter ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 348337886f6aSSteven Rostedt cpu_buffer->cpu, ts); 34847a8e76a3SSteven Rostedt } 348566a8cb95SSteven Rostedt if (lost_events) 348666a8cb95SSteven Rostedt *lost_events = rb_lost_events(cpu_buffer); 34877a8e76a3SSteven Rostedt return event; 34887a8e76a3SSteven Rostedt 34897a8e76a3SSteven Rostedt default: 34907a8e76a3SSteven Rostedt BUG(); 34917a8e76a3SSteven Rostedt } 34927a8e76a3SSteven Rostedt 34937a8e76a3SSteven Rostedt return NULL; 34947a8e76a3SSteven Rostedt } 3495c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_peek); 34967a8e76a3SSteven Rostedt 3497f83c9d0fSSteven Rostedt static struct ring_buffer_event * 3498f83c9d0fSSteven Rostedt rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 34997a8e76a3SSteven Rostedt { 35007a8e76a3SSteven Rostedt struct ring_buffer *buffer; 35017a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 35027a8e76a3SSteven Rostedt struct ring_buffer_event *event; 3503818e3dd3SSteven Rostedt int nr_loops = 0; 35047a8e76a3SSteven Rostedt 35057a8e76a3SSteven Rostedt cpu_buffer = iter->cpu_buffer; 35067a8e76a3SSteven Rostedt buffer = cpu_buffer->buffer; 35077a8e76a3SSteven Rostedt 3508492a74f4SSteven Rostedt /* 3509492a74f4SSteven Rostedt * Check if someone performed a consuming read to 3510492a74f4SSteven Rostedt * the buffer. A consuming read invalidates the iterator 3511492a74f4SSteven Rostedt * and we need to reset the iterator in this case. 
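	 * (iter->cache_read and iter->cache_reader_page are snapshots
	 *  taken when the iterator was last reset; any mismatch with the
	 *  live values means a consuming read has happened since.)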
3512492a74f4SSteven Rostedt */ 3513492a74f4SSteven Rostedt if (unlikely(iter->cache_read != cpu_buffer->read || 3514492a74f4SSteven Rostedt iter->cache_reader_page != cpu_buffer->reader_page)) 3515492a74f4SSteven Rostedt rb_iter_reset(iter); 3516492a74f4SSteven Rostedt 35177a8e76a3SSteven Rostedt again: 35183c05d748SSteven Rostedt if (ring_buffer_iter_empty(iter)) 35193c05d748SSteven Rostedt return NULL; 35203c05d748SSteven Rostedt 3521818e3dd3SSteven Rostedt /* 352269d1b839SSteven Rostedt * We repeat when a time extend is encountered. 352369d1b839SSteven Rostedt * Since the time extend is always attached to a data event, 352469d1b839SSteven Rostedt * we should never loop more than once. 352569d1b839SSteven Rostedt * (We never hit the following condition more than twice). 3526818e3dd3SSteven Rostedt */ 352769d1b839SSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 3528818e3dd3SSteven Rostedt return NULL; 3529818e3dd3SSteven Rostedt 35307a8e76a3SSteven Rostedt if (rb_per_cpu_empty(cpu_buffer)) 35317a8e76a3SSteven Rostedt return NULL; 35327a8e76a3SSteven Rostedt 35333c05d748SSteven Rostedt if (iter->head >= local_read(&iter->head_page->page->commit)) { 35343c05d748SSteven Rostedt rb_inc_iter(iter); 35353c05d748SSteven Rostedt goto again; 35363c05d748SSteven Rostedt } 35373c05d748SSteven Rostedt 35387a8e76a3SSteven Rostedt event = rb_iter_head_event(iter); 35397a8e76a3SSteven Rostedt 3540334d4169SLai Jiangshan switch (event->type_len) { 35417a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 35422d622719STom Zanussi if (rb_null_event(event)) { 3543d769041fSSteven Rostedt rb_inc_iter(iter); 35447a8e76a3SSteven Rostedt goto again; 35452d622719STom Zanussi } 35462d622719STom Zanussi rb_advance_iter(iter); 35472d622719STom Zanussi return event; 35487a8e76a3SSteven Rostedt 35497a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 35507a8e76a3SSteven Rostedt /* Internal data, OK to advance */ 35517a8e76a3SSteven Rostedt rb_advance_iter(iter); 35527a8e76a3SSteven Rostedt goto again; 35537a8e76a3SSteven Rostedt 35547a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 35557a8e76a3SSteven Rostedt /* FIXME: not implemented */ 35567a8e76a3SSteven Rostedt rb_advance_iter(iter); 35577a8e76a3SSteven Rostedt goto again; 35587a8e76a3SSteven Rostedt 35597a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 35607a8e76a3SSteven Rostedt if (ts) { 35617a8e76a3SSteven Rostedt *ts = iter->read_stamp + event->time_delta; 356237886f6aSSteven Rostedt ring_buffer_normalize_time_stamp(buffer, 356337886f6aSSteven Rostedt cpu_buffer->cpu, ts); 35647a8e76a3SSteven Rostedt } 35657a8e76a3SSteven Rostedt return event; 35667a8e76a3SSteven Rostedt 35677a8e76a3SSteven Rostedt default: 35687a8e76a3SSteven Rostedt BUG(); 35697a8e76a3SSteven Rostedt } 35707a8e76a3SSteven Rostedt 35717a8e76a3SSteven Rostedt return NULL; 35727a8e76a3SSteven Rostedt } 3573c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); 35747a8e76a3SSteven Rostedt 35758d707e8eSSteven Rostedt static inline int rb_ok_to_lock(void) 35768d707e8eSSteven Rostedt { 35778d707e8eSSteven Rostedt /* 35788d707e8eSSteven Rostedt * If an NMI die dumps out the content of the ring buffer 35798d707e8eSSteven Rostedt * do not grab locks. We also permanently disable the ring 35808d707e8eSSteven Rostedt * buffer too. A one time deal is all you get from reading 35818d707e8eSSteven Rostedt * the ring buffer from an NMI. 
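	 * (tracing_off_permanent() sets the permanent disable bit noted
	 *  at the top of this file; nothing turns recording back on
	 *  after that.)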
35828d707e8eSSteven Rostedt 	 */
3583464e85ebSSteven Rostedt 	if (likely(!in_nmi()))
35848d707e8eSSteven Rostedt 		return 1;
35858d707e8eSSteven Rostedt
35868d707e8eSSteven Rostedt 	tracing_off_permanent();
35878d707e8eSSteven Rostedt 	return 0;
35888d707e8eSSteven Rostedt }
35898d707e8eSSteven Rostedt
35907a8e76a3SSteven Rostedt /**
3591f83c9d0fSSteven Rostedt  * ring_buffer_peek - peek at the next event to be read
3592f83c9d0fSSteven Rostedt  * @buffer: The ring buffer to read
3593f83c9d0fSSteven Rostedt  * @cpu: The cpu to peek at
3594f83c9d0fSSteven Rostedt  * @ts: The timestamp counter of this event.
359566a8cb95SSteven Rostedt  * @lost_events: a variable to store if events were lost (may be NULL)
3596f83c9d0fSSteven Rostedt  *
3597f83c9d0fSSteven Rostedt  * This will return the event that will be read next, but does
3598f83c9d0fSSteven Rostedt  * not consume the data.
3599f83c9d0fSSteven Rostedt  */
3600f83c9d0fSSteven Rostedt struct ring_buffer_event *
360166a8cb95SSteven Rostedt ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
360266a8cb95SSteven Rostedt 		 unsigned long *lost_events)
3603f83c9d0fSSteven Rostedt {
3604f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
36058aabee57SSteven Rostedt 	struct ring_buffer_event *event;
3606f83c9d0fSSteven Rostedt 	unsigned long flags;
36078d707e8eSSteven Rostedt 	int dolock;
3608f83c9d0fSSteven Rostedt
3609554f786eSSteven Rostedt 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
36108aabee57SSteven Rostedt 		return NULL;
3611554f786eSSteven Rostedt
36128d707e8eSSteven Rostedt 	dolock = rb_ok_to_lock();
36132d622719STom Zanussi  again:
36148d707e8eSSteven Rostedt 	local_irq_save(flags);
36158d707e8eSSteven Rostedt 	if (dolock)
36165389f6faSThomas Gleixner 		raw_spin_lock(&cpu_buffer->reader_lock);
361766a8cb95SSteven Rostedt 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3618469535a5SRobert Richter 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3619469535a5SRobert Richter 		rb_advance_reader(cpu_buffer);
36208d707e8eSSteven Rostedt 	if (dolock)
36215389f6faSThomas Gleixner 		raw_spin_unlock(&cpu_buffer->reader_lock);
36228d707e8eSSteven Rostedt 	local_irq_restore(flags);
3623f83c9d0fSSteven Rostedt
36241b959e18SSteven Rostedt 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
36252d622719STom Zanussi 		goto again;
36262d622719STom Zanussi
3627f83c9d0fSSteven Rostedt 	return event;
3628f83c9d0fSSteven Rostedt }
3629f83c9d0fSSteven Rostedt
3630f83c9d0fSSteven Rostedt /**
3631f83c9d0fSSteven Rostedt  * ring_buffer_iter_peek - peek at the next event to be read
3632f83c9d0fSSteven Rostedt  * @iter: The ring buffer iterator
3633f83c9d0fSSteven Rostedt  * @ts: The timestamp counter of this event.
3634f83c9d0fSSteven Rostedt  *
3635f83c9d0fSSteven Rostedt  * This will return the event that will be read next, but does
3636f83c9d0fSSteven Rostedt  * not increment the iterator.
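 *
 * A sketch of typical use (illustrative only; process() stands in
 * for whatever the caller does with the event):
 *
 *	event = ring_buffer_iter_peek(iter, &ts);
 *	if (event)
 *		process(ring_buffer_event_data(event), ts);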
3637f83c9d0fSSteven Rostedt */ 3638f83c9d0fSSteven Rostedt struct ring_buffer_event * 3639f83c9d0fSSteven Rostedt ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 3640f83c9d0fSSteven Rostedt { 3641f83c9d0fSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 3642f83c9d0fSSteven Rostedt struct ring_buffer_event *event; 3643f83c9d0fSSteven Rostedt unsigned long flags; 3644f83c9d0fSSteven Rostedt 36452d622719STom Zanussi again: 36465389f6faSThomas Gleixner raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3647f83c9d0fSSteven Rostedt event = rb_iter_peek(iter, ts); 36485389f6faSThomas Gleixner raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3649f83c9d0fSSteven Rostedt 36501b959e18SSteven Rostedt if (event && event->type_len == RINGBUF_TYPE_PADDING) 36512d622719STom Zanussi goto again; 36522d622719STom Zanussi 3653f83c9d0fSSteven Rostedt return event; 3654f83c9d0fSSteven Rostedt } 3655f83c9d0fSSteven Rostedt 3656f83c9d0fSSteven Rostedt /** 36577a8e76a3SSteven Rostedt * ring_buffer_consume - return an event and consume it 36587a8e76a3SSteven Rostedt * @buffer: The ring buffer to get the next event from 365966a8cb95SSteven Rostedt * @cpu: the cpu to read the buffer from 366066a8cb95SSteven Rostedt * @ts: a variable to store the timestamp (may be NULL) 366166a8cb95SSteven Rostedt * @lost_events: a variable to store if events were lost (may be NULL) 36627a8e76a3SSteven Rostedt * 36637a8e76a3SSteven Rostedt * Returns the next event in the ring buffer, and that event is consumed. 36647a8e76a3SSteven Rostedt * Meaning, that sequential reads will keep returning a different event, 36657a8e76a3SSteven Rostedt * and eventually empty the ring buffer if the producer is slower. 36667a8e76a3SSteven Rostedt */ 36677a8e76a3SSteven Rostedt struct ring_buffer_event * 366866a8cb95SSteven Rostedt ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, 366966a8cb95SSteven Rostedt unsigned long *lost_events) 36707a8e76a3SSteven Rostedt { 3671554f786eSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 3672554f786eSSteven Rostedt struct ring_buffer_event *event = NULL; 3673f83c9d0fSSteven Rostedt unsigned long flags; 36748d707e8eSSteven Rostedt int dolock; 36758d707e8eSSteven Rostedt 36768d707e8eSSteven Rostedt dolock = rb_ok_to_lock(); 36777a8e76a3SSteven Rostedt 36782d622719STom Zanussi again: 3679554f786eSSteven Rostedt /* might be called in atomic */ 3680554f786eSSteven Rostedt preempt_disable(); 36817a8e76a3SSteven Rostedt 3682554f786eSSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3683554f786eSSteven Rostedt goto out; 3684554f786eSSteven Rostedt 3685554f786eSSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 36868d707e8eSSteven Rostedt local_irq_save(flags); 36878d707e8eSSteven Rostedt if (dolock) 36885389f6faSThomas Gleixner raw_spin_lock(&cpu_buffer->reader_lock); 36897a8e76a3SSteven Rostedt 369066a8cb95SSteven Rostedt event = rb_buffer_peek(cpu_buffer, ts, lost_events); 369166a8cb95SSteven Rostedt if (event) { 369266a8cb95SSteven Rostedt cpu_buffer->lost_events = 0; 3693d769041fSSteven Rostedt rb_advance_reader(cpu_buffer); 369466a8cb95SSteven Rostedt } 36957a8e76a3SSteven Rostedt 36968d707e8eSSteven Rostedt if (dolock) 36975389f6faSThomas Gleixner raw_spin_unlock(&cpu_buffer->reader_lock); 36988d707e8eSSteven Rostedt local_irq_restore(flags); 3699f83c9d0fSSteven Rostedt 3700554f786eSSteven Rostedt out: 3701554f786eSSteven Rostedt preempt_enable(); 3702554f786eSSteven Rostedt 37031b959e18SSteven Rostedt if (event && event->type_len 
== RINGBUF_TYPE_PADDING)
37042d622719STom Zanussi 		goto again;
37052d622719STom Zanussi
37067a8e76a3SSteven Rostedt 	return event;
37077a8e76a3SSteven Rostedt }
3708c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_consume);
37097a8e76a3SSteven Rostedt
37107a8e76a3SSteven Rostedt /**
371172c9ddfdSDavid Miller  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
37127a8e76a3SSteven Rostedt  * @buffer: The ring buffer to read from
37137a8e76a3SSteven Rostedt  * @cpu: The cpu buffer to iterate over
37147a8e76a3SSteven Rostedt  *
371572c9ddfdSDavid Miller  * This performs the initial preparations necessary to iterate
371672c9ddfdSDavid Miller  * through the buffer. Memory is allocated, buffer recording
371772c9ddfdSDavid Miller  * is disabled, and the iterator pointer is returned to the caller.
37187a8e76a3SSteven Rostedt  *
371972c9ddfdSDavid Miller  * Disabling buffer recording prevents the reading from being
372072c9ddfdSDavid Miller  * corrupted. This is not a consuming read, so a producer is not
372172c9ddfdSDavid Miller  * expected.
372272c9ddfdSDavid Miller  *
372372c9ddfdSDavid Miller  * After a sequence of ring_buffer_read_prepare calls, the user is
372472c9ddfdSDavid Miller  * expected to make at least one call to ring_buffer_read_prepare_sync.
372572c9ddfdSDavid Miller  * Afterwards, ring_buffer_read_start is invoked to get things going
372672c9ddfdSDavid Miller  * for real.
372772c9ddfdSDavid Miller  *
372872c9ddfdSDavid Miller  * This overall must be paired with ring_buffer_read_finish.
37297a8e76a3SSteven Rostedt  */
37307a8e76a3SSteven Rostedt struct ring_buffer_iter *
373172c9ddfdSDavid Miller ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
37327a8e76a3SSteven Rostedt {
37337a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
37348aabee57SSteven Rostedt 	struct ring_buffer_iter *iter;
37357a8e76a3SSteven Rostedt
37369e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
37378aabee57SSteven Rostedt 		return NULL;
37387a8e76a3SSteven Rostedt
37397a8e76a3SSteven Rostedt 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
37407a8e76a3SSteven Rostedt 	if (!iter)
37418aabee57SSteven Rostedt 		return NULL;
37427a8e76a3SSteven Rostedt
37437a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
37447a8e76a3SSteven Rostedt
37457a8e76a3SSteven Rostedt 	iter->cpu_buffer = cpu_buffer;
37467a8e76a3SSteven Rostedt
374783f40318SVaibhav Nagarnaik 	atomic_inc(&buffer->resize_disabled);
37487a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
374972c9ddfdSDavid Miller
375072c9ddfdSDavid Miller 	return iter;
375172c9ddfdSDavid Miller }
375272c9ddfdSDavid Miller EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
375372c9ddfdSDavid Miller
375472c9ddfdSDavid Miller /**
375572c9ddfdSDavid Miller  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
375672c9ddfdSDavid Miller  *
375772c9ddfdSDavid Miller  * All previously invoked ring_buffer_read_prepare calls to prepare
375872c9ddfdSDavid Miller  * iterators will be synchronized. Afterwards, ring_buffer_read_start
375972c9ddfdSDavid Miller  * calls on those iterators are allowed.
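 *
 * A sketch of the full non-consuming read protocol (illustrative
 * only; the helpers are the real API, the loop body is made up):
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu);
 *	if (!iter)
 *		return;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		;	/* examine the event; recording stays disabled */
 *	ring_buffer_read_finish(iter);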
376072c9ddfdSDavid Miller  */
376172c9ddfdSDavid Miller void
376272c9ddfdSDavid Miller ring_buffer_read_prepare_sync(void)
376372c9ddfdSDavid Miller {
37647a8e76a3SSteven Rostedt 	synchronize_sched();
376572c9ddfdSDavid Miller }
376672c9ddfdSDavid Miller EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
376772c9ddfdSDavid Miller
376872c9ddfdSDavid Miller /**
376972c9ddfdSDavid Miller  * ring_buffer_read_start - start a non consuming read of the buffer
377072c9ddfdSDavid Miller  * @iter: The iterator returned by ring_buffer_read_prepare
377172c9ddfdSDavid Miller  *
377272c9ddfdSDavid Miller  * This finalizes the startup of an iteration through the buffer.
377372c9ddfdSDavid Miller  * The iterator comes from a call to ring_buffer_read_prepare and
377472c9ddfdSDavid Miller  * an intervening ring_buffer_read_prepare_sync must have been
377572c9ddfdSDavid Miller  * performed.
377672c9ddfdSDavid Miller  *
377772c9ddfdSDavid Miller  * Must be paired with ring_buffer_read_finish.
377872c9ddfdSDavid Miller  */
377972c9ddfdSDavid Miller void
378072c9ddfdSDavid Miller ring_buffer_read_start(struct ring_buffer_iter *iter)
378172c9ddfdSDavid Miller {
378272c9ddfdSDavid Miller 	struct ring_buffer_per_cpu *cpu_buffer;
378372c9ddfdSDavid Miller 	unsigned long flags;
378472c9ddfdSDavid Miller
378572c9ddfdSDavid Miller 	if (!iter)
378672c9ddfdSDavid Miller 		return;
378772c9ddfdSDavid Miller
378872c9ddfdSDavid Miller 	cpu_buffer = iter->cpu_buffer;
37897a8e76a3SSteven Rostedt
37905389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
37910199c4e6SThomas Gleixner 	arch_spin_lock(&cpu_buffer->lock);
3792642edba5SSteven Rostedt 	rb_iter_reset(iter);
37930199c4e6SThomas Gleixner 	arch_spin_unlock(&cpu_buffer->lock);
37945389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
37957a8e76a3SSteven Rostedt }
3796c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_start);
37977a8e76a3SSteven Rostedt
37987a8e76a3SSteven Rostedt /**
37997a8e76a3SSteven Rostedt  * ring_buffer_read_finish - finish reading the iterator of the buffer
38007a8e76a3SSteven Rostedt  * @iter: The iterator retrieved by ring_buffer_read_prepare
38017a8e76a3SSteven Rostedt  *
38027a8e76a3SSteven Rostedt  * This re-enables the recording to the buffer, and frees the
38037a8e76a3SSteven Rostedt  * iterator.
38047a8e76a3SSteven Rostedt  */
38057a8e76a3SSteven Rostedt void
38067a8e76a3SSteven Rostedt ring_buffer_read_finish(struct ring_buffer_iter *iter)
38077a8e76a3SSteven Rostedt {
38087a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
38097a8e76a3SSteven Rostedt
3810659f451fSSteven Rostedt 	/*
3811659f451fSSteven Rostedt 	 * Ring buffer is disabled from recording, here's a good place
3812659f451fSSteven Rostedt 	 * to check the integrity of the ring buffer.
3813659f451fSSteven Rostedt 	 */
3814659f451fSSteven Rostedt 	rb_check_pages(cpu_buffer);
3815659f451fSSteven Rostedt
38167a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
381783f40318SVaibhav Nagarnaik 	atomic_dec(&cpu_buffer->buffer->resize_disabled);
38187a8e76a3SSteven Rostedt 	kfree(iter);
38197a8e76a3SSteven Rostedt }
3820c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
38217a8e76a3SSteven Rostedt
38227a8e76a3SSteven Rostedt /**
38237a8e76a3SSteven Rostedt  * ring_buffer_read - read the next item in the ring buffer by the iterator
38247a8e76a3SSteven Rostedt  * @iter: The ring buffer iterator
38257a8e76a3SSteven Rostedt  * @ts: The time stamp of the event read.
38267a8e76a3SSteven Rostedt  *
38277a8e76a3SSteven Rostedt  * This reads the next event in the ring buffer and increments the iterator.
38287a8e76a3SSteven Rostedt  */
38297a8e76a3SSteven Rostedt struct ring_buffer_event *
38307a8e76a3SSteven Rostedt ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
38317a8e76a3SSteven Rostedt {
38327a8e76a3SSteven Rostedt 	struct ring_buffer_event *event;
3833f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3834f83c9d0fSSteven Rostedt 	unsigned long flags;
38357a8e76a3SSteven Rostedt
38365389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
38377e9391cfSSteven Rostedt  again:
3838f83c9d0fSSteven Rostedt 	event = rb_iter_peek(iter, ts);
38397a8e76a3SSteven Rostedt 	if (!event)
3840f83c9d0fSSteven Rostedt 		goto out;
38417a8e76a3SSteven Rostedt
38427e9391cfSSteven Rostedt 	if (event->type_len == RINGBUF_TYPE_PADDING)
38437e9391cfSSteven Rostedt 		goto again;
38447e9391cfSSteven Rostedt
38457a8e76a3SSteven Rostedt 	rb_advance_iter(iter);
3846f83c9d0fSSteven Rostedt  out:
38475389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
38487a8e76a3SSteven Rostedt
38497a8e76a3SSteven Rostedt 	return event;
38507a8e76a3SSteven Rostedt }
3851c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read);
38527a8e76a3SSteven Rostedt
38537a8e76a3SSteven Rostedt /**
38547a8e76a3SSteven Rostedt  * ring_buffer_size - return the size of the ring buffer (in bytes)
38557a8e76a3SSteven Rostedt  * @buffer: The ring buffer.
 * @cpu: The per CPU buffer to get the size of.
38567a8e76a3SSteven Rostedt  */
3857438ced17SVaibhav Nagarnaik unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
38587a8e76a3SSteven Rostedt {
3859438ced17SVaibhav Nagarnaik 	/*
3860438ced17SVaibhav Nagarnaik 	 * Earlier, this method returned
3861438ced17SVaibhav Nagarnaik 	 *	BUF_PAGE_SIZE * buffer->nr_pages
3862438ced17SVaibhav Nagarnaik 	 * Since the nr_pages field is now removed, we have converted this to
3863438ced17SVaibhav Nagarnaik 	 * return the per cpu buffer value.
3864438ced17SVaibhav Nagarnaik */ 3865438ced17SVaibhav Nagarnaik if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3866438ced17SVaibhav Nagarnaik return 0; 3867438ced17SVaibhav Nagarnaik 3868438ced17SVaibhav Nagarnaik return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; 38697a8e76a3SSteven Rostedt } 3870c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_size); 38717a8e76a3SSteven Rostedt 38727a8e76a3SSteven Rostedt static void 38737a8e76a3SSteven Rostedt rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 38747a8e76a3SSteven Rostedt { 387577ae365eSSteven Rostedt rb_head_page_deactivate(cpu_buffer); 387677ae365eSSteven Rostedt 38777a8e76a3SSteven Rostedt cpu_buffer->head_page 38783adc54faSSteven Rostedt = list_entry(cpu_buffer->pages, struct buffer_page, list); 3879bf41a158SSteven Rostedt local_set(&cpu_buffer->head_page->write, 0); 3880778c55d4SSteven Rostedt local_set(&cpu_buffer->head_page->entries, 0); 3881abc9b56dSSteven Rostedt local_set(&cpu_buffer->head_page->page->commit, 0); 38827a8e76a3SSteven Rostedt 38836f807acdSSteven Rostedt cpu_buffer->head_page->read = 0; 3884bf41a158SSteven Rostedt 3885bf41a158SSteven Rostedt cpu_buffer->tail_page = cpu_buffer->head_page; 3886bf41a158SSteven Rostedt cpu_buffer->commit_page = cpu_buffer->head_page; 3887bf41a158SSteven Rostedt 3888bf41a158SSteven Rostedt INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 38895040b4b7SVaibhav Nagarnaik INIT_LIST_HEAD(&cpu_buffer->new_pages); 3890bf41a158SSteven Rostedt local_set(&cpu_buffer->reader_page->write, 0); 3891778c55d4SSteven Rostedt local_set(&cpu_buffer->reader_page->entries, 0); 3892abc9b56dSSteven Rostedt local_set(&cpu_buffer->reader_page->page->commit, 0); 38936f807acdSSteven Rostedt cpu_buffer->reader_page->read = 0; 3894d769041fSSteven Rostedt 3895c64e148aSVaibhav Nagarnaik local_set(&cpu_buffer->entries_bytes, 0); 389677ae365eSSteven Rostedt local_set(&cpu_buffer->overrun, 0); 3897884bfe89SSlava Pestov local_set(&cpu_buffer->commit_overrun, 0); 3898884bfe89SSlava Pestov local_set(&cpu_buffer->dropped_events, 0); 3899e4906effSSteven Rostedt local_set(&cpu_buffer->entries, 0); 3900fa743953SSteven Rostedt local_set(&cpu_buffer->committing, 0); 3901fa743953SSteven Rostedt local_set(&cpu_buffer->commits, 0); 390277ae365eSSteven Rostedt cpu_buffer->read = 0; 3903c64e148aSVaibhav Nagarnaik cpu_buffer->read_bytes = 0; 390469507c06SSteven Rostedt 390569507c06SSteven Rostedt cpu_buffer->write_stamp = 0; 390669507c06SSteven Rostedt cpu_buffer->read_stamp = 0; 390777ae365eSSteven Rostedt 390866a8cb95SSteven Rostedt cpu_buffer->lost_events = 0; 390966a8cb95SSteven Rostedt cpu_buffer->last_overrun = 0; 391066a8cb95SSteven Rostedt 391177ae365eSSteven Rostedt rb_head_page_activate(cpu_buffer); 39127a8e76a3SSteven Rostedt } 39137a8e76a3SSteven Rostedt 39147a8e76a3SSteven Rostedt /** 39157a8e76a3SSteven Rostedt * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer 39167a8e76a3SSteven Rostedt * @buffer: The ring buffer to reset a per cpu buffer of 39177a8e76a3SSteven Rostedt * @cpu: The CPU buffer to be reset 39187a8e76a3SSteven Rostedt */ 39197a8e76a3SSteven Rostedt void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) 39207a8e76a3SSteven Rostedt { 39217a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 39227a8e76a3SSteven Rostedt unsigned long flags; 39237a8e76a3SSteven Rostedt 39249e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 39258aabee57SSteven Rostedt return; 39267a8e76a3SSteven Rostedt 392783f40318SVaibhav Nagarnaik 
	atomic_inc(&buffer->resize_disabled);
392841ede23eSSteven Rostedt 	atomic_inc(&cpu_buffer->record_disabled);
392941ede23eSSteven Rostedt
393083f40318SVaibhav Nagarnaik 	/* Make sure all commits have finished */
393183f40318SVaibhav Nagarnaik 	synchronize_sched();
393283f40318SVaibhav Nagarnaik
39335389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3934f83c9d0fSSteven Rostedt
393541b6a95dSSteven Rostedt 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
393641b6a95dSSteven Rostedt 		goto out;
393741b6a95dSSteven Rostedt
39380199c4e6SThomas Gleixner 	arch_spin_lock(&cpu_buffer->lock);
39397a8e76a3SSteven Rostedt
39407a8e76a3SSteven Rostedt 	rb_reset_cpu(cpu_buffer);
39417a8e76a3SSteven Rostedt
39420199c4e6SThomas Gleixner 	arch_spin_unlock(&cpu_buffer->lock);
3943f83c9d0fSSteven Rostedt
394441b6a95dSSteven Rostedt  out:
39455389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
394641ede23eSSteven Rostedt
394741ede23eSSteven Rostedt 	atomic_dec(&cpu_buffer->record_disabled);
394883f40318SVaibhav Nagarnaik 	atomic_dec(&buffer->resize_disabled);
39497a8e76a3SSteven Rostedt }
3950c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
39517a8e76a3SSteven Rostedt
39527a8e76a3SSteven Rostedt /**
39537a8e76a3SSteven Rostedt  * ring_buffer_reset - reset a ring buffer
39547a8e76a3SSteven Rostedt  * @buffer: The ring buffer to reset all cpu buffers
39557a8e76a3SSteven Rostedt  */
39567a8e76a3SSteven Rostedt void ring_buffer_reset(struct ring_buffer *buffer)
39577a8e76a3SSteven Rostedt {
39587a8e76a3SSteven Rostedt 	int cpu;
39597a8e76a3SSteven Rostedt
39607a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu)
3961d769041fSSteven Rostedt 		ring_buffer_reset_cpu(buffer, cpu);
39627a8e76a3SSteven Rostedt }
3963c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset);
39647a8e76a3SSteven Rostedt
39657a8e76a3SSteven Rostedt /**
39667a8e76a3SSteven Rostedt  * ring_buffer_empty - is the ring buffer empty?
39677a8e76a3SSteven Rostedt  * @buffer: The ring buffer to test
39687a8e76a3SSteven Rostedt  */
39697a8e76a3SSteven Rostedt int ring_buffer_empty(struct ring_buffer *buffer)
39707a8e76a3SSteven Rostedt {
39717a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
3972d4788207SSteven Rostedt 	unsigned long flags;
39738d707e8eSSteven Rostedt 	int dolock;
39747a8e76a3SSteven Rostedt 	int cpu;
3975d4788207SSteven Rostedt 	int ret;
39767a8e76a3SSteven Rostedt
39778d707e8eSSteven Rostedt 	dolock = rb_ok_to_lock();
39787a8e76a3SSteven Rostedt
39797a8e76a3SSteven Rostedt 	/* yes this is racy, but if you don't like the race, lock the buffer */
39807a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
39817a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
39828d707e8eSSteven Rostedt 		local_irq_save(flags);
39838d707e8eSSteven Rostedt 		if (dolock)
39845389f6faSThomas Gleixner 			raw_spin_lock(&cpu_buffer->reader_lock);
3985d4788207SSteven Rostedt 		ret = rb_per_cpu_empty(cpu_buffer);
39868d707e8eSSteven Rostedt 		if (dolock)
39875389f6faSThomas Gleixner 			raw_spin_unlock(&cpu_buffer->reader_lock);
39888d707e8eSSteven Rostedt 		local_irq_restore(flags);
39898d707e8eSSteven Rostedt
3990d4788207SSteven Rostedt 		if (!ret)
39917a8e76a3SSteven Rostedt 			return 0;
39927a8e76a3SSteven Rostedt 	}
3993554f786eSSteven Rostedt
39947a8e76a3SSteven Rostedt 	return 1;
39957a8e76a3SSteven Rostedt }
3996c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty);
39977a8e76a3SSteven Rostedt
39987a8e76a3SSteven Rostedt /**
39997a8e76a3SSteven Rostedt  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
40007a8e76a3SSteven Rostedt  * @buffer: The ring buffer
40017a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to test
40027a8e76a3SSteven Rostedt  */
40037a8e76a3SSteven Rostedt int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
40047a8e76a3SSteven Rostedt {
40057a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
4006d4788207SSteven Rostedt 	unsigned long flags;
40078d707e8eSSteven Rostedt 	int dolock;
40088aabee57SSteven Rostedt 	int ret;
40097a8e76a3SSteven Rostedt
40109e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
40118aabee57SSteven Rostedt 		return 1;
40127a8e76a3SSteven Rostedt
40138d707e8eSSteven Rostedt 	dolock = rb_ok_to_lock();
4014554f786eSSteven Rostedt
40157a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
40168d707e8eSSteven Rostedt 	local_irq_save(flags);
40178d707e8eSSteven Rostedt 	if (dolock)
40185389f6faSThomas Gleixner 		raw_spin_lock(&cpu_buffer->reader_lock);
4019554f786eSSteven Rostedt 	ret = rb_per_cpu_empty(cpu_buffer);
40208d707e8eSSteven Rostedt 	if (dolock)
40215389f6faSThomas Gleixner 		raw_spin_unlock(&cpu_buffer->reader_lock);
40228d707e8eSSteven Rostedt 	local_irq_restore(flags);
4023554f786eSSteven Rostedt
4024554f786eSSteven Rostedt 	return ret;
40257a8e76a3SSteven Rostedt }
4026c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
40277a8e76a3SSteven Rostedt
402885bac32cSSteven Rostedt #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
40297a8e76a3SSteven Rostedt /**
40307a8e76a3SSteven Rostedt  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
40317a8e76a3SSteven Rostedt  * @buffer_a: One buffer to swap with
40327a8e76a3SSteven Rostedt  * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
40337a8e76a3SSteven Rostedt  *
40347a8e76a3SSteven Rostedt  * This function is useful for tracers that want to take a "snapshot"
40357a8e76a3SSteven Rostedt  * of a CPU buffer and have another backup buffer lying around;
40367a8e76a3SSteven Rostedt * it is expected that the tracer handles the cpu buffer not being 40377a8e76a3SSteven Rostedt * used at the moment. 40387a8e76a3SSteven Rostedt */ 40397a8e76a3SSteven Rostedt int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, 40407a8e76a3SSteven Rostedt struct ring_buffer *buffer_b, int cpu) 40417a8e76a3SSteven Rostedt { 40427a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer_a; 40437a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer_b; 4044554f786eSSteven Rostedt int ret = -EINVAL; 4045554f786eSSteven Rostedt 40469e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 40479e01c1b7SRusty Russell !cpumask_test_cpu(cpu, buffer_b->cpumask)) 4048554f786eSSteven Rostedt goto out; 40497a8e76a3SSteven Rostedt 4050438ced17SVaibhav Nagarnaik cpu_buffer_a = buffer_a->buffers[cpu]; 4051438ced17SVaibhav Nagarnaik cpu_buffer_b = buffer_b->buffers[cpu]; 4052438ced17SVaibhav Nagarnaik 40537a8e76a3SSteven Rostedt /* At least make sure the two buffers are somewhat the same */ 4054438ced17SVaibhav Nagarnaik if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) 4055554f786eSSteven Rostedt goto out; 4056554f786eSSteven Rostedt 4057554f786eSSteven Rostedt ret = -EAGAIN; 40587a8e76a3SSteven Rostedt 405997b17efeSSteven Rostedt if (ring_buffer_flags != RB_BUFFERS_ON) 4060554f786eSSteven Rostedt goto out; 406197b17efeSSteven Rostedt 406297b17efeSSteven Rostedt if (atomic_read(&buffer_a->record_disabled)) 4063554f786eSSteven Rostedt goto out; 406497b17efeSSteven Rostedt 406597b17efeSSteven Rostedt if (atomic_read(&buffer_b->record_disabled)) 4066554f786eSSteven Rostedt goto out; 406797b17efeSSteven Rostedt 406897b17efeSSteven Rostedt if (atomic_read(&cpu_buffer_a->record_disabled)) 4069554f786eSSteven Rostedt goto out; 407097b17efeSSteven Rostedt 407197b17efeSSteven Rostedt if (atomic_read(&cpu_buffer_b->record_disabled)) 4072554f786eSSteven Rostedt goto out; 407397b17efeSSteven Rostedt 40747a8e76a3SSteven Rostedt /* 40757a8e76a3SSteven Rostedt * We can't do a synchronize_sched here because this 40767a8e76a3SSteven Rostedt * function can be called in atomic context. 40777a8e76a3SSteven Rostedt * Normally this will be called from the same CPU as cpu. 40787a8e76a3SSteven Rostedt * If not it's up to the caller to protect this. 
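	 * (The record_disabled increments below are what keep writers
	 *  out of both per-cpu buffers while the four pointers are
	 *  re-aimed.)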
40797a8e76a3SSteven Rostedt 	 */
40807a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer_a->record_disabled);
40817a8e76a3SSteven Rostedt 	atomic_inc(&cpu_buffer_b->record_disabled);
40827a8e76a3SSteven Rostedt
408398277991SSteven Rostedt 	ret = -EBUSY;
408498277991SSteven Rostedt 	if (local_read(&cpu_buffer_a->committing))
408598277991SSteven Rostedt 		goto out_dec;
408698277991SSteven Rostedt 	if (local_read(&cpu_buffer_b->committing))
408798277991SSteven Rostedt 		goto out_dec;
408898277991SSteven Rostedt
40897a8e76a3SSteven Rostedt 	buffer_a->buffers[cpu] = cpu_buffer_b;
40907a8e76a3SSteven Rostedt 	buffer_b->buffers[cpu] = cpu_buffer_a;
40917a8e76a3SSteven Rostedt
40927a8e76a3SSteven Rostedt 	cpu_buffer_b->buffer = buffer_a;
40937a8e76a3SSteven Rostedt 	cpu_buffer_a->buffer = buffer_b;
40947a8e76a3SSteven Rostedt
409598277991SSteven Rostedt 	ret = 0;
409698277991SSteven Rostedt
409798277991SSteven Rostedt  out_dec:
40987a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer_a->record_disabled);
40997a8e76a3SSteven Rostedt 	atomic_dec(&cpu_buffer_b->record_disabled);
4100554f786eSSteven Rostedt  out:
4101554f786eSSteven Rostedt 	return ret;
41027a8e76a3SSteven Rostedt }
4103c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
410485bac32cSSteven Rostedt #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
41057a8e76a3SSteven Rostedt
41068789a9e7SSteven Rostedt /**
41078789a9e7SSteven Rostedt  * ring_buffer_alloc_read_page - allocate a page to read from buffer
41088789a9e7SSteven Rostedt  * @buffer: the buffer to allocate for.
 * @cpu: the cpu buffer to allocate a page for
41098789a9e7SSteven Rostedt  *
41108789a9e7SSteven Rostedt  * This function is used in conjunction with ring_buffer_read_page.
41118789a9e7SSteven Rostedt  * When reading a full page from the ring buffer, these functions
41128789a9e7SSteven Rostedt  * can be used to speed up the process. The calling function should
41138789a9e7SSteven Rostedt  * allocate a few pages first with this function. Then when it
41148789a9e7SSteven Rostedt  * needs to get pages from the ring buffer, it passes the result
41158789a9e7SSteven Rostedt  * of this function into ring_buffer_read_page, which will swap
41168789a9e7SSteven Rostedt  * the page that was allocated, with the read page of the buffer.
41178789a9e7SSteven Rostedt  *
41188789a9e7SSteven Rostedt  * Returns:
41198789a9e7SSteven Rostedt  *  The page allocated, or NULL on error.
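 *
 * Illustrative use (error handling is the caller's choice):
 *
 *	bpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (!bpage)
 *		return -ENOMEM;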
41208789a9e7SSteven Rostedt  */
41217ea59064SVaibhav Nagarnaik void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
41228789a9e7SSteven Rostedt {
4123044fa782SSteven Rostedt 	struct buffer_data_page *bpage;
41247ea59064SVaibhav Nagarnaik 	struct page *page;
41258789a9e7SSteven Rostedt
4126d7ec4bfeSVaibhav Nagarnaik 	page = alloc_pages_node(cpu_to_node(cpu),
4127d7ec4bfeSVaibhav Nagarnaik 				GFP_KERNEL | __GFP_NORETRY, 0);
41287ea59064SVaibhav Nagarnaik 	if (!page)
41298789a9e7SSteven Rostedt 		return NULL;
41308789a9e7SSteven Rostedt
41317ea59064SVaibhav Nagarnaik 	bpage = page_address(page);
41328789a9e7SSteven Rostedt
4133ef7a4a16SSteven Rostedt 	rb_init_page(bpage);
4134ef7a4a16SSteven Rostedt
4135044fa782SSteven Rostedt 	return bpage;
41368789a9e7SSteven Rostedt }
4137d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
41388789a9e7SSteven Rostedt
41398789a9e7SSteven Rostedt /**
41408789a9e7SSteven Rostedt  * ring_buffer_free_read_page - free an allocated read page
41418789a9e7SSteven Rostedt  * @buffer: the buffer the page was allocated for
41428789a9e7SSteven Rostedt  * @data: the page to free
41438789a9e7SSteven Rostedt  *
41448789a9e7SSteven Rostedt  * Free a page allocated from ring_buffer_alloc_read_page.
41458789a9e7SSteven Rostedt  */
41468789a9e7SSteven Rostedt void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
41478789a9e7SSteven Rostedt {
41488789a9e7SSteven Rostedt 	free_page((unsigned long)data);
41498789a9e7SSteven Rostedt }
4150d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
41518789a9e7SSteven Rostedt
41528789a9e7SSteven Rostedt /**
41538789a9e7SSteven Rostedt  * ring_buffer_read_page - extract a page from the ring buffer
41548789a9e7SSteven Rostedt  * @buffer: buffer to extract from
41558789a9e7SSteven Rostedt  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4156ef7a4a16SSteven Rostedt  * @len: amount to extract
41578789a9e7SSteven Rostedt  * @cpu: the cpu of the buffer to extract
41588789a9e7SSteven Rostedt  * @full: should the extraction only happen when the page is full.
41598789a9e7SSteven Rostedt  *
41608789a9e7SSteven Rostedt  * This function will pull out a page from the ring buffer and consume it.
41618789a9e7SSteven Rostedt  * @data_page must be the address of the variable that was returned
41628789a9e7SSteven Rostedt  * from ring_buffer_alloc_read_page. This is because the page might be used
41638789a9e7SSteven Rostedt  * to swap with a page in the ring buffer.
41648789a9e7SSteven Rostedt  *
41658789a9e7SSteven Rostedt  * for example:
4166b85fa01eSLai Jiangshan  *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
41678789a9e7SSteven Rostedt  *	if (!rpage)
41688789a9e7SSteven Rostedt  *		return error;
4169ef7a4a16SSteven Rostedt  *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4170667d2412SLai Jiangshan  *	if (ret >= 0)
4171667d2412SLai Jiangshan  *		process_page(rpage, ret);
41728789a9e7SSteven Rostedt  *
41738789a9e7SSteven Rostedt  * When @full is set, the function will not return the data unless
41748789a9e7SSteven Rostedt  * the writer is off the reader page.
41758789a9e7SSteven Rostedt  *
41768789a9e7SSteven Rostedt  * Note: it is up to the calling functions to handle sleeps and wakeups.
41778789a9e7SSteven Rostedt  * The ring buffer can be used anywhere in the kernel and can not
41788789a9e7SSteven Rostedt  * blindly call wake_up. The layer that uses the ring buffer must be
41798789a9e7SSteven Rostedt  * responsible for that.
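 *
 * When the page is no longer needed, hand it back with
 * ring_buffer_free_read_page().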
41808789a9e7SSteven Rostedt * 41818789a9e7SSteven Rostedt * Returns: 4182667d2412SLai Jiangshan * >=0 if data has been transferred, returns the offset of consumed data. 4183667d2412SLai Jiangshan * <0 if no data has been transferred. 41848789a9e7SSteven Rostedt */ 41858789a9e7SSteven Rostedt int ring_buffer_read_page(struct ring_buffer *buffer, 4186ef7a4a16SSteven Rostedt void **data_page, size_t len, int cpu, int full) 41878789a9e7SSteven Rostedt { 41888789a9e7SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 41898789a9e7SSteven Rostedt struct ring_buffer_event *event; 4190044fa782SSteven Rostedt struct buffer_data_page *bpage; 4191ef7a4a16SSteven Rostedt struct buffer_page *reader; 4192ff0ff84aSSteven Rostedt unsigned long missed_events; 41938789a9e7SSteven Rostedt unsigned long flags; 4194ef7a4a16SSteven Rostedt unsigned int commit; 4195667d2412SLai Jiangshan unsigned int read; 41964f3640f8SSteven Rostedt u64 save_timestamp; 4197667d2412SLai Jiangshan int ret = -1; 41988789a9e7SSteven Rostedt 4199554f786eSSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4200554f786eSSteven Rostedt goto out; 4201554f786eSSteven Rostedt 4202474d32b6SSteven Rostedt /* 4203474d32b6SSteven Rostedt * If len is not big enough to hold the page header, then 4204474d32b6SSteven Rostedt * we can not copy anything. 4205474d32b6SSteven Rostedt */ 4206474d32b6SSteven Rostedt if (len <= BUF_PAGE_HDR_SIZE) 4207554f786eSSteven Rostedt goto out; 4208474d32b6SSteven Rostedt 4209474d32b6SSteven Rostedt len -= BUF_PAGE_HDR_SIZE; 4210474d32b6SSteven Rostedt 42118789a9e7SSteven Rostedt if (!data_page) 4212554f786eSSteven Rostedt goto out; 42138789a9e7SSteven Rostedt 4214044fa782SSteven Rostedt bpage = *data_page; 4215044fa782SSteven Rostedt if (!bpage) 4216554f786eSSteven Rostedt goto out; 42178789a9e7SSteven Rostedt 42185389f6faSThomas Gleixner raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 42198789a9e7SSteven Rostedt 4220ef7a4a16SSteven Rostedt reader = rb_get_reader_page(cpu_buffer); 4221ef7a4a16SSteven Rostedt if (!reader) 4222554f786eSSteven Rostedt goto out_unlock; 42238789a9e7SSteven Rostedt 4224ef7a4a16SSteven Rostedt event = rb_reader_event(cpu_buffer); 4225667d2412SLai Jiangshan 4226ef7a4a16SSteven Rostedt read = reader->read; 4227ef7a4a16SSteven Rostedt commit = rb_page_commit(reader); 4228ef7a4a16SSteven Rostedt 422966a8cb95SSteven Rostedt /* Check if any events were dropped */ 4230ff0ff84aSSteven Rostedt missed_events = cpu_buffer->lost_events; 423166a8cb95SSteven Rostedt 42328789a9e7SSteven Rostedt /* 4233474d32b6SSteven Rostedt * If this page has been partially read or 4234474d32b6SSteven Rostedt * if len is not big enough to read the rest of the page or 4235474d32b6SSteven Rostedt * a writer is still on the page, then 4236474d32b6SSteven Rostedt * we must copy the data from the page to the buffer. 4237474d32b6SSteven Rostedt * Otherwise, we can simply swap the page with the one passed in. 
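	 * (The swap is the cheap, zero-copy case; the copy loop below
	 *  runs whenever the page cannot be handed over wholesale.)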
42388789a9e7SSteven Rostedt */ 4239474d32b6SSteven Rostedt if (read || (len < (commit - read)) || 4240ef7a4a16SSteven Rostedt cpu_buffer->reader_page == cpu_buffer->commit_page) { 4241667d2412SLai Jiangshan struct buffer_data_page *rpage = cpu_buffer->reader_page->page; 4242474d32b6SSteven Rostedt unsigned int rpos = read; 4243474d32b6SSteven Rostedt unsigned int pos = 0; 4244ef7a4a16SSteven Rostedt unsigned int size; 42458789a9e7SSteven Rostedt 42468789a9e7SSteven Rostedt if (full) 4247554f786eSSteven Rostedt goto out_unlock; 42488789a9e7SSteven Rostedt 4249ef7a4a16SSteven Rostedt if (len > (commit - read)) 4250ef7a4a16SSteven Rostedt len = (commit - read); 4251ef7a4a16SSteven Rostedt 425269d1b839SSteven Rostedt /* Always keep the time extend and data together */ 425369d1b839SSteven Rostedt size = rb_event_ts_length(event); 4254ef7a4a16SSteven Rostedt 4255ef7a4a16SSteven Rostedt if (len < size) 4256554f786eSSteven Rostedt goto out_unlock; 4257ef7a4a16SSteven Rostedt 42584f3640f8SSteven Rostedt /* save the current timestamp, since the user will need it */ 42594f3640f8SSteven Rostedt save_timestamp = cpu_buffer->read_stamp; 42604f3640f8SSteven Rostedt 4261ef7a4a16SSteven Rostedt /* Need to copy one event at a time */ 4262ef7a4a16SSteven Rostedt do { 4263e1e35927SDavid Sharp /* We need the size of one event, because 4264e1e35927SDavid Sharp * rb_advance_reader only advances by one event, 4265e1e35927SDavid Sharp * whereas rb_event_ts_length may include the size of 4266e1e35927SDavid Sharp * one or two events. 4267e1e35927SDavid Sharp * We have already ensured there's enough space if this 4268e1e35927SDavid Sharp * is a time extend. */ 4269e1e35927SDavid Sharp size = rb_event_length(event); 4270474d32b6SSteven Rostedt memcpy(bpage->data + pos, rpage->data + rpos, size); 4271ef7a4a16SSteven Rostedt 4272ef7a4a16SSteven Rostedt len -= size; 4273ef7a4a16SSteven Rostedt 4274ef7a4a16SSteven Rostedt rb_advance_reader(cpu_buffer); 4275474d32b6SSteven Rostedt rpos = reader->read; 4276474d32b6SSteven Rostedt pos += size; 4277ef7a4a16SSteven Rostedt 427818fab912SHuang Ying if (rpos >= commit) 427918fab912SHuang Ying break; 428018fab912SHuang Ying 4281ef7a4a16SSteven Rostedt event = rb_reader_event(cpu_buffer); 428269d1b839SSteven Rostedt /* Always keep the time extend and data together */ 428369d1b839SSteven Rostedt size = rb_event_ts_length(event); 4284e1e35927SDavid Sharp } while (len >= size); 4285667d2412SLai Jiangshan 4286667d2412SLai Jiangshan /* update bpage */ 4287ef7a4a16SSteven Rostedt local_set(&bpage->commit, pos); 42884f3640f8SSteven Rostedt bpage->time_stamp = save_timestamp; 4289ef7a4a16SSteven Rostedt 4290474d32b6SSteven Rostedt /* we copied everything to the beginning */ 4291474d32b6SSteven Rostedt read = 0; 42928789a9e7SSteven Rostedt } else { 4293afbab76aSSteven Rostedt /* update the entry counter */ 429477ae365eSSteven Rostedt cpu_buffer->read += rb_page_entries(reader); 4295c64e148aSVaibhav Nagarnaik cpu_buffer->read_bytes += BUF_PAGE_SIZE; 4296afbab76aSSteven Rostedt 42978789a9e7SSteven Rostedt /* swap the pages */ 4298044fa782SSteven Rostedt rb_init_page(bpage); 4299ef7a4a16SSteven Rostedt bpage = reader->page; 4300ef7a4a16SSteven Rostedt reader->page = *data_page; 4301ef7a4a16SSteven Rostedt local_set(&reader->write, 0); 4302778c55d4SSteven Rostedt local_set(&reader->entries, 0); 4303ef7a4a16SSteven Rostedt reader->read = 0; 4304044fa782SSteven Rostedt *data_page = bpage; 4305ff0ff84aSSteven Rostedt 4306ff0ff84aSSteven Rostedt /* 4307ff0ff84aSSteven Rostedt * Use the real_end for 
the data size, 4308ff0ff84aSSteven Rostedt * This gives us a chance to store the lost events 4309ff0ff84aSSteven Rostedt * on the page. 4310ff0ff84aSSteven Rostedt */ 4311ff0ff84aSSteven Rostedt if (reader->real_end) 4312ff0ff84aSSteven Rostedt local_set(&bpage->commit, reader->real_end); 4313ef7a4a16SSteven Rostedt } 4314ef7a4a16SSteven Rostedt ret = read; 4315ef7a4a16SSteven Rostedt 431666a8cb95SSteven Rostedt cpu_buffer->lost_events = 0; 43172711ca23SSteven Rostedt 43182711ca23SSteven Rostedt commit = local_read(&bpage->commit); 431966a8cb95SSteven Rostedt /* 432066a8cb95SSteven Rostedt * Set a flag in the commit field if we lost events 432166a8cb95SSteven Rostedt */ 4322ff0ff84aSSteven Rostedt if (missed_events) { 4323ff0ff84aSSteven Rostedt /* If there is room at the end of the page to save the 4324ff0ff84aSSteven Rostedt * missed events, then record it there. 4325ff0ff84aSSteven Rostedt */ 4326ff0ff84aSSteven Rostedt if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) { 4327ff0ff84aSSteven Rostedt memcpy(&bpage->data[commit], &missed_events, 4328ff0ff84aSSteven Rostedt sizeof(missed_events)); 4329ff0ff84aSSteven Rostedt local_add(RB_MISSED_STORED, &bpage->commit); 43302711ca23SSteven Rostedt commit += sizeof(missed_events); 4331ff0ff84aSSteven Rostedt } 433266a8cb95SSteven Rostedt local_add(RB_MISSED_EVENTS, &bpage->commit); 4333ff0ff84aSSteven Rostedt } 433466a8cb95SSteven Rostedt 43352711ca23SSteven Rostedt /* 43362711ca23SSteven Rostedt * This page may be off to user land. Zero it out here. 43372711ca23SSteven Rostedt */ 43382711ca23SSteven Rostedt if (commit < BUF_PAGE_SIZE) 43392711ca23SSteven Rostedt memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); 43402711ca23SSteven Rostedt 4341554f786eSSteven Rostedt out_unlock: 43425389f6faSThomas Gleixner raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 43438789a9e7SSteven Rostedt 4344554f786eSSteven Rostedt out: 43458789a9e7SSteven Rostedt return ret; 43468789a9e7SSteven Rostedt } 4347d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_read_page); 43488789a9e7SSteven Rostedt 434959222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU 435009c9e84dSFrederic Weisbecker static int rb_cpu_notify(struct notifier_block *self, 4351554f786eSSteven Rostedt unsigned long action, void *hcpu) 4352554f786eSSteven Rostedt { 4353554f786eSSteven Rostedt struct ring_buffer *buffer = 4354554f786eSSteven Rostedt container_of(self, struct ring_buffer, cpu_notify); 4355554f786eSSteven Rostedt long cpu = (long)hcpu; 4356438ced17SVaibhav Nagarnaik int cpu_i, nr_pages_same; 4357438ced17SVaibhav Nagarnaik unsigned int nr_pages; 4358554f786eSSteven Rostedt 4359554f786eSSteven Rostedt switch (action) { 4360554f786eSSteven Rostedt case CPU_UP_PREPARE: 4361554f786eSSteven Rostedt case CPU_UP_PREPARE_FROZEN: 43623f237a79SRusty Russell if (cpumask_test_cpu(cpu, buffer->cpumask)) 4363554f786eSSteven Rostedt return NOTIFY_OK; 4364554f786eSSteven Rostedt 4365438ced17SVaibhav Nagarnaik nr_pages = 0; 4366438ced17SVaibhav Nagarnaik nr_pages_same = 1; 4367438ced17SVaibhav Nagarnaik /* check if all cpu sizes are same */ 4368438ced17SVaibhav Nagarnaik for_each_buffer_cpu(buffer, cpu_i) { 4369438ced17SVaibhav Nagarnaik /* fill in the size from first enabled cpu */ 4370438ced17SVaibhav Nagarnaik if (nr_pages == 0) 4371438ced17SVaibhav Nagarnaik nr_pages = buffer->buffers[cpu_i]->nr_pages; 4372438ced17SVaibhav Nagarnaik if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { 4373438ced17SVaibhav Nagarnaik nr_pages_same = 0; 4374438ced17SVaibhav Nagarnaik break; 
4375438ced17SVaibhav Nagarnaik } 4376438ced17SVaibhav Nagarnaik } 4377438ced17SVaibhav Nagarnaik /* allocate minimum pages, user can later expand it */ 4378438ced17SVaibhav Nagarnaik if (!nr_pages_same) 4379438ced17SVaibhav Nagarnaik nr_pages = 2; 4380554f786eSSteven Rostedt buffer->buffers[cpu] = 4381438ced17SVaibhav Nagarnaik rb_allocate_cpu_buffer(buffer, nr_pages, cpu); 4382554f786eSSteven Rostedt if (!buffer->buffers[cpu]) { 4383554f786eSSteven Rostedt WARN(1, "failed to allocate ring buffer on CPU %ld\n", 4384554f786eSSteven Rostedt cpu); 4385554f786eSSteven Rostedt return NOTIFY_OK; 4386554f786eSSteven Rostedt } 4387554f786eSSteven Rostedt smp_wmb(); 43883f237a79SRusty Russell cpumask_set_cpu(cpu, buffer->cpumask); 4389554f786eSSteven Rostedt break; 4390554f786eSSteven Rostedt case CPU_DOWN_PREPARE: 4391554f786eSSteven Rostedt case CPU_DOWN_PREPARE_FROZEN: 4392554f786eSSteven Rostedt /* 4393554f786eSSteven Rostedt * Do nothing. 4394554f786eSSteven Rostedt * If we were to free the buffer, then the user would 4395554f786eSSteven Rostedt * lose any trace that was in the buffer. 4396554f786eSSteven Rostedt */ 4397554f786eSSteven Rostedt break; 4398554f786eSSteven Rostedt default: 4399554f786eSSteven Rostedt break; 4400554f786eSSteven Rostedt } 4401554f786eSSteven Rostedt return NOTIFY_OK; 4402554f786eSSteven Rostedt } 4403554f786eSSteven Rostedt #endif 4404
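/*
 * Illustrative only, and deliberately kept under #if 0 so it is never
 * built: a minimal consuming reader assembled from the API above.
 * process_event() is a made-up callback; every other call is the real
 * interface exported by this file.
 */
#if 0
static void example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost_events;
	u64 ts;

	/* Each call returns the next event and consumes it. */
	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost_events))) {
		if (lost_events)
			pr_warn("ring buffer dropped %lu events\n",
				lost_events);
		process_event(ring_buffer_event_data(event),
			      ring_buffer_event_length(event), ts);
	}
}
#endif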