17a8e76a3SSteven Rostedt /* 27a8e76a3SSteven Rostedt * Generic ring buffer 37a8e76a3SSteven Rostedt * 47a8e76a3SSteven Rostedt * Copyright (C) 2008 Steven Rostedt <[email protected]> 57a8e76a3SSteven Rostedt */ 67a8e76a3SSteven Rostedt #include <linux/ring_buffer.h> 714131f2fSIngo Molnar #include <linux/trace_clock.h> 878d904b4SSteven Rostedt #include <linux/ftrace_irq.h> 97a8e76a3SSteven Rostedt #include <linux/spinlock.h> 107a8e76a3SSteven Rostedt #include <linux/debugfs.h> 117a8e76a3SSteven Rostedt #include <linux/uaccess.h> 12a81bd80aSSteven Rostedt #include <linux/hardirq.h> 137a8e76a3SSteven Rostedt #include <linux/module.h> 147a8e76a3SSteven Rostedt #include <linux/percpu.h> 157a8e76a3SSteven Rostedt #include <linux/mutex.h> 167a8e76a3SSteven Rostedt #include <linux/init.h> 177a8e76a3SSteven Rostedt #include <linux/hash.h> 187a8e76a3SSteven Rostedt #include <linux/list.h> 19554f786eSSteven Rostedt #include <linux/cpu.h> 207a8e76a3SSteven Rostedt #include <linux/fs.h> 217a8e76a3SSteven Rostedt 22182e9f5fSSteven Rostedt #include "trace.h" 23182e9f5fSSteven Rostedt 24033601a3SSteven Rostedt /* 25d1b182a8SSteven Rostedt * The ring buffer header is special. We must manually up keep it. 26d1b182a8SSteven Rostedt */ 27d1b182a8SSteven Rostedt int ring_buffer_print_entry_header(struct trace_seq *s) 28d1b182a8SSteven Rostedt { 29d1b182a8SSteven Rostedt int ret; 30d1b182a8SSteven Rostedt 31334d4169SLai Jiangshan ret = trace_seq_printf(s, "# compressed entry header\n"); 32334d4169SLai Jiangshan ret = trace_seq_printf(s, "\ttype_len : 5 bits\n"); 33d1b182a8SSteven Rostedt ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n"); 34d1b182a8SSteven Rostedt ret = trace_seq_printf(s, "\tarray : 32 bits\n"); 35d1b182a8SSteven Rostedt ret = trace_seq_printf(s, "\n"); 36d1b182a8SSteven Rostedt ret = trace_seq_printf(s, "\tpadding : type == %d\n", 37d1b182a8SSteven Rostedt RINGBUF_TYPE_PADDING); 38d1b182a8SSteven Rostedt ret = trace_seq_printf(s, "\ttime_extend : type == %d\n", 39d1b182a8SSteven Rostedt RINGBUF_TYPE_TIME_EXTEND); 40334d4169SLai Jiangshan ret = trace_seq_printf(s, "\tdata max type_len == %d\n", 41334d4169SLai Jiangshan RINGBUF_TYPE_DATA_TYPE_LEN_MAX); 42d1b182a8SSteven Rostedt 43d1b182a8SSteven Rostedt return ret; 44d1b182a8SSteven Rostedt } 45d1b182a8SSteven Rostedt 46d1b182a8SSteven Rostedt /* 475cc98548SSteven Rostedt * The ring buffer is made up of a list of pages. A separate list of pages is 485cc98548SSteven Rostedt * allocated for each CPU. A writer may only write to a buffer that is 495cc98548SSteven Rostedt * associated with the CPU it is currently executing on. A reader may read 505cc98548SSteven Rostedt * from any per cpu buffer. 515cc98548SSteven Rostedt * 525cc98548SSteven Rostedt * The reader is special. For each per cpu buffer, the reader has its own 535cc98548SSteven Rostedt * reader page. When a reader has read the entire reader page, this reader 545cc98548SSteven Rostedt * page is swapped with another page in the ring buffer. 555cc98548SSteven Rostedt * 565cc98548SSteven Rostedt * Now, as long as the writer is off the reader page, the reader can do what 575cc98548SSteven Rostedt * ever it wants with that page. The writer will never write to that page 585cc98548SSteven Rostedt * again (as long as it is out of the ring buffer). 595cc98548SSteven Rostedt * 605cc98548SSteven Rostedt * Here's some silly ASCII art. 
615cc98548SSteven Rostedt  *
625cc98548SSteven Rostedt  *   +------+
635cc98548SSteven Rostedt  *   |reader|          RING BUFFER
645cc98548SSteven Rostedt  *   |page  |
655cc98548SSteven Rostedt  *   +------+        +---+   +---+   +---+
665cc98548SSteven Rostedt  *                   |   |-->|   |-->|   |
675cc98548SSteven Rostedt  *                   +---+   +---+   +---+
685cc98548SSteven Rostedt  *                     ^               |
695cc98548SSteven Rostedt  *                     |               |
705cc98548SSteven Rostedt  *                     +---------------+
715cc98548SSteven Rostedt  *
725cc98548SSteven Rostedt  *
735cc98548SSteven Rostedt  *   +------+
745cc98548SSteven Rostedt  *   |reader|          RING BUFFER
755cc98548SSteven Rostedt  *   |page  |------------------v
765cc98548SSteven Rostedt  *   +------+        +---+   +---+   +---+
775cc98548SSteven Rostedt  *                   |   |-->|   |-->|   |
785cc98548SSteven Rostedt  *                   +---+   +---+   +---+
795cc98548SSteven Rostedt  *                     ^               |
805cc98548SSteven Rostedt  *                     |               |
815cc98548SSteven Rostedt  *                     +---------------+
825cc98548SSteven Rostedt  *
835cc98548SSteven Rostedt  *
845cc98548SSteven Rostedt  *   +------+
855cc98548SSteven Rostedt  *   |reader|          RING BUFFER
865cc98548SSteven Rostedt  *   |page  |------------------v
875cc98548SSteven Rostedt  *   +------+        +---+   +---+   +---+
885cc98548SSteven Rostedt  *      ^            |   |-->|   |-->|   |
895cc98548SSteven Rostedt  *      |            +---+   +---+   +---+
905cc98548SSteven Rostedt  *      |                              |
915cc98548SSteven Rostedt  *      |                              |
925cc98548SSteven Rostedt  *      +------------------------------+
935cc98548SSteven Rostedt  *
945cc98548SSteven Rostedt  *
955cc98548SSteven Rostedt  *   +------+
965cc98548SSteven Rostedt  *   |buffer|          RING BUFFER
975cc98548SSteven Rostedt  *   |page  |------------------v
985cc98548SSteven Rostedt  *   +------+        +---+   +---+   +---+
995cc98548SSteven Rostedt  *      ^            |   |   |   |-->|   |
1005cc98548SSteven Rostedt  *      |   New      +---+   +---+   +---+
1015cc98548SSteven Rostedt  *      |  Reader------^               |
1025cc98548SSteven Rostedt  *      |   page                       |
1035cc98548SSteven Rostedt  *      +------------------------------+
1045cc98548SSteven Rostedt  *
1055cc98548SSteven Rostedt  *
1065cc98548SSteven Rostedt  * After we make this swap, the reader can hand this page off to the splice
1075cc98548SSteven Rostedt  * code and be done with it. It can even allocate a new page if it needs to
1085cc98548SSteven Rostedt  * and swap that into the ring buffer.
1095cc98548SSteven Rostedt  *
1105cc98548SSteven Rostedt  * We will be using cmpxchg soon to make all this lockless.
1115cc98548SSteven Rostedt  *
1125cc98548SSteven Rostedt  */
1135cc98548SSteven Rostedt 
1145cc98548SSteven Rostedt /*
115033601a3SSteven Rostedt  * A fast way to enable or disable all ring buffers is to
116033601a3SSteven Rostedt  * call tracing_on or tracing_off. Turning off the ring buffers
117033601a3SSteven Rostedt  * prevents all ring buffers from being recorded to.
118033601a3SSteven Rostedt  * Turning this switch on makes it OK to write to the
119033601a3SSteven Rostedt  * ring buffer, if the ring buffer itself is enabled.
120033601a3SSteven Rostedt  *
121033601a3SSteven Rostedt  * There are three layers that must be on in order to write
122033601a3SSteven Rostedt  * to the ring buffer.
123033601a3SSteven Rostedt  *
124033601a3SSteven Rostedt  * 1) This global flag must be set.
125033601a3SSteven Rostedt  * 2) The ring buffer must be enabled for recording.
126033601a3SSteven Rostedt  * 3) The per cpu buffer must be enabled for recording.
127033601a3SSteven Rostedt  *
128033601a3SSteven Rostedt  * In case of an anomaly, this global flag has a bit set that
129033601a3SSteven Rostedt  * will permanently disable all ring buffers.
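 *
 * An illustrative sketch (not a quote of the write path further down,
 * which performs these checks with more surrounding context): using the
 * fields defined in this file, a writer must get past all three layers
 * before it may reserve an event:
 *
 *	if (ring_buffer_flags != RB_BUFFERS_ON)		(layer 1)
 *		return NULL;
 *	if (atomic_read(&buffer->record_disabled))	(layer 2)
 *		return NULL;
 *	if (atomic_read(&cpu_buffer->record_disabled))	(layer 3)
 *		return NULL;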
130033601a3SSteven Rostedt */ 131033601a3SSteven Rostedt 132033601a3SSteven Rostedt /* 133033601a3SSteven Rostedt * Global flag to disable all recording to ring buffers 134033601a3SSteven Rostedt * This has two bits: ON, DISABLED 135033601a3SSteven Rostedt * 136033601a3SSteven Rostedt * ON DISABLED 137033601a3SSteven Rostedt * ---- ---------- 138033601a3SSteven Rostedt * 0 0 : ring buffers are off 139033601a3SSteven Rostedt * 1 0 : ring buffers are on 140033601a3SSteven Rostedt * X 1 : ring buffers are permanently disabled 141033601a3SSteven Rostedt */ 142033601a3SSteven Rostedt 143033601a3SSteven Rostedt enum { 144033601a3SSteven Rostedt RB_BUFFERS_ON_BIT = 0, 145033601a3SSteven Rostedt RB_BUFFERS_DISABLED_BIT = 1, 146033601a3SSteven Rostedt }; 147033601a3SSteven Rostedt 148033601a3SSteven Rostedt enum { 149033601a3SSteven Rostedt RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT, 150033601a3SSteven Rostedt RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT, 151033601a3SSteven Rostedt }; 152033601a3SSteven Rostedt 1535e39841cSHannes Eder static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; 154a3583244SSteven Rostedt 155474d32b6SSteven Rostedt #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data) 156474d32b6SSteven Rostedt 157a3583244SSteven Rostedt /** 158a3583244SSteven Rostedt * tracing_on - enable all tracing buffers 159a3583244SSteven Rostedt * 160a3583244SSteven Rostedt * This function enables all tracing buffers that may have been 161a3583244SSteven Rostedt * disabled with tracing_off. 162a3583244SSteven Rostedt */ 163a3583244SSteven Rostedt void tracing_on(void) 164a3583244SSteven Rostedt { 165033601a3SSteven Rostedt set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); 166a3583244SSteven Rostedt } 167c4f50183SRobert Richter EXPORT_SYMBOL_GPL(tracing_on); 168a3583244SSteven Rostedt 169a3583244SSteven Rostedt /** 170a3583244SSteven Rostedt * tracing_off - turn off all tracing buffers 171a3583244SSteven Rostedt * 172a3583244SSteven Rostedt * This function stops all tracing buffers from recording data. 173a3583244SSteven Rostedt * It does not disable any overhead the tracers themselves may 174a3583244SSteven Rostedt * be causing. This function simply causes all recording to 175a3583244SSteven Rostedt * the ring buffers to fail. 176a3583244SSteven Rostedt */ 177a3583244SSteven Rostedt void tracing_off(void) 178a3583244SSteven Rostedt { 179033601a3SSteven Rostedt clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); 180033601a3SSteven Rostedt } 181c4f50183SRobert Richter EXPORT_SYMBOL_GPL(tracing_off); 182033601a3SSteven Rostedt 183033601a3SSteven Rostedt /** 184033601a3SSteven Rostedt * tracing_off_permanent - permanently disable ring buffers 185033601a3SSteven Rostedt * 186033601a3SSteven Rostedt * This function, once called, will disable all ring buffers 187c3706f00SWenji Huang * permanently. 
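 *
 * A minimal usage sketch (the condition below is made up purely for
 * illustration):
 *
 *	if (suspected_buffer_corruption)
 *		tracing_off_permanent();
 *
 * Once the DISABLED bit is set, tracing_is_on() returns 0 and a later
 * tracing_on() cannot bring recording back, because the flags word can
 * never again equal RB_BUFFERS_ON.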
188033601a3SSteven Rostedt */ 189033601a3SSteven Rostedt void tracing_off_permanent(void) 190033601a3SSteven Rostedt { 191033601a3SSteven Rostedt set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); 192a3583244SSteven Rostedt } 193a3583244SSteven Rostedt 194988ae9d6SSteven Rostedt /** 195988ae9d6SSteven Rostedt * tracing_is_on - show state of ring buffers enabled 196988ae9d6SSteven Rostedt */ 197988ae9d6SSteven Rostedt int tracing_is_on(void) 198988ae9d6SSteven Rostedt { 199988ae9d6SSteven Rostedt return ring_buffer_flags == RB_BUFFERS_ON; 200988ae9d6SSteven Rostedt } 201988ae9d6SSteven Rostedt EXPORT_SYMBOL_GPL(tracing_is_on); 202988ae9d6SSteven Rostedt 203d06bbd66SIngo Molnar #include "trace.h" 204d06bbd66SIngo Molnar 205e3d6bf0aSSteven Rostedt #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) 20667d34724SAndrew Morton #define RB_ALIGNMENT 4U 207334d4169SLai Jiangshan #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 208334d4169SLai Jiangshan 209334d4169SLai Jiangshan /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ 210334d4169SLai Jiangshan #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX 2117a8e76a3SSteven Rostedt 2127a8e76a3SSteven Rostedt enum { 2137a8e76a3SSteven Rostedt RB_LEN_TIME_EXTEND = 8, 2147a8e76a3SSteven Rostedt RB_LEN_TIME_STAMP = 16, 2157a8e76a3SSteven Rostedt }; 2167a8e76a3SSteven Rostedt 2172d622719STom Zanussi static inline int rb_null_event(struct ring_buffer_event *event) 2182d622719STom Zanussi { 219334d4169SLai Jiangshan return event->type_len == RINGBUF_TYPE_PADDING 220334d4169SLai Jiangshan && event->time_delta == 0; 2212d622719STom Zanussi } 2222d622719STom Zanussi 2232d622719STom Zanussi static inline int rb_discarded_event(struct ring_buffer_event *event) 2242d622719STom Zanussi { 225334d4169SLai Jiangshan return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta; 2262d622719STom Zanussi } 2272d622719STom Zanussi 2282d622719STom Zanussi static void rb_event_set_padding(struct ring_buffer_event *event) 2292d622719STom Zanussi { 230334d4169SLai Jiangshan event->type_len = RINGBUF_TYPE_PADDING; 2312d622719STom Zanussi event->time_delta = 0; 2322d622719STom Zanussi } 2332d622719STom Zanussi 2342d622719STom Zanussi static unsigned 2352d622719STom Zanussi rb_event_data_length(struct ring_buffer_event *event) 2362d622719STom Zanussi { 2372d622719STom Zanussi unsigned length; 2382d622719STom Zanussi 239334d4169SLai Jiangshan if (event->type_len) 240334d4169SLai Jiangshan length = event->type_len * RB_ALIGNMENT; 2412d622719STom Zanussi else 2422d622719STom Zanussi length = event->array[0]; 2432d622719STom Zanussi return length + RB_EVNT_HDR_SIZE; 2442d622719STom Zanussi } 2452d622719STom Zanussi 2467a8e76a3SSteven Rostedt /* inline for ring buffer fast paths */ 24734a148bfSAndrew Morton static unsigned 2487a8e76a3SSteven Rostedt rb_event_length(struct ring_buffer_event *event) 2497a8e76a3SSteven Rostedt { 250334d4169SLai Jiangshan switch (event->type_len) { 2517a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 2522d622719STom Zanussi if (rb_null_event(event)) 2537a8e76a3SSteven Rostedt /* undefined */ 2547a8e76a3SSteven Rostedt return -1; 255334d4169SLai Jiangshan return event->array[0] + RB_EVNT_HDR_SIZE; 2567a8e76a3SSteven Rostedt 2577a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 2587a8e76a3SSteven Rostedt return RB_LEN_TIME_EXTEND; 2597a8e76a3SSteven Rostedt 2607a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 2617a8e76a3SSteven Rostedt return RB_LEN_TIME_STAMP; 
2627a8e76a3SSteven Rostedt 2637a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 2642d622719STom Zanussi return rb_event_data_length(event); 2657a8e76a3SSteven Rostedt default: 2667a8e76a3SSteven Rostedt BUG(); 2677a8e76a3SSteven Rostedt } 2687a8e76a3SSteven Rostedt /* not hit */ 2697a8e76a3SSteven Rostedt return 0; 2707a8e76a3SSteven Rostedt } 2717a8e76a3SSteven Rostedt 2727a8e76a3SSteven Rostedt /** 2737a8e76a3SSteven Rostedt * ring_buffer_event_length - return the length of the event 2747a8e76a3SSteven Rostedt * @event: the event to get the length of 2757a8e76a3SSteven Rostedt */ 2767a8e76a3SSteven Rostedt unsigned ring_buffer_event_length(struct ring_buffer_event *event) 2777a8e76a3SSteven Rostedt { 278465634adSRobert Richter unsigned length = rb_event_length(event); 279334d4169SLai Jiangshan if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 280465634adSRobert Richter return length; 281465634adSRobert Richter length -= RB_EVNT_HDR_SIZE; 282465634adSRobert Richter if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) 283465634adSRobert Richter length -= sizeof(event->array[0]); 284465634adSRobert Richter return length; 2857a8e76a3SSteven Rostedt } 286c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_event_length); 2877a8e76a3SSteven Rostedt 2887a8e76a3SSteven Rostedt /* inline for ring buffer fast paths */ 28934a148bfSAndrew Morton static void * 2907a8e76a3SSteven Rostedt rb_event_data(struct ring_buffer_event *event) 2917a8e76a3SSteven Rostedt { 292334d4169SLai Jiangshan BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); 2937a8e76a3SSteven Rostedt /* If length is in len field, then array[0] has the data */ 294334d4169SLai Jiangshan if (event->type_len) 2957a8e76a3SSteven Rostedt return (void *)&event->array[0]; 2967a8e76a3SSteven Rostedt /* Otherwise length is in array[0] and array[1] has the data */ 2977a8e76a3SSteven Rostedt return (void *)&event->array[1]; 2987a8e76a3SSteven Rostedt } 2997a8e76a3SSteven Rostedt 3007a8e76a3SSteven Rostedt /** 3017a8e76a3SSteven Rostedt * ring_buffer_event_data - return the data of the event 3027a8e76a3SSteven Rostedt * @event: the event to get the data from 3037a8e76a3SSteven Rostedt */ 3047a8e76a3SSteven Rostedt void *ring_buffer_event_data(struct ring_buffer_event *event) 3057a8e76a3SSteven Rostedt { 3067a8e76a3SSteven Rostedt return rb_event_data(event); 3077a8e76a3SSteven Rostedt } 308c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_event_data); 3097a8e76a3SSteven Rostedt 3107a8e76a3SSteven Rostedt #define for_each_buffer_cpu(buffer, cpu) \ 3119e01c1b7SRusty Russell for_each_cpu(cpu, buffer->cpumask) 3127a8e76a3SSteven Rostedt 3137a8e76a3SSteven Rostedt #define TS_SHIFT 27 3147a8e76a3SSteven Rostedt #define TS_MASK ((1ULL << TS_SHIFT) - 1) 3157a8e76a3SSteven Rostedt #define TS_DELTA_TEST (~TS_MASK) 3167a8e76a3SSteven Rostedt 317abc9b56dSSteven Rostedt struct buffer_data_page { 3187a8e76a3SSteven Rostedt u64 time_stamp; /* page time stamp */ 319c3706f00SWenji Huang local_t commit; /* write committed index */ 320abc9b56dSSteven Rostedt unsigned char data[]; /* data of buffer page */ 321abc9b56dSSteven Rostedt }; 322abc9b56dSSteven Rostedt 323abc9b56dSSteven Rostedt struct buffer_page { 324778c55d4SSteven Rostedt struct list_head list; /* list of buffer pages */ 325abc9b56dSSteven Rostedt local_t write; /* index for next write */ 3266f807acdSSteven Rostedt unsigned read; /* index for next read */ 327778c55d4SSteven Rostedt local_t entries; /* entries on this page */ 328abc9b56dSSteven Rostedt struct buffer_data_page *page; /* 
Actual data page */ 3297a8e76a3SSteven Rostedt }; 3307a8e76a3SSteven Rostedt 331044fa782SSteven Rostedt static void rb_init_page(struct buffer_data_page *bpage) 332abc9b56dSSteven Rostedt { 333044fa782SSteven Rostedt local_set(&bpage->commit, 0); 334abc9b56dSSteven Rostedt } 335abc9b56dSSteven Rostedt 336474d32b6SSteven Rostedt /** 337474d32b6SSteven Rostedt * ring_buffer_page_len - the size of data on the page. 338474d32b6SSteven Rostedt * @page: The page to read 339474d32b6SSteven Rostedt * 340474d32b6SSteven Rostedt * Returns the amount of data on the page, including buffer page header. 341474d32b6SSteven Rostedt */ 342ef7a4a16SSteven Rostedt size_t ring_buffer_page_len(void *page) 343ef7a4a16SSteven Rostedt { 344474d32b6SSteven Rostedt return local_read(&((struct buffer_data_page *)page)->commit) 345474d32b6SSteven Rostedt + BUF_PAGE_HDR_SIZE; 346ef7a4a16SSteven Rostedt } 347ef7a4a16SSteven Rostedt 3487a8e76a3SSteven Rostedt /* 349ed56829cSSteven Rostedt * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing 350ed56829cSSteven Rostedt * this issue out. 351ed56829cSSteven Rostedt */ 35234a148bfSAndrew Morton static void free_buffer_page(struct buffer_page *bpage) 353ed56829cSSteven Rostedt { 3546ae2a076SSteven Rostedt free_page((unsigned long)bpage->page); 355e4c2ce82SSteven Rostedt kfree(bpage); 356ed56829cSSteven Rostedt } 357ed56829cSSteven Rostedt 358ed56829cSSteven Rostedt /* 3597a8e76a3SSteven Rostedt * We need to fit the time_stamp delta into 27 bits. 3607a8e76a3SSteven Rostedt */ 3617a8e76a3SSteven Rostedt static inline int test_time_stamp(u64 delta) 3627a8e76a3SSteven Rostedt { 3637a8e76a3SSteven Rostedt if (delta & TS_DELTA_TEST) 3647a8e76a3SSteven Rostedt return 1; 3657a8e76a3SSteven Rostedt return 0; 3667a8e76a3SSteven Rostedt } 3677a8e76a3SSteven Rostedt 368474d32b6SSteven Rostedt #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE) 3697a8e76a3SSteven Rostedt 370be957c44SSteven Rostedt /* Max payload is BUF_PAGE_SIZE - header (8bytes) */ 371be957c44SSteven Rostedt #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2)) 372be957c44SSteven Rostedt 373ea05b57cSSteven Rostedt /* Max number of timestamps that can fit on a page */ 374ea05b57cSSteven Rostedt #define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP) 375ea05b57cSSteven Rostedt 376d1b182a8SSteven Rostedt int ring_buffer_print_page_header(struct trace_seq *s) 377d1b182a8SSteven Rostedt { 378d1b182a8SSteven Rostedt struct buffer_data_page field; 379d1b182a8SSteven Rostedt int ret; 380d1b182a8SSteven Rostedt 381d1b182a8SSteven Rostedt ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t" 382d1b182a8SSteven Rostedt "offset:0;\tsize:%u;\n", 383d1b182a8SSteven Rostedt (unsigned int)sizeof(field.time_stamp)); 384d1b182a8SSteven Rostedt 385d1b182a8SSteven Rostedt ret = trace_seq_printf(s, "\tfield: local_t commit;\t" 386d1b182a8SSteven Rostedt "offset:%u;\tsize:%u;\n", 387d1b182a8SSteven Rostedt (unsigned int)offsetof(typeof(field), commit), 388d1b182a8SSteven Rostedt (unsigned int)sizeof(field.commit)); 389d1b182a8SSteven Rostedt 390d1b182a8SSteven Rostedt ret = trace_seq_printf(s, "\tfield: char data;\t" 391d1b182a8SSteven Rostedt "offset:%u;\tsize:%u;\n", 392d1b182a8SSteven Rostedt (unsigned int)offsetof(typeof(field), data), 393d1b182a8SSteven Rostedt (unsigned int)BUF_PAGE_SIZE); 394d1b182a8SSteven Rostedt 395d1b182a8SSteven Rostedt return ret; 396d1b182a8SSteven Rostedt } 397d1b182a8SSteven Rostedt 3987a8e76a3SSteven Rostedt /* 3997a8e76a3SSteven Rostedt * head_page == tail_page && head 
== tail then buffer is empty. 4007a8e76a3SSteven Rostedt */ 4017a8e76a3SSteven Rostedt struct ring_buffer_per_cpu { 4027a8e76a3SSteven Rostedt int cpu; 4037a8e76a3SSteven Rostedt struct ring_buffer *buffer; 404f83c9d0fSSteven Rostedt spinlock_t reader_lock; /* serialize readers */ 4053e03fb7fSSteven Rostedt raw_spinlock_t lock; 4067a8e76a3SSteven Rostedt struct lock_class_key lock_key; 4077a8e76a3SSteven Rostedt struct list_head pages; 4086f807acdSSteven Rostedt struct buffer_page *head_page; /* read from head */ 4096f807acdSSteven Rostedt struct buffer_page *tail_page; /* write to tail */ 410c3706f00SWenji Huang struct buffer_page *commit_page; /* committed pages */ 411d769041fSSteven Rostedt struct buffer_page *reader_page; 412f0d2c681SSteven Rostedt unsigned long nmi_dropped; 413f0d2c681SSteven Rostedt unsigned long commit_overrun; 4147a8e76a3SSteven Rostedt unsigned long overrun; 415e4906effSSteven Rostedt unsigned long read; 416e4906effSSteven Rostedt local_t entries; 4177a8e76a3SSteven Rostedt u64 write_stamp; 4187a8e76a3SSteven Rostedt u64 read_stamp; 4197a8e76a3SSteven Rostedt atomic_t record_disabled; 4207a8e76a3SSteven Rostedt }; 4217a8e76a3SSteven Rostedt 4227a8e76a3SSteven Rostedt struct ring_buffer { 4237a8e76a3SSteven Rostedt unsigned pages; 4247a8e76a3SSteven Rostedt unsigned flags; 4257a8e76a3SSteven Rostedt int cpus; 4267a8e76a3SSteven Rostedt atomic_t record_disabled; 42700f62f61SArnaldo Carvalho de Melo cpumask_var_t cpumask; 4287a8e76a3SSteven Rostedt 4291f8a6a10SPeter Zijlstra struct lock_class_key *reader_lock_key; 4301f8a6a10SPeter Zijlstra 4317a8e76a3SSteven Rostedt struct mutex mutex; 4327a8e76a3SSteven Rostedt 4337a8e76a3SSteven Rostedt struct ring_buffer_per_cpu **buffers; 434554f786eSSteven Rostedt 43559222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU 436554f786eSSteven Rostedt struct notifier_block cpu_notify; 437554f786eSSteven Rostedt #endif 43837886f6aSSteven Rostedt u64 (*clock)(void); 4397a8e76a3SSteven Rostedt }; 4407a8e76a3SSteven Rostedt 4417a8e76a3SSteven Rostedt struct ring_buffer_iter { 4427a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 4437a8e76a3SSteven Rostedt unsigned long head; 4447a8e76a3SSteven Rostedt struct buffer_page *head_page; 4457a8e76a3SSteven Rostedt u64 read_stamp; 4467a8e76a3SSteven Rostedt }; 4477a8e76a3SSteven Rostedt 448f536aafcSSteven Rostedt /* buffer may be either ring_buffer or ring_buffer_per_cpu */ 4497a8e76a3SSteven Rostedt #define RB_WARN_ON(buffer, cond) \ 4503e89c7bbSSteven Rostedt ({ \ 4513e89c7bbSSteven Rostedt int _____ret = unlikely(cond); \ 4523e89c7bbSSteven Rostedt if (_____ret) { \ 453bf41a158SSteven Rostedt atomic_inc(&buffer->record_disabled); \ 454bf41a158SSteven Rostedt WARN_ON(1); \ 455bf41a158SSteven Rostedt } \ 4563e89c7bbSSteven Rostedt _____ret; \ 4573e89c7bbSSteven Rostedt }) 458f536aafcSSteven Rostedt 45937886f6aSSteven Rostedt /* Up this if you want to test the TIME_EXTENTS and normalization */ 46037886f6aSSteven Rostedt #define DEBUG_SHIFT 0 46137886f6aSSteven Rostedt 46288eb0125SSteven Rostedt static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu) 46388eb0125SSteven Rostedt { 46488eb0125SSteven Rostedt /* shift to debug/test normalization and TIME_EXTENTS */ 46588eb0125SSteven Rostedt return buffer->clock() << DEBUG_SHIFT; 46688eb0125SSteven Rostedt } 46788eb0125SSteven Rostedt 46837886f6aSSteven Rostedt u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu) 46937886f6aSSteven Rostedt { 47037886f6aSSteven Rostedt u64 time; 47137886f6aSSteven Rostedt 
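	/*
	 * The stamp is taken with preemption disabled so it is read on a
	 * single CPU: local trace clocks (such as the default
	 * trace_clock_local) are not guaranteed to be coherent across CPUs.
	 */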
47237886f6aSSteven Rostedt preempt_disable_notrace(); 47388eb0125SSteven Rostedt time = rb_time_stamp(buffer, cpu); 47437886f6aSSteven Rostedt preempt_enable_no_resched_notrace(); 47537886f6aSSteven Rostedt 47637886f6aSSteven Rostedt return time; 47737886f6aSSteven Rostedt } 47837886f6aSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_time_stamp); 47937886f6aSSteven Rostedt 48037886f6aSSteven Rostedt void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, 48137886f6aSSteven Rostedt int cpu, u64 *ts) 48237886f6aSSteven Rostedt { 48337886f6aSSteven Rostedt /* Just stupid testing the normalize function and deltas */ 48437886f6aSSteven Rostedt *ts >>= DEBUG_SHIFT; 48537886f6aSSteven Rostedt } 48637886f6aSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp); 48737886f6aSSteven Rostedt 4887a8e76a3SSteven Rostedt /** 4897a8e76a3SSteven Rostedt * check_pages - integrity check of buffer pages 4907a8e76a3SSteven Rostedt * @cpu_buffer: CPU buffer with pages to test 4917a8e76a3SSteven Rostedt * 492c3706f00SWenji Huang * As a safety measure we check to make sure the data pages have not 4937a8e76a3SSteven Rostedt * been corrupted. 4947a8e76a3SSteven Rostedt */ 4957a8e76a3SSteven Rostedt static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) 4967a8e76a3SSteven Rostedt { 4977a8e76a3SSteven Rostedt struct list_head *head = &cpu_buffer->pages; 498044fa782SSteven Rostedt struct buffer_page *bpage, *tmp; 4997a8e76a3SSteven Rostedt 5003e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) 5013e89c7bbSSteven Rostedt return -1; 5023e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) 5033e89c7bbSSteven Rostedt return -1; 5047a8e76a3SSteven Rostedt 505044fa782SSteven Rostedt list_for_each_entry_safe(bpage, tmp, head, list) { 5063e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 507044fa782SSteven Rostedt bpage->list.next->prev != &bpage->list)) 5083e89c7bbSSteven Rostedt return -1; 5093e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 510044fa782SSteven Rostedt bpage->list.prev->next != &bpage->list)) 5113e89c7bbSSteven Rostedt return -1; 5127a8e76a3SSteven Rostedt } 5137a8e76a3SSteven Rostedt 5147a8e76a3SSteven Rostedt return 0; 5157a8e76a3SSteven Rostedt } 5167a8e76a3SSteven Rostedt 5177a8e76a3SSteven Rostedt static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, 5187a8e76a3SSteven Rostedt unsigned nr_pages) 5197a8e76a3SSteven Rostedt { 5207a8e76a3SSteven Rostedt struct list_head *head = &cpu_buffer->pages; 521044fa782SSteven Rostedt struct buffer_page *bpage, *tmp; 5227a8e76a3SSteven Rostedt unsigned long addr; 5237a8e76a3SSteven Rostedt LIST_HEAD(pages); 5247a8e76a3SSteven Rostedt unsigned i; 5257a8e76a3SSteven Rostedt 5267a8e76a3SSteven Rostedt for (i = 0; i < nr_pages; i++) { 527044fa782SSteven Rostedt bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 528aa1e0e3bSSteven Rostedt GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); 529044fa782SSteven Rostedt if (!bpage) 530e4c2ce82SSteven Rostedt goto free_pages; 531044fa782SSteven Rostedt list_add(&bpage->list, &pages); 532e4c2ce82SSteven Rostedt 5337a8e76a3SSteven Rostedt addr = __get_free_page(GFP_KERNEL); 5347a8e76a3SSteven Rostedt if (!addr) 5357a8e76a3SSteven Rostedt goto free_pages; 536044fa782SSteven Rostedt bpage->page = (void *)addr; 537044fa782SSteven Rostedt rb_init_page(bpage->page); 5387a8e76a3SSteven Rostedt } 5397a8e76a3SSteven Rostedt 5407a8e76a3SSteven Rostedt list_splice(&pages, head); 5417a8e76a3SSteven Rostedt 5427a8e76a3SSteven Rostedt 
rb_check_pages(cpu_buffer); 5437a8e76a3SSteven Rostedt 5447a8e76a3SSteven Rostedt return 0; 5457a8e76a3SSteven Rostedt 5467a8e76a3SSteven Rostedt free_pages: 547044fa782SSteven Rostedt list_for_each_entry_safe(bpage, tmp, &pages, list) { 548044fa782SSteven Rostedt list_del_init(&bpage->list); 549044fa782SSteven Rostedt free_buffer_page(bpage); 5507a8e76a3SSteven Rostedt } 5517a8e76a3SSteven Rostedt return -ENOMEM; 5527a8e76a3SSteven Rostedt } 5537a8e76a3SSteven Rostedt 5547a8e76a3SSteven Rostedt static struct ring_buffer_per_cpu * 5557a8e76a3SSteven Rostedt rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) 5567a8e76a3SSteven Rostedt { 5577a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 558044fa782SSteven Rostedt struct buffer_page *bpage; 559d769041fSSteven Rostedt unsigned long addr; 5607a8e76a3SSteven Rostedt int ret; 5617a8e76a3SSteven Rostedt 5627a8e76a3SSteven Rostedt cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), 5637a8e76a3SSteven Rostedt GFP_KERNEL, cpu_to_node(cpu)); 5647a8e76a3SSteven Rostedt if (!cpu_buffer) 5657a8e76a3SSteven Rostedt return NULL; 5667a8e76a3SSteven Rostedt 5677a8e76a3SSteven Rostedt cpu_buffer->cpu = cpu; 5687a8e76a3SSteven Rostedt cpu_buffer->buffer = buffer; 569f83c9d0fSSteven Rostedt spin_lock_init(&cpu_buffer->reader_lock); 5701f8a6a10SPeter Zijlstra lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); 5713e03fb7fSSteven Rostedt cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 5727a8e76a3SSteven Rostedt INIT_LIST_HEAD(&cpu_buffer->pages); 5737a8e76a3SSteven Rostedt 574044fa782SSteven Rostedt bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 575e4c2ce82SSteven Rostedt GFP_KERNEL, cpu_to_node(cpu)); 576044fa782SSteven Rostedt if (!bpage) 577e4c2ce82SSteven Rostedt goto fail_free_buffer; 578e4c2ce82SSteven Rostedt 579044fa782SSteven Rostedt cpu_buffer->reader_page = bpage; 580d769041fSSteven Rostedt addr = __get_free_page(GFP_KERNEL); 581d769041fSSteven Rostedt if (!addr) 582e4c2ce82SSteven Rostedt goto fail_free_reader; 583044fa782SSteven Rostedt bpage->page = (void *)addr; 584044fa782SSteven Rostedt rb_init_page(bpage->page); 585e4c2ce82SSteven Rostedt 586d769041fSSteven Rostedt INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 587d769041fSSteven Rostedt 5887a8e76a3SSteven Rostedt ret = rb_allocate_pages(cpu_buffer, buffer->pages); 5897a8e76a3SSteven Rostedt if (ret < 0) 590d769041fSSteven Rostedt goto fail_free_reader; 5917a8e76a3SSteven Rostedt 5927a8e76a3SSteven Rostedt cpu_buffer->head_page 5937a8e76a3SSteven Rostedt = list_entry(cpu_buffer->pages.next, struct buffer_page, list); 594bf41a158SSteven Rostedt cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; 5957a8e76a3SSteven Rostedt 5967a8e76a3SSteven Rostedt return cpu_buffer; 5977a8e76a3SSteven Rostedt 598d769041fSSteven Rostedt fail_free_reader: 599d769041fSSteven Rostedt free_buffer_page(cpu_buffer->reader_page); 600d769041fSSteven Rostedt 6017a8e76a3SSteven Rostedt fail_free_buffer: 6027a8e76a3SSteven Rostedt kfree(cpu_buffer); 6037a8e76a3SSteven Rostedt return NULL; 6047a8e76a3SSteven Rostedt } 6057a8e76a3SSteven Rostedt 6067a8e76a3SSteven Rostedt static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 6077a8e76a3SSteven Rostedt { 6087a8e76a3SSteven Rostedt struct list_head *head = &cpu_buffer->pages; 609044fa782SSteven Rostedt struct buffer_page *bpage, *tmp; 6107a8e76a3SSteven Rostedt 611d769041fSSteven Rostedt free_buffer_page(cpu_buffer->reader_page); 
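	/*
	 * The reader page is kept off the cpu_buffer->pages list, which is
	 * why it was freed separately above; the loop below releases the
	 * buffer pages that are still linked on that list.
	 */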
612d769041fSSteven Rostedt 613044fa782SSteven Rostedt list_for_each_entry_safe(bpage, tmp, head, list) { 614044fa782SSteven Rostedt list_del_init(&bpage->list); 615044fa782SSteven Rostedt free_buffer_page(bpage); 6167a8e76a3SSteven Rostedt } 6177a8e76a3SSteven Rostedt kfree(cpu_buffer); 6187a8e76a3SSteven Rostedt } 6197a8e76a3SSteven Rostedt 620a7b13743SSteven Rostedt /* 621a7b13743SSteven Rostedt * Causes compile errors if the struct buffer_page gets bigger 622a7b13743SSteven Rostedt * than the struct page. 623a7b13743SSteven Rostedt */ 624a7b13743SSteven Rostedt extern int ring_buffer_page_too_big(void); 625a7b13743SSteven Rostedt 62659222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU 62709c9e84dSFrederic Weisbecker static int rb_cpu_notify(struct notifier_block *self, 628554f786eSSteven Rostedt unsigned long action, void *hcpu); 629554f786eSSteven Rostedt #endif 630554f786eSSteven Rostedt 6317a8e76a3SSteven Rostedt /** 6327a8e76a3SSteven Rostedt * ring_buffer_alloc - allocate a new ring_buffer 63368814b58SRobert Richter * @size: the size in bytes per cpu that is needed. 6347a8e76a3SSteven Rostedt * @flags: attributes to set for the ring buffer. 6357a8e76a3SSteven Rostedt * 6367a8e76a3SSteven Rostedt * Currently the only flag that is available is the RB_FL_OVERWRITE 6377a8e76a3SSteven Rostedt * flag. This flag means that the buffer will overwrite old data 6387a8e76a3SSteven Rostedt * when the buffer wraps. If this flag is not set, the buffer will 6397a8e76a3SSteven Rostedt * drop data when the tail hits the head. 6407a8e76a3SSteven Rostedt */ 6411f8a6a10SPeter Zijlstra struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, 6421f8a6a10SPeter Zijlstra struct lock_class_key *key) 6437a8e76a3SSteven Rostedt { 6447a8e76a3SSteven Rostedt struct ring_buffer *buffer; 6457a8e76a3SSteven Rostedt int bsize; 6467a8e76a3SSteven Rostedt int cpu; 6477a8e76a3SSteven Rostedt 648a7b13743SSteven Rostedt /* Paranoid! Optimizes out when all is well */ 649a7b13743SSteven Rostedt if (sizeof(struct buffer_page) > sizeof(struct page)) 650a7b13743SSteven Rostedt ring_buffer_page_too_big(); 651a7b13743SSteven Rostedt 652a7b13743SSteven Rostedt 6537a8e76a3SSteven Rostedt /* keep it in its own cache line */ 6547a8e76a3SSteven Rostedt buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), 6557a8e76a3SSteven Rostedt GFP_KERNEL); 6567a8e76a3SSteven Rostedt if (!buffer) 6577a8e76a3SSteven Rostedt return NULL; 6587a8e76a3SSteven Rostedt 6599e01c1b7SRusty Russell if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) 6609e01c1b7SRusty Russell goto fail_free_buffer; 6619e01c1b7SRusty Russell 6627a8e76a3SSteven Rostedt buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 6637a8e76a3SSteven Rostedt buffer->flags = flags; 66437886f6aSSteven Rostedt buffer->clock = trace_clock_local; 6651f8a6a10SPeter Zijlstra buffer->reader_lock_key = key; 6667a8e76a3SSteven Rostedt 6677a8e76a3SSteven Rostedt /* need at least two pages */ 6687a8e76a3SSteven Rostedt if (buffer->pages == 1) 6697a8e76a3SSteven Rostedt buffer->pages++; 6707a8e76a3SSteven Rostedt 6713bf832ceSFrederic Weisbecker /* 6723bf832ceSFrederic Weisbecker * In case of non-hotplug cpu, if the ring-buffer is allocated 6733bf832ceSFrederic Weisbecker * in early initcall, it will not be notified of secondary cpus. 6743bf832ceSFrederic Weisbecker * In that off case, we need to allocate for all possible cpus. 
6753bf832ceSFrederic Weisbecker */ 6763bf832ceSFrederic Weisbecker #ifdef CONFIG_HOTPLUG_CPU 677554f786eSSteven Rostedt get_online_cpus(); 678554f786eSSteven Rostedt cpumask_copy(buffer->cpumask, cpu_online_mask); 6793bf832ceSFrederic Weisbecker #else 6803bf832ceSFrederic Weisbecker cpumask_copy(buffer->cpumask, cpu_possible_mask); 6813bf832ceSFrederic Weisbecker #endif 6827a8e76a3SSteven Rostedt buffer->cpus = nr_cpu_ids; 6837a8e76a3SSteven Rostedt 6847a8e76a3SSteven Rostedt bsize = sizeof(void *) * nr_cpu_ids; 6857a8e76a3SSteven Rostedt buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), 6867a8e76a3SSteven Rostedt GFP_KERNEL); 6877a8e76a3SSteven Rostedt if (!buffer->buffers) 6889e01c1b7SRusty Russell goto fail_free_cpumask; 6897a8e76a3SSteven Rostedt 6907a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 6917a8e76a3SSteven Rostedt buffer->buffers[cpu] = 6927a8e76a3SSteven Rostedt rb_allocate_cpu_buffer(buffer, cpu); 6937a8e76a3SSteven Rostedt if (!buffer->buffers[cpu]) 6947a8e76a3SSteven Rostedt goto fail_free_buffers; 6957a8e76a3SSteven Rostedt } 6967a8e76a3SSteven Rostedt 69759222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU 698554f786eSSteven Rostedt buffer->cpu_notify.notifier_call = rb_cpu_notify; 699554f786eSSteven Rostedt buffer->cpu_notify.priority = 0; 700554f786eSSteven Rostedt register_cpu_notifier(&buffer->cpu_notify); 701554f786eSSteven Rostedt #endif 702554f786eSSteven Rostedt 703554f786eSSteven Rostedt put_online_cpus(); 7047a8e76a3SSteven Rostedt mutex_init(&buffer->mutex); 7057a8e76a3SSteven Rostedt 7067a8e76a3SSteven Rostedt return buffer; 7077a8e76a3SSteven Rostedt 7087a8e76a3SSteven Rostedt fail_free_buffers: 7097a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 7107a8e76a3SSteven Rostedt if (buffer->buffers[cpu]) 7117a8e76a3SSteven Rostedt rb_free_cpu_buffer(buffer->buffers[cpu]); 7127a8e76a3SSteven Rostedt } 7137a8e76a3SSteven Rostedt kfree(buffer->buffers); 7147a8e76a3SSteven Rostedt 7159e01c1b7SRusty Russell fail_free_cpumask: 7169e01c1b7SRusty Russell free_cpumask_var(buffer->cpumask); 717554f786eSSteven Rostedt put_online_cpus(); 7189e01c1b7SRusty Russell 7197a8e76a3SSteven Rostedt fail_free_buffer: 7207a8e76a3SSteven Rostedt kfree(buffer); 7217a8e76a3SSteven Rostedt return NULL; 7227a8e76a3SSteven Rostedt } 7231f8a6a10SPeter Zijlstra EXPORT_SYMBOL_GPL(__ring_buffer_alloc); 7247a8e76a3SSteven Rostedt 7257a8e76a3SSteven Rostedt /** 7267a8e76a3SSteven Rostedt * ring_buffer_free - free a ring buffer. 7277a8e76a3SSteven Rostedt * @buffer: the buffer to free. 
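 *
 * A minimal allocate/use/free sketch (illustrative only: "buf" and the
 * size are made up, and the actual event writing is elided):
 *
 *	struct ring_buffer *buf;
 *
 *	buf = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buf);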
7287a8e76a3SSteven Rostedt */ 7297a8e76a3SSteven Rostedt void 7307a8e76a3SSteven Rostedt ring_buffer_free(struct ring_buffer *buffer) 7317a8e76a3SSteven Rostedt { 7327a8e76a3SSteven Rostedt int cpu; 7337a8e76a3SSteven Rostedt 734554f786eSSteven Rostedt get_online_cpus(); 735554f786eSSteven Rostedt 73659222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU 737554f786eSSteven Rostedt unregister_cpu_notifier(&buffer->cpu_notify); 738554f786eSSteven Rostedt #endif 739554f786eSSteven Rostedt 7407a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) 7417a8e76a3SSteven Rostedt rb_free_cpu_buffer(buffer->buffers[cpu]); 7427a8e76a3SSteven Rostedt 743554f786eSSteven Rostedt put_online_cpus(); 744554f786eSSteven Rostedt 7459e01c1b7SRusty Russell free_cpumask_var(buffer->cpumask); 7469e01c1b7SRusty Russell 7477a8e76a3SSteven Rostedt kfree(buffer); 7487a8e76a3SSteven Rostedt } 749c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_free); 7507a8e76a3SSteven Rostedt 75137886f6aSSteven Rostedt void ring_buffer_set_clock(struct ring_buffer *buffer, 75237886f6aSSteven Rostedt u64 (*clock)(void)) 75337886f6aSSteven Rostedt { 75437886f6aSSteven Rostedt buffer->clock = clock; 75537886f6aSSteven Rostedt } 75637886f6aSSteven Rostedt 7577a8e76a3SSteven Rostedt static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); 7587a8e76a3SSteven Rostedt 7597a8e76a3SSteven Rostedt static void 7607a8e76a3SSteven Rostedt rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) 7617a8e76a3SSteven Rostedt { 762044fa782SSteven Rostedt struct buffer_page *bpage; 7637a8e76a3SSteven Rostedt struct list_head *p; 7647a8e76a3SSteven Rostedt unsigned i; 7657a8e76a3SSteven Rostedt 7667a8e76a3SSteven Rostedt atomic_inc(&cpu_buffer->record_disabled); 7677a8e76a3SSteven Rostedt synchronize_sched(); 7687a8e76a3SSteven Rostedt 7697a8e76a3SSteven Rostedt for (i = 0; i < nr_pages; i++) { 7703e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) 7713e89c7bbSSteven Rostedt return; 7727a8e76a3SSteven Rostedt p = cpu_buffer->pages.next; 773044fa782SSteven Rostedt bpage = list_entry(p, struct buffer_page, list); 774044fa782SSteven Rostedt list_del_init(&bpage->list); 775044fa782SSteven Rostedt free_buffer_page(bpage); 7767a8e76a3SSteven Rostedt } 7773e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) 7783e89c7bbSSteven Rostedt return; 7797a8e76a3SSteven Rostedt 7807a8e76a3SSteven Rostedt rb_reset_cpu(cpu_buffer); 7817a8e76a3SSteven Rostedt 7827a8e76a3SSteven Rostedt rb_check_pages(cpu_buffer); 7837a8e76a3SSteven Rostedt 7847a8e76a3SSteven Rostedt atomic_dec(&cpu_buffer->record_disabled); 7857a8e76a3SSteven Rostedt 7867a8e76a3SSteven Rostedt } 7877a8e76a3SSteven Rostedt 7887a8e76a3SSteven Rostedt static void 7897a8e76a3SSteven Rostedt rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, 7907a8e76a3SSteven Rostedt struct list_head *pages, unsigned nr_pages) 7917a8e76a3SSteven Rostedt { 792044fa782SSteven Rostedt struct buffer_page *bpage; 7937a8e76a3SSteven Rostedt struct list_head *p; 7947a8e76a3SSteven Rostedt unsigned i; 7957a8e76a3SSteven Rostedt 7967a8e76a3SSteven Rostedt atomic_inc(&cpu_buffer->record_disabled); 7977a8e76a3SSteven Rostedt synchronize_sched(); 7987a8e76a3SSteven Rostedt 7997a8e76a3SSteven Rostedt for (i = 0; i < nr_pages; i++) { 8003e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, list_empty(pages))) 8013e89c7bbSSteven Rostedt return; 8027a8e76a3SSteven Rostedt p = pages->next; 803044fa782SSteven Rostedt bpage = list_entry(p, struct 
buffer_page, list); 804044fa782SSteven Rostedt list_del_init(&bpage->list); 805044fa782SSteven Rostedt list_add_tail(&bpage->list, &cpu_buffer->pages); 8067a8e76a3SSteven Rostedt } 8077a8e76a3SSteven Rostedt rb_reset_cpu(cpu_buffer); 8087a8e76a3SSteven Rostedt 8097a8e76a3SSteven Rostedt rb_check_pages(cpu_buffer); 8107a8e76a3SSteven Rostedt 8117a8e76a3SSteven Rostedt atomic_dec(&cpu_buffer->record_disabled); 8127a8e76a3SSteven Rostedt } 8137a8e76a3SSteven Rostedt 8147a8e76a3SSteven Rostedt /** 8157a8e76a3SSteven Rostedt * ring_buffer_resize - resize the ring buffer 8167a8e76a3SSteven Rostedt * @buffer: the buffer to resize. 8177a8e76a3SSteven Rostedt * @size: the new size. 8187a8e76a3SSteven Rostedt * 8197a8e76a3SSteven Rostedt * The tracer is responsible for making sure that the buffer is 8207a8e76a3SSteven Rostedt * not being used while changing the size. 8217a8e76a3SSteven Rostedt * Note: We may be able to change the above requirement by using 8227a8e76a3SSteven Rostedt * RCU synchronizations. 8237a8e76a3SSteven Rostedt * 8247a8e76a3SSteven Rostedt * Minimum size is 2 * BUF_PAGE_SIZE. 8257a8e76a3SSteven Rostedt * 8267a8e76a3SSteven Rostedt * Returns -1 on failure. 8277a8e76a3SSteven Rostedt */ 8287a8e76a3SSteven Rostedt int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) 8297a8e76a3SSteven Rostedt { 8307a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 8317a8e76a3SSteven Rostedt unsigned nr_pages, rm_pages, new_pages; 832044fa782SSteven Rostedt struct buffer_page *bpage, *tmp; 8337a8e76a3SSteven Rostedt unsigned long buffer_size; 8347a8e76a3SSteven Rostedt unsigned long addr; 8357a8e76a3SSteven Rostedt LIST_HEAD(pages); 8367a8e76a3SSteven Rostedt int i, cpu; 8377a8e76a3SSteven Rostedt 838ee51a1deSIngo Molnar /* 839ee51a1deSIngo Molnar * Always succeed at resizing a non-existent buffer: 840ee51a1deSIngo Molnar */ 841ee51a1deSIngo Molnar if (!buffer) 842ee51a1deSIngo Molnar return size; 843ee51a1deSIngo Molnar 8447a8e76a3SSteven Rostedt size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 8457a8e76a3SSteven Rostedt size *= BUF_PAGE_SIZE; 8467a8e76a3SSteven Rostedt buffer_size = buffer->pages * BUF_PAGE_SIZE; 8477a8e76a3SSteven Rostedt 8487a8e76a3SSteven Rostedt /* we need a minimum of two pages */ 8497a8e76a3SSteven Rostedt if (size < BUF_PAGE_SIZE * 2) 8507a8e76a3SSteven Rostedt size = BUF_PAGE_SIZE * 2; 8517a8e76a3SSteven Rostedt 8527a8e76a3SSteven Rostedt if (size == buffer_size) 8537a8e76a3SSteven Rostedt return size; 8547a8e76a3SSteven Rostedt 8557a8e76a3SSteven Rostedt mutex_lock(&buffer->mutex); 856554f786eSSteven Rostedt get_online_cpus(); 8577a8e76a3SSteven Rostedt 8587a8e76a3SSteven Rostedt nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 8597a8e76a3SSteven Rostedt 8607a8e76a3SSteven Rostedt if (size < buffer_size) { 8617a8e76a3SSteven Rostedt 8627a8e76a3SSteven Rostedt /* easy case, just free pages */ 863554f786eSSteven Rostedt if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) 864554f786eSSteven Rostedt goto out_fail; 8657a8e76a3SSteven Rostedt 8667a8e76a3SSteven Rostedt rm_pages = buffer->pages - nr_pages; 8677a8e76a3SSteven Rostedt 8687a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 8697a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 8707a8e76a3SSteven Rostedt rb_remove_pages(cpu_buffer, rm_pages); 8717a8e76a3SSteven Rostedt } 8727a8e76a3SSteven Rostedt goto out; 8737a8e76a3SSteven Rostedt } 8747a8e76a3SSteven Rostedt 8757a8e76a3SSteven Rostedt /* 8767a8e76a3SSteven Rostedt * This is a bit more difficult. 
We only want to add pages 8777a8e76a3SSteven Rostedt * when we can allocate enough for all CPUs. We do this 8787a8e76a3SSteven Rostedt * by allocating all the pages and storing them on a local 8797a8e76a3SSteven Rostedt * link list. If we succeed in our allocation, then we 8807a8e76a3SSteven Rostedt * add these pages to the cpu_buffers. Otherwise we just free 8817a8e76a3SSteven Rostedt * them all and return -ENOMEM; 8827a8e76a3SSteven Rostedt */ 883554f786eSSteven Rostedt if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) 884554f786eSSteven Rostedt goto out_fail; 885f536aafcSSteven Rostedt 8867a8e76a3SSteven Rostedt new_pages = nr_pages - buffer->pages; 8877a8e76a3SSteven Rostedt 8887a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 8897a8e76a3SSteven Rostedt for (i = 0; i < new_pages; i++) { 890044fa782SSteven Rostedt bpage = kzalloc_node(ALIGN(sizeof(*bpage), 891e4c2ce82SSteven Rostedt cache_line_size()), 892e4c2ce82SSteven Rostedt GFP_KERNEL, cpu_to_node(cpu)); 893044fa782SSteven Rostedt if (!bpage) 894e4c2ce82SSteven Rostedt goto free_pages; 895044fa782SSteven Rostedt list_add(&bpage->list, &pages); 8967a8e76a3SSteven Rostedt addr = __get_free_page(GFP_KERNEL); 8977a8e76a3SSteven Rostedt if (!addr) 8987a8e76a3SSteven Rostedt goto free_pages; 899044fa782SSteven Rostedt bpage->page = (void *)addr; 900044fa782SSteven Rostedt rb_init_page(bpage->page); 9017a8e76a3SSteven Rostedt } 9027a8e76a3SSteven Rostedt } 9037a8e76a3SSteven Rostedt 9047a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 9057a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 9067a8e76a3SSteven Rostedt rb_insert_pages(cpu_buffer, &pages, new_pages); 9077a8e76a3SSteven Rostedt } 9087a8e76a3SSteven Rostedt 909554f786eSSteven Rostedt if (RB_WARN_ON(buffer, !list_empty(&pages))) 910554f786eSSteven Rostedt goto out_fail; 9117a8e76a3SSteven Rostedt 9127a8e76a3SSteven Rostedt out: 9137a8e76a3SSteven Rostedt buffer->pages = nr_pages; 914554f786eSSteven Rostedt put_online_cpus(); 9157a8e76a3SSteven Rostedt mutex_unlock(&buffer->mutex); 9167a8e76a3SSteven Rostedt 9177a8e76a3SSteven Rostedt return size; 9187a8e76a3SSteven Rostedt 9197a8e76a3SSteven Rostedt free_pages: 920044fa782SSteven Rostedt list_for_each_entry_safe(bpage, tmp, &pages, list) { 921044fa782SSteven Rostedt list_del_init(&bpage->list); 922044fa782SSteven Rostedt free_buffer_page(bpage); 9237a8e76a3SSteven Rostedt } 924554f786eSSteven Rostedt put_online_cpus(); 925641d2f63SVegard Nossum mutex_unlock(&buffer->mutex); 9267a8e76a3SSteven Rostedt return -ENOMEM; 927554f786eSSteven Rostedt 928554f786eSSteven Rostedt /* 929554f786eSSteven Rostedt * Something went totally wrong, and we are too paranoid 930554f786eSSteven Rostedt * to even clean up the mess. 
931554f786eSSteven Rostedt */ 932554f786eSSteven Rostedt out_fail: 933554f786eSSteven Rostedt put_online_cpus(); 934554f786eSSteven Rostedt mutex_unlock(&buffer->mutex); 935554f786eSSteven Rostedt return -1; 9367a8e76a3SSteven Rostedt } 937c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_resize); 9387a8e76a3SSteven Rostedt 9398789a9e7SSteven Rostedt static inline void * 940044fa782SSteven Rostedt __rb_data_page_index(struct buffer_data_page *bpage, unsigned index) 9418789a9e7SSteven Rostedt { 942044fa782SSteven Rostedt return bpage->data + index; 9438789a9e7SSteven Rostedt } 9448789a9e7SSteven Rostedt 945044fa782SSteven Rostedt static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 9467a8e76a3SSteven Rostedt { 947044fa782SSteven Rostedt return bpage->page->data + index; 9487a8e76a3SSteven Rostedt } 9497a8e76a3SSteven Rostedt 9507a8e76a3SSteven Rostedt static inline struct ring_buffer_event * 951d769041fSSteven Rostedt rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) 9527a8e76a3SSteven Rostedt { 9536f807acdSSteven Rostedt return __rb_page_index(cpu_buffer->reader_page, 9546f807acdSSteven Rostedt cpu_buffer->reader_page->read); 9556f807acdSSteven Rostedt } 9566f807acdSSteven Rostedt 9576f807acdSSteven Rostedt static inline struct ring_buffer_event * 9586f807acdSSteven Rostedt rb_head_event(struct ring_buffer_per_cpu *cpu_buffer) 9596f807acdSSteven Rostedt { 9606f807acdSSteven Rostedt return __rb_page_index(cpu_buffer->head_page, 9616f807acdSSteven Rostedt cpu_buffer->head_page->read); 9627a8e76a3SSteven Rostedt } 9637a8e76a3SSteven Rostedt 9647a8e76a3SSteven Rostedt static inline struct ring_buffer_event * 9657a8e76a3SSteven Rostedt rb_iter_head_event(struct ring_buffer_iter *iter) 9667a8e76a3SSteven Rostedt { 9676f807acdSSteven Rostedt return __rb_page_index(iter->head_page, iter->head); 9687a8e76a3SSteven Rostedt } 9697a8e76a3SSteven Rostedt 970bf41a158SSteven Rostedt static inline unsigned rb_page_write(struct buffer_page *bpage) 971bf41a158SSteven Rostedt { 972bf41a158SSteven Rostedt return local_read(&bpage->write); 973bf41a158SSteven Rostedt } 974bf41a158SSteven Rostedt 975bf41a158SSteven Rostedt static inline unsigned rb_page_commit(struct buffer_page *bpage) 976bf41a158SSteven Rostedt { 977abc9b56dSSteven Rostedt return local_read(&bpage->page->commit); 978bf41a158SSteven Rostedt } 979bf41a158SSteven Rostedt 980bf41a158SSteven Rostedt /* Size is determined by what has been commited */ 981bf41a158SSteven Rostedt static inline unsigned rb_page_size(struct buffer_page *bpage) 982bf41a158SSteven Rostedt { 983bf41a158SSteven Rostedt return rb_page_commit(bpage); 984bf41a158SSteven Rostedt } 985bf41a158SSteven Rostedt 986bf41a158SSteven Rostedt static inline unsigned 987bf41a158SSteven Rostedt rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) 988bf41a158SSteven Rostedt { 989bf41a158SSteven Rostedt return rb_page_commit(cpu_buffer->commit_page); 990bf41a158SSteven Rostedt } 991bf41a158SSteven Rostedt 992bf41a158SSteven Rostedt static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer) 993bf41a158SSteven Rostedt { 994bf41a158SSteven Rostedt return rb_page_commit(cpu_buffer->head_page); 995bf41a158SSteven Rostedt } 996bf41a158SSteven Rostedt 9977a8e76a3SSteven Rostedt static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, 998044fa782SSteven Rostedt struct buffer_page **bpage) 9997a8e76a3SSteven Rostedt { 1000044fa782SSteven Rostedt struct list_head *p = (*bpage)->list.next; 10017a8e76a3SSteven Rostedt 
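	/*
	 * cpu_buffer->pages is the bare list head and carries no data page,
	 * so when the walk wraps around to it, step past it to the first
	 * real buffer page.
	 */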
10027a8e76a3SSteven Rostedt if (p == &cpu_buffer->pages) 10037a8e76a3SSteven Rostedt p = p->next; 10047a8e76a3SSteven Rostedt 1005044fa782SSteven Rostedt *bpage = list_entry(p, struct buffer_page, list); 10067a8e76a3SSteven Rostedt } 10077a8e76a3SSteven Rostedt 1008bf41a158SSteven Rostedt static inline unsigned 1009bf41a158SSteven Rostedt rb_event_index(struct ring_buffer_event *event) 10107a8e76a3SSteven Rostedt { 1011bf41a158SSteven Rostedt unsigned long addr = (unsigned long)event; 1012bf41a158SSteven Rostedt 1013bf41a158SSteven Rostedt return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE); 10147a8e76a3SSteven Rostedt } 10157a8e76a3SSteven Rostedt 10160f0c85fcSSteven Rostedt static inline int 1017bf41a158SSteven Rostedt rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer, 1018bf41a158SSteven Rostedt struct ring_buffer_event *event) 10197a8e76a3SSteven Rostedt { 1020bf41a158SSteven Rostedt unsigned long addr = (unsigned long)event; 1021bf41a158SSteven Rostedt unsigned long index; 1022bf41a158SSteven Rostedt 1023bf41a158SSteven Rostedt index = rb_event_index(event); 1024bf41a158SSteven Rostedt addr &= PAGE_MASK; 1025bf41a158SSteven Rostedt 1026bf41a158SSteven Rostedt return cpu_buffer->commit_page->page == (void *)addr && 1027bf41a158SSteven Rostedt rb_commit_index(cpu_buffer) == index; 1028bf41a158SSteven Rostedt } 1029bf41a158SSteven Rostedt 103034a148bfSAndrew Morton static void 1031bf41a158SSteven Rostedt rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer, 1032bf41a158SSteven Rostedt struct ring_buffer_event *event) 1033bf41a158SSteven Rostedt { 1034bf41a158SSteven Rostedt unsigned long addr = (unsigned long)event; 1035bf41a158SSteven Rostedt unsigned long index; 1036bf41a158SSteven Rostedt 1037bf41a158SSteven Rostedt index = rb_event_index(event); 1038bf41a158SSteven Rostedt addr &= PAGE_MASK; 1039bf41a158SSteven Rostedt 1040bf41a158SSteven Rostedt while (cpu_buffer->commit_page->page != (void *)addr) { 10413e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 10423e89c7bbSSteven Rostedt cpu_buffer->commit_page == cpu_buffer->tail_page)) 10433e89c7bbSSteven Rostedt return; 1044abc9b56dSSteven Rostedt cpu_buffer->commit_page->page->commit = 1045bf41a158SSteven Rostedt cpu_buffer->commit_page->write; 1046bf41a158SSteven Rostedt rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); 1047abc9b56dSSteven Rostedt cpu_buffer->write_stamp = 1048abc9b56dSSteven Rostedt cpu_buffer->commit_page->page->time_stamp; 1049bf41a158SSteven Rostedt } 1050bf41a158SSteven Rostedt 1051bf41a158SSteven Rostedt /* Now set the commit to the event's index */ 1052abc9b56dSSteven Rostedt local_set(&cpu_buffer->commit_page->page->commit, index); 1053bf41a158SSteven Rostedt } 1054bf41a158SSteven Rostedt 105534a148bfSAndrew Morton static void 1056bf41a158SSteven Rostedt rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) 1057bf41a158SSteven Rostedt { 1058bf41a158SSteven Rostedt /* 1059bf41a158SSteven Rostedt * We only race with interrupts and NMIs on this CPU. 1060bf41a158SSteven Rostedt * If we own the commit event, then we can commit 1061bf41a158SSteven Rostedt * all others that interrupted us, since the interruptions 1062bf41a158SSteven Rostedt * are in stack format (they finish before they come 1063bf41a158SSteven Rostedt * back to us). This allows us to do a simple loop to 1064bf41a158SSteven Rostedt * assign the commit to the tail. 
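 *
 * A sketch of the "stack format" property this relies on: nested
 * writers on this CPU always finish before the context they
 * interrupted resumes, e.g.
 *
 *	writer reserves event A
 *	    irq reserves event B
 *	        NMI reserves and commits event C
 *	    irq commits event B
 *	writer commits event A and, owning the commit, walks the
 *	commit page/index forward past B and C as well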
1065bf41a158SSteven Rostedt */ 1066a8ccf1d6SSteven Rostedt again: 1067bf41a158SSteven Rostedt while (cpu_buffer->commit_page != cpu_buffer->tail_page) { 1068abc9b56dSSteven Rostedt cpu_buffer->commit_page->page->commit = 1069bf41a158SSteven Rostedt cpu_buffer->commit_page->write; 1070bf41a158SSteven Rostedt rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); 1071abc9b56dSSteven Rostedt cpu_buffer->write_stamp = 1072abc9b56dSSteven Rostedt cpu_buffer->commit_page->page->time_stamp; 1073bf41a158SSteven Rostedt /* add barrier to keep gcc from optimizing too much */ 1074bf41a158SSteven Rostedt barrier(); 1075bf41a158SSteven Rostedt } 1076bf41a158SSteven Rostedt while (rb_commit_index(cpu_buffer) != 1077bf41a158SSteven Rostedt rb_page_write(cpu_buffer->commit_page)) { 1078abc9b56dSSteven Rostedt cpu_buffer->commit_page->page->commit = 1079bf41a158SSteven Rostedt cpu_buffer->commit_page->write; 1080bf41a158SSteven Rostedt barrier(); 1081bf41a158SSteven Rostedt } 1082a8ccf1d6SSteven Rostedt 1083a8ccf1d6SSteven Rostedt /* again, keep gcc from optimizing */ 1084a8ccf1d6SSteven Rostedt barrier(); 1085a8ccf1d6SSteven Rostedt 1086a8ccf1d6SSteven Rostedt /* 1087a8ccf1d6SSteven Rostedt * If an interrupt came in just after the first while loop 1088a8ccf1d6SSteven Rostedt * and pushed the tail page forward, we will be left with 1089a8ccf1d6SSteven Rostedt * a dangling commit that will never go forward. 1090a8ccf1d6SSteven Rostedt */ 1091a8ccf1d6SSteven Rostedt if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page)) 1092a8ccf1d6SSteven Rostedt goto again; 10937a8e76a3SSteven Rostedt } 10947a8e76a3SSteven Rostedt 1095d769041fSSteven Rostedt static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 10967a8e76a3SSteven Rostedt { 1097abc9b56dSSteven Rostedt cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; 10986f807acdSSteven Rostedt cpu_buffer->reader_page->read = 0; 1099d769041fSSteven Rostedt } 1100d769041fSSteven Rostedt 110134a148bfSAndrew Morton static void rb_inc_iter(struct ring_buffer_iter *iter) 1102d769041fSSteven Rostedt { 1103d769041fSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 1104d769041fSSteven Rostedt 1105d769041fSSteven Rostedt /* 1106d769041fSSteven Rostedt * The iterator could be on the reader page (it starts there). 1107d769041fSSteven Rostedt * But the head could have moved, since the reader was 1108d769041fSSteven Rostedt * found. Check for this case and assign the iterator 1109d769041fSSteven Rostedt * to the head page instead of next. 1110d769041fSSteven Rostedt */ 1111d769041fSSteven Rostedt if (iter->head_page == cpu_buffer->reader_page) 1112d769041fSSteven Rostedt iter->head_page = cpu_buffer->head_page; 1113d769041fSSteven Rostedt else 1114d769041fSSteven Rostedt rb_inc_page(cpu_buffer, &iter->head_page); 1115d769041fSSteven Rostedt 1116abc9b56dSSteven Rostedt iter->read_stamp = iter->head_page->page->time_stamp; 11177a8e76a3SSteven Rostedt iter->head = 0; 11187a8e76a3SSteven Rostedt } 11197a8e76a3SSteven Rostedt 11207a8e76a3SSteven Rostedt /** 11217a8e76a3SSteven Rostedt * ring_buffer_update_event - update event type and data 11227a8e76a3SSteven Rostedt * @event: the even to update 11237a8e76a3SSteven Rostedt * @type: the type of event 11247a8e76a3SSteven Rostedt * @length: the size of the event field in the ring buffer 11257a8e76a3SSteven Rostedt * 11267a8e76a3SSteven Rostedt * Update the type and data fields of the event. 
The length 11277a8e76a3SSteven Rostedt * is the actual size that is written to the ring buffer, 11287a8e76a3SSteven Rostedt * and with this, we can determine what to place into the 11297a8e76a3SSteven Rostedt * data field. 11307a8e76a3SSteven Rostedt */ 113134a148bfSAndrew Morton static void 11327a8e76a3SSteven Rostedt rb_update_event(struct ring_buffer_event *event, 11337a8e76a3SSteven Rostedt unsigned type, unsigned length) 11347a8e76a3SSteven Rostedt { 1135334d4169SLai Jiangshan event->type_len = type; 11367a8e76a3SSteven Rostedt 11377a8e76a3SSteven Rostedt switch (type) { 11387a8e76a3SSteven Rostedt 11397a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 11407a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 11417a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 11427a8e76a3SSteven Rostedt break; 11437a8e76a3SSteven Rostedt 1144334d4169SLai Jiangshan case 0: 11457a8e76a3SSteven Rostedt length -= RB_EVNT_HDR_SIZE; 1146334d4169SLai Jiangshan if (length > RB_MAX_SMALL_DATA) 11477a8e76a3SSteven Rostedt event->array[0] = length; 1148334d4169SLai Jiangshan else 1149334d4169SLai Jiangshan event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); 11507a8e76a3SSteven Rostedt break; 11517a8e76a3SSteven Rostedt default: 11527a8e76a3SSteven Rostedt BUG(); 11537a8e76a3SSteven Rostedt } 11547a8e76a3SSteven Rostedt } 11557a8e76a3SSteven Rostedt 115634a148bfSAndrew Morton static unsigned rb_calculate_event_length(unsigned length) 11577a8e76a3SSteven Rostedt { 11587a8e76a3SSteven Rostedt struct ring_buffer_event event; /* Used only for sizeof array */ 11597a8e76a3SSteven Rostedt 11607a8e76a3SSteven Rostedt /* zero length can cause confusions */ 11617a8e76a3SSteven Rostedt if (!length) 11627a8e76a3SSteven Rostedt length = 1; 11637a8e76a3SSteven Rostedt 11647a8e76a3SSteven Rostedt if (length > RB_MAX_SMALL_DATA) 11657a8e76a3SSteven Rostedt length += sizeof(event.array[0]); 11667a8e76a3SSteven Rostedt 11677a8e76a3SSteven Rostedt length += RB_EVNT_HDR_SIZE; 11687a8e76a3SSteven Rostedt length = ALIGN(length, RB_ALIGNMENT); 11697a8e76a3SSteven Rostedt 11707a8e76a3SSteven Rostedt return length; 11717a8e76a3SSteven Rostedt } 11727a8e76a3SSteven Rostedt 11736634ff26SSteven Rostedt 11747a8e76a3SSteven Rostedt static struct ring_buffer_event * 11756634ff26SSteven Rostedt rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 11766634ff26SSteven Rostedt unsigned long length, unsigned long tail, 11776634ff26SSteven Rostedt struct buffer_page *commit_page, 11786634ff26SSteven Rostedt struct buffer_page *tail_page, u64 *ts) 11797a8e76a3SSteven Rostedt { 11806634ff26SSteven Rostedt struct buffer_page *next_page, *head_page, *reader_page; 11817a8e76a3SSteven Rostedt struct ring_buffer *buffer = cpu_buffer->buffer; 11827a8e76a3SSteven Rostedt struct ring_buffer_event *event; 118378d904b4SSteven Rostedt bool lock_taken = false; 11846634ff26SSteven Rostedt unsigned long flags; 1185aa20ae84SSteven Rostedt 1186aa20ae84SSteven Rostedt next_page = tail_page; 11877a8e76a3SSteven Rostedt 11883e03fb7fSSteven Rostedt local_irq_save(flags); 118978d904b4SSteven Rostedt /* 1190a81bd80aSSteven Rostedt * Since the write to the buffer is still not 1191a81bd80aSSteven Rostedt * fully lockless, we must be careful with NMIs. 1192a81bd80aSSteven Rostedt * The locks in the writers are taken when a write 1193a81bd80aSSteven Rostedt * crosses to a new page. The locks protect against 1194a81bd80aSSteven Rostedt * races with the readers (this will soon be fixed 1195a81bd80aSSteven Rostedt * with a lockless solution). 
1196a81bd80aSSteven Rostedt * 1197a81bd80aSSteven Rostedt * Because we can not protect against NMIs, and we 1198a81bd80aSSteven Rostedt * want to keep traces reentrant, we need to manage 1199a81bd80aSSteven Rostedt * what happens when we are in an NMI. 1200a81bd80aSSteven Rostedt * 120178d904b4SSteven Rostedt * NMIs can happen after we take the lock. 120278d904b4SSteven Rostedt * If we are in an NMI, only take the lock 120378d904b4SSteven Rostedt * if it is not already taken. Otherwise 120478d904b4SSteven Rostedt * simply fail. 120578d904b4SSteven Rostedt */ 1206a81bd80aSSteven Rostedt if (unlikely(in_nmi())) { 1207f0d2c681SSteven Rostedt if (!__raw_spin_trylock(&cpu_buffer->lock)) { 1208f0d2c681SSteven Rostedt cpu_buffer->nmi_dropped++; 120945141d46SSteven Rostedt goto out_reset; 1210f0d2c681SSteven Rostedt } 121178d904b4SSteven Rostedt } else 12123e03fb7fSSteven Rostedt __raw_spin_lock(&cpu_buffer->lock); 1213bf41a158SSteven Rostedt 121478d904b4SSteven Rostedt lock_taken = true; 121578d904b4SSteven Rostedt 12167a8e76a3SSteven Rostedt rb_inc_page(cpu_buffer, &next_page); 12177a8e76a3SSteven Rostedt 1218d769041fSSteven Rostedt head_page = cpu_buffer->head_page; 1219d769041fSSteven Rostedt reader_page = cpu_buffer->reader_page; 1220d769041fSSteven Rostedt 1221d769041fSSteven Rostedt /* we grabbed the lock before incrementing */ 12223e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, next_page == reader_page)) 122345141d46SSteven Rostedt goto out_reset; 1224bf41a158SSteven Rostedt 1225bf41a158SSteven Rostedt /* 1226bf41a158SSteven Rostedt * If for some reason, we had an interrupt storm that made 1227bf41a158SSteven Rostedt * it all the way around the buffer, bail, and warn 1228bf41a158SSteven Rostedt * about it. 1229bf41a158SSteven Rostedt */ 123098db8df7SSteven Rostedt if (unlikely(next_page == commit_page)) { 1231f0d2c681SSteven Rostedt cpu_buffer->commit_overrun++; 123245141d46SSteven Rostedt goto out_reset; 1233bf41a158SSteven Rostedt } 1234d769041fSSteven Rostedt 12357a8e76a3SSteven Rostedt if (next_page == head_page) { 12366f3b3440SLai Jiangshan if (!(buffer->flags & RB_FL_OVERWRITE)) 123745141d46SSteven Rostedt goto out_reset; 12387a8e76a3SSteven Rostedt 1239bf41a158SSteven Rostedt /* tail_page has not moved yet? */ 1240bf41a158SSteven Rostedt if (tail_page == cpu_buffer->tail_page) { 12417a8e76a3SSteven Rostedt /* count overflows */ 1242778c55d4SSteven Rostedt cpu_buffer->overrun += 1243778c55d4SSteven Rostedt local_read(&head_page->entries); 12447a8e76a3SSteven Rostedt 12457a8e76a3SSteven Rostedt rb_inc_page(cpu_buffer, &head_page); 12467a8e76a3SSteven Rostedt cpu_buffer->head_page = head_page; 1247bf41a158SSteven Rostedt cpu_buffer->head_page->read = 0; 1248bf41a158SSteven Rostedt } 12497a8e76a3SSteven Rostedt } 12507a8e76a3SSteven Rostedt 1251bf41a158SSteven Rostedt /* 1252bf41a158SSteven Rostedt * If the tail page is still the same as what we think 1253bf41a158SSteven Rostedt * it is, then it is up to us to update the tail 1254bf41a158SSteven Rostedt * pointer. 
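 *
 * (In other words: tail_page is the value this writer sampled before
 *  taking the lock. If an interrupting writer has already pushed
 *  cpu_buffer->tail_page past it, the check below fails and the tail
 *  is left alone - that other writer has done the update for us.)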
1255bf41a158SSteven Rostedt */ 1256bf41a158SSteven Rostedt if (tail_page == cpu_buffer->tail_page) { 1257bf41a158SSteven Rostedt local_set(&next_page->write, 0); 1258778c55d4SSteven Rostedt local_set(&next_page->entries, 0); 1259abc9b56dSSteven Rostedt local_set(&next_page->page->commit, 0); 1260bf41a158SSteven Rostedt cpu_buffer->tail_page = next_page; 1261bf41a158SSteven Rostedt 1262bf41a158SSteven Rostedt /* reread the time stamp */ 126388eb0125SSteven Rostedt *ts = rb_time_stamp(buffer, cpu_buffer->cpu); 1264abc9b56dSSteven Rostedt cpu_buffer->tail_page->page->time_stamp = *ts; 1265bf41a158SSteven Rostedt } 1266bf41a158SSteven Rostedt 1267bf41a158SSteven Rostedt /* 1268bf41a158SSteven Rostedt * The actual tail page has moved forward. 1269bf41a158SSteven Rostedt */ 1270bf41a158SSteven Rostedt if (tail < BUF_PAGE_SIZE) { 1271bf41a158SSteven Rostedt /* Mark the rest of the page with padding */ 12726f807acdSSteven Rostedt event = __rb_page_index(tail_page, tail); 12732d622719STom Zanussi rb_event_set_padding(event); 12747a8e76a3SSteven Rostedt } 12757a8e76a3SSteven Rostedt 1276bf41a158SSteven Rostedt /* Set the write back to the previous setting */ 12778e7abf1cSSteven Rostedt local_sub(length, &tail_page->write); 1278bf41a158SSteven Rostedt 1279bf41a158SSteven Rostedt /* 1280bf41a158SSteven Rostedt * If this was a commit entry that failed, 1281bf41a158SSteven Rostedt * increment that too 1282bf41a158SSteven Rostedt */ 1283bf41a158SSteven Rostedt if (tail_page == cpu_buffer->commit_page && 1284bf41a158SSteven Rostedt tail == rb_commit_index(cpu_buffer)) { 1285bf41a158SSteven Rostedt rb_set_commit_to_write(cpu_buffer); 12867a8e76a3SSteven Rostedt } 12877a8e76a3SSteven Rostedt 12883e03fb7fSSteven Rostedt __raw_spin_unlock(&cpu_buffer->lock); 12893e03fb7fSSteven Rostedt local_irq_restore(flags); 1290bf41a158SSteven Rostedt 1291bf41a158SSteven Rostedt /* fail and let the caller try again */ 1292bf41a158SSteven Rostedt return ERR_PTR(-EAGAIN); 1293bf41a158SSteven Rostedt 129445141d46SSteven Rostedt out_reset: 12956f3b3440SLai Jiangshan /* reset write */ 12968e7abf1cSSteven Rostedt local_sub(length, &tail_page->write); 12976f3b3440SLai Jiangshan 129878d904b4SSteven Rostedt if (likely(lock_taken)) 12993e03fb7fSSteven Rostedt __raw_spin_unlock(&cpu_buffer->lock); 13003e03fb7fSSteven Rostedt local_irq_restore(flags); 1301bf41a158SSteven Rostedt return NULL; 13027a8e76a3SSteven Rostedt } 13037a8e76a3SSteven Rostedt 13046634ff26SSteven Rostedt static struct ring_buffer_event * 13056634ff26SSteven Rostedt __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 13066634ff26SSteven Rostedt unsigned type, unsigned long length, u64 *ts) 13076634ff26SSteven Rostedt { 13086634ff26SSteven Rostedt struct buffer_page *tail_page, *commit_page; 13096634ff26SSteven Rostedt struct ring_buffer_event *event; 13106634ff26SSteven Rostedt unsigned long tail, write; 13116634ff26SSteven Rostedt 13126634ff26SSteven Rostedt commit_page = cpu_buffer->commit_page; 13136634ff26SSteven Rostedt /* we just need to protect against interrupts */ 13146634ff26SSteven Rostedt barrier(); 13156634ff26SSteven Rostedt tail_page = cpu_buffer->tail_page; 13166634ff26SSteven Rostedt write = local_add_return(length, &tail_page->write); 13176634ff26SSteven Rostedt tail = write - length; 13186634ff26SSteven Rostedt 13196634ff26SSteven Rostedt /* See if we shot pass the end of this buffer page */ 13206634ff26SSteven Rostedt if (write > BUF_PAGE_SIZE) 13216634ff26SSteven Rostedt return rb_move_tail(cpu_buffer, length, tail, 13226634ff26SSteven 
Rostedt commit_page, tail_page, ts); 13236634ff26SSteven Rostedt 13246634ff26SSteven Rostedt /* We reserved something on the buffer */ 13256634ff26SSteven Rostedt 13266634ff26SSteven Rostedt if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE)) 13276634ff26SSteven Rostedt return NULL; 13286634ff26SSteven Rostedt 13296634ff26SSteven Rostedt event = __rb_page_index(tail_page, tail); 13306634ff26SSteven Rostedt rb_update_event(event, type, length); 13316634ff26SSteven Rostedt 13326634ff26SSteven Rostedt /* The passed in type is zero for DATA */ 13336634ff26SSteven Rostedt if (likely(!type)) 13346634ff26SSteven Rostedt local_inc(&tail_page->entries); 13356634ff26SSteven Rostedt 13366634ff26SSteven Rostedt /* 13376634ff26SSteven Rostedt * If this is a commit and the tail is zero, then update 13386634ff26SSteven Rostedt * this page's time stamp. 13396634ff26SSteven Rostedt */ 13406634ff26SSteven Rostedt if (!tail && rb_is_commit(cpu_buffer, event)) 13416634ff26SSteven Rostedt cpu_buffer->commit_page->page->time_stamp = *ts; 13426634ff26SSteven Rostedt 13436634ff26SSteven Rostedt return event; 13446634ff26SSteven Rostedt } 13456634ff26SSteven Rostedt 1346edd813bfSSteven Rostedt static inline int 1347edd813bfSSteven Rostedt rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, 1348edd813bfSSteven Rostedt struct ring_buffer_event *event) 1349edd813bfSSteven Rostedt { 1350edd813bfSSteven Rostedt unsigned long new_index, old_index; 1351edd813bfSSteven Rostedt struct buffer_page *bpage; 1352edd813bfSSteven Rostedt unsigned long index; 1353edd813bfSSteven Rostedt unsigned long addr; 1354edd813bfSSteven Rostedt 1355edd813bfSSteven Rostedt new_index = rb_event_index(event); 1356edd813bfSSteven Rostedt old_index = new_index + rb_event_length(event); 1357edd813bfSSteven Rostedt addr = (unsigned long)event; 1358edd813bfSSteven Rostedt addr &= PAGE_MASK; 1359edd813bfSSteven Rostedt 1360edd813bfSSteven Rostedt bpage = cpu_buffer->tail_page; 1361edd813bfSSteven Rostedt 1362edd813bfSSteven Rostedt if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { 1363edd813bfSSteven Rostedt /* 1364edd813bfSSteven Rostedt * This is on the tail page. It is possible that 1365edd813bfSSteven Rostedt * a write could come in and move the tail page 1366edd813bfSSteven Rostedt * and write to the next page. That is fine 1367edd813bfSSteven Rostedt * because we just shorten what is on this page. 1368edd813bfSSteven Rostedt */ 1369edd813bfSSteven Rostedt index = local_cmpxchg(&bpage->write, old_index, new_index); 1370edd813bfSSteven Rostedt if (index == old_index) 1371edd813bfSSteven Rostedt return 1; 1372edd813bfSSteven Rostedt } 1373edd813bfSSteven Rostedt 1374edd813bfSSteven Rostedt /* could not discard */ 1375edd813bfSSteven Rostedt return 0; 1376edd813bfSSteven Rostedt } 1377edd813bfSSteven Rostedt 13787a8e76a3SSteven Rostedt static int 13797a8e76a3SSteven Rostedt rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, 13807a8e76a3SSteven Rostedt u64 *ts, u64 *delta) 13817a8e76a3SSteven Rostedt { 13827a8e76a3SSteven Rostedt struct ring_buffer_event *event; 13837a8e76a3SSteven Rostedt static int once; 1384bf41a158SSteven Rostedt int ret; 13857a8e76a3SSteven Rostedt 13867a8e76a3SSteven Rostedt if (unlikely(*delta > (1ULL << 59) && !once++)) { 13877a8e76a3SSteven Rostedt printk(KERN_WARNING "Delta way too big! 
%llu" 13887a8e76a3SSteven Rostedt " ts=%llu write stamp = %llu\n", 1389e2862c94SStephen Rothwell (unsigned long long)*delta, 1390e2862c94SStephen Rothwell (unsigned long long)*ts, 1391e2862c94SStephen Rothwell (unsigned long long)cpu_buffer->write_stamp); 13927a8e76a3SSteven Rostedt WARN_ON(1); 13937a8e76a3SSteven Rostedt } 13947a8e76a3SSteven Rostedt 13957a8e76a3SSteven Rostedt /* 13967a8e76a3SSteven Rostedt * The delta is too big, we to add a 13977a8e76a3SSteven Rostedt * new timestamp. 13987a8e76a3SSteven Rostedt */ 13997a8e76a3SSteven Rostedt event = __rb_reserve_next(cpu_buffer, 14007a8e76a3SSteven Rostedt RINGBUF_TYPE_TIME_EXTEND, 14017a8e76a3SSteven Rostedt RB_LEN_TIME_EXTEND, 14027a8e76a3SSteven Rostedt ts); 14037a8e76a3SSteven Rostedt if (!event) 1404bf41a158SSteven Rostedt return -EBUSY; 14057a8e76a3SSteven Rostedt 1406bf41a158SSteven Rostedt if (PTR_ERR(event) == -EAGAIN) 1407bf41a158SSteven Rostedt return -EAGAIN; 1408bf41a158SSteven Rostedt 1409bf41a158SSteven Rostedt /* Only a commited time event can update the write stamp */ 1410bf41a158SSteven Rostedt if (rb_is_commit(cpu_buffer, event)) { 1411bf41a158SSteven Rostedt /* 1412bf41a158SSteven Rostedt * If this is the first on the page, then we need to 1413bf41a158SSteven Rostedt * update the page itself, and just put in a zero. 1414bf41a158SSteven Rostedt */ 1415bf41a158SSteven Rostedt if (rb_event_index(event)) { 14167a8e76a3SSteven Rostedt event->time_delta = *delta & TS_MASK; 14177a8e76a3SSteven Rostedt event->array[0] = *delta >> TS_SHIFT; 1418bf41a158SSteven Rostedt } else { 1419abc9b56dSSteven Rostedt cpu_buffer->commit_page->page->time_stamp = *ts; 1420ea05b57cSSteven Rostedt /* try to discard, since we do not need this */ 1421ea05b57cSSteven Rostedt if (!rb_try_to_discard(cpu_buffer, event)) { 1422ea05b57cSSteven Rostedt /* nope, just zero it */ 1423bf41a158SSteven Rostedt event->time_delta = 0; 1424bf41a158SSteven Rostedt event->array[0] = 0; 1425bf41a158SSteven Rostedt } 1426ea05b57cSSteven Rostedt } 14277a8e76a3SSteven Rostedt cpu_buffer->write_stamp = *ts; 1428bf41a158SSteven Rostedt /* let the caller know this was the commit */ 1429bf41a158SSteven Rostedt ret = 1; 1430bf41a158SSteven Rostedt } else { 1431edd813bfSSteven Rostedt /* Try to discard the event */ 1432edd813bfSSteven Rostedt if (!rb_try_to_discard(cpu_buffer, event)) { 1433bf41a158SSteven Rostedt /* Darn, this is just wasted space */ 1434bf41a158SSteven Rostedt event->time_delta = 0; 1435bf41a158SSteven Rostedt event->array[0] = 0; 14367a8e76a3SSteven Rostedt } 1437*f57a8a19SSteven Rostedt ret = 0; 1438edd813bfSSteven Rostedt } 14397a8e76a3SSteven Rostedt 1440bf41a158SSteven Rostedt *delta = 0; 1441bf41a158SSteven Rostedt 1442bf41a158SSteven Rostedt return ret; 14437a8e76a3SSteven Rostedt } 14447a8e76a3SSteven Rostedt 14457a8e76a3SSteven Rostedt static struct ring_buffer_event * 14467a8e76a3SSteven Rostedt rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, 14471cd8d735SSteven Rostedt unsigned long length) 14487a8e76a3SSteven Rostedt { 14497a8e76a3SSteven Rostedt struct ring_buffer_event *event; 1450168b6b1dSSteven Rostedt u64 ts, delta = 0; 1451bf41a158SSteven Rostedt int commit = 0; 1452818e3dd3SSteven Rostedt int nr_loops = 0; 14537a8e76a3SSteven Rostedt 1454be957c44SSteven Rostedt length = rb_calculate_event_length(length); 1455bf41a158SSteven Rostedt again: 1456818e3dd3SSteven Rostedt /* 1457818e3dd3SSteven Rostedt * We allow for interrupts to reenter here and do a trace. 
1458818e3dd3SSteven Rostedt * If one does, it will cause this original code to loop 1459818e3dd3SSteven Rostedt * back here. Even with heavy interrupts happening, this 1460818e3dd3SSteven Rostedt * should only happen a few times in a row. If this happens 1461818e3dd3SSteven Rostedt * 1000 times in a row, there must be either an interrupt 1462818e3dd3SSteven Rostedt * storm or we have something buggy. 1463818e3dd3SSteven Rostedt * Bail! 1464818e3dd3SSteven Rostedt */ 14653e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 1466818e3dd3SSteven Rostedt return NULL; 1467818e3dd3SSteven Rostedt 146888eb0125SSteven Rostedt ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu); 14697a8e76a3SSteven Rostedt 1470bf41a158SSteven Rostedt /* 1471bf41a158SSteven Rostedt * Only the first commit can update the timestamp. 1472bf41a158SSteven Rostedt * Yes there is a race here. If an interrupt comes in 1473bf41a158SSteven Rostedt * just after the conditional and it traces too, then it 1474bf41a158SSteven Rostedt * will also check the deltas. More than one timestamp may 1475bf41a158SSteven Rostedt * also be made. But only the entry that did the actual 1476bf41a158SSteven Rostedt * commit will be something other than zero. 1477bf41a158SSteven Rostedt */ 14780f0c85fcSSteven Rostedt if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page && 1479bf41a158SSteven Rostedt rb_page_write(cpu_buffer->tail_page) == 14800f0c85fcSSteven Rostedt rb_commit_index(cpu_buffer))) { 1481168b6b1dSSteven Rostedt u64 diff; 1482bf41a158SSteven Rostedt 1483168b6b1dSSteven Rostedt diff = ts - cpu_buffer->write_stamp; 14847a8e76a3SSteven Rostedt 1485168b6b1dSSteven Rostedt /* make sure this diff is calculated here */ 1486bf41a158SSteven Rostedt barrier(); 14877a8e76a3SSteven Rostedt 1488bf41a158SSteven Rostedt /* Did the write stamp get updated already? */ 1489bf41a158SSteven Rostedt if (unlikely(ts < cpu_buffer->write_stamp)) 1490168b6b1dSSteven Rostedt goto get_event; 1491bf41a158SSteven Rostedt 1492168b6b1dSSteven Rostedt delta = diff; 1493168b6b1dSSteven Rostedt if (unlikely(test_time_stamp(delta))) { 1494bf41a158SSteven Rostedt 1495bf41a158SSteven Rostedt commit = rb_add_time_stamp(cpu_buffer, &ts, &delta); 1496bf41a158SSteven Rostedt if (commit == -EBUSY) 14977a8e76a3SSteven Rostedt return NULL; 1498bf41a158SSteven Rostedt 1499bf41a158SSteven Rostedt if (commit == -EAGAIN) 1500bf41a158SSteven Rostedt goto again; 1501bf41a158SSteven Rostedt 1502bf41a158SSteven Rostedt RB_WARN_ON(cpu_buffer, commit < 0); 15037a8e76a3SSteven Rostedt } 1504168b6b1dSSteven Rostedt } 15057a8e76a3SSteven Rostedt 1506168b6b1dSSteven Rostedt get_event: 15071cd8d735SSteven Rostedt event = __rb_reserve_next(cpu_buffer, 0, length, &ts); 1508168b6b1dSSteven Rostedt if (unlikely(PTR_ERR(event) == -EAGAIN)) 1509bf41a158SSteven Rostedt goto again; 15107a8e76a3SSteven Rostedt 1511bf41a158SSteven Rostedt if (!event) { 1512bf41a158SSteven Rostedt if (unlikely(commit)) 1513bf41a158SSteven Rostedt /* 1514bf41a158SSteven Rostedt * Ouch! We needed a timestamp and it was commited. But 1515bf41a158SSteven Rostedt * we didn't get our event reserved. 1516bf41a158SSteven Rostedt */ 1517bf41a158SSteven Rostedt rb_set_commit_to_write(cpu_buffer); 1518bf41a158SSteven Rostedt return NULL; 1519bf41a158SSteven Rostedt } 1520bf41a158SSteven Rostedt 1521bf41a158SSteven Rostedt /* 1522bf41a158SSteven Rostedt * If the timestamp was commited, make the commit our entry 1523bf41a158SSteven Rostedt * now so that we will update it when needed. 
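 *
 * (Here "commit" is the return value of rb_add_time_stamp() above:
 *  it is non-zero only when the time extend event we had to insert
 *  was itself the commit entry, in which case the commit is handed
 *  over to the data event that was just reserved.)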
1524bf41a158SSteven Rostedt */ 15250f0c85fcSSteven Rostedt if (unlikely(commit)) 1526bf41a158SSteven Rostedt rb_set_commit_event(cpu_buffer, event); 1527bf41a158SSteven Rostedt else if (!rb_is_commit(cpu_buffer, event)) 15287a8e76a3SSteven Rostedt delta = 0; 15297a8e76a3SSteven Rostedt 15307a8e76a3SSteven Rostedt event->time_delta = delta; 15317a8e76a3SSteven Rostedt 15327a8e76a3SSteven Rostedt return event; 15337a8e76a3SSteven Rostedt } 15347a8e76a3SSteven Rostedt 1535aa18efb2SSteven Rostedt #define TRACE_RECURSIVE_DEPTH 16 1536261842b7SSteven Rostedt 1537261842b7SSteven Rostedt static int trace_recursive_lock(void) 1538261842b7SSteven Rostedt { 1539aa18efb2SSteven Rostedt current->trace_recursion++; 1540261842b7SSteven Rostedt 1541aa18efb2SSteven Rostedt if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH)) 1542aa18efb2SSteven Rostedt return 0; 1543261842b7SSteven Rostedt 1544261842b7SSteven Rostedt /* Disable all tracing before we do anything else */ 1545261842b7SSteven Rostedt tracing_off_permanent(); 1546e057a5e5SFrederic Weisbecker 15477d7d2b80SSteven Rostedt printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:" 1548e057a5e5SFrederic Weisbecker "HC[%lu]:SC[%lu]:NMI[%lu]\n", 1549aa18efb2SSteven Rostedt current->trace_recursion, 1550e057a5e5SFrederic Weisbecker hardirq_count() >> HARDIRQ_SHIFT, 1551e057a5e5SFrederic Weisbecker softirq_count() >> SOFTIRQ_SHIFT, 1552e057a5e5SFrederic Weisbecker in_nmi()); 1553e057a5e5SFrederic Weisbecker 1554261842b7SSteven Rostedt WARN_ON_ONCE(1); 1555261842b7SSteven Rostedt return -1; 1556261842b7SSteven Rostedt } 1557261842b7SSteven Rostedt 1558261842b7SSteven Rostedt static void trace_recursive_unlock(void) 1559261842b7SSteven Rostedt { 1560aa18efb2SSteven Rostedt WARN_ON_ONCE(!current->trace_recursion); 1561261842b7SSteven Rostedt 1562aa18efb2SSteven Rostedt current->trace_recursion--; 1563261842b7SSteven Rostedt } 1564261842b7SSteven Rostedt 1565bf41a158SSteven Rostedt static DEFINE_PER_CPU(int, rb_need_resched); 1566bf41a158SSteven Rostedt 15677a8e76a3SSteven Rostedt /** 15687a8e76a3SSteven Rostedt * ring_buffer_lock_reserve - reserve a part of the buffer 15697a8e76a3SSteven Rostedt * @buffer: the ring buffer to reserve from 15707a8e76a3SSteven Rostedt * @length: the length of the data to reserve (excluding event header) 15717a8e76a3SSteven Rostedt * 15727a8e76a3SSteven Rostedt * Returns a reseverd event on the ring buffer to copy directly to. 15737a8e76a3SSteven Rostedt * The user of this interface will need to get the body to write into 15747a8e76a3SSteven Rostedt * and can use the ring_buffer_event_data() interface. 15757a8e76a3SSteven Rostedt * 15767a8e76a3SSteven Rostedt * The length is the length of the data needed, not the event length 15777a8e76a3SSteven Rostedt * which also includes the event header. 15787a8e76a3SSteven Rostedt * 15797a8e76a3SSteven Rostedt * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. 15807a8e76a3SSteven Rostedt * If NULL is returned, then nothing has been allocated or locked. 
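 *
 * A minimal usage sketch (illustrative only; "struct my_entry" and
 * its field are assumed caller-side definitions, not part of this
 * file):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->val = 1;
 *	ring_buffer_unlock_commit(buffer, event);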
15817a8e76a3SSteven Rostedt */ 15827a8e76a3SSteven Rostedt struct ring_buffer_event * 15830a987751SArnaldo Carvalho de Melo ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) 15847a8e76a3SSteven Rostedt { 15857a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 15867a8e76a3SSteven Rostedt struct ring_buffer_event *event; 1587bf41a158SSteven Rostedt int cpu, resched; 15887a8e76a3SSteven Rostedt 1589033601a3SSteven Rostedt if (ring_buffer_flags != RB_BUFFERS_ON) 1590a3583244SSteven Rostedt return NULL; 1591a3583244SSteven Rostedt 15927a8e76a3SSteven Rostedt if (atomic_read(&buffer->record_disabled)) 15937a8e76a3SSteven Rostedt return NULL; 15947a8e76a3SSteven Rostedt 1595bf41a158SSteven Rostedt /* If we are tracing schedule, we don't want to recurse */ 1596182e9f5fSSteven Rostedt resched = ftrace_preempt_disable(); 1597bf41a158SSteven Rostedt 1598261842b7SSteven Rostedt if (trace_recursive_lock()) 1599261842b7SSteven Rostedt goto out_nocheck; 1600261842b7SSteven Rostedt 16017a8e76a3SSteven Rostedt cpu = raw_smp_processor_id(); 16027a8e76a3SSteven Rostedt 16039e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1604d769041fSSteven Rostedt goto out; 16057a8e76a3SSteven Rostedt 16067a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 16077a8e76a3SSteven Rostedt 16087a8e76a3SSteven Rostedt if (atomic_read(&cpu_buffer->record_disabled)) 1609d769041fSSteven Rostedt goto out; 16107a8e76a3SSteven Rostedt 1611be957c44SSteven Rostedt if (length > BUF_MAX_DATA_SIZE) 1612bf41a158SSteven Rostedt goto out; 16137a8e76a3SSteven Rostedt 16141cd8d735SSteven Rostedt event = rb_reserve_next_event(cpu_buffer, length); 16157a8e76a3SSteven Rostedt if (!event) 1616d769041fSSteven Rostedt goto out; 16177a8e76a3SSteven Rostedt 1618bf41a158SSteven Rostedt /* 1619bf41a158SSteven Rostedt * Need to store resched state on this cpu. 1620bf41a158SSteven Rostedt * Only the first needs to. 1621bf41a158SSteven Rostedt */ 1622bf41a158SSteven Rostedt 1623bf41a158SSteven Rostedt if (preempt_count() == 1) 1624bf41a158SSteven Rostedt per_cpu(rb_need_resched, cpu) = resched; 1625bf41a158SSteven Rostedt 16267a8e76a3SSteven Rostedt return event; 16277a8e76a3SSteven Rostedt 1628d769041fSSteven Rostedt out: 1629261842b7SSteven Rostedt trace_recursive_unlock(); 1630261842b7SSteven Rostedt 1631261842b7SSteven Rostedt out_nocheck: 1632182e9f5fSSteven Rostedt ftrace_preempt_enable(resched); 16337a8e76a3SSteven Rostedt return NULL; 16347a8e76a3SSteven Rostedt } 1635c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); 16367a8e76a3SSteven Rostedt 16377a8e76a3SSteven Rostedt static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, 16387a8e76a3SSteven Rostedt struct ring_buffer_event *event) 16397a8e76a3SSteven Rostedt { 1640e4906effSSteven Rostedt local_inc(&cpu_buffer->entries); 1641bf41a158SSteven Rostedt 1642bf41a158SSteven Rostedt /* Only process further if we own the commit */ 1643bf41a158SSteven Rostedt if (!rb_is_commit(cpu_buffer, event)) 1644bf41a158SSteven Rostedt return; 1645bf41a158SSteven Rostedt 1646bf41a158SSteven Rostedt cpu_buffer->write_stamp += event->time_delta; 1647bf41a158SSteven Rostedt 1648bf41a158SSteven Rostedt rb_set_commit_to_write(cpu_buffer); 16497a8e76a3SSteven Rostedt } 16507a8e76a3SSteven Rostedt 16517a8e76a3SSteven Rostedt /** 16527a8e76a3SSteven Rostedt * ring_buffer_unlock_commit - commit a reserved 16537a8e76a3SSteven Rostedt * @buffer: The buffer to commit to 16547a8e76a3SSteven Rostedt * @event: The event pointer to commit. 
16557a8e76a3SSteven Rostedt * 16567a8e76a3SSteven Rostedt * This commits the data to the ring buffer, and releases any locks held. 16577a8e76a3SSteven Rostedt * 16587a8e76a3SSteven Rostedt * Must be paired with ring_buffer_lock_reserve. 16597a8e76a3SSteven Rostedt */ 16607a8e76a3SSteven Rostedt int ring_buffer_unlock_commit(struct ring_buffer *buffer, 16610a987751SArnaldo Carvalho de Melo struct ring_buffer_event *event) 16627a8e76a3SSteven Rostedt { 16637a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 16647a8e76a3SSteven Rostedt int cpu = raw_smp_processor_id(); 16657a8e76a3SSteven Rostedt 16667a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 16677a8e76a3SSteven Rostedt 16687a8e76a3SSteven Rostedt rb_commit(cpu_buffer, event); 16697a8e76a3SSteven Rostedt 1670261842b7SSteven Rostedt trace_recursive_unlock(); 1671261842b7SSteven Rostedt 1672bf41a158SSteven Rostedt /* 1673bf41a158SSteven Rostedt * Only the last preempt count needs to restore preemption. 1674bf41a158SSteven Rostedt */ 1675182e9f5fSSteven Rostedt if (preempt_count() == 1) 1676182e9f5fSSteven Rostedt ftrace_preempt_enable(per_cpu(rb_need_resched, cpu)); 1677bf41a158SSteven Rostedt else 1678bf41a158SSteven Rostedt preempt_enable_no_resched_notrace(); 16797a8e76a3SSteven Rostedt 16807a8e76a3SSteven Rostedt return 0; 16817a8e76a3SSteven Rostedt } 1682c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); 16837a8e76a3SSteven Rostedt 1684f3b9aae1SFrederic Weisbecker static inline void rb_event_discard(struct ring_buffer_event *event) 1685f3b9aae1SFrederic Weisbecker { 1686334d4169SLai Jiangshan /* array[0] holds the actual length for the discarded event */ 1687334d4169SLai Jiangshan event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; 1688334d4169SLai Jiangshan event->type_len = RINGBUF_TYPE_PADDING; 1689f3b9aae1SFrederic Weisbecker /* time delta must be non zero */ 1690f3b9aae1SFrederic Weisbecker if (!event->time_delta) 1691f3b9aae1SFrederic Weisbecker event->time_delta = 1; 1692f3b9aae1SFrederic Weisbecker } 1693f3b9aae1SFrederic Weisbecker 16947a8e76a3SSteven Rostedt /** 1695fa1b47ddSSteven Rostedt * ring_buffer_event_discard - discard any event in the ring buffer 1696fa1b47ddSSteven Rostedt * @event: the event to discard 1697fa1b47ddSSteven Rostedt * 1698fa1b47ddSSteven Rostedt * Sometimes a event that is in the ring buffer needs to be ignored. 1699fa1b47ddSSteven Rostedt * This function lets the user discard an event in the ring buffer 1700fa1b47ddSSteven Rostedt * and then that event will not be read later. 1701fa1b47ddSSteven Rostedt * 1702fa1b47ddSSteven Rostedt * Note, it is up to the user to be careful with this, and protect 1703fa1b47ddSSteven Rostedt * against races. If the user discards an event that has been consumed 1704fa1b47ddSSteven Rostedt * it is possible that it could corrupt the ring buffer. 
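 *
 * For an event that has been reserved but not yet committed, the
 * companion ring_buffer_discard_commit() below is the matching call.
 * A sketched pattern ("unwanted" stands for whatever caller-side
 * test makes the reservation unnecessary):
 *
 *	event = ring_buffer_lock_reserve(buffer, len);
 *	if (!event)
 *		return;
 *	if (unwanted)
 *		ring_buffer_discard_commit(buffer, event);
 *	else
 *		ring_buffer_unlock_commit(buffer, event);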
1705fa1b47ddSSteven Rostedt */ 1706fa1b47ddSSteven Rostedt void ring_buffer_event_discard(struct ring_buffer_event *event) 1707fa1b47ddSSteven Rostedt { 1708f3b9aae1SFrederic Weisbecker rb_event_discard(event); 1709fa1b47ddSSteven Rostedt } 1710fa1b47ddSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_event_discard); 1711fa1b47ddSSteven Rostedt 1712fa1b47ddSSteven Rostedt /** 1713fa1b47ddSSteven Rostedt * ring_buffer_commit_discard - discard an event that has not been committed 1714fa1b47ddSSteven Rostedt * @buffer: the ring buffer 1715fa1b47ddSSteven Rostedt * @event: non committed event to discard 1716fa1b47ddSSteven Rostedt * 1717fa1b47ddSSteven Rostedt * This is similar to ring_buffer_event_discard but must only be 1718fa1b47ddSSteven Rostedt * performed on an event that has not been committed yet. The difference 1719fa1b47ddSSteven Rostedt * is that this will also try to free the event from the ring buffer 1720fa1b47ddSSteven Rostedt * if another event has not been added behind it. 1721fa1b47ddSSteven Rostedt * 1722fa1b47ddSSteven Rostedt * If another event has been added behind it, it will set the event 1723fa1b47ddSSteven Rostedt * up as discarded, and perform the commit. 1724fa1b47ddSSteven Rostedt * 1725fa1b47ddSSteven Rostedt * If this function is called, do not call ring_buffer_unlock_commit on 1726fa1b47ddSSteven Rostedt * the event. 1727fa1b47ddSSteven Rostedt */ 1728fa1b47ddSSteven Rostedt void ring_buffer_discard_commit(struct ring_buffer *buffer, 1729fa1b47ddSSteven Rostedt struct ring_buffer_event *event) 1730fa1b47ddSSteven Rostedt { 1731fa1b47ddSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 1732fa1b47ddSSteven Rostedt int cpu; 1733fa1b47ddSSteven Rostedt 1734fa1b47ddSSteven Rostedt /* The event is discarded regardless */ 1735f3b9aae1SFrederic Weisbecker rb_event_discard(event); 1736fa1b47ddSSteven Rostedt 1737fa1b47ddSSteven Rostedt /* 1738fa1b47ddSSteven Rostedt * This must only be called if the event has not been 1739fa1b47ddSSteven Rostedt * committed yet. Thus we can assume that preemption 1740fa1b47ddSSteven Rostedt * is still disabled. 1741fa1b47ddSSteven Rostedt */ 174274f4fd21SSteven Rostedt RB_WARN_ON(buffer, preemptible()); 1743fa1b47ddSSteven Rostedt 1744fa1b47ddSSteven Rostedt cpu = smp_processor_id(); 1745fa1b47ddSSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 1746fa1b47ddSSteven Rostedt 1747edd813bfSSteven Rostedt if (!rb_try_to_discard(cpu_buffer, event)) 1748fa1b47ddSSteven Rostedt goto out; 1749fa1b47ddSSteven Rostedt 1750fa1b47ddSSteven Rostedt /* 1751fa1b47ddSSteven Rostedt * The commit is still visible by the reader, so we 1752fa1b47ddSSteven Rostedt * must increment entries. 1753fa1b47ddSSteven Rostedt */ 1754e4906effSSteven Rostedt local_inc(&cpu_buffer->entries); 1755fa1b47ddSSteven Rostedt out: 1756fa1b47ddSSteven Rostedt /* 1757fa1b47ddSSteven Rostedt * If a write came in and pushed the tail page 1758fa1b47ddSSteven Rostedt * we still need to update the commit pointer 1759fa1b47ddSSteven Rostedt * if we were the commit. 1760fa1b47ddSSteven Rostedt */ 1761fa1b47ddSSteven Rostedt if (rb_is_commit(cpu_buffer, event)) 1762fa1b47ddSSteven Rostedt rb_set_commit_to_write(cpu_buffer); 1763fa1b47ddSSteven Rostedt 1764f3b9aae1SFrederic Weisbecker trace_recursive_unlock(); 1765f3b9aae1SFrederic Weisbecker 1766fa1b47ddSSteven Rostedt /* 1767fa1b47ddSSteven Rostedt * Only the last preempt count needs to restore preemption. 
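 *
 * (Nested writers run from irq, softirq or NMI context, where the
 *  preempt count already carries those offsets, so only the
 *  outermost task-level writer sees a count of exactly 1 here - and
 *  only that writer saved a resched state in rb_need_resched.)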
1768fa1b47ddSSteven Rostedt */ 1769fa1b47ddSSteven Rostedt if (preempt_count() == 1) 1770fa1b47ddSSteven Rostedt ftrace_preempt_enable(per_cpu(rb_need_resched, cpu)); 1771fa1b47ddSSteven Rostedt else 1772fa1b47ddSSteven Rostedt preempt_enable_no_resched_notrace(); 1773fa1b47ddSSteven Rostedt 1774fa1b47ddSSteven Rostedt } 1775fa1b47ddSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); 1776fa1b47ddSSteven Rostedt 1777fa1b47ddSSteven Rostedt /** 17787a8e76a3SSteven Rostedt * ring_buffer_write - write data to the buffer without reserving 17797a8e76a3SSteven Rostedt * @buffer: The ring buffer to write to. 17807a8e76a3SSteven Rostedt * @length: The length of the data being written (excluding the event header) 17817a8e76a3SSteven Rostedt * @data: The data to write to the buffer. 17827a8e76a3SSteven Rostedt * 17837a8e76a3SSteven Rostedt * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as 17847a8e76a3SSteven Rostedt * one function. If you already have the data to write to the buffer, it 17857a8e76a3SSteven Rostedt * may be easier to simply call this function. 17867a8e76a3SSteven Rostedt * 17877a8e76a3SSteven Rostedt * Note, like ring_buffer_lock_reserve, the length is the length of the data 17887a8e76a3SSteven Rostedt * and not the length of the event which would hold the header. 17897a8e76a3SSteven Rostedt */ 17907a8e76a3SSteven Rostedt int ring_buffer_write(struct ring_buffer *buffer, 17917a8e76a3SSteven Rostedt unsigned long length, 17927a8e76a3SSteven Rostedt void *data) 17937a8e76a3SSteven Rostedt { 17947a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 17957a8e76a3SSteven Rostedt struct ring_buffer_event *event; 17967a8e76a3SSteven Rostedt void *body; 17977a8e76a3SSteven Rostedt int ret = -EBUSY; 1798bf41a158SSteven Rostedt int cpu, resched; 17997a8e76a3SSteven Rostedt 1800033601a3SSteven Rostedt if (ring_buffer_flags != RB_BUFFERS_ON) 1801a3583244SSteven Rostedt return -EBUSY; 1802a3583244SSteven Rostedt 18037a8e76a3SSteven Rostedt if (atomic_read(&buffer->record_disabled)) 18047a8e76a3SSteven Rostedt return -EBUSY; 18057a8e76a3SSteven Rostedt 1806182e9f5fSSteven Rostedt resched = ftrace_preempt_disable(); 1807bf41a158SSteven Rostedt 18087a8e76a3SSteven Rostedt cpu = raw_smp_processor_id(); 18097a8e76a3SSteven Rostedt 18109e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1811d769041fSSteven Rostedt goto out; 18127a8e76a3SSteven Rostedt 18137a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 18147a8e76a3SSteven Rostedt 18157a8e76a3SSteven Rostedt if (atomic_read(&cpu_buffer->record_disabled)) 18167a8e76a3SSteven Rostedt goto out; 18177a8e76a3SSteven Rostedt 1818be957c44SSteven Rostedt if (length > BUF_MAX_DATA_SIZE) 1819be957c44SSteven Rostedt goto out; 1820be957c44SSteven Rostedt 1821be957c44SSteven Rostedt event = rb_reserve_next_event(cpu_buffer, length); 18227a8e76a3SSteven Rostedt if (!event) 18237a8e76a3SSteven Rostedt goto out; 18247a8e76a3SSteven Rostedt 18257a8e76a3SSteven Rostedt body = rb_event_data(event); 18267a8e76a3SSteven Rostedt 18277a8e76a3SSteven Rostedt memcpy(body, data, length); 18287a8e76a3SSteven Rostedt 18297a8e76a3SSteven Rostedt rb_commit(cpu_buffer, event); 18307a8e76a3SSteven Rostedt 18317a8e76a3SSteven Rostedt ret = 0; 18327a8e76a3SSteven Rostedt out: 1833182e9f5fSSteven Rostedt ftrace_preempt_enable(resched); 18347a8e76a3SSteven Rostedt 18357a8e76a3SSteven Rostedt return ret; 18367a8e76a3SSteven Rostedt } 1837c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_write); 18387a8e76a3SSteven 
Rostedt 183934a148bfSAndrew Morton static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 1840bf41a158SSteven Rostedt { 1841bf41a158SSteven Rostedt struct buffer_page *reader = cpu_buffer->reader_page; 1842bf41a158SSteven Rostedt struct buffer_page *head = cpu_buffer->head_page; 1843bf41a158SSteven Rostedt struct buffer_page *commit = cpu_buffer->commit_page; 1844bf41a158SSteven Rostedt 1845bf41a158SSteven Rostedt return reader->read == rb_page_commit(reader) && 1846bf41a158SSteven Rostedt (commit == reader || 1847bf41a158SSteven Rostedt (commit == head && 1848bf41a158SSteven Rostedt head->read == rb_page_commit(commit))); 1849bf41a158SSteven Rostedt } 1850bf41a158SSteven Rostedt 18517a8e76a3SSteven Rostedt /** 18527a8e76a3SSteven Rostedt * ring_buffer_record_disable - stop all writes into the buffer 18537a8e76a3SSteven Rostedt * @buffer: The ring buffer to stop writes to. 18547a8e76a3SSteven Rostedt * 18557a8e76a3SSteven Rostedt * This prevents all writes to the buffer. Any attempt to write 18567a8e76a3SSteven Rostedt * to the buffer after this will fail and return NULL. 18577a8e76a3SSteven Rostedt * 18587a8e76a3SSteven Rostedt * The caller should call synchronize_sched() after this. 18597a8e76a3SSteven Rostedt */ 18607a8e76a3SSteven Rostedt void ring_buffer_record_disable(struct ring_buffer *buffer) 18617a8e76a3SSteven Rostedt { 18627a8e76a3SSteven Rostedt atomic_inc(&buffer->record_disabled); 18637a8e76a3SSteven Rostedt } 1864c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable); 18657a8e76a3SSteven Rostedt 18667a8e76a3SSteven Rostedt /** 18677a8e76a3SSteven Rostedt * ring_buffer_record_enable - enable writes to the buffer 18687a8e76a3SSteven Rostedt * @buffer: The ring buffer to enable writes 18697a8e76a3SSteven Rostedt * 18707a8e76a3SSteven Rostedt * Note, multiple disables will need the same number of enables 18717a8e76a3SSteven Rostedt * to truely enable the writing (much like preempt_disable). 18727a8e76a3SSteven Rostedt */ 18737a8e76a3SSteven Rostedt void ring_buffer_record_enable(struct ring_buffer *buffer) 18747a8e76a3SSteven Rostedt { 18757a8e76a3SSteven Rostedt atomic_dec(&buffer->record_disabled); 18767a8e76a3SSteven Rostedt } 1877c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable); 18787a8e76a3SSteven Rostedt 18797a8e76a3SSteven Rostedt /** 18807a8e76a3SSteven Rostedt * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 18817a8e76a3SSteven Rostedt * @buffer: The ring buffer to stop writes to. 18827a8e76a3SSteven Rostedt * @cpu: The CPU buffer to stop 18837a8e76a3SSteven Rostedt * 18847a8e76a3SSteven Rostedt * This prevents all writes to the buffer. Any attempt to write 18857a8e76a3SSteven Rostedt * to the buffer after this will fail and return NULL. 18867a8e76a3SSteven Rostedt * 18877a8e76a3SSteven Rostedt * The caller should call synchronize_sched() after this. 
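 *
 * A sketched pattern for draining one CPU without new writes racing
 * in (the middle step stands for whatever read path the caller uses):
 *
 *	ring_buffer_record_disable_cpu(buffer, cpu);
 *	synchronize_sched();
 *	... consume or reset that cpu buffer ...
 *	ring_buffer_record_enable_cpu(buffer, cpu);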
18887a8e76a3SSteven Rostedt */ 18897a8e76a3SSteven Rostedt void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) 18907a8e76a3SSteven Rostedt { 18917a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 18927a8e76a3SSteven Rostedt 18939e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 18948aabee57SSteven Rostedt return; 18957a8e76a3SSteven Rostedt 18967a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 18977a8e76a3SSteven Rostedt atomic_inc(&cpu_buffer->record_disabled); 18987a8e76a3SSteven Rostedt } 1899c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); 19007a8e76a3SSteven Rostedt 19017a8e76a3SSteven Rostedt /** 19027a8e76a3SSteven Rostedt * ring_buffer_record_enable_cpu - enable writes to the buffer 19037a8e76a3SSteven Rostedt * @buffer: The ring buffer to enable writes 19047a8e76a3SSteven Rostedt * @cpu: The CPU to enable. 19057a8e76a3SSteven Rostedt * 19067a8e76a3SSteven Rostedt * Note, multiple disables will need the same number of enables 19077a8e76a3SSteven Rostedt * to truly enable the writing (much like preempt_disable). 19087a8e76a3SSteven Rostedt */ 19097a8e76a3SSteven Rostedt void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) 19107a8e76a3SSteven Rostedt { 19117a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 19127a8e76a3SSteven Rostedt 19139e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 19148aabee57SSteven Rostedt return; 19157a8e76a3SSteven Rostedt 19167a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 19177a8e76a3SSteven Rostedt atomic_dec(&cpu_buffer->record_disabled); 19187a8e76a3SSteven Rostedt } 1919c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); 19207a8e76a3SSteven Rostedt 19217a8e76a3SSteven Rostedt /** 19227a8e76a3SSteven Rostedt * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 19237a8e76a3SSteven Rostedt * @buffer: The ring buffer 19247a8e76a3SSteven Rostedt * @cpu: The per CPU buffer to get the entries from.
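 *
 * (In effect the value computed below is: entries written - entries
 *  lost to overwrite - entries already read, i.e. what is currently
 *  waiting to be consumed on that CPU.)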
19257a8e76a3SSteven Rostedt */ 19267a8e76a3SSteven Rostedt unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) 19277a8e76a3SSteven Rostedt { 19287a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 19298aabee57SSteven Rostedt unsigned long ret; 19307a8e76a3SSteven Rostedt 19319e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 19328aabee57SSteven Rostedt return 0; 19337a8e76a3SSteven Rostedt 19347a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 1935e4906effSSteven Rostedt ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun) 1936e4906effSSteven Rostedt - cpu_buffer->read; 1937554f786eSSteven Rostedt 1938554f786eSSteven Rostedt return ret; 19397a8e76a3SSteven Rostedt } 1940c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); 19417a8e76a3SSteven Rostedt 19427a8e76a3SSteven Rostedt /** 19437a8e76a3SSteven Rostedt * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer 19447a8e76a3SSteven Rostedt * @buffer: The ring buffer 19457a8e76a3SSteven Rostedt * @cpu: The per CPU buffer to get the number of overruns from 19467a8e76a3SSteven Rostedt */ 19477a8e76a3SSteven Rostedt unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) 19487a8e76a3SSteven Rostedt { 19497a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 19508aabee57SSteven Rostedt unsigned long ret; 19517a8e76a3SSteven Rostedt 19529e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 19538aabee57SSteven Rostedt return 0; 19547a8e76a3SSteven Rostedt 19557a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 1956554f786eSSteven Rostedt ret = cpu_buffer->overrun; 1957554f786eSSteven Rostedt 1958554f786eSSteven Rostedt return ret; 19597a8e76a3SSteven Rostedt } 1960c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); 19617a8e76a3SSteven Rostedt 19627a8e76a3SSteven Rostedt /** 1963f0d2c681SSteven Rostedt * ring_buffer_nmi_dropped_cpu - get the number of nmis that were dropped 1964f0d2c681SSteven Rostedt * @buffer: The ring buffer 1965f0d2c681SSteven Rostedt * @cpu: The per CPU buffer to get the number of overruns from 1966f0d2c681SSteven Rostedt */ 1967f0d2c681SSteven Rostedt unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu) 1968f0d2c681SSteven Rostedt { 1969f0d2c681SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 1970f0d2c681SSteven Rostedt unsigned long ret; 1971f0d2c681SSteven Rostedt 1972f0d2c681SSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1973f0d2c681SSteven Rostedt return 0; 1974f0d2c681SSteven Rostedt 1975f0d2c681SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 1976f0d2c681SSteven Rostedt ret = cpu_buffer->nmi_dropped; 1977f0d2c681SSteven Rostedt 1978f0d2c681SSteven Rostedt return ret; 1979f0d2c681SSteven Rostedt } 1980f0d2c681SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu); 1981f0d2c681SSteven Rostedt 1982f0d2c681SSteven Rostedt /** 1983f0d2c681SSteven Rostedt * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits 1984f0d2c681SSteven Rostedt * @buffer: The ring buffer 1985f0d2c681SSteven Rostedt * @cpu: The per CPU buffer to get the number of overruns from 1986f0d2c681SSteven Rostedt */ 1987f0d2c681SSteven Rostedt unsigned long 1988f0d2c681SSteven Rostedt ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu) 1989f0d2c681SSteven Rostedt { 1990f0d2c681SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 1991f0d2c681SSteven Rostedt unsigned long ret; 1992f0d2c681SSteven 
Rostedt 1993f0d2c681SSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask)) 1994f0d2c681SSteven Rostedt return 0; 1995f0d2c681SSteven Rostedt 1996f0d2c681SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 1997f0d2c681SSteven Rostedt ret = cpu_buffer->commit_overrun; 1998f0d2c681SSteven Rostedt 1999f0d2c681SSteven Rostedt return ret; 2000f0d2c681SSteven Rostedt } 2001f0d2c681SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); 2002f0d2c681SSteven Rostedt 2003f0d2c681SSteven Rostedt /** 20047a8e76a3SSteven Rostedt * ring_buffer_entries - get the number of entries in a buffer 20057a8e76a3SSteven Rostedt * @buffer: The ring buffer 20067a8e76a3SSteven Rostedt * 20077a8e76a3SSteven Rostedt * Returns the total number of entries in the ring buffer 20087a8e76a3SSteven Rostedt * (all CPU entries) 20097a8e76a3SSteven Rostedt */ 20107a8e76a3SSteven Rostedt unsigned long ring_buffer_entries(struct ring_buffer *buffer) 20117a8e76a3SSteven Rostedt { 20127a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 20137a8e76a3SSteven Rostedt unsigned long entries = 0; 20147a8e76a3SSteven Rostedt int cpu; 20157a8e76a3SSteven Rostedt 20167a8e76a3SSteven Rostedt /* if you care about this being correct, lock the buffer */ 20177a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 20187a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 2019e4906effSSteven Rostedt entries += (local_read(&cpu_buffer->entries) - 2020e4906effSSteven Rostedt cpu_buffer->overrun) - cpu_buffer->read; 20217a8e76a3SSteven Rostedt } 20227a8e76a3SSteven Rostedt 20237a8e76a3SSteven Rostedt return entries; 20247a8e76a3SSteven Rostedt } 2025c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries); 20267a8e76a3SSteven Rostedt 20277a8e76a3SSteven Rostedt /** 20287a8e76a3SSteven Rostedt * ring_buffer_overrun_cpu - get the number of overruns in buffer 20297a8e76a3SSteven Rostedt * @buffer: The ring buffer 20307a8e76a3SSteven Rostedt * 20317a8e76a3SSteven Rostedt * Returns the total number of overruns in the ring buffer 20327a8e76a3SSteven Rostedt * (all CPU entries) 20337a8e76a3SSteven Rostedt */ 20347a8e76a3SSteven Rostedt unsigned long ring_buffer_overruns(struct ring_buffer *buffer) 20357a8e76a3SSteven Rostedt { 20367a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 20377a8e76a3SSteven Rostedt unsigned long overruns = 0; 20387a8e76a3SSteven Rostedt int cpu; 20397a8e76a3SSteven Rostedt 20407a8e76a3SSteven Rostedt /* if you care about this being correct, lock the buffer */ 20417a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 20427a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 20437a8e76a3SSteven Rostedt overruns += cpu_buffer->overrun; 20447a8e76a3SSteven Rostedt } 20457a8e76a3SSteven Rostedt 20467a8e76a3SSteven Rostedt return overruns; 20477a8e76a3SSteven Rostedt } 2048c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overruns); 20497a8e76a3SSteven Rostedt 2050642edba5SSteven Rostedt static void rb_iter_reset(struct ring_buffer_iter *iter) 20517a8e76a3SSteven Rostedt { 20527a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 20537a8e76a3SSteven Rostedt 2054d769041fSSteven Rostedt /* Iterator usage is expected to have record disabled */ 2055d769041fSSteven Rostedt if (list_empty(&cpu_buffer->reader_page->list)) { 20567a8e76a3SSteven Rostedt iter->head_page = cpu_buffer->head_page; 20576f807acdSSteven Rostedt iter->head = cpu_buffer->head_page->read; 2058d769041fSSteven Rostedt } else { 2059d769041fSSteven Rostedt iter->head_page = 
cpu_buffer->reader_page; 20606f807acdSSteven Rostedt iter->head = cpu_buffer->reader_page->read; 2061d769041fSSteven Rostedt } 2062d769041fSSteven Rostedt if (iter->head) 2063d769041fSSteven Rostedt iter->read_stamp = cpu_buffer->read_stamp; 2064d769041fSSteven Rostedt else 2065abc9b56dSSteven Rostedt iter->read_stamp = iter->head_page->page->time_stamp; 2066642edba5SSteven Rostedt } 2067f83c9d0fSSteven Rostedt 2068642edba5SSteven Rostedt /** 2069642edba5SSteven Rostedt * ring_buffer_iter_reset - reset an iterator 2070642edba5SSteven Rostedt * @iter: The iterator to reset 2071642edba5SSteven Rostedt * 2072642edba5SSteven Rostedt * Resets the iterator, so that it will start from the beginning 2073642edba5SSteven Rostedt * again. 2074642edba5SSteven Rostedt */ 2075642edba5SSteven Rostedt void ring_buffer_iter_reset(struct ring_buffer_iter *iter) 2076642edba5SSteven Rostedt { 2077554f786eSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 2078642edba5SSteven Rostedt unsigned long flags; 2079642edba5SSteven Rostedt 2080554f786eSSteven Rostedt if (!iter) 2081554f786eSSteven Rostedt return; 2082554f786eSSteven Rostedt 2083554f786eSSteven Rostedt cpu_buffer = iter->cpu_buffer; 2084554f786eSSteven Rostedt 2085642edba5SSteven Rostedt spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2086642edba5SSteven Rostedt rb_iter_reset(iter); 2087f83c9d0fSSteven Rostedt spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 20887a8e76a3SSteven Rostedt } 2089c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); 20907a8e76a3SSteven Rostedt 20917a8e76a3SSteven Rostedt /** 20927a8e76a3SSteven Rostedt * ring_buffer_iter_empty - check if an iterator has no more to read 20937a8e76a3SSteven Rostedt * @iter: The iterator to check 20947a8e76a3SSteven Rostedt */ 20957a8e76a3SSteven Rostedt int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 20967a8e76a3SSteven Rostedt { 20977a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 20987a8e76a3SSteven Rostedt 20997a8e76a3SSteven Rostedt cpu_buffer = iter->cpu_buffer; 21007a8e76a3SSteven Rostedt 2101bf41a158SSteven Rostedt return iter->head_page == cpu_buffer->commit_page && 2102bf41a158SSteven Rostedt iter->head == rb_commit_index(cpu_buffer); 21037a8e76a3SSteven Rostedt } 2104c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 21057a8e76a3SSteven Rostedt 21067a8e76a3SSteven Rostedt static void 21077a8e76a3SSteven Rostedt rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 21087a8e76a3SSteven Rostedt struct ring_buffer_event *event) 21097a8e76a3SSteven Rostedt { 21107a8e76a3SSteven Rostedt u64 delta; 21117a8e76a3SSteven Rostedt 2112334d4169SLai Jiangshan switch (event->type_len) { 21137a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 21147a8e76a3SSteven Rostedt return; 21157a8e76a3SSteven Rostedt 21167a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 21177a8e76a3SSteven Rostedt delta = event->array[0]; 21187a8e76a3SSteven Rostedt delta <<= TS_SHIFT; 21197a8e76a3SSteven Rostedt delta += event->time_delta; 21207a8e76a3SSteven Rostedt cpu_buffer->read_stamp += delta; 21217a8e76a3SSteven Rostedt return; 21227a8e76a3SSteven Rostedt 21237a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 21247a8e76a3SSteven Rostedt /* FIXME: not implemented */ 21257a8e76a3SSteven Rostedt return; 21267a8e76a3SSteven Rostedt 21277a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 21287a8e76a3SSteven Rostedt cpu_buffer->read_stamp += event->time_delta; 21297a8e76a3SSteven Rostedt return; 21307a8e76a3SSteven Rostedt 21317a8e76a3SSteven 
Rostedt default: 21327a8e76a3SSteven Rostedt BUG(); 21337a8e76a3SSteven Rostedt } 21347a8e76a3SSteven Rostedt return; 21357a8e76a3SSteven Rostedt } 21367a8e76a3SSteven Rostedt 21377a8e76a3SSteven Rostedt static void 21387a8e76a3SSteven Rostedt rb_update_iter_read_stamp(struct ring_buffer_iter *iter, 21397a8e76a3SSteven Rostedt struct ring_buffer_event *event) 21407a8e76a3SSteven Rostedt { 21417a8e76a3SSteven Rostedt u64 delta; 21427a8e76a3SSteven Rostedt 2143334d4169SLai Jiangshan switch (event->type_len) { 21447a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 21457a8e76a3SSteven Rostedt return; 21467a8e76a3SSteven Rostedt 21477a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 21487a8e76a3SSteven Rostedt delta = event->array[0]; 21497a8e76a3SSteven Rostedt delta <<= TS_SHIFT; 21507a8e76a3SSteven Rostedt delta += event->time_delta; 21517a8e76a3SSteven Rostedt iter->read_stamp += delta; 21527a8e76a3SSteven Rostedt return; 21537a8e76a3SSteven Rostedt 21547a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 21557a8e76a3SSteven Rostedt /* FIXME: not implemented */ 21567a8e76a3SSteven Rostedt return; 21577a8e76a3SSteven Rostedt 21587a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 21597a8e76a3SSteven Rostedt iter->read_stamp += event->time_delta; 21607a8e76a3SSteven Rostedt return; 21617a8e76a3SSteven Rostedt 21627a8e76a3SSteven Rostedt default: 21637a8e76a3SSteven Rostedt BUG(); 21647a8e76a3SSteven Rostedt } 21657a8e76a3SSteven Rostedt return; 21667a8e76a3SSteven Rostedt } 21677a8e76a3SSteven Rostedt 2168d769041fSSteven Rostedt static struct buffer_page * 2169d769041fSSteven Rostedt rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 21707a8e76a3SSteven Rostedt { 2171d769041fSSteven Rostedt struct buffer_page *reader = NULL; 2172d769041fSSteven Rostedt unsigned long flags; 2173818e3dd3SSteven Rostedt int nr_loops = 0; 2174d769041fSSteven Rostedt 21753e03fb7fSSteven Rostedt local_irq_save(flags); 21763e03fb7fSSteven Rostedt __raw_spin_lock(&cpu_buffer->lock); 2177d769041fSSteven Rostedt 2178d769041fSSteven Rostedt again: 2179818e3dd3SSteven Rostedt /* 2180818e3dd3SSteven Rostedt * This should normally only loop twice. But because the 2181818e3dd3SSteven Rostedt * start of the reader inserts an empty page, it causes 2182818e3dd3SSteven Rostedt * a case where we will loop three times. There should be no 2183818e3dd3SSteven Rostedt * reason to loop four times (that I know of). 
2184818e3dd3SSteven Rostedt */ 21853e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { 2186818e3dd3SSteven Rostedt reader = NULL; 2187818e3dd3SSteven Rostedt goto out; 2188818e3dd3SSteven Rostedt } 2189818e3dd3SSteven Rostedt 2190d769041fSSteven Rostedt reader = cpu_buffer->reader_page; 2191d769041fSSteven Rostedt 2192d769041fSSteven Rostedt /* If there's more to read, return this page */ 2193bf41a158SSteven Rostedt if (cpu_buffer->reader_page->read < rb_page_size(reader)) 2194d769041fSSteven Rostedt goto out; 2195d769041fSSteven Rostedt 2196d769041fSSteven Rostedt /* Never should we have an index greater than the size */ 21973e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 21983e89c7bbSSteven Rostedt cpu_buffer->reader_page->read > rb_page_size(reader))) 21993e89c7bbSSteven Rostedt goto out; 2200d769041fSSteven Rostedt 2201d769041fSSteven Rostedt /* check if we caught up to the tail */ 2202d769041fSSteven Rostedt reader = NULL; 2203bf41a158SSteven Rostedt if (cpu_buffer->commit_page == cpu_buffer->reader_page) 2204d769041fSSteven Rostedt goto out; 22057a8e76a3SSteven Rostedt 22067a8e76a3SSteven Rostedt /* 2207d769041fSSteven Rostedt * Splice the empty reader page into the list around the head. 2208d769041fSSteven Rostedt * Reset the reader page to size zero. 22097a8e76a3SSteven Rostedt */ 2210d769041fSSteven Rostedt 2211d769041fSSteven Rostedt reader = cpu_buffer->head_page; 2212d769041fSSteven Rostedt cpu_buffer->reader_page->list.next = reader->list.next; 2213d769041fSSteven Rostedt cpu_buffer->reader_page->list.prev = reader->list.prev; 2214bf41a158SSteven Rostedt 2215bf41a158SSteven Rostedt local_set(&cpu_buffer->reader_page->write, 0); 2216778c55d4SSteven Rostedt local_set(&cpu_buffer->reader_page->entries, 0); 2217abc9b56dSSteven Rostedt local_set(&cpu_buffer->reader_page->page->commit, 0); 2218d769041fSSteven Rostedt 2219d769041fSSteven Rostedt /* Make the reader page now replace the head */ 2220d769041fSSteven Rostedt reader->list.prev->next = &cpu_buffer->reader_page->list; 2221d769041fSSteven Rostedt reader->list.next->prev = &cpu_buffer->reader_page->list; 2222d769041fSSteven Rostedt 2223d769041fSSteven Rostedt /* 2224d769041fSSteven Rostedt * If the tail is on the reader, then we must set the head 2225d769041fSSteven Rostedt * to the inserted page, otherwise we set it one before. 
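 *
 * (Matching the code just below: the head is first set to the empty
 *  page we spliced in; if the writer's commit page is not the page
 *  being taken out as the new reader page, the head then advances
 *  one page past it.)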
2226d769041fSSteven Rostedt */ 2227d769041fSSteven Rostedt cpu_buffer->head_page = cpu_buffer->reader_page; 2228d769041fSSteven Rostedt 2229bf41a158SSteven Rostedt if (cpu_buffer->commit_page != reader) 22307a8e76a3SSteven Rostedt rb_inc_page(cpu_buffer, &cpu_buffer->head_page); 2231d769041fSSteven Rostedt 2232d769041fSSteven Rostedt /* Finally update the reader page to the new head */ 2233d769041fSSteven Rostedt cpu_buffer->reader_page = reader; 2234d769041fSSteven Rostedt rb_reset_reader_page(cpu_buffer); 2235d769041fSSteven Rostedt 2236d769041fSSteven Rostedt goto again; 2237d769041fSSteven Rostedt 2238d769041fSSteven Rostedt out: 22393e03fb7fSSteven Rostedt __raw_spin_unlock(&cpu_buffer->lock); 22403e03fb7fSSteven Rostedt local_irq_restore(flags); 2241d769041fSSteven Rostedt 2242d769041fSSteven Rostedt return reader; 22437a8e76a3SSteven Rostedt } 22447a8e76a3SSteven Rostedt 2245d769041fSSteven Rostedt static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) 2246d769041fSSteven Rostedt { 2247d769041fSSteven Rostedt struct ring_buffer_event *event; 2248d769041fSSteven Rostedt struct buffer_page *reader; 2249d769041fSSteven Rostedt unsigned length; 2250d769041fSSteven Rostedt 2251d769041fSSteven Rostedt reader = rb_get_reader_page(cpu_buffer); 2252d769041fSSteven Rostedt 2253d769041fSSteven Rostedt /* This function should not be called when buffer is empty */ 22543e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, !reader)) 22553e89c7bbSSteven Rostedt return; 2256d769041fSSteven Rostedt 2257d769041fSSteven Rostedt event = rb_reader_event(cpu_buffer); 22587a8e76a3SSteven Rostedt 2259334d4169SLai Jiangshan if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX 2260334d4169SLai Jiangshan || rb_discarded_event(event)) 2261e4906effSSteven Rostedt cpu_buffer->read++; 22627a8e76a3SSteven Rostedt 22637a8e76a3SSteven Rostedt rb_update_read_stamp(cpu_buffer, event); 22647a8e76a3SSteven Rostedt 2265d769041fSSteven Rostedt length = rb_event_length(event); 22666f807acdSSteven Rostedt cpu_buffer->reader_page->read += length; 22677a8e76a3SSteven Rostedt } 22687a8e76a3SSteven Rostedt 22697a8e76a3SSteven Rostedt static void rb_advance_iter(struct ring_buffer_iter *iter) 22707a8e76a3SSteven Rostedt { 22717a8e76a3SSteven Rostedt struct ring_buffer *buffer; 22727a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 22737a8e76a3SSteven Rostedt struct ring_buffer_event *event; 22747a8e76a3SSteven Rostedt unsigned length; 22757a8e76a3SSteven Rostedt 22767a8e76a3SSteven Rostedt cpu_buffer = iter->cpu_buffer; 22777a8e76a3SSteven Rostedt buffer = cpu_buffer->buffer; 22787a8e76a3SSteven Rostedt 22797a8e76a3SSteven Rostedt /* 22807a8e76a3SSteven Rostedt * Check if we are at the end of the buffer. 22817a8e76a3SSteven Rostedt */ 2282bf41a158SSteven Rostedt if (iter->head >= rb_page_size(iter->head_page)) { 2283ea05b57cSSteven Rostedt /* discarded commits can make the page empty */ 2284ea05b57cSSteven Rostedt if (iter->head_page == cpu_buffer->commit_page) 22853e89c7bbSSteven Rostedt return; 2286d769041fSSteven Rostedt rb_inc_iter(iter); 22877a8e76a3SSteven Rostedt return; 22887a8e76a3SSteven Rostedt } 22897a8e76a3SSteven Rostedt 22907a8e76a3SSteven Rostedt event = rb_iter_head_event(iter); 22917a8e76a3SSteven Rostedt 22927a8e76a3SSteven Rostedt length = rb_event_length(event); 22937a8e76a3SSteven Rostedt 22947a8e76a3SSteven Rostedt /* 22957a8e76a3SSteven Rostedt * This should not be called to advance the header if we are 22967a8e76a3SSteven Rostedt * at the tail of the buffer. 
22977a8e76a3SSteven Rostedt */ 22983e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 2299f536aafcSSteven Rostedt (iter->head_page == cpu_buffer->commit_page) && 23003e89c7bbSSteven Rostedt (iter->head + length > rb_commit_index(cpu_buffer)))) 23013e89c7bbSSteven Rostedt return; 23027a8e76a3SSteven Rostedt 23037a8e76a3SSteven Rostedt rb_update_iter_read_stamp(iter, event); 23047a8e76a3SSteven Rostedt 23057a8e76a3SSteven Rostedt iter->head += length; 23067a8e76a3SSteven Rostedt 23077a8e76a3SSteven Rostedt /* check for end of page padding */ 2308bf41a158SSteven Rostedt if ((iter->head >= rb_page_size(iter->head_page)) && 2309bf41a158SSteven Rostedt (iter->head_page != cpu_buffer->commit_page)) 23107a8e76a3SSteven Rostedt rb_advance_iter(iter); 23117a8e76a3SSteven Rostedt } 23127a8e76a3SSteven Rostedt 2313f83c9d0fSSteven Rostedt static struct ring_buffer_event * 2314f83c9d0fSSteven Rostedt rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) 23157a8e76a3SSteven Rostedt { 23167a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 23177a8e76a3SSteven Rostedt struct ring_buffer_event *event; 2318d769041fSSteven Rostedt struct buffer_page *reader; 2319818e3dd3SSteven Rostedt int nr_loops = 0; 23207a8e76a3SSteven Rostedt 23217a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 23227a8e76a3SSteven Rostedt 23237a8e76a3SSteven Rostedt again: 2324818e3dd3SSteven Rostedt /* 2325818e3dd3SSteven Rostedt * We repeat when a timestamp is encountered. It is possible 2326818e3dd3SSteven Rostedt * to get multiple timestamps from an interrupt entering just 2327ea05b57cSSteven Rostedt * as one timestamp is about to be written, or from discarded 2328ea05b57cSSteven Rostedt * commits. The most that we can have is the number on a single page. 2329818e3dd3SSteven Rostedt */ 2330ea05b57cSSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE)) 2331818e3dd3SSteven Rostedt return NULL; 2332818e3dd3SSteven Rostedt 2333d769041fSSteven Rostedt reader = rb_get_reader_page(cpu_buffer); 2334d769041fSSteven Rostedt if (!reader) 23357a8e76a3SSteven Rostedt return NULL; 23367a8e76a3SSteven Rostedt 2337d769041fSSteven Rostedt event = rb_reader_event(cpu_buffer); 23387a8e76a3SSteven Rostedt 2339334d4169SLai Jiangshan switch (event->type_len) { 23407a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 23412d622719STom Zanussi if (rb_null_event(event)) 2342bf41a158SSteven Rostedt RB_WARN_ON(cpu_buffer, 1); 23432d622719STom Zanussi /* 23442d622719STom Zanussi * Because the writer could be discarding every 23452d622719STom Zanussi * event it creates (which would probably be bad) 23462d622719STom Zanussi * if we were to go back to "again" then we may never 23472d622719STom Zanussi * catch up, and will trigger the warn on, or lock 23482d622719STom Zanussi * the box. Return the padding, and we will release 23492d622719STom Zanussi * the current locks, and try again. 
23502d622719STom Zanussi */ 2351d769041fSSteven Rostedt rb_advance_reader(cpu_buffer); 23522d622719STom Zanussi return event; 23537a8e76a3SSteven Rostedt 23547a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 23557a8e76a3SSteven Rostedt /* Internal data, OK to advance */ 2356d769041fSSteven Rostedt rb_advance_reader(cpu_buffer); 23577a8e76a3SSteven Rostedt goto again; 23587a8e76a3SSteven Rostedt 23597a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 23607a8e76a3SSteven Rostedt /* FIXME: not implemented */ 2361d769041fSSteven Rostedt rb_advance_reader(cpu_buffer); 23627a8e76a3SSteven Rostedt goto again; 23637a8e76a3SSteven Rostedt 23647a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 23657a8e76a3SSteven Rostedt if (ts) { 23667a8e76a3SSteven Rostedt *ts = cpu_buffer->read_stamp + event->time_delta; 236737886f6aSSteven Rostedt ring_buffer_normalize_time_stamp(buffer, 236837886f6aSSteven Rostedt cpu_buffer->cpu, ts); 23697a8e76a3SSteven Rostedt } 23707a8e76a3SSteven Rostedt return event; 23717a8e76a3SSteven Rostedt 23727a8e76a3SSteven Rostedt default: 23737a8e76a3SSteven Rostedt BUG(); 23747a8e76a3SSteven Rostedt } 23757a8e76a3SSteven Rostedt 23767a8e76a3SSteven Rostedt return NULL; 23777a8e76a3SSteven Rostedt } 2378c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_peek); 23797a8e76a3SSteven Rostedt 2380f83c9d0fSSteven Rostedt static struct ring_buffer_event * 2381f83c9d0fSSteven Rostedt rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 23827a8e76a3SSteven Rostedt { 23837a8e76a3SSteven Rostedt struct ring_buffer *buffer; 23847a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 23857a8e76a3SSteven Rostedt struct ring_buffer_event *event; 2386818e3dd3SSteven Rostedt int nr_loops = 0; 23877a8e76a3SSteven Rostedt 23887a8e76a3SSteven Rostedt if (ring_buffer_iter_empty(iter)) 23897a8e76a3SSteven Rostedt return NULL; 23907a8e76a3SSteven Rostedt 23917a8e76a3SSteven Rostedt cpu_buffer = iter->cpu_buffer; 23927a8e76a3SSteven Rostedt buffer = cpu_buffer->buffer; 23937a8e76a3SSteven Rostedt 23947a8e76a3SSteven Rostedt again: 2395818e3dd3SSteven Rostedt /* 2396ea05b57cSSteven Rostedt * We repeat when a timestamp is encountered. 2397ea05b57cSSteven Rostedt * We can get multiple timestamps by nested interrupts or also 2398ea05b57cSSteven Rostedt * if filtering is on (discarding commits). Since discarding 2399ea05b57cSSteven Rostedt * commits can be frequent we can get a lot of timestamps. 2400ea05b57cSSteven Rostedt * But we limit them by not adding timestamps if they begin 2401ea05b57cSSteven Rostedt * at the start of a page. 
2402818e3dd3SSteven Rostedt */ 2403ea05b57cSSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE)) 2404818e3dd3SSteven Rostedt return NULL; 2405818e3dd3SSteven Rostedt 24067a8e76a3SSteven Rostedt if (rb_per_cpu_empty(cpu_buffer)) 24077a8e76a3SSteven Rostedt return NULL; 24087a8e76a3SSteven Rostedt 24097a8e76a3SSteven Rostedt event = rb_iter_head_event(iter); 24107a8e76a3SSteven Rostedt 2411334d4169SLai Jiangshan switch (event->type_len) { 24127a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 24132d622719STom Zanussi if (rb_null_event(event)) { 2414d769041fSSteven Rostedt rb_inc_iter(iter); 24157a8e76a3SSteven Rostedt goto again; 24162d622719STom Zanussi } 24172d622719STom Zanussi rb_advance_iter(iter); 24182d622719STom Zanussi return event; 24197a8e76a3SSteven Rostedt 24207a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 24217a8e76a3SSteven Rostedt /* Internal data, OK to advance */ 24227a8e76a3SSteven Rostedt rb_advance_iter(iter); 24237a8e76a3SSteven Rostedt goto again; 24247a8e76a3SSteven Rostedt 24257a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 24267a8e76a3SSteven Rostedt /* FIXME: not implemented */ 24277a8e76a3SSteven Rostedt rb_advance_iter(iter); 24287a8e76a3SSteven Rostedt goto again; 24297a8e76a3SSteven Rostedt 24307a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 24317a8e76a3SSteven Rostedt if (ts) { 24327a8e76a3SSteven Rostedt *ts = iter->read_stamp + event->time_delta; 243337886f6aSSteven Rostedt ring_buffer_normalize_time_stamp(buffer, 243437886f6aSSteven Rostedt cpu_buffer->cpu, ts); 24357a8e76a3SSteven Rostedt } 24367a8e76a3SSteven Rostedt return event; 24377a8e76a3SSteven Rostedt 24387a8e76a3SSteven Rostedt default: 24397a8e76a3SSteven Rostedt BUG(); 24407a8e76a3SSteven Rostedt } 24417a8e76a3SSteven Rostedt 24427a8e76a3SSteven Rostedt return NULL; 24437a8e76a3SSteven Rostedt } 2444c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); 24457a8e76a3SSteven Rostedt 24467a8e76a3SSteven Rostedt /** 2447f83c9d0fSSteven Rostedt * ring_buffer_peek - peek at the next event to be read 2448f83c9d0fSSteven Rostedt * @buffer: The ring buffer to read 2449f83c9d0fSSteven Rostedt * @cpu: The cpu to peek at 2450f83c9d0fSSteven Rostedt * @ts: The timestamp counter of this event. 2451f83c9d0fSSteven Rostedt * 2452f83c9d0fSSteven Rostedt * This will return the event that will be read next, but does 2453f83c9d0fSSteven Rostedt * not consume the data.
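 *
 * A minimal illustrative sketch (my_cpu, my_ts and process() are
 * placeholder names for this example, not part of the API). The
 * event is only inspected; a later consuming read will still return it:
 *
 *	event = ring_buffer_peek(buffer, my_cpu, &my_ts);
 *	if (event)
 *		process(ring_buffer_event_data(event), my_ts);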
2454f83c9d0fSSteven Rostedt */ 2455f83c9d0fSSteven Rostedt struct ring_buffer_event * 2456f83c9d0fSSteven Rostedt ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) 2457f83c9d0fSSteven Rostedt { 2458f83c9d0fSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 24598aabee57SSteven Rostedt struct ring_buffer_event *event; 2460f83c9d0fSSteven Rostedt unsigned long flags; 2461f83c9d0fSSteven Rostedt 2462554f786eSSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask)) 24638aabee57SSteven Rostedt return NULL; 2464554f786eSSteven Rostedt 24652d622719STom Zanussi again: 2466f83c9d0fSSteven Rostedt spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2467f83c9d0fSSteven Rostedt event = rb_buffer_peek(buffer, cpu, ts); 2468f83c9d0fSSteven Rostedt spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2469f83c9d0fSSteven Rostedt 2470334d4169SLai Jiangshan if (event && event->type_len == RINGBUF_TYPE_PADDING) { 24712d622719STom Zanussi cpu_relax(); 24722d622719STom Zanussi goto again; 24732d622719STom Zanussi } 24742d622719STom Zanussi 2475f83c9d0fSSteven Rostedt return event; 2476f83c9d0fSSteven Rostedt } 2477f83c9d0fSSteven Rostedt 2478f83c9d0fSSteven Rostedt /** 2479f83c9d0fSSteven Rostedt * ring_buffer_iter_peek - peek at the next event to be read 2480f83c9d0fSSteven Rostedt * @iter: The ring buffer iterator 2481f83c9d0fSSteven Rostedt * @ts: The timestamp counter of this event. 2482f83c9d0fSSteven Rostedt * 2483f83c9d0fSSteven Rostedt * This will return the event that will be read next, but does 2484f83c9d0fSSteven Rostedt * not increment the iterator. 2485f83c9d0fSSteven Rostedt */ 2486f83c9d0fSSteven Rostedt struct ring_buffer_event * 2487f83c9d0fSSteven Rostedt ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 2488f83c9d0fSSteven Rostedt { 2489f83c9d0fSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 2490f83c9d0fSSteven Rostedt struct ring_buffer_event *event; 2491f83c9d0fSSteven Rostedt unsigned long flags; 2492f83c9d0fSSteven Rostedt 24932d622719STom Zanussi again: 2494f83c9d0fSSteven Rostedt spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2495f83c9d0fSSteven Rostedt event = rb_iter_peek(iter, ts); 2496f83c9d0fSSteven Rostedt spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2497f83c9d0fSSteven Rostedt 2498334d4169SLai Jiangshan if (event && event->type_len == RINGBUF_TYPE_PADDING) { 24992d622719STom Zanussi cpu_relax(); 25002d622719STom Zanussi goto again; 25012d622719STom Zanussi } 25022d622719STom Zanussi 2503f83c9d0fSSteven Rostedt return event; 2504f83c9d0fSSteven Rostedt } 2505f83c9d0fSSteven Rostedt 2506f83c9d0fSSteven Rostedt /** 25077a8e76a3SSteven Rostedt * ring_buffer_consume - return an event and consume it 25087a8e76a3SSteven Rostedt * @buffer: The ring buffer to get the next event from 25097a8e76a3SSteven Rostedt * 25107a8e76a3SSteven Rostedt * Returns the next event in the ring buffer, and that event is consumed. 25117a8e76a3SSteven Rostedt * Meaning, that sequential reads will keep returning a different event, 25127a8e76a3SSteven Rostedt * and eventually empty the ring buffer if the producer is slower. 
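 *
 * For example, a caller might drain the events of one CPU with a loop
 * like the sketch below (illustrative only; my_cpu, my_ts and
 * process() are placeholder names). The third argument, when
 * non-NULL, receives the time stamp of each returned event:
 *
 *	while ((event = ring_buffer_consume(buffer, my_cpu, &my_ts)))
 *		process(ring_buffer_event_data(event), my_ts);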
25137a8e76a3SSteven Rostedt */ 25147a8e76a3SSteven Rostedt struct ring_buffer_event * 25157a8e76a3SSteven Rostedt ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) 25167a8e76a3SSteven Rostedt { 2517554f786eSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 2518554f786eSSteven Rostedt struct ring_buffer_event *event = NULL; 2519f83c9d0fSSteven Rostedt unsigned long flags; 25207a8e76a3SSteven Rostedt 25212d622719STom Zanussi again: 2522554f786eSSteven Rostedt /* might be called in atomic */ 2523554f786eSSteven Rostedt preempt_disable(); 25247a8e76a3SSteven Rostedt 2525554f786eSSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2526554f786eSSteven Rostedt goto out; 2527554f786eSSteven Rostedt 2528554f786eSSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 2529f83c9d0fSSteven Rostedt spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 25307a8e76a3SSteven Rostedt 2531f83c9d0fSSteven Rostedt event = rb_buffer_peek(buffer, cpu, ts); 2532f83c9d0fSSteven Rostedt if (!event) 2533554f786eSSteven Rostedt goto out_unlock; 2534f83c9d0fSSteven Rostedt 2535d769041fSSteven Rostedt rb_advance_reader(cpu_buffer); 25367a8e76a3SSteven Rostedt 2537554f786eSSteven Rostedt out_unlock: 2538f83c9d0fSSteven Rostedt spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2539f83c9d0fSSteven Rostedt 2540554f786eSSteven Rostedt out: 2541554f786eSSteven Rostedt preempt_enable(); 2542554f786eSSteven Rostedt 2543334d4169SLai Jiangshan if (event && event->type_len == RINGBUF_TYPE_PADDING) { 25442d622719STom Zanussi cpu_relax(); 25452d622719STom Zanussi goto again; 25462d622719STom Zanussi } 25472d622719STom Zanussi 25487a8e76a3SSteven Rostedt return event; 25497a8e76a3SSteven Rostedt } 2550c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_consume); 25517a8e76a3SSteven Rostedt 25527a8e76a3SSteven Rostedt /** 25537a8e76a3SSteven Rostedt * ring_buffer_read_start - start a non consuming read of the buffer 25547a8e76a3SSteven Rostedt * @buffer: The ring buffer to read from 25557a8e76a3SSteven Rostedt * @cpu: The cpu buffer to iterate over 25567a8e76a3SSteven Rostedt * 25577a8e76a3SSteven Rostedt * This starts up an iteration through the buffer. It also disables 25587a8e76a3SSteven Rostedt * the recording to the buffer until the reading is finished. 25597a8e76a3SSteven Rostedt * This prevents the reading from being corrupted. This is not 25607a8e76a3SSteven Rostedt * a consuming read, so a producer is not expected. 25617a8e76a3SSteven Rostedt * 25627a8e76a3SSteven Rostedt * Must be paired with ring_buffer_read_finish.
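 *
 * A non-consuming read could then look like the sketch below
 * (illustrative only; my_cpu, my_ts and process() are placeholder
 * names). ring_buffer_read() returns and advances past one event at
 * a time, and ring_buffer_read_finish() re-enables recording:
 *
 *	iter = ring_buffer_read_start(buffer, my_cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &my_ts)))
 *		process(ring_buffer_event_data(event), my_ts);
 *	ring_buffer_read_finish(iter);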
25637a8e76a3SSteven Rostedt */ 25647a8e76a3SSteven Rostedt struct ring_buffer_iter * 25657a8e76a3SSteven Rostedt ring_buffer_read_start(struct ring_buffer *buffer, int cpu) 25667a8e76a3SSteven Rostedt { 25677a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 25688aabee57SSteven Rostedt struct ring_buffer_iter *iter; 2569d769041fSSteven Rostedt unsigned long flags; 25707a8e76a3SSteven Rostedt 25719e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 25728aabee57SSteven Rostedt return NULL; 25737a8e76a3SSteven Rostedt 25747a8e76a3SSteven Rostedt iter = kmalloc(sizeof(*iter), GFP_KERNEL); 25757a8e76a3SSteven Rostedt if (!iter) 25768aabee57SSteven Rostedt return NULL; 25777a8e76a3SSteven Rostedt 25787a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 25797a8e76a3SSteven Rostedt 25807a8e76a3SSteven Rostedt iter->cpu_buffer = cpu_buffer; 25817a8e76a3SSteven Rostedt 25827a8e76a3SSteven Rostedt atomic_inc(&cpu_buffer->record_disabled); 25837a8e76a3SSteven Rostedt synchronize_sched(); 25847a8e76a3SSteven Rostedt 2585f83c9d0fSSteven Rostedt spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 25863e03fb7fSSteven Rostedt __raw_spin_lock(&cpu_buffer->lock); 2587642edba5SSteven Rostedt rb_iter_reset(iter); 25883e03fb7fSSteven Rostedt __raw_spin_unlock(&cpu_buffer->lock); 2589f83c9d0fSSteven Rostedt spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 25907a8e76a3SSteven Rostedt 25917a8e76a3SSteven Rostedt return iter; 25927a8e76a3SSteven Rostedt } 2593c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_start); 25947a8e76a3SSteven Rostedt 25957a8e76a3SSteven Rostedt /** 25967a8e76a3SSteven Rostedt * ring_buffer_read_finish - finish reading the iterator of the buffer 25977a8e76a3SSteven Rostedt * @iter: The iterator retrieved by ring_buffer_read_start 25987a8e76a3SSteven Rostedt * 25997a8e76a3SSteven Rostedt * This re-enables the recording to the buffer, and frees the 26007a8e76a3SSteven Rostedt * iterator. 26017a8e76a3SSteven Rostedt */ 26027a8e76a3SSteven Rostedt void 26037a8e76a3SSteven Rostedt ring_buffer_read_finish(struct ring_buffer_iter *iter) 26047a8e76a3SSteven Rostedt { 26057a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 26067a8e76a3SSteven Rostedt 26077a8e76a3SSteven Rostedt atomic_dec(&cpu_buffer->record_disabled); 26087a8e76a3SSteven Rostedt kfree(iter); 26097a8e76a3SSteven Rostedt } 2610c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_finish); 26117a8e76a3SSteven Rostedt 26127a8e76a3SSteven Rostedt /** 26137a8e76a3SSteven Rostedt * ring_buffer_read - read the next item in the ring buffer by the iterator 26147a8e76a3SSteven Rostedt * @iter: The ring buffer iterator 26157a8e76a3SSteven Rostedt * @ts: The time stamp of the event read. 26167a8e76a3SSteven Rostedt * 26177a8e76a3SSteven Rostedt * This reads the next event in the ring buffer and increments the iterator.
26187a8e76a3SSteven Rostedt */ 26197a8e76a3SSteven Rostedt struct ring_buffer_event * 26207a8e76a3SSteven Rostedt ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) 26217a8e76a3SSteven Rostedt { 26227a8e76a3SSteven Rostedt struct ring_buffer_event *event; 2623f83c9d0fSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 2624f83c9d0fSSteven Rostedt unsigned long flags; 26257a8e76a3SSteven Rostedt 26262d622719STom Zanussi again: 2627f83c9d0fSSteven Rostedt spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2628f83c9d0fSSteven Rostedt event = rb_iter_peek(iter, ts); 26297a8e76a3SSteven Rostedt if (!event) 2630f83c9d0fSSteven Rostedt goto out; 26317a8e76a3SSteven Rostedt 26327a8e76a3SSteven Rostedt rb_advance_iter(iter); 2633f83c9d0fSSteven Rostedt out: 2634f83c9d0fSSteven Rostedt spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 26357a8e76a3SSteven Rostedt 2636334d4169SLai Jiangshan if (event && event->type_len == RINGBUF_TYPE_PADDING) { 26372d622719STom Zanussi cpu_relax(); 26382d622719STom Zanussi goto again; 26392d622719STom Zanussi } 26402d622719STom Zanussi 26417a8e76a3SSteven Rostedt return event; 26427a8e76a3SSteven Rostedt } 2643c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read); 26447a8e76a3SSteven Rostedt 26457a8e76a3SSteven Rostedt /** 26467a8e76a3SSteven Rostedt * ring_buffer_size - return the size of the ring buffer (in bytes) 26477a8e76a3SSteven Rostedt * @buffer: The ring buffer. 26487a8e76a3SSteven Rostedt */ 26497a8e76a3SSteven Rostedt unsigned long ring_buffer_size(struct ring_buffer *buffer) 26507a8e76a3SSteven Rostedt { 26517a8e76a3SSteven Rostedt return BUF_PAGE_SIZE * buffer->pages; 26527a8e76a3SSteven Rostedt } 2653c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_size); 26547a8e76a3SSteven Rostedt 26557a8e76a3SSteven Rostedt static void 26567a8e76a3SSteven Rostedt rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 26577a8e76a3SSteven Rostedt { 26587a8e76a3SSteven Rostedt cpu_buffer->head_page 26597a8e76a3SSteven Rostedt = list_entry(cpu_buffer->pages.next, struct buffer_page, list); 2660bf41a158SSteven Rostedt local_set(&cpu_buffer->head_page->write, 0); 2661778c55d4SSteven Rostedt local_set(&cpu_buffer->head_page->entries, 0); 2662abc9b56dSSteven Rostedt local_set(&cpu_buffer->head_page->page->commit, 0); 26637a8e76a3SSteven Rostedt 26646f807acdSSteven Rostedt cpu_buffer->head_page->read = 0; 2665bf41a158SSteven Rostedt 2666bf41a158SSteven Rostedt cpu_buffer->tail_page = cpu_buffer->head_page; 2667bf41a158SSteven Rostedt cpu_buffer->commit_page = cpu_buffer->head_page; 2668bf41a158SSteven Rostedt 2669bf41a158SSteven Rostedt INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 2670bf41a158SSteven Rostedt local_set(&cpu_buffer->reader_page->write, 0); 2671778c55d4SSteven Rostedt local_set(&cpu_buffer->reader_page->entries, 0); 2672abc9b56dSSteven Rostedt local_set(&cpu_buffer->reader_page->page->commit, 0); 26736f807acdSSteven Rostedt cpu_buffer->reader_page->read = 0; 2674d769041fSSteven Rostedt 2675f0d2c681SSteven Rostedt cpu_buffer->nmi_dropped = 0; 2676f0d2c681SSteven Rostedt cpu_buffer->commit_overrun = 0; 26777a8e76a3SSteven Rostedt cpu_buffer->overrun = 0; 2678e4906effSSteven Rostedt cpu_buffer->read = 0; 2679e4906effSSteven Rostedt local_set(&cpu_buffer->entries, 0); 268069507c06SSteven Rostedt 268169507c06SSteven Rostedt cpu_buffer->write_stamp = 0; 268269507c06SSteven Rostedt cpu_buffer->read_stamp = 0; 26837a8e76a3SSteven Rostedt } 26847a8e76a3SSteven Rostedt 26857a8e76a3SSteven Rostedt /** 26867a8e76a3SSteven 
Rostedt * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer 26877a8e76a3SSteven Rostedt * @buffer: The ring buffer to reset a per cpu buffer of 26887a8e76a3SSteven Rostedt * @cpu: The CPU buffer to be reset 26897a8e76a3SSteven Rostedt */ 26907a8e76a3SSteven Rostedt void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) 26917a8e76a3SSteven Rostedt { 26927a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 26937a8e76a3SSteven Rostedt unsigned long flags; 26947a8e76a3SSteven Rostedt 26959e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 26968aabee57SSteven Rostedt return; 26977a8e76a3SSteven Rostedt 269841ede23eSSteven Rostedt atomic_inc(&cpu_buffer->record_disabled); 269941ede23eSSteven Rostedt 2700f83c9d0fSSteven Rostedt spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2701f83c9d0fSSteven Rostedt 27023e03fb7fSSteven Rostedt __raw_spin_lock(&cpu_buffer->lock); 27037a8e76a3SSteven Rostedt 27047a8e76a3SSteven Rostedt rb_reset_cpu(cpu_buffer); 27057a8e76a3SSteven Rostedt 27063e03fb7fSSteven Rostedt __raw_spin_unlock(&cpu_buffer->lock); 2707f83c9d0fSSteven Rostedt 2708f83c9d0fSSteven Rostedt spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 270941ede23eSSteven Rostedt 271041ede23eSSteven Rostedt atomic_dec(&cpu_buffer->record_disabled); 27117a8e76a3SSteven Rostedt } 2712c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); 27137a8e76a3SSteven Rostedt 27147a8e76a3SSteven Rostedt /** 27157a8e76a3SSteven Rostedt * ring_buffer_reset - reset a ring buffer 27167a8e76a3SSteven Rostedt * @buffer: The ring buffer to reset all cpu buffers 27177a8e76a3SSteven Rostedt */ 27187a8e76a3SSteven Rostedt void ring_buffer_reset(struct ring_buffer *buffer) 27197a8e76a3SSteven Rostedt { 27207a8e76a3SSteven Rostedt int cpu; 27217a8e76a3SSteven Rostedt 27227a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) 2723d769041fSSteven Rostedt ring_buffer_reset_cpu(buffer, cpu); 27247a8e76a3SSteven Rostedt } 2725c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset); 27267a8e76a3SSteven Rostedt 27277a8e76a3SSteven Rostedt /** 27287a8e76a3SSteven Rostedt * ring_buffer_empty - is the ring buffer empty? 27297a8e76a3SSteven Rostedt * @buffer: The ring buffer to test 27307a8e76a3SSteven Rostedt */ 27317a8e76a3SSteven Rostedt int ring_buffer_empty(struct ring_buffer *buffer) 27327a8e76a3SSteven Rostedt { 27337a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 27347a8e76a3SSteven Rostedt int cpu; 27357a8e76a3SSteven Rostedt 27367a8e76a3SSteven Rostedt /* yes this is racy, but if you don't like the race, lock the buffer */ 27377a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 27387a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 27397a8e76a3SSteven Rostedt if (!rb_per_cpu_empty(cpu_buffer)) 27407a8e76a3SSteven Rostedt return 0; 27417a8e76a3SSteven Rostedt } 2742554f786eSSteven Rostedt 27437a8e76a3SSteven Rostedt return 1; 27447a8e76a3SSteven Rostedt } 2745c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty); 27467a8e76a3SSteven Rostedt 27477a8e76a3SSteven Rostedt /** 27487a8e76a3SSteven Rostedt * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
27497a8e76a3SSteven Rostedt * @buffer: The ring buffer 27507a8e76a3SSteven Rostedt * @cpu: The CPU buffer to test 27517a8e76a3SSteven Rostedt */ 27527a8e76a3SSteven Rostedt int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) 27537a8e76a3SSteven Rostedt { 27547a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 27558aabee57SSteven Rostedt int ret; 27567a8e76a3SSteven Rostedt 27579e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 27588aabee57SSteven Rostedt return 1; 27597a8e76a3SSteven Rostedt 27607a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 2761554f786eSSteven Rostedt ret = rb_per_cpu_empty(cpu_buffer); 2762554f786eSSteven Rostedt 2763554f786eSSteven Rostedt 2764554f786eSSteven Rostedt return ret; 27657a8e76a3SSteven Rostedt } 2766c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); 27677a8e76a3SSteven Rostedt 27687a8e76a3SSteven Rostedt /** 27697a8e76a3SSteven Rostedt * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 27707a8e76a3SSteven Rostedt * @buffer_a: One buffer to swap with 27717a8e76a3SSteven Rostedt * @buffer_b: The other buffer to swap with 27727a8e76a3SSteven Rostedt * 27737a8e76a3SSteven Rostedt * This function is useful for tracers that want to take a "snapshot" 27747a8e76a3SSteven Rostedt * of a CPU buffer and has another back up buffer lying around. 27757a8e76a3SSteven Rostedt * it is expected that the tracer handles the cpu buffer not being 27767a8e76a3SSteven Rostedt * used at the moment. 27777a8e76a3SSteven Rostedt */ 27787a8e76a3SSteven Rostedt int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, 27797a8e76a3SSteven Rostedt struct ring_buffer *buffer_b, int cpu) 27807a8e76a3SSteven Rostedt { 27817a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer_a; 27827a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer_b; 2783554f786eSSteven Rostedt int ret = -EINVAL; 2784554f786eSSteven Rostedt 27859e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 27869e01c1b7SRusty Russell !cpumask_test_cpu(cpu, buffer_b->cpumask)) 2787554f786eSSteven Rostedt goto out; 27887a8e76a3SSteven Rostedt 27897a8e76a3SSteven Rostedt /* At least make sure the two buffers are somewhat the same */ 27906d102bc6SLai Jiangshan if (buffer_a->pages != buffer_b->pages) 2791554f786eSSteven Rostedt goto out; 2792554f786eSSteven Rostedt 2793554f786eSSteven Rostedt ret = -EAGAIN; 27947a8e76a3SSteven Rostedt 279597b17efeSSteven Rostedt if (ring_buffer_flags != RB_BUFFERS_ON) 2796554f786eSSteven Rostedt goto out; 279797b17efeSSteven Rostedt 279897b17efeSSteven Rostedt if (atomic_read(&buffer_a->record_disabled)) 2799554f786eSSteven Rostedt goto out; 280097b17efeSSteven Rostedt 280197b17efeSSteven Rostedt if (atomic_read(&buffer_b->record_disabled)) 2802554f786eSSteven Rostedt goto out; 280397b17efeSSteven Rostedt 28047a8e76a3SSteven Rostedt cpu_buffer_a = buffer_a->buffers[cpu]; 28057a8e76a3SSteven Rostedt cpu_buffer_b = buffer_b->buffers[cpu]; 28067a8e76a3SSteven Rostedt 280797b17efeSSteven Rostedt if (atomic_read(&cpu_buffer_a->record_disabled)) 2808554f786eSSteven Rostedt goto out; 280997b17efeSSteven Rostedt 281097b17efeSSteven Rostedt if (atomic_read(&cpu_buffer_b->record_disabled)) 2811554f786eSSteven Rostedt goto out; 281297b17efeSSteven Rostedt 28137a8e76a3SSteven Rostedt /* 28147a8e76a3SSteven Rostedt * We can't do a synchronize_sched here because this 28157a8e76a3SSteven Rostedt * function can be called in atomic context. 
28167a8e76a3SSteven Rostedt * Normally this will be called from the same CPU as cpu. 28177a8e76a3SSteven Rostedt * If not it's up to the caller to protect this. 28187a8e76a3SSteven Rostedt */ 28197a8e76a3SSteven Rostedt atomic_inc(&cpu_buffer_a->record_disabled); 28207a8e76a3SSteven Rostedt atomic_inc(&cpu_buffer_b->record_disabled); 28217a8e76a3SSteven Rostedt 28227a8e76a3SSteven Rostedt buffer_a->buffers[cpu] = cpu_buffer_b; 28237a8e76a3SSteven Rostedt buffer_b->buffers[cpu] = cpu_buffer_a; 28247a8e76a3SSteven Rostedt 28257a8e76a3SSteven Rostedt cpu_buffer_b->buffer = buffer_a; 28267a8e76a3SSteven Rostedt cpu_buffer_a->buffer = buffer_b; 28277a8e76a3SSteven Rostedt 28287a8e76a3SSteven Rostedt atomic_dec(&cpu_buffer_a->record_disabled); 28297a8e76a3SSteven Rostedt atomic_dec(&cpu_buffer_b->record_disabled); 28307a8e76a3SSteven Rostedt 2831554f786eSSteven Rostedt ret = 0; 2832554f786eSSteven Rostedt out: 2833554f786eSSteven Rostedt return ret; 28347a8e76a3SSteven Rostedt } 2835c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); 28367a8e76a3SSteven Rostedt 28378789a9e7SSteven Rostedt /** 28388789a9e7SSteven Rostedt * ring_buffer_alloc_read_page - allocate a page to read from buffer 28398789a9e7SSteven Rostedt * @buffer: the buffer to allocate for. 28408789a9e7SSteven Rostedt * 28418789a9e7SSteven Rostedt * This function is used in conjunction with ring_buffer_read_page. 28428789a9e7SSteven Rostedt * When reading a full page from the ring buffer, these functions 28438789a9e7SSteven Rostedt * can be used to speed up the process. The calling function should 28448789a9e7SSteven Rostedt * allocate a few pages first with this function. Then when it 28458789a9e7SSteven Rostedt * needs to get pages from the ring buffer, it passes the result 28468789a9e7SSteven Rostedt * of this function into ring_buffer_read_page, which will swap 28478789a9e7SSteven Rostedt * the page that was allocated, with the read page of the buffer. 28488789a9e7SSteven Rostedt * 28498789a9e7SSteven Rostedt * Returns: 28508789a9e7SSteven Rostedt * The page allocated, or NULL on error. 28518789a9e7SSteven Rostedt */ 28528789a9e7SSteven Rostedt void *ring_buffer_alloc_read_page(struct ring_buffer *buffer) 28538789a9e7SSteven Rostedt { 2854044fa782SSteven Rostedt struct buffer_data_page *bpage; 2855ef7a4a16SSteven Rostedt unsigned long addr; 28568789a9e7SSteven Rostedt 28578789a9e7SSteven Rostedt addr = __get_free_page(GFP_KERNEL); 28588789a9e7SSteven Rostedt if (!addr) 28598789a9e7SSteven Rostedt return NULL; 28608789a9e7SSteven Rostedt 2861044fa782SSteven Rostedt bpage = (void *)addr; 28628789a9e7SSteven Rostedt 2863ef7a4a16SSteven Rostedt rb_init_page(bpage); 2864ef7a4a16SSteven Rostedt 2865044fa782SSteven Rostedt return bpage; 28668789a9e7SSteven Rostedt } 2867d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); 28688789a9e7SSteven Rostedt 28698789a9e7SSteven Rostedt /** 28708789a9e7SSteven Rostedt * ring_buffer_free_read_page - free an allocated read page 28718789a9e7SSteven Rostedt * @buffer: the buffer the page was allocate for 28728789a9e7SSteven Rostedt * @data: the page to free 28738789a9e7SSteven Rostedt * 28748789a9e7SSteven Rostedt * Free a page allocated from ring_buffer_alloc_read_page. 
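 *
 * Illustrative pairing with the allocation and read side (error
 * handling trimmed; cpu and rpage are placeholder names used only
 * in this sketch):
 *
 *	rpage = ring_buffer_alloc_read_page(buffer);
 *	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
 *	ring_buffer_free_read_page(buffer, rpage);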
28758789a9e7SSteven Rostedt */ 28768789a9e7SSteven Rostedt void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) 28778789a9e7SSteven Rostedt { 28788789a9e7SSteven Rostedt free_page((unsigned long)data); 28798789a9e7SSteven Rostedt } 2880d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); 28818789a9e7SSteven Rostedt 28828789a9e7SSteven Rostedt /** 28838789a9e7SSteven Rostedt * ring_buffer_read_page - extract a page from the ring buffer 28848789a9e7SSteven Rostedt * @buffer: buffer to extract from 28858789a9e7SSteven Rostedt * @data_page: the page to use allocated from ring_buffer_alloc_read_page 2886ef7a4a16SSteven Rostedt * @len: amount to extract 28878789a9e7SSteven Rostedt * @cpu: the cpu of the buffer to extract 28888789a9e7SSteven Rostedt * @full: should the extraction only happen when the page is full. 28898789a9e7SSteven Rostedt * 28908789a9e7SSteven Rostedt * This function will pull out a page from the ring buffer and consume it. 28918789a9e7SSteven Rostedt * @data_page must be the address of the variable that was returned 28928789a9e7SSteven Rostedt * from ring_buffer_alloc_read_page. This is because the page might be used 28938789a9e7SSteven Rostedt * to swap with a page in the ring buffer. 28948789a9e7SSteven Rostedt * 28958789a9e7SSteven Rostedt * for example: 2896b85fa01eSLai Jiangshan * rpage = ring_buffer_alloc_read_page(buffer); 28978789a9e7SSteven Rostedt * if (!rpage) 28988789a9e7SSteven Rostedt * return error; 2899ef7a4a16SSteven Rostedt * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); 2900667d2412SLai Jiangshan * if (ret >= 0) 2901667d2412SLai Jiangshan * process_page(rpage, ret); 29028789a9e7SSteven Rostedt * 29038789a9e7SSteven Rostedt * When @full is set, the function will not return true unless 29048789a9e7SSteven Rostedt * the writer is off the reader page. 29058789a9e7SSteven Rostedt * 29068789a9e7SSteven Rostedt * Note: it is up to the calling functions to handle sleeps and wakeups. 29078789a9e7SSteven Rostedt * The ring buffer can be used anywhere in the kernel and can not 29088789a9e7SSteven Rostedt * blindly call wake_up. The layer that uses the ring buffer must be 29098789a9e7SSteven Rostedt * responsible for that. 29108789a9e7SSteven Rostedt * 29118789a9e7SSteven Rostedt * Returns: 2912667d2412SLai Jiangshan * >=0 if data has been transferred, returns the offset of consumed data. 2913667d2412SLai Jiangshan * <0 if no data has been transferred. 29148789a9e7SSteven Rostedt */ 29158789a9e7SSteven Rostedt int ring_buffer_read_page(struct ring_buffer *buffer, 2916ef7a4a16SSteven Rostedt void **data_page, size_t len, int cpu, int full) 29178789a9e7SSteven Rostedt { 29188789a9e7SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 29198789a9e7SSteven Rostedt struct ring_buffer_event *event; 2920044fa782SSteven Rostedt struct buffer_data_page *bpage; 2921ef7a4a16SSteven Rostedt struct buffer_page *reader; 29228789a9e7SSteven Rostedt unsigned long flags; 2923ef7a4a16SSteven Rostedt unsigned int commit; 2924667d2412SLai Jiangshan unsigned int read; 29254f3640f8SSteven Rostedt u64 save_timestamp; 2926667d2412SLai Jiangshan int ret = -1; 29278789a9e7SSteven Rostedt 2928554f786eSSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask)) 2929554f786eSSteven Rostedt goto out; 2930554f786eSSteven Rostedt 2931474d32b6SSteven Rostedt /* 2932474d32b6SSteven Rostedt * If len is not big enough to hold the page header, then 2933474d32b6SSteven Rostedt * we can not copy anything. 
2934474d32b6SSteven Rostedt */ 2935474d32b6SSteven Rostedt if (len <= BUF_PAGE_HDR_SIZE) 2936554f786eSSteven Rostedt goto out; 2937474d32b6SSteven Rostedt 2938474d32b6SSteven Rostedt len -= BUF_PAGE_HDR_SIZE; 2939474d32b6SSteven Rostedt 29408789a9e7SSteven Rostedt if (!data_page) 2941554f786eSSteven Rostedt goto out; 29428789a9e7SSteven Rostedt 2943044fa782SSteven Rostedt bpage = *data_page; 2944044fa782SSteven Rostedt if (!bpage) 2945554f786eSSteven Rostedt goto out; 29468789a9e7SSteven Rostedt 29478789a9e7SSteven Rostedt spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 29488789a9e7SSteven Rostedt 2949ef7a4a16SSteven Rostedt reader = rb_get_reader_page(cpu_buffer); 2950ef7a4a16SSteven Rostedt if (!reader) 2951554f786eSSteven Rostedt goto out_unlock; 29528789a9e7SSteven Rostedt 2953ef7a4a16SSteven Rostedt event = rb_reader_event(cpu_buffer); 2954667d2412SLai Jiangshan 2955ef7a4a16SSteven Rostedt read = reader->read; 2956ef7a4a16SSteven Rostedt commit = rb_page_commit(reader); 2957ef7a4a16SSteven Rostedt 29588789a9e7SSteven Rostedt /* 2959474d32b6SSteven Rostedt * If this page has been partially read or 2960474d32b6SSteven Rostedt * if len is not big enough to read the rest of the page or 2961474d32b6SSteven Rostedt * a writer is still on the page, then 2962474d32b6SSteven Rostedt * we must copy the data from the page to the buffer. 2963474d32b6SSteven Rostedt * Otherwise, we can simply swap the page with the one passed in. 29648789a9e7SSteven Rostedt */ 2965474d32b6SSteven Rostedt if (read || (len < (commit - read)) || 2966ef7a4a16SSteven Rostedt cpu_buffer->reader_page == cpu_buffer->commit_page) { 2967667d2412SLai Jiangshan struct buffer_data_page *rpage = cpu_buffer->reader_page->page; 2968474d32b6SSteven Rostedt unsigned int rpos = read; 2969474d32b6SSteven Rostedt unsigned int pos = 0; 2970ef7a4a16SSteven Rostedt unsigned int size; 29718789a9e7SSteven Rostedt 29728789a9e7SSteven Rostedt if (full) 2973554f786eSSteven Rostedt goto out_unlock; 29748789a9e7SSteven Rostedt 2975ef7a4a16SSteven Rostedt if (len > (commit - read)) 2976ef7a4a16SSteven Rostedt len = (commit - read); 2977ef7a4a16SSteven Rostedt 2978ef7a4a16SSteven Rostedt size = rb_event_length(event); 2979ef7a4a16SSteven Rostedt 2980ef7a4a16SSteven Rostedt if (len < size) 2981554f786eSSteven Rostedt goto out_unlock; 2982ef7a4a16SSteven Rostedt 29834f3640f8SSteven Rostedt /* save the current timestamp, since the user will need it */ 29844f3640f8SSteven Rostedt save_timestamp = cpu_buffer->read_stamp; 29854f3640f8SSteven Rostedt 2986ef7a4a16SSteven Rostedt /* Need to copy one event at a time */ 2987ef7a4a16SSteven Rostedt do { 2988474d32b6SSteven Rostedt memcpy(bpage->data + pos, rpage->data + rpos, size); 2989ef7a4a16SSteven Rostedt 2990ef7a4a16SSteven Rostedt len -= size; 2991ef7a4a16SSteven Rostedt 2992ef7a4a16SSteven Rostedt rb_advance_reader(cpu_buffer); 2993474d32b6SSteven Rostedt rpos = reader->read; 2994474d32b6SSteven Rostedt pos += size; 2995ef7a4a16SSteven Rostedt 2996ef7a4a16SSteven Rostedt event = rb_reader_event(cpu_buffer); 2997ef7a4a16SSteven Rostedt size = rb_event_length(event); 2998ef7a4a16SSteven Rostedt } while (len > size); 2999667d2412SLai Jiangshan 3000667d2412SLai Jiangshan /* update bpage */ 3001ef7a4a16SSteven Rostedt local_set(&bpage->commit, pos); 30024f3640f8SSteven Rostedt bpage->time_stamp = save_timestamp; 3003ef7a4a16SSteven Rostedt 3004474d32b6SSteven Rostedt /* we copied everything to the beginning */ 3005474d32b6SSteven Rostedt read = 0; 30068789a9e7SSteven Rostedt } else { 
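		/*
		 * The reader page has not been partially read, @len can
		 * hold all of its data, and the writer is off the page:
		 * instead of copying each event, hand the whole reader
		 * page to the caller and splice the caller's page into
		 * the ring in its place.
		 */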
3007afbab76aSSteven Rostedt /* update the entry counter */ 3008afbab76aSSteven Rostedt cpu_buffer->read += local_read(&reader->entries); 3009afbab76aSSteven Rostedt 30108789a9e7SSteven Rostedt /* swap the pages */ 3011044fa782SSteven Rostedt rb_init_page(bpage); 3012ef7a4a16SSteven Rostedt bpage = reader->page; 3013ef7a4a16SSteven Rostedt reader->page = *data_page; 3014ef7a4a16SSteven Rostedt local_set(&reader->write, 0); 3015778c55d4SSteven Rostedt local_set(&reader->entries, 0); 3016ef7a4a16SSteven Rostedt reader->read = 0; 3017044fa782SSteven Rostedt *data_page = bpage; 3018ef7a4a16SSteven Rostedt } 3019ef7a4a16SSteven Rostedt ret = read; 3020ef7a4a16SSteven Rostedt 3021554f786eSSteven Rostedt out_unlock: 30228789a9e7SSteven Rostedt spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 30238789a9e7SSteven Rostedt 3024554f786eSSteven Rostedt out: 30258789a9e7SSteven Rostedt return ret; 30268789a9e7SSteven Rostedt } 3027d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_read_page); 30288789a9e7SSteven Rostedt 3029a3583244SSteven Rostedt static ssize_t 3030a3583244SSteven Rostedt rb_simple_read(struct file *filp, char __user *ubuf, 3031a3583244SSteven Rostedt size_t cnt, loff_t *ppos) 3032a3583244SSteven Rostedt { 30335e39841cSHannes Eder unsigned long *p = filp->private_data; 3034a3583244SSteven Rostedt char buf[64]; 3035a3583244SSteven Rostedt int r; 3036a3583244SSteven Rostedt 3037033601a3SSteven Rostedt if (test_bit(RB_BUFFERS_DISABLED_BIT, p)) 3038033601a3SSteven Rostedt r = sprintf(buf, "permanently disabled\n"); 3039033601a3SSteven Rostedt else 3040033601a3SSteven Rostedt r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p)); 3041a3583244SSteven Rostedt 3042a3583244SSteven Rostedt return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3043a3583244SSteven Rostedt } 3044a3583244SSteven Rostedt 3045a3583244SSteven Rostedt static ssize_t 3046a3583244SSteven Rostedt rb_simple_write(struct file *filp, const char __user *ubuf, 3047a3583244SSteven Rostedt size_t cnt, loff_t *ppos) 3048a3583244SSteven Rostedt { 30495e39841cSHannes Eder unsigned long *p = filp->private_data; 3050a3583244SSteven Rostedt char buf[64]; 30515e39841cSHannes Eder unsigned long val; 3052a3583244SSteven Rostedt int ret; 3053a3583244SSteven Rostedt 3054a3583244SSteven Rostedt if (cnt >= sizeof(buf)) 3055a3583244SSteven Rostedt return -EINVAL; 3056a3583244SSteven Rostedt 3057a3583244SSteven Rostedt if (copy_from_user(&buf, ubuf, cnt)) 3058a3583244SSteven Rostedt return -EFAULT; 3059a3583244SSteven Rostedt 3060a3583244SSteven Rostedt buf[cnt] = 0; 3061a3583244SSteven Rostedt 3062a3583244SSteven Rostedt ret = strict_strtoul(buf, 10, &val); 3063a3583244SSteven Rostedt if (ret < 0) 3064a3583244SSteven Rostedt return ret; 3065a3583244SSteven Rostedt 3066033601a3SSteven Rostedt if (val) 3067033601a3SSteven Rostedt set_bit(RB_BUFFERS_ON_BIT, p); 3068033601a3SSteven Rostedt else 3069033601a3SSteven Rostedt clear_bit(RB_BUFFERS_ON_BIT, p); 3070a3583244SSteven Rostedt 3071a3583244SSteven Rostedt (*ppos)++; 3072a3583244SSteven Rostedt 3073a3583244SSteven Rostedt return cnt; 3074a3583244SSteven Rostedt } 3075a3583244SSteven Rostedt 30765e2336a0SSteven Rostedt static const struct file_operations rb_simple_fops = { 3077a3583244SSteven Rostedt .open = tracing_open_generic, 3078a3583244SSteven Rostedt .read = rb_simple_read, 3079a3583244SSteven Rostedt .write = rb_simple_write, 3080a3583244SSteven Rostedt }; 3081a3583244SSteven Rostedt 3082a3583244SSteven Rostedt 3083a3583244SSteven Rostedt static __init int 
rb_init_debugfs(void) 3084a3583244SSteven Rostedt { 3085a3583244SSteven Rostedt struct dentry *d_tracer; 3086a3583244SSteven Rostedt 3087a3583244SSteven Rostedt d_tracer = tracing_init_dentry(); 3088a3583244SSteven Rostedt 30895452af66SFrederic Weisbecker trace_create_file("tracing_on", 0644, d_tracer, 3090033601a3SSteven Rostedt &ring_buffer_flags, &rb_simple_fops); 3091a3583244SSteven Rostedt 3092a3583244SSteven Rostedt return 0; 3093a3583244SSteven Rostedt } 3094a3583244SSteven Rostedt 3095a3583244SSteven Rostedt fs_initcall(rb_init_debugfs); 3096554f786eSSteven Rostedt 309759222efeSSteven Rostedt #ifdef CONFIG_HOTPLUG_CPU 309809c9e84dSFrederic Weisbecker static int rb_cpu_notify(struct notifier_block *self, 3099554f786eSSteven Rostedt unsigned long action, void *hcpu) 3100554f786eSSteven Rostedt { 3101554f786eSSteven Rostedt struct ring_buffer *buffer = 3102554f786eSSteven Rostedt container_of(self, struct ring_buffer, cpu_notify); 3103554f786eSSteven Rostedt long cpu = (long)hcpu; 3104554f786eSSteven Rostedt 3105554f786eSSteven Rostedt switch (action) { 3106554f786eSSteven Rostedt case CPU_UP_PREPARE: 3107554f786eSSteven Rostedt case CPU_UP_PREPARE_FROZEN: 3108554f786eSSteven Rostedt if (cpu_isset(cpu, *buffer->cpumask)) 3109554f786eSSteven Rostedt return NOTIFY_OK; 3110554f786eSSteven Rostedt 3111554f786eSSteven Rostedt buffer->buffers[cpu] = 3112554f786eSSteven Rostedt rb_allocate_cpu_buffer(buffer, cpu); 3113554f786eSSteven Rostedt if (!buffer->buffers[cpu]) { 3114554f786eSSteven Rostedt WARN(1, "failed to allocate ring buffer on CPU %ld\n", 3115554f786eSSteven Rostedt cpu); 3116554f786eSSteven Rostedt return NOTIFY_OK; 3117554f786eSSteven Rostedt } 3118554f786eSSteven Rostedt smp_wmb(); 3119554f786eSSteven Rostedt cpu_set(cpu, *buffer->cpumask); 3120554f786eSSteven Rostedt break; 3121554f786eSSteven Rostedt case CPU_DOWN_PREPARE: 3122554f786eSSteven Rostedt case CPU_DOWN_PREPARE_FROZEN: 3123554f786eSSteven Rostedt /* 3124554f786eSSteven Rostedt * Do nothing. 3125554f786eSSteven Rostedt * If we were to free the buffer, then the user would 3126554f786eSSteven Rostedt * lose any trace that was in the buffer. 3127554f786eSSteven Rostedt */ 3128554f786eSSteven Rostedt break; 3129554f786eSSteven Rostedt default: 3130554f786eSSteven Rostedt break; 3131554f786eSSteven Rostedt } 3132554f786eSSteven Rostedt return NOTIFY_OK; 3133554f786eSSteven Rostedt } 3134554f786eSSteven Rostedt #endif 3135