// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <[email protected]>
 */
#include <linux/trace_recursion.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/sched/clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/oom.h>

#include <asm/local.h>

/*
 * The "absolute" timestamp in the buffer is only 59 bits.
 * If a clock has the 5 MSBs set, it needs to be saved and
 * reinserted.
 */
#define TS_MSB		(0xf8ULL << 56)
#define ABS_TS_MASK	(~TS_MSB)

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	trace_seq_puts(s, "# compressed entry header\n");
	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
	trace_seq_puts(s, "\tarray       :   32 bits\n");
	trace_seq_putc(s, '\n');
	trace_seq_printf(s, "\tpadding     : type == %d\n",
			 RINGBUF_TYPE_PADDING);
	trace_seq_printf(s, "\ttime_extend : type == %d\n",
			 RINGBUF_TYPE_TIME_EXTEND);
	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
			 RINGBUF_TYPE_TIME_STAMP);
	trace_seq_printf(s, "\tdata max type_len  == %d\n",
			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return !trace_seq_has_overflowed(s);
}
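/*
 * For example (illustrative numbers only): an event carrying a 12-byte
 * payload is encoded in a single 32-bit header word, with type_len set
 * to 12 / 4 == 3 (the payload length in 4-byte words) and the 27-bit
 * time_delta holding the offset from the previous event's time stamp.
 * Payloads larger than 28 * 4 == 112 bytes fall back to type_len == 0,
 * with the full length stored in array[0] instead.
 */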
/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that
 * page again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 */
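/*
 * A minimal consumer sketch of the swap described above. Illustrative
 * only: rb_example_consume_page() is not part of this file, and assumes
 * the void * based read-page API declared in linux/ring_buffer.h and
 * implemented later in this file.
 */
static inline void rb_example_consume_page(struct trace_buffer *buffer, int cpu)
{
	void *page = ring_buffer_alloc_read_page(buffer, cpu);

	if (IS_ERR(page))
		return;

	/* On success, a buffer page has been swapped out into "page" */
	if (ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 1) >= 0) {
		/* hand "page" to splice, copy it out, etc. */
	}

	ring_buffer_free_read_page(buffer, cpu, page);
}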
/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 8,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

#define extended_time(event) \
	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)

static inline bool rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		WARN_ON_ONCE(1);
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (extended_time(event)) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}
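/*
 * An illustrative helper (not in the original file) that works the
 * encoding above from the other direction: a 12-byte payload gives
 * type_len == 3, so rb_event_length() returns 3 * RB_ALIGNMENT +
 * RB_EVNT_HDR_SIZE == 12 + 4 == 16 bytes. Payloads beyond
 * RB_MAX_SMALL_DATA spill the length into array[0], costing one
 * extra 32-bit word.
 */
static inline unsigned rb_example_data_event_size(unsigned payload)
{
	if (payload <= RB_MAX_SMALL_DATA)
		return ALIGN(payload, RB_ALIGNMENT) + RB_EVNT_HDR_SIZE;
	/* type_len == 0: the length lives in array[0] */
	return ALIGN(payload, RB_ALIGNMENT) + RB_EVNT_HDR_SIZE + sizeof(u32);
}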
/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (extended_time(event))
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static __always_inline void *
rb_event_data(struct ring_buffer_event *event)
{
	if (extended_time(event))
		event = skip_time_extend(event);
	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define for_each_online_buffer_cpu(buffer, cpu)		\
	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

static u64 rb_event_time_stamp(struct ring_buffer_event *event)
{
	u64 ts;

	ts = event->array[0];
	ts <<= TS_SHIFT;
	ts += event->time_delta;

	return ts;
}
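/*
 * For example (illustrative numbers): a time extend carrying the value
 * 0x12345678 stores the low TS_SHIFT (27) bits in time_delta (0x2345678)
 * and the remaining high bits in array[0] (0x2), so rb_event_time_stamp()
 * reassembles (0x2 << 27) + 0x2345678 == 0x12345678.
 */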
/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}
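/*
 * Illustrative helpers (not in the original file) showing how the split
 * counter above is unpacked: the low 20 bits are the write position
 * within the page, and the 12 bits above RB_WRITE_MASK count in-flight
 * updaters.
 */
static inline unsigned long rb_example_write_index(unsigned long val)
{
	return val & RB_WRITE_MASK;	/* position within the page */
}

static inline unsigned long rb_example_update_count(unsigned long val)
{
	return val >> 20;		/* number of nested updaters */
}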
/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline bool test_time_stamp(u64 delta)
{
	return !!(delta & TS_DELTA_TEST);
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;

	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			 "offset:0;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)sizeof(field.time_stamp),
			 (unsigned int)is_signed_type(u64));

	trace_seq_printf(s, "\tfield: local_t commit;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 (unsigned int)sizeof(field.commit),
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: int overwrite;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 1,
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: char data;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), data),
			 (unsigned int)BUF_PAGE_SIZE,
			 (unsigned int)is_signed_type(char));

	return !trace_seq_has_overflowed(s);
}

struct rb_irq_work {
	struct irq_work			work;
	wait_queue_head_t		waiters;
	wait_queue_head_t		full_waiters;
	long				wait_index;
	bool				waiters_pending;
	bool				full_waiters_pending;
	bool				wakeup_full;
};
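/*
 * A minimal sketch (not in the original file) of the decision a writer
 * makes with test_time_stamp(): deltas that do not fit in the 27-bit
 * time_delta field force a separate time-extend event.
 */
static inline bool rb_example_needs_time_extend(u64 prev_ts, u64 ts)
{
	/* True when (ts - prev_ts) has bits above TS_MASK set */
	return test_time_stamp(ts - prev_ts);
}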
/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
	u64			ts;
	u64			delta;
	u64			before;
	u64			after;
	unsigned long		length;
	struct buffer_page	*tail_page;
	int			add_timestamp;
};

/*
 * Used for the add_timestamp
 *  NONE
 *  EXTEND   - wants a time extend
 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 *  FORCE    - force a full time stamp.
 */
enum {
	RB_ADD_STAMP_NONE		= 0,
	RB_ADD_STAMP_EXTEND		= BIT(1),
	RB_ADD_STAMP_ABSOLUTE		= BIT(2),
	RB_ADD_STAMP_FORCE		= BIT(3)
};
/*
 * Used for which event context the event is in.
 *  TRANSITION = 0
 *  NMI        = 1
 *  IRQ        = 2
 *  SOFTIRQ    = 3
 *  NORMAL     = 4
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
	RB_CTX_TRANSITION,
	RB_CTX_NMI,
	RB_CTX_IRQ,
	RB_CTX_SOFTIRQ,
	RB_CTX_NORMAL,
	RB_CTX_MAX
};

#if BITS_PER_LONG == 32
#define RB_TIME_32
#endif

/* To test on 64 bit machines */
//#define RB_TIME_32

#ifdef RB_TIME_32

struct rb_time_struct {
	local_t		cnt;
	local_t		top;
	local_t		bottom;
	local_t		msb;
};
#else
#include <asm/local64.h>
struct rb_time_struct {
	local64_t	time;
};
#endif
typedef struct rb_time_struct rb_time_t;

#define MAX_NEST	5

/*
 * If head_page == tail_page && head == tail, then the buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	struct trace_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct buffer_data_page		*free_page;
	unsigned long			nr_pages;
	unsigned int			current_context;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	unsigned long			nest;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	local_t				pages_touched;
	local_t				pages_lost;
	local_t				pages_read;
	long				last_pages_touch;
	size_t				shortest_full;
	unsigned long			read;
	unsigned long			read_bytes;
	rb_time_t			write_stamp;
	rb_time_t			before_stamp;
	u64				event_stamp[MAX_NEST];
	u64				read_stamp;
	/* pages removed since last reset */
	unsigned long			pages_removed;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	long				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct trace_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resizing;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

	struct hlist_node		node;
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
	bool				time_stamp_abs;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	unsigned long			next_event;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	unsigned long			cache_pages_removed;
	u64				read_stamp;
	u64				page_stamp;
	struct ring_buffer_event	*event;
	int				missed_events;
};

#ifdef RB_TIME_32

/*
 * On 32 bit machines, local64_t is very expensive. As the ring
 * buffer doesn't need all the features of a true 64 bit atomic,
 * on 32 bit, it uses these functions (64 still uses local64_t).
 *
 * For the ring buffer, the 64 bit time stamp operations required are
 * the following:
 *
 *  - A read may fail if it interrupts a modification of the time stamp.
 *      It will succeed if it did not interrupt another write, even if
 *      the read itself is interrupted by a write.
 *      It returns whether it was successful or not.
 *
 *  - Writes always succeed and will overwrite other writes and writes
 *      that were done by events interrupting the current write.
 *
 *  - A write followed by a read of the same time stamp will always succeed,
 *      but may not contain the same value.
 *
 *  - A cmpxchg will fail if it interrupted another write or cmpxchg.
 *      Other than that, it acts like a normal cmpxchg.
 *
 * The 60 bit time stamp is broken up by 30 bits in a top and bottom half
 * (bottom being the least significant 30 bits of the 60 bit time stamp).
 *
 * The two most significant bits of each half holds a 2 bit counter (0-3).
 * Each update will increment this counter by one.
 * When reading the top and bottom, if the two counter bits match then the
 * top and bottom together make a valid 60 bit number.
 */
#define RB_TIME_SHIFT	30
#define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
#define RB_TIME_MSB_SHIFT	60

static inline int rb_time_cnt(unsigned long val)
{
	return (val >> RB_TIME_SHIFT) & 3;
}

static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
{
	u64 val;

	val = top & RB_TIME_VAL_MASK;
	val <<= RB_TIME_SHIFT;
	val |= bottom & RB_TIME_VAL_MASK;

	return val;
}

static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
{
	unsigned long top, bottom, msb;
	unsigned long c;

	/*
	 * If the read is interrupted by a write, then the cnt will
	 * be different. Loop until both top and bottom have been read
	 * without interruption.
	 */
	do {
		c = local_read(&t->cnt);
		top = local_read(&t->top);
		bottom = local_read(&t->bottom);
		msb = local_read(&t->msb);
	} while (c != local_read(&t->cnt));

	*cnt = rb_time_cnt(top);

	/* If top and bottom counts don't match, this interrupted a write */
	if (*cnt != rb_time_cnt(bottom))
		return false;

	/* The shift to msb will lose its cnt bits */
	*ret = rb_time_val(top, bottom) | ((u64)msb << RB_TIME_MSB_SHIFT);
	return true;
}

static bool rb_time_read(rb_time_t *t, u64 *ret)
{
	unsigned long cnt;

	return __rb_time_read(t, ret, &cnt);
}

static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
{
	return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
}
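/*
 * An illustrative round trip (not in the original file) through the split
 * representation above: tag both 30-bit halves with the same 2-bit update
 * counter, then recombine them. rb_time_val() masks the counter bits back
 * out, so this always returns true for the low 60 bits of @ts.
 */
static inline bool rb_example_split_roundtrip(u64 ts, unsigned long cnt)
{
	unsigned long top, bottom;

	top = (unsigned long)((ts >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
	bottom = (unsigned long)(ts & RB_TIME_VAL_MASK);

	return rb_time_val(rb_time_val_cnt(top, cnt),
			   rb_time_val_cnt(bottom, cnt)) ==
	       (ts & (((u64)1 << RB_TIME_MSB_SHIFT) - 1));
}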
static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom,
				 unsigned long *msb)
{
	*top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
	*bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
	*msb = (unsigned long)(val >> RB_TIME_MSB_SHIFT);
}

static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
{
	val = rb_time_val_cnt(val, cnt);
	local_set(t, val);
}

static void rb_time_set(rb_time_t *t, u64 val)
{
	unsigned long cnt, top, bottom, msb;

	rb_time_split(val, &top, &bottom, &msb);

	/* Writes always succeed with a valid number even if it gets interrupted. */
	do {
		cnt = local_inc_return(&t->cnt);
		rb_time_val_set(&t->top, top, cnt);
		rb_time_val_set(&t->bottom, bottom, cnt);
		rb_time_val_set(&t->msb, val >> RB_TIME_MSB_SHIFT, cnt);
	} while (cnt != local_read(&t->cnt));
}

static inline bool
rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
{
	return local_try_cmpxchg(l, &expect, set);
}

static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
	unsigned long cnt, top, bottom, msb;
	unsigned long cnt2, top2, bottom2, msb2;
	u64 val;

	/* The cmpxchg always fails if it interrupted an update */
	if (!__rb_time_read(t, &val, &cnt2))
		return false;

	if (val != expect)
		return false;

	cnt = local_read(&t->cnt);
	if ((cnt & 3) != cnt2)
		return false;

	cnt2 = cnt + 1;

	rb_time_split(val, &top, &bottom, &msb);
	top = rb_time_val_cnt(top, cnt);
	bottom = rb_time_val_cnt(bottom, cnt);

	rb_time_split(set, &top2, &bottom2, &msb2);
	top2 = rb_time_val_cnt(top2, cnt2);
	bottom2 = rb_time_val_cnt(bottom2, cnt2);

	if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
		return false;
	if (!rb_time_read_cmpxchg(&t->msb, msb, msb2))
		return false;
	if (!rb_time_read_cmpxchg(&t->top, top, top2))
		return false;
	if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
		return false;
	return true;
}

#else /* 64 bits */

/* local64_t always succeeds */

static inline bool rb_time_read(rb_time_t *t, u64 *ret)
{
	*ret = local64_read(&t->time);
	return true;
}
static void rb_time_set(rb_time_t *t, u64 val)
{
	local64_set(&t->time, val);
}

static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
	return local64_try_cmpxchg(&t->time, &expect, set);
}
#endif
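/*
 * A minimal usage sketch (not in the original file): advance a stamp by
 * @delta using the semantics documented above. Works with both the 32 bit
 * split implementation and the local64_t one.
 */
static inline bool rb_example_advance_stamp(rb_time_t *t, u64 delta)
{
	u64 ts;

	if (!rb_time_read(t, &ts))	/* can only fail on 32 bit */
		return false;

	/* Fails if another write or cmpxchg slipped in between */
	return rb_time_cmpxchg(t, ts, ts + delta);
}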
/*
 * Enable this to make sure that the event passed to
 * ring_buffer_event_time_stamp() is not committed and also
 * is on the buffer that it passed in.
 */
//#define RB_VERIFY_EVENT
#ifdef RB_VERIFY_EVENT
static struct list_head *rb_list_head(struct list_head *list);
static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
			 void *event)
{
	struct buffer_page *page = cpu_buffer->commit_page;
	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
	struct list_head *next;
	long commit, write;
	unsigned long addr = (unsigned long)event;
	bool done = false;
	int stop = 0;

	/* Make sure the event exists and is not committed yet */
	do {
		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
			done = true;
		commit = local_read(&page->page->commit);
		write = local_read(&page->write);
		if (addr >= (unsigned long)&page->page->data[commit] &&
		    addr < (unsigned long)&page->page->data[write])
			return;

		next = rb_list_head(page->list.next);
		page = list_entry(next, struct buffer_page, list);
	} while (!done);
	WARN_ON_ONCE(1);
}
#else
static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
				void *event)
{
}
#endif

/*
 * The absolute time stamp drops the 5 MSBs and some clocks may
 * require them. The rb_fix_abs_ts() will take a previous full
 * time stamp, and add the 5 MSB of that time stamp on to the
 * saved absolute time stamp. Then they are compared in case of
 * the unlikely event that the latest time stamp incremented
 * the 5 MSB.
 */
static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
{
	if (save_ts & TS_MSB) {
		abs |= save_ts & TS_MSB;
		/* Check for overflow */
		if (unlikely(abs < save_ts))
			abs += 1ULL << 59;
	}
	return abs;
}
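/*
 * Worked example (illustrative numbers): if the last full time stamp was
 * save_ts = (3ULL << 59) | 100 and the buffer recorded the truncated
 * abs = 90, restoring the MSBs gives (3ULL << 59) | 90. That is smaller
 * than save_ts, meaning the low 59 bits wrapped, so one more
 * (1ULL << 59) is added, yielding (4ULL << 59) | 90.
 */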
static inline u64 rb_time_stamp(struct trace_buffer *buffer);

/**
 * ring_buffer_event_time_stamp - return the event's current time stamp
 * @buffer: The buffer that the event is on
 * @event: the event to get the time stamp of
 *
 * Note, this must be called after @event is reserved, and before it is
 * committed to the ring buffer. And must be called from the same
 * context where the event was reserved (normal, softirq, irq, etc).
 *
 * Returns the time stamp associated with the current event.
 * If the event has an extended time stamp, then that is used as
 * the time stamp to return.
 * In the highly unlikely case that the event was nested more than
 * the max nesting, then the write_stamp of the buffer is returned,
 * otherwise the current time is returned, though really neither of
 * the last two cases should ever happen.
 */
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
	unsigned int nest;
	u64 ts;

	/* If the event includes an absolute time, then just use that */
	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
		ts = rb_event_time_stamp(event);
		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
	}

	nest = local_read(&cpu_buffer->committing);
	verify_event(cpu_buffer, event);
	if (WARN_ON_ONCE(!nest))
		goto fail;

	/* Read the current saved nesting level time stamp */
	if (likely(--nest < MAX_NEST))
		return cpu_buffer->event_stamp[nest];

	/* Shouldn't happen, warn if it does */
	WARN_ONCE(1, "nest (%d) greater than max", nest);

 fail:
	/* Can only fail on 32 bit */
	if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
		/* Screw it, just read the current time */
		ts = rb_time_stamp(cpu_buffer->buffer);

	return ts;
}
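/*
 * A minimal caller sketch (illustrative only, not part of this file):
 * the time stamp must be sampled between reserve and commit, in the
 * same context. Assumes the reserve/commit API of this kernel version,
 * where ring_buffer_unlock_commit() takes only the buffer.
 */
static inline u64 rb_example_stamped_write(struct trace_buffer *buffer)
{
	struct ring_buffer_event *event;
	u64 ts = 0;

	event = ring_buffer_lock_reserve(buffer, sizeof(ts));
	if (event) {
		ts = ring_buffer_event_time_stamp(buffer, event);
		memcpy(ring_buffer_event_data(event), &ts, sizeof(ts));
		ring_buffer_unlock_commit(buffer);
	}
	return ts;
}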
/**
 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 */
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
{
	return buffer->buffers[cpu]->nr_pages;
}

/**
 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 * @buffer: The ring_buffer to get the number of pages from
 * @cpu: The cpu of the ring_buffer to get the number of pages from
 *
 * Returns the number of pages that have content in the ring buffer.
 */
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
{
	size_t read;
	size_t lost;
	size_t cnt;

	read = local_read(&buffer->buffers[cpu]->pages_read);
	lost = local_read(&buffer->buffers[cpu]->pages_lost);
	cnt = local_read(&buffer->buffers[cpu]->pages_touched);

	if (WARN_ON_ONCE(cnt < lost))
		return 0;

	cnt -= lost;

	/* The reader can read an empty page, but not more than that */
	if (cnt < read) {
		WARN_ON_ONCE(read > cnt + 1);
		return 0;
	}

	return cnt - read;
}

static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	size_t nr_pages;
	size_t dirty;

	nr_pages = cpu_buffer->nr_pages;
	if (!nr_pages || !full)
		return true;

	dirty = ring_buffer_nr_dirty_pages(buffer, cpu);

	return (dirty * 100) > (full * nr_pages);
}
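/*
 * For example (illustrative numbers): with nr_pages == 10 and a waiter
 * asking for full == 50 (percent), full_hit() returns true only once
 * dirty * 100 > 50 * 10, i.e. once at least 6 pages hold data.
 */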
93215693458SSteven Rostedt (Red Hat) */ 93315693458SSteven Rostedt (Red Hat) static void rb_wake_up_waiters(struct irq_work *work) 93415693458SSteven Rostedt (Red Hat) { 93515693458SSteven Rostedt (Red Hat) struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work); 93615693458SSteven Rostedt (Red Hat) 93715693458SSteven Rostedt (Red Hat) wake_up_all(&rbwork->waiters); 938ec0bbc5eSSteven Rostedt (Google) if (rbwork->full_waiters_pending || rbwork->wakeup_full) { 9391e0d6714SSteven Rostedt (Red Hat) rbwork->wakeup_full = false; 940ec0bbc5eSSteven Rostedt (Google) rbwork->full_waiters_pending = false; 9411e0d6714SSteven Rostedt (Red Hat) wake_up_all(&rbwork->full_waiters); 9421e0d6714SSteven Rostedt (Red Hat) } 94315693458SSteven Rostedt (Red Hat) } 94415693458SSteven Rostedt (Red Hat) 94515693458SSteven Rostedt (Red Hat) /** 9467e9fbbb1SSteven Rostedt (Google) * ring_buffer_wake_waiters - wake up any waiters on this ring buffer 9477e9fbbb1SSteven Rostedt (Google) * @buffer: The ring buffer to wake waiters on 948151e34d1SGaosheng Cui * @cpu: The CPU buffer to wake waiters on 9497e9fbbb1SSteven Rostedt (Google) * 9507e9fbbb1SSteven Rostedt (Google) * In the case of a file that represents a ring buffer is closing, 9517e9fbbb1SSteven Rostedt (Google) * it is prudent to wake up any waiters that are on this. 9527e9fbbb1SSteven Rostedt (Google) */ 9537e9fbbb1SSteven Rostedt (Google) void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu) 9547e9fbbb1SSteven Rostedt (Google) { 9557e9fbbb1SSteven Rostedt (Google) struct ring_buffer_per_cpu *cpu_buffer; 9567e9fbbb1SSteven Rostedt (Google) struct rb_irq_work *rbwork; 9577e9fbbb1SSteven Rostedt (Google) 9587433632cSSteven Rostedt (Google) if (!buffer) 9597433632cSSteven Rostedt (Google) return; 9607433632cSSteven Rostedt (Google) 9617e9fbbb1SSteven Rostedt (Google) if (cpu == RING_BUFFER_ALL_CPUS) { 9627e9fbbb1SSteven Rostedt (Google) 9637e9fbbb1SSteven Rostedt (Google) /* Wake up individual ones too. 
One level recursion */ 9647e9fbbb1SSteven Rostedt (Google) for_each_buffer_cpu(buffer, cpu) 9657e9fbbb1SSteven Rostedt (Google) ring_buffer_wake_waiters(buffer, cpu); 9667e9fbbb1SSteven Rostedt (Google) 9677e9fbbb1SSteven Rostedt (Google) rbwork = &buffer->irq_work; 9687e9fbbb1SSteven Rostedt (Google) } else { 9697433632cSSteven Rostedt (Google) if (WARN_ON_ONCE(!buffer->buffers)) 9707433632cSSteven Rostedt (Google) return; 9717433632cSSteven Rostedt (Google) if (WARN_ON_ONCE(cpu >= nr_cpu_ids)) 9727433632cSSteven Rostedt (Google) return; 9737433632cSSteven Rostedt (Google) 9747e9fbbb1SSteven Rostedt (Google) cpu_buffer = buffer->buffers[cpu]; 9757433632cSSteven Rostedt (Google) /* The CPU buffer may not have been initialized yet */ 9767433632cSSteven Rostedt (Google) if (!cpu_buffer) 9777433632cSSteven Rostedt (Google) return; 9787e9fbbb1SSteven Rostedt (Google) rbwork = &cpu_buffer->irq_work; 9797e9fbbb1SSteven Rostedt (Google) } 9807e9fbbb1SSteven Rostedt (Google) 9817e9fbbb1SSteven Rostedt (Google) rbwork->wait_index++; 9827e9fbbb1SSteven Rostedt (Google) /* make sure the waiters see the new index */ 9837e9fbbb1SSteven Rostedt (Google) smp_wmb(); 9847e9fbbb1SSteven Rostedt (Google) 9857e9fbbb1SSteven Rostedt (Google) rb_wake_up_waiters(&rbwork->work); 9867e9fbbb1SSteven Rostedt (Google) } 9877e9fbbb1SSteven Rostedt (Google) 9887e9fbbb1SSteven Rostedt (Google) /** 98915693458SSteven Rostedt (Red Hat) * ring_buffer_wait - wait for input to the ring buffer 99015693458SSteven Rostedt (Red Hat) * @buffer: buffer to wait on 99115693458SSteven Rostedt (Red Hat) * @cpu: the cpu buffer to wait on 992e1981f75SQiujun Huang * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS 99315693458SSteven Rostedt (Red Hat) * 99415693458SSteven Rostedt (Red Hat) * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon 99515693458SSteven Rostedt (Red Hat) * as data is added to any of the @buffer's cpu buffers. Otherwise 99615693458SSteven Rostedt (Red Hat) * it will wait for data to be added to a specific cpu buffer. 99715693458SSteven Rostedt (Red Hat) */ 99813292494SSteven Rostedt (VMware) int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) 99915693458SSteven Rostedt (Red Hat) { 10003f649ab7SKees Cook struct ring_buffer_per_cpu *cpu_buffer; 100115693458SSteven Rostedt (Red Hat) DEFINE_WAIT(wait); 100215693458SSteven Rostedt (Red Hat) struct rb_irq_work *work; 10037e9fbbb1SSteven Rostedt (Google) long wait_index; 1004e30f53aaSRabin Vincent int ret = 0; 100515693458SSteven Rostedt (Red Hat) 100615693458SSteven Rostedt (Red Hat) /* 100715693458SSteven Rostedt (Red Hat) * Depending on what the caller is waiting for, either any 100815693458SSteven Rostedt (Red Hat) * data in any cpu buffer, or a specific buffer, put the 100915693458SSteven Rostedt (Red Hat) * caller on the appropriate wait queue. 
101015693458SSteven Rostedt (Red Hat) */ 10111e0d6714SSteven Rostedt (Red Hat) if (cpu == RING_BUFFER_ALL_CPUS) { 101215693458SSteven Rostedt (Red Hat) work = &buffer->irq_work; 10131e0d6714SSteven Rostedt (Red Hat) /* Full only makes sense on per cpu reads */ 10142c2b0a78SSteven Rostedt (VMware) full = 0; 10151e0d6714SSteven Rostedt (Red Hat) } else { 10168b8b3683SSteven Rostedt (Red Hat) if (!cpumask_test_cpu(cpu, buffer->cpumask)) 10178b8b3683SSteven Rostedt (Red Hat) return -ENODEV; 101815693458SSteven Rostedt (Red Hat) cpu_buffer = buffer->buffers[cpu]; 101915693458SSteven Rostedt (Red Hat) work = &cpu_buffer->irq_work; 102015693458SSteven Rostedt (Red Hat) } 102115693458SSteven Rostedt (Red Hat) 10227e9fbbb1SSteven Rostedt (Google) wait_index = READ_ONCE(work->wait_index); 102315693458SSteven Rostedt (Red Hat) 1024e30f53aaSRabin Vincent while (true) { 10251e0d6714SSteven Rostedt (Red Hat) if (full) 10261e0d6714SSteven Rostedt (Red Hat) prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE); 10271e0d6714SSteven Rostedt (Red Hat) else 102815693458SSteven Rostedt (Red Hat) prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); 102915693458SSteven Rostedt (Red Hat) 103015693458SSteven Rostedt (Red Hat) /* 103115693458SSteven Rostedt (Red Hat) * The events can happen in critical sections where 103215693458SSteven Rostedt (Red Hat) * checking a work queue can cause deadlocks. 103315693458SSteven Rostedt (Red Hat) * After adding a task to the queue, this flag is set 103415693458SSteven Rostedt (Red Hat) * only to notify events to try to wake up the queue 103515693458SSteven Rostedt (Red Hat) * using irq_work. 103615693458SSteven Rostedt (Red Hat) * 103715693458SSteven Rostedt (Red Hat) * We don't clear it even if the buffer is no longer 103815693458SSteven Rostedt (Red Hat) * empty. The flag only causes the next event to run 103915693458SSteven Rostedt (Red Hat) * irq_work to do the work queue wake up. The worst 104015693458SSteven Rostedt (Red Hat) * that can happen if we race with !trace_empty() is that 104115693458SSteven Rostedt (Red Hat) * an event will cause an irq_work to try to wake up 104215693458SSteven Rostedt (Red Hat) * an empty queue. 104315693458SSteven Rostedt (Red Hat) * 104415693458SSteven Rostedt (Red Hat) * There's no reason to protect this flag either, as 104515693458SSteven Rostedt (Red Hat) * the work queue and irq_work logic will do the necessary 104615693458SSteven Rostedt (Red Hat) * synchronization for the wake ups. The only thing 104715693458SSteven Rostedt (Red Hat) * that is necessary is that the wake up happens after 104815693458SSteven Rostedt (Red Hat) * a task has been queued. Spurious wake ups are OK. 
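 * For reference, a sketch of the writer-side counterpart (simplified
 * illustration only; the exact writer-side helper is outside this
 * excerpt):
 *
 *	if (work->waiters_pending) {
 *		work->waiters_pending = false;
 *		irq_work_queue(&work->work); // wakes us via rb_wake_up_waiters()
 *	}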
104915693458SSteven Rostedt (Red Hat) */ 10501e0d6714SSteven Rostedt (Red Hat) if (full) 10511e0d6714SSteven Rostedt (Red Hat) work->full_waiters_pending = true; 10521e0d6714SSteven Rostedt (Red Hat) else 105315693458SSteven Rostedt (Red Hat) work->waiters_pending = true; 105415693458SSteven Rostedt (Red Hat) 1055e30f53aaSRabin Vincent if (signal_pending(current)) { 1056e30f53aaSRabin Vincent ret = -EINTR; 1057e30f53aaSRabin Vincent break; 1058e30f53aaSRabin Vincent } 1059e30f53aaSRabin Vincent 1060e30f53aaSRabin Vincent if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) 1061e30f53aaSRabin Vincent break; 1062e30f53aaSRabin Vincent 1063e30f53aaSRabin Vincent if (cpu != RING_BUFFER_ALL_CPUS && 1064e30f53aaSRabin Vincent !ring_buffer_empty_cpu(buffer, cpu)) { 1065e30f53aaSRabin Vincent unsigned long flags; 1066e30f53aaSRabin Vincent bool pagebusy; 106742fb0a1eSSteven Rostedt (Google) bool done; 1068e30f53aaSRabin Vincent 1069e30f53aaSRabin Vincent if (!full) 1070e30f53aaSRabin Vincent break; 1071e30f53aaSRabin Vincent 1072e30f53aaSRabin Vincent raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 1073e30f53aaSRabin Vincent pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; 107442fb0a1eSSteven Rostedt (Google) done = !pagebusy && full_hit(buffer, cpu, full); 107542fb0a1eSSteven Rostedt (Google) 10762c2b0a78SSteven Rostedt (VMware) if (!cpu_buffer->shortest_full || 10773b19d614SSteven Rostedt (Google) cpu_buffer->shortest_full > full) 10782c2b0a78SSteven Rostedt (VMware) cpu_buffer->shortest_full = full; 1079e30f53aaSRabin Vincent raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 108042fb0a1eSSteven Rostedt (Google) if (done) 1081e30f53aaSRabin Vincent break; 1082e30f53aaSRabin Vincent } 1083e30f53aaSRabin Vincent 108415693458SSteven Rostedt (Red Hat) schedule(); 10857e9fbbb1SSteven Rostedt (Google) 10867e9fbbb1SSteven Rostedt (Google) /* Make sure to see the new wait index */ 10877e9fbbb1SSteven Rostedt (Google) smp_rmb(); 10887e9fbbb1SSteven Rostedt (Google) if (wait_index != work->wait_index) 10897e9fbbb1SSteven Rostedt (Google) break; 1090e30f53aaSRabin Vincent } 109115693458SSteven Rostedt (Red Hat) 10921e0d6714SSteven Rostedt (Red Hat) if (full) 10931e0d6714SSteven Rostedt (Red Hat) finish_wait(&work->full_waiters, &wait); 10941e0d6714SSteven Rostedt (Red Hat) else 109515693458SSteven Rostedt (Red Hat) finish_wait(&work->waiters, &wait); 1096e30f53aaSRabin Vincent 1097e30f53aaSRabin Vincent return ret; 109815693458SSteven Rostedt (Red Hat) } 109915693458SSteven Rostedt (Red Hat) 110015693458SSteven Rostedt (Red Hat) /** 110115693458SSteven Rostedt (Red Hat) * ring_buffer_poll_wait - poll on buffer input 110215693458SSteven Rostedt (Red Hat) * @buffer: buffer to wait on 110315693458SSteven Rostedt (Red Hat) * @cpu: the cpu buffer to wait on 110415693458SSteven Rostedt (Red Hat) * @filp: the file descriptor 110515693458SSteven Rostedt (Red Hat) * @poll_table: The poll descriptor 110642fb0a1eSSteven Rostedt (Google) * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS 110715693458SSteven Rostedt (Red Hat) * 110815693458SSteven Rostedt (Red Hat) * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon 110915693458SSteven Rostedt (Red Hat) * as data is added to any of the @buffer's cpu buffers. Otherwise 111015693458SSteven Rostedt (Red Hat) * it will wait for data to be added to a specific cpu buffer. 
111115693458SSteven Rostedt (Red Hat) * 1112a9a08845SLinus Torvalds * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers, 111315693458SSteven Rostedt (Red Hat) * zero otherwise. 111415693458SSteven Rostedt (Red Hat) */ 111513292494SSteven Rostedt (VMware) __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, 111642fb0a1eSSteven Rostedt (Google) struct file *filp, poll_table *poll_table, int full) 111715693458SSteven Rostedt (Red Hat) { 111815693458SSteven Rostedt (Red Hat) struct ring_buffer_per_cpu *cpu_buffer; 111915693458SSteven Rostedt (Red Hat) struct rb_irq_work *work; 112015693458SSteven Rostedt (Red Hat) 112142fb0a1eSSteven Rostedt (Google) if (cpu == RING_BUFFER_ALL_CPUS) { 112215693458SSteven Rostedt (Red Hat) work = &buffer->irq_work; 112342fb0a1eSSteven Rostedt (Google) full = 0; 112442fb0a1eSSteven Rostedt (Google) } else { 11256721cb60SSteven Rostedt (Red Hat) if (!cpumask_test_cpu(cpu, buffer->cpumask)) 11266721cb60SSteven Rostedt (Red Hat) return -EINVAL; 11276721cb60SSteven Rostedt (Red Hat) 112815693458SSteven Rostedt (Red Hat) cpu_buffer = buffer->buffers[cpu]; 112915693458SSteven Rostedt (Red Hat) work = &cpu_buffer->irq_work; 113015693458SSteven Rostedt (Red Hat) } 113115693458SSteven Rostedt (Red Hat) 113242fb0a1eSSteven Rostedt (Google) if (full) { 113342fb0a1eSSteven Rostedt (Google) poll_wait(filp, &work->full_waiters, poll_table); 113442fb0a1eSSteven Rostedt (Google) work->full_waiters_pending = true; 113542fb0a1eSSteven Rostedt (Google) } else { 113615693458SSteven Rostedt (Red Hat) poll_wait(filp, &work->waiters, poll_table); 11374ce97dbfSJosef Bacik work->waiters_pending = true; 113842fb0a1eSSteven Rostedt (Google) } 113942fb0a1eSSteven Rostedt (Google) 11404ce97dbfSJosef Bacik /* 11414ce97dbfSJosef Bacik * There's a tight race between setting the waiters_pending and 11424ce97dbfSJosef Bacik * checking if the ring buffer is empty. Once the waiters_pending bit 11434ce97dbfSJosef Bacik * is set, the next event will wake the task up, but we can get stuck 11444ce97dbfSJosef Bacik * if there's only a single event in the buffer. 11454ce97dbfSJosef Bacik * 11464ce97dbfSJosef Bacik * FIXME: Ideally, we need a memory barrier on the writer side as well, 11474ce97dbfSJosef Bacik * but adding a memory barrier to all events will cause too much of a 11484ce97dbfSJosef Bacik * performance hit in the fast path. We only need a memory barrier when 11494ce97dbfSJosef Bacik * the buffer goes from empty to having content. But as this race is 11504ce97dbfSJosef Bacik * extremely small, and it's not a problem if another event comes in, we 11514ce97dbfSJosef Bacik * will fix it later. 11524ce97dbfSJosef Bacik */ 11534ce97dbfSJosef Bacik smp_mb(); 115415693458SSteven Rostedt (Red Hat) 115542fb0a1eSSteven Rostedt (Google) if (full) 115642fb0a1eSSteven Rostedt (Google) return full_hit(buffer, cpu, full) ? 
EPOLLIN | EPOLLRDNORM : 0; 115742fb0a1eSSteven Rostedt (Google) 115815693458SSteven Rostedt (Red Hat) if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || 115915693458SSteven Rostedt (Red Hat) (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) 1160a9a08845SLinus Torvalds return EPOLLIN | EPOLLRDNORM; 116115693458SSteven Rostedt (Red Hat) return 0; 116215693458SSteven Rostedt (Red Hat) } 116315693458SSteven Rostedt (Red Hat) 1164f536aafcSSteven Rostedt /* buffer may be either ring_buffer or ring_buffer_per_cpu */ 1165077c5407SSteven Rostedt #define RB_WARN_ON(b, cond) \ 11663e89c7bbSSteven Rostedt ({ \ 11673e89c7bbSSteven Rostedt int _____ret = unlikely(cond); \ 11683e89c7bbSSteven Rostedt if (_____ret) { \ 1169077c5407SSteven Rostedt if (__same_type(*(b), struct ring_buffer_per_cpu)) { \ 1170077c5407SSteven Rostedt struct ring_buffer_per_cpu *__b = \ 1171077c5407SSteven Rostedt (void *)b; \ 1172077c5407SSteven Rostedt atomic_inc(&__b->buffer->record_disabled); \ 1173077c5407SSteven Rostedt } else \ 1174077c5407SSteven Rostedt atomic_inc(&b->record_disabled); \ 1175bf41a158SSteven Rostedt WARN_ON(1); \ 1176bf41a158SSteven Rostedt } \ 11773e89c7bbSSteven Rostedt _____ret; \ 11783e89c7bbSSteven Rostedt }) 1179f536aafcSSteven Rostedt 118037886f6aSSteven Rostedt /* Up this if you want to test the TIME_EXTENTS and normalization */ 118137886f6aSSteven Rostedt #define DEBUG_SHIFT 0 118237886f6aSSteven Rostedt 118313292494SSteven Rostedt (VMware) static inline u64 rb_time_stamp(struct trace_buffer *buffer) 118488eb0125SSteven Rostedt { 1185bbeba3e5SSteven Rostedt (VMware) u64 ts; 1186bbeba3e5SSteven Rostedt (VMware) 1187bbeba3e5SSteven Rostedt (VMware) /* Skip retpolines :-( */ 1188bbeba3e5SSteven Rostedt (VMware) if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local)) 1189bbeba3e5SSteven Rostedt (VMware) ts = trace_clock_local(); 1190bbeba3e5SSteven Rostedt (VMware) else 1191bbeba3e5SSteven Rostedt (VMware) ts = buffer->clock(); 1192bbeba3e5SSteven Rostedt (VMware) 119388eb0125SSteven Rostedt /* shift to debug/test normalization and TIME_EXTENTS */ 1194bbeba3e5SSteven Rostedt (VMware) return ts << DEBUG_SHIFT; 119588eb0125SSteven Rostedt } 119688eb0125SSteven Rostedt 1197f3ef7202SYordan Karadzhov (VMware) u64 ring_buffer_time_stamp(struct trace_buffer *buffer) 119837886f6aSSteven Rostedt { 119937886f6aSSteven Rostedt u64 time; 120037886f6aSSteven Rostedt 120137886f6aSSteven Rostedt preempt_disable_notrace(); 12026d3f1e12SJiri Olsa time = rb_time_stamp(buffer); 1203d6097c9eSPeter Zijlstra preempt_enable_notrace(); 120437886f6aSSteven Rostedt 120537886f6aSSteven Rostedt return time; 120637886f6aSSteven Rostedt } 120737886f6aSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_time_stamp); 120837886f6aSSteven Rostedt 120913292494SSteven Rostedt (VMware) void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer, 121037886f6aSSteven Rostedt int cpu, u64 *ts) 121137886f6aSSteven Rostedt { 121237886f6aSSteven Rostedt /* Just stupid testing the normalize function and deltas */ 121337886f6aSSteven Rostedt *ts >>= DEBUG_SHIFT; 121437886f6aSSteven Rostedt } 121537886f6aSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp); 121637886f6aSSteven Rostedt 121777ae365eSSteven Rostedt /* 121877ae365eSSteven Rostedt * Making the ring buffer lockless makes things tricky. 121977ae365eSSteven Rostedt * Writes only happen on the CPU that they are on, 122077ae365eSSteven Rostedt * and they only need to worry about interrupts. 
Reads can 122177ae365eSSteven Rostedt * happen on any CPU. 122277ae365eSSteven Rostedt * 122377ae365eSSteven Rostedt * The reader page is always off the ring buffer, but when the 122477ae365eSSteven Rostedt * reader finishes with a page, it needs to swap its page with 122577ae365eSSteven Rostedt * a new one from the buffer. The reader needs to take from 122677ae365eSSteven Rostedt * the head (writes go to the tail). But if a writer is in overwrite 122777ae365eSSteven Rostedt * mode and wraps, it must push the head page forward. 122877ae365eSSteven Rostedt * 122977ae365eSSteven Rostedt * Here lies the problem. 123077ae365eSSteven Rostedt * 123177ae365eSSteven Rostedt * The reader must be careful to replace only the head page, and 123277ae365eSSteven Rostedt * not another one. As described at the top of the file in the 123377ae365eSSteven Rostedt * ASCII art, the reader sets its old page to point to the next 123477ae365eSSteven Rostedt * page after head. It then sets the page after head to point to 123577ae365eSSteven Rostedt * the old reader page. But if the writer moves the head page 123677ae365eSSteven Rostedt * during this operation, the reader could end up with the tail. 123777ae365eSSteven Rostedt * 123877ae365eSSteven Rostedt * We use cmpxchg to help prevent this race. We also do something 123977ae365eSSteven Rostedt * special with the page before head. We set the LSB to 1. 124077ae365eSSteven Rostedt * 124177ae365eSSteven Rostedt * When the writer must push the page forward, it will clear the 124277ae365eSSteven Rostedt * bit that points to the head page, move the head, and then set 124377ae365eSSteven Rostedt * the bit that points to the new head page. 124477ae365eSSteven Rostedt * 124577ae365eSSteven Rostedt * We also don't want an interrupt coming in and moving the head 124677ae365eSSteven Rostedt * page on another writer. Thus we use the second LSB to catch 124777ae365eSSteven Rostedt * that too. Thus: 124877ae365eSSteven Rostedt * 124977ae365eSSteven Rostedt * head->list->prev->next bit 1 bit 0 125077ae365eSSteven Rostedt * ------- ------- 125177ae365eSSteven Rostedt * Normal page 0 0 125277ae365eSSteven Rostedt * Points to head page 0 1 125377ae365eSSteven Rostedt * New head page 1 0 125477ae365eSSteven Rostedt * 125577ae365eSSteven Rostedt * Note we can not trust the prev pointer of the head page, because: 125677ae365eSSteven Rostedt * 125777ae365eSSteven Rostedt * +----+ +-----+ +-----+ 125877ae365eSSteven Rostedt * | |------>| T |---X--->| N | 125977ae365eSSteven Rostedt * | |<------| | | | 126077ae365eSSteven Rostedt * +----+ +-----+ +-----+ 126177ae365eSSteven Rostedt * ^ ^ | 126277ae365eSSteven Rostedt * | +-----+ | | 126377ae365eSSteven Rostedt * +----------| R |----------+ | 126477ae365eSSteven Rostedt * | |<-----------+ 126577ae365eSSteven Rostedt * +-----+ 126677ae365eSSteven Rostedt * 126777ae365eSSteven Rostedt * Key: ---X--> HEAD flag set in pointer 126877ae365eSSteven Rostedt * T Tail page 126977ae365eSSteven Rostedt * R Reader page 127077ae365eSSteven Rostedt * N Next page 127177ae365eSSteven Rostedt * 127277ae365eSSteven Rostedt * (see __rb_reserve_next() to see where this happens) 127377ae365eSSteven Rostedt * 127477ae365eSSteven Rostedt * What the above shows is that the reader just swapped out 127577ae365eSSteven Rostedt * the reader page with a page in the buffer, but before it 127677ae365eSSteven Rostedt * could make the new header point back to the new page added 127777ae365eSSteven Rostedt * it was preempted by a writer. 
The writer moved forward onto 127877ae365eSSteven Rostedt * the new page added by the reader and is about to move forward 127977ae365eSSteven Rostedt * again. 128077ae365eSSteven Rostedt * 128177ae365eSSteven Rostedt * You can see, it is legitimate for the previous pointer of 128277ae365eSSteven Rostedt * the head (or any page) not to point back to itself. But only 12836167c205SSteven Rostedt (VMware) * temporarily. 128477ae365eSSteven Rostedt */ 128577ae365eSSteven Rostedt 128677ae365eSSteven Rostedt #define RB_PAGE_NORMAL 0UL 128777ae365eSSteven Rostedt #define RB_PAGE_HEAD 1UL 128877ae365eSSteven Rostedt #define RB_PAGE_UPDATE 2UL 128977ae365eSSteven Rostedt 129077ae365eSSteven Rostedt 129177ae365eSSteven Rostedt #define RB_FLAG_MASK 3UL 129277ae365eSSteven Rostedt 129377ae365eSSteven Rostedt /* PAGE_MOVED is not part of the mask */ 129477ae365eSSteven Rostedt #define RB_PAGE_MOVED 4UL 129577ae365eSSteven Rostedt 129677ae365eSSteven Rostedt /* 129777ae365eSSteven Rostedt * rb_list_head - remove any bit 129877ae365eSSteven Rostedt */ 129977ae365eSSteven Rostedt static struct list_head *rb_list_head(struct list_head *list) 130077ae365eSSteven Rostedt { 130177ae365eSSteven Rostedt unsigned long val = (unsigned long)list; 130277ae365eSSteven Rostedt 130377ae365eSSteven Rostedt return (struct list_head *)(val & ~RB_FLAG_MASK); 130477ae365eSSteven Rostedt } 130577ae365eSSteven Rostedt 130677ae365eSSteven Rostedt /* 13076d3f1e12SJiri Olsa * rb_is_head_page - test if the given page is the head page 130877ae365eSSteven Rostedt * 130977ae365eSSteven Rostedt * Because the reader may move the head_page pointer, we can 131077ae365eSSteven Rostedt * not trust what the head page is (it may be pointing to 131177ae365eSSteven Rostedt * the reader page). But if the next page is a header page, 131277ae365eSSteven Rostedt * its flags will be non-zero. 131377ae365eSSteven Rostedt */ 131442b16b3fSJesper Juhl static inline int 13156689bed3SQiujun Huang rb_is_head_page(struct buffer_page *page, struct list_head *list) 131677ae365eSSteven Rostedt { 131777ae365eSSteven Rostedt unsigned long val; 131877ae365eSSteven Rostedt 131977ae365eSSteven Rostedt val = (unsigned long)list->next; 132077ae365eSSteven Rostedt 132177ae365eSSteven Rostedt if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list) 132277ae365eSSteven Rostedt return RB_PAGE_MOVED; 132377ae365eSSteven Rostedt 132477ae365eSSteven Rostedt return val & RB_FLAG_MASK; 132577ae365eSSteven Rostedt } 132677ae365eSSteven Rostedt 132777ae365eSSteven Rostedt /* 132877ae365eSSteven Rostedt * rb_is_reader_page 132977ae365eSSteven Rostedt * 133077ae365eSSteven Rostedt * The unique thing about the reader page is that, if the 133177ae365eSSteven Rostedt * writer is ever on it, the previous pointer never points 133277ae365eSSteven Rostedt * back to the reader page. 133377ae365eSSteven Rostedt */ 133406ca3209SYaowei Bai static bool rb_is_reader_page(struct buffer_page *page) 133577ae365eSSteven Rostedt { 133677ae365eSSteven Rostedt struct list_head *list = page->list.prev; 133777ae365eSSteven Rostedt 133877ae365eSSteven Rostedt return rb_list_head(list->next) != &page->list; 133977ae365eSSteven Rostedt } 134077ae365eSSteven Rostedt 134177ae365eSSteven Rostedt /* 134277ae365eSSteven Rostedt * rb_set_list_to_head - set a list_head to be pointing to head. 
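 * A worked example with made-up addresses: if &head->list is
 * 0xffff888000001000, the previous page's ->next becomes
 * 0xffff888000001001 (RB_PAGE_HEAD in bit 0), and rb_list_head()
 * recovers the clean pointer by masking the flag bits back off:
 * 0xffff888000001001 & ~RB_FLAG_MASK == 0xffff888000001000.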
134377ae365eSSteven Rostedt */ 13446689bed3SQiujun Huang static void rb_set_list_to_head(struct list_head *list) 134577ae365eSSteven Rostedt { 134677ae365eSSteven Rostedt unsigned long *ptr; 134777ae365eSSteven Rostedt 134877ae365eSSteven Rostedt ptr = (unsigned long *)&list->next; 134977ae365eSSteven Rostedt *ptr |= RB_PAGE_HEAD; 135077ae365eSSteven Rostedt *ptr &= ~RB_PAGE_UPDATE; 135177ae365eSSteven Rostedt } 135277ae365eSSteven Rostedt 135377ae365eSSteven Rostedt /* 135477ae365eSSteven Rostedt * rb_head_page_activate - sets up head page 135577ae365eSSteven Rostedt */ 135677ae365eSSteven Rostedt static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) 135777ae365eSSteven Rostedt { 135877ae365eSSteven Rostedt struct buffer_page *head; 135977ae365eSSteven Rostedt 136077ae365eSSteven Rostedt head = cpu_buffer->head_page; 136177ae365eSSteven Rostedt if (!head) 136277ae365eSSteven Rostedt return; 136377ae365eSSteven Rostedt 136477ae365eSSteven Rostedt /* 136577ae365eSSteven Rostedt * Set the previous list pointer to have the HEAD flag. 136677ae365eSSteven Rostedt */ 13676689bed3SQiujun Huang rb_set_list_to_head(head->list.prev); 136877ae365eSSteven Rostedt } 136977ae365eSSteven Rostedt 137077ae365eSSteven Rostedt static void rb_list_head_clear(struct list_head *list) 137177ae365eSSteven Rostedt { 137277ae365eSSteven Rostedt unsigned long *ptr = (unsigned long *)&list->next; 137377ae365eSSteven Rostedt 137477ae365eSSteven Rostedt *ptr &= ~RB_FLAG_MASK; 137577ae365eSSteven Rostedt } 137677ae365eSSteven Rostedt 137777ae365eSSteven Rostedt /* 13786167c205SSteven Rostedt (VMware) * rb_head_page_deactivate - clears head page ptr (for free list) 137977ae365eSSteven Rostedt */ 138077ae365eSSteven Rostedt static void 138177ae365eSSteven Rostedt rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) 138277ae365eSSteven Rostedt { 138377ae365eSSteven Rostedt struct list_head *hd; 138477ae365eSSteven Rostedt 138577ae365eSSteven Rostedt /* Go through the whole list and clear any pointers found. 
*/ 138677ae365eSSteven Rostedt rb_list_head_clear(cpu_buffer->pages); 138777ae365eSSteven Rostedt 138877ae365eSSteven Rostedt list_for_each(hd, cpu_buffer->pages) 138977ae365eSSteven Rostedt rb_list_head_clear(hd); 139077ae365eSSteven Rostedt } 139177ae365eSSteven Rostedt 139277ae365eSSteven Rostedt static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, 139377ae365eSSteven Rostedt struct buffer_page *head, 139477ae365eSSteven Rostedt struct buffer_page *prev, 139577ae365eSSteven Rostedt int old_flag, int new_flag) 139677ae365eSSteven Rostedt { 139777ae365eSSteven Rostedt struct list_head *list; 139877ae365eSSteven Rostedt unsigned long val = (unsigned long)&head->list; 139977ae365eSSteven Rostedt unsigned long ret; 140077ae365eSSteven Rostedt 140177ae365eSSteven Rostedt list = &prev->list; 140277ae365eSSteven Rostedt 140377ae365eSSteven Rostedt val &= ~RB_FLAG_MASK; 140477ae365eSSteven Rostedt 140508a40816SSteven Rostedt ret = cmpxchg((unsigned long *)&list->next, 140677ae365eSSteven Rostedt val | old_flag, val | new_flag); 140777ae365eSSteven Rostedt 140877ae365eSSteven Rostedt /* check if the reader took the page */ 140977ae365eSSteven Rostedt if ((ret & ~RB_FLAG_MASK) != val) 141077ae365eSSteven Rostedt return RB_PAGE_MOVED; 141177ae365eSSteven Rostedt 141277ae365eSSteven Rostedt return ret & RB_FLAG_MASK; 141377ae365eSSteven Rostedt } 141477ae365eSSteven Rostedt 141577ae365eSSteven Rostedt static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, 141677ae365eSSteven Rostedt struct buffer_page *head, 141777ae365eSSteven Rostedt struct buffer_page *prev, 141877ae365eSSteven Rostedt int old_flag) 141977ae365eSSteven Rostedt { 142077ae365eSSteven Rostedt return rb_head_page_set(cpu_buffer, head, prev, 142177ae365eSSteven Rostedt old_flag, RB_PAGE_UPDATE); 142277ae365eSSteven Rostedt } 142377ae365eSSteven Rostedt 142477ae365eSSteven Rostedt static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, 142577ae365eSSteven Rostedt struct buffer_page *head, 142677ae365eSSteven Rostedt struct buffer_page *prev, 142777ae365eSSteven Rostedt int old_flag) 142877ae365eSSteven Rostedt { 142977ae365eSSteven Rostedt return rb_head_page_set(cpu_buffer, head, prev, 143077ae365eSSteven Rostedt old_flag, RB_PAGE_HEAD); 143177ae365eSSteven Rostedt } 143277ae365eSSteven Rostedt 143377ae365eSSteven Rostedt static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, 143477ae365eSSteven Rostedt struct buffer_page *head, 143577ae365eSSteven Rostedt struct buffer_page *prev, 143677ae365eSSteven Rostedt int old_flag) 143777ae365eSSteven Rostedt { 143877ae365eSSteven Rostedt return rb_head_page_set(cpu_buffer, head, prev, 143977ae365eSSteven Rostedt old_flag, RB_PAGE_NORMAL); 144077ae365eSSteven Rostedt } 144177ae365eSSteven Rostedt 14426689bed3SQiujun Huang static inline void rb_inc_page(struct buffer_page **bpage) 144377ae365eSSteven Rostedt { 144477ae365eSSteven Rostedt struct list_head *p = rb_list_head((*bpage)->list.next); 144577ae365eSSteven Rostedt 144677ae365eSSteven Rostedt *bpage = list_entry(p, struct buffer_page, list); 144777ae365eSSteven Rostedt } 144877ae365eSSteven Rostedt 144977ae365eSSteven Rostedt static struct buffer_page * 145077ae365eSSteven Rostedt rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) 145177ae365eSSteven Rostedt { 145277ae365eSSteven Rostedt struct buffer_page *head; 145377ae365eSSteven Rostedt struct buffer_page *page; 145477ae365eSSteven Rostedt struct list_head *list; 145577ae365eSSteven Rostedt int i; 
145677ae365eSSteven Rostedt 145777ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) 145877ae365eSSteven Rostedt return NULL; 145977ae365eSSteven Rostedt 146077ae365eSSteven Rostedt /* sanity check */ 146177ae365eSSteven Rostedt list = cpu_buffer->pages; 146277ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) 146377ae365eSSteven Rostedt return NULL; 146477ae365eSSteven Rostedt 146577ae365eSSteven Rostedt page = head = cpu_buffer->head_page; 146677ae365eSSteven Rostedt /* 146777ae365eSSteven Rostedt * It is possible that the writer moves the header behind 146877ae365eSSteven Rostedt * where we started, and we miss in one loop. 146977ae365eSSteven Rostedt * A second loop should grab the header, but we'll do 147077ae365eSSteven Rostedt * three loops just because I'm paranoid. 147177ae365eSSteven Rostedt */ 147277ae365eSSteven Rostedt for (i = 0; i < 3; i++) { 147377ae365eSSteven Rostedt do { 14746689bed3SQiujun Huang if (rb_is_head_page(page, page->list.prev)) { 147577ae365eSSteven Rostedt cpu_buffer->head_page = page; 147677ae365eSSteven Rostedt return page; 147777ae365eSSteven Rostedt } 14786689bed3SQiujun Huang rb_inc_page(&page); 147977ae365eSSteven Rostedt } while (page != head); 148077ae365eSSteven Rostedt } 148177ae365eSSteven Rostedt 148277ae365eSSteven Rostedt RB_WARN_ON(cpu_buffer, 1); 148377ae365eSSteven Rostedt 148477ae365eSSteven Rostedt return NULL; 148577ae365eSSteven Rostedt } 148677ae365eSSteven Rostedt 1487bc92b956SUros Bizjak static bool rb_head_page_replace(struct buffer_page *old, 148877ae365eSSteven Rostedt struct buffer_page *new) 148977ae365eSSteven Rostedt { 149077ae365eSSteven Rostedt unsigned long *ptr = (unsigned long *)&old->list.prev->next; 149177ae365eSSteven Rostedt unsigned long val; 149277ae365eSSteven Rostedt 149377ae365eSSteven Rostedt val = *ptr & ~RB_FLAG_MASK; 149477ae365eSSteven Rostedt val |= RB_PAGE_HEAD; 149577ae365eSSteven Rostedt 1496*00a8478fSUros Bizjak return try_cmpxchg(ptr, &val, (unsigned long)&new->list); 149777ae365eSSteven Rostedt } 149877ae365eSSteven Rostedt 149977ae365eSSteven Rostedt /* 150077ae365eSSteven Rostedt * rb_tail_page_update - move the tail page forward 150177ae365eSSteven Rostedt */ 150270004986SSteven Rostedt (Red Hat) static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, 150377ae365eSSteven Rostedt struct buffer_page *tail_page, 150477ae365eSSteven Rostedt struct buffer_page *next_page) 150577ae365eSSteven Rostedt { 150677ae365eSSteven Rostedt unsigned long old_entries; 150777ae365eSSteven Rostedt unsigned long old_write; 150877ae365eSSteven Rostedt 150977ae365eSSteven Rostedt /* 151077ae365eSSteven Rostedt * The tail page now needs to be moved forward. 151177ae365eSSteven Rostedt * 151277ae365eSSteven Rostedt * We need to reset the tail page, but without messing 151377ae365eSSteven Rostedt * with possible erasing of data brought in by interrupts 151477ae365eSSteven Rostedt * that have moved the tail page and are currently on it. 151577ae365eSSteven Rostedt * 151677ae365eSSteven Rostedt * We add a counter to the write field to denote this. 
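 * Concretely (assuming the usual layout defined earlier in this file,
 * with RB_WRITE_MASK covering the low 20 bits and RB_WRITE_INTCNT at
 * bit 20): a write value of 0x120 that sees two nested tail-page moves
 * becomes 0x200120, and rb_page_write() masks the counter back off:
 * 0x200120 & RB_WRITE_MASK == 0x120.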
151777ae365eSSteven Rostedt */ 151877ae365eSSteven Rostedt old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write); 151977ae365eSSteven Rostedt old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); 152077ae365eSSteven Rostedt 15212c2b0a78SSteven Rostedt (VMware) local_inc(&cpu_buffer->pages_touched); 152277ae365eSSteven Rostedt /* 152377ae365eSSteven Rostedt * Just make sure we have seen our old_write and synchronize 152477ae365eSSteven Rostedt * with any interrupts that come in. 152577ae365eSSteven Rostedt */ 152677ae365eSSteven Rostedt barrier(); 152777ae365eSSteven Rostedt 152877ae365eSSteven Rostedt /* 152977ae365eSSteven Rostedt * If the tail page is still the same as what we think 153077ae365eSSteven Rostedt * it is, then it is up to us to update the tail 153177ae365eSSteven Rostedt * pointer. 153277ae365eSSteven Rostedt */ 15338573636eSSteven Rostedt (Red Hat) if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { 153477ae365eSSteven Rostedt /* Zero the write counter */ 153577ae365eSSteven Rostedt unsigned long val = old_write & ~RB_WRITE_MASK; 153677ae365eSSteven Rostedt unsigned long eval = old_entries & ~RB_WRITE_MASK; 153777ae365eSSteven Rostedt 153877ae365eSSteven Rostedt /* 153977ae365eSSteven Rostedt * This will only succeed if an interrupt did 154077ae365eSSteven Rostedt * not come in and change it. In which case, we 154177ae365eSSteven Rostedt * do not want to modify it. 1542da706d8bSLai Jiangshan * 1543da706d8bSLai Jiangshan * We add (void) to let the compiler know that we do not care 1544da706d8bSLai Jiangshan * about the return value of these functions. We use the 1545da706d8bSLai Jiangshan * cmpxchg to only update if an interrupt did not already 1546da706d8bSLai Jiangshan * do it for us. If the cmpxchg fails, we don't care. 154777ae365eSSteven Rostedt */ 1548da706d8bSLai Jiangshan (void)local_cmpxchg(&next_page->write, old_write, val); 1549da706d8bSLai Jiangshan (void)local_cmpxchg(&next_page->entries, old_entries, eval); 155077ae365eSSteven Rostedt 155177ae365eSSteven Rostedt /* 155277ae365eSSteven Rostedt * No need to worry about races with clearing out the commit. 155377ae365eSSteven Rostedt * It can only increment when a commit takes place. But that 155477ae365eSSteven Rostedt * only happens in the outermost nested commit. 155577ae365eSSteven Rostedt */ 155677ae365eSSteven Rostedt local_set(&next_page->page->commit, 0); 155777ae365eSSteven Rostedt 155870004986SSteven Rostedt (Red Hat) /* Again, either we update tail_page or an interrupt does */ 155970004986SSteven Rostedt (Red Hat) (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page); 156077ae365eSSteven Rostedt } 156177ae365eSSteven Rostedt } 156277ae365eSSteven Rostedt 1563b4b55dfdSUros Bizjak static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, 156477ae365eSSteven Rostedt struct buffer_page *bpage) 156577ae365eSSteven Rostedt { 156677ae365eSSteven Rostedt unsigned long val = (unsigned long)bpage; 156777ae365eSSteven Rostedt 1568b4b55dfdSUros Bizjak RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK); 156977ae365eSSteven Rostedt } 157077ae365eSSteven Rostedt 157177ae365eSSteven Rostedt /** 1572d611851bSzhangwei(Jovi) * rb_check_pages - integrity check of buffer pages 15737a8e76a3SSteven Rostedt * @cpu_buffer: CPU buffer with pages to test 15747a8e76a3SSteven Rostedt * 1575c3706f00SWenji Huang * As a safety measure we check to make sure the data pages have not 15767a8e76a3SSteven Rostedt * been corrupted. 
15777a8e76a3SSteven Rostedt */ 1578b4b55dfdSUros Bizjak static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) 15797a8e76a3SSteven Rostedt { 15808843e06fSMukesh Ojha struct list_head *head = rb_list_head(cpu_buffer->pages); 15818843e06fSMukesh Ojha struct list_head *tmp; 15827a8e76a3SSteven Rostedt 15833e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 15848843e06fSMukesh Ojha rb_list_head(rb_list_head(head->next)->prev) != head)) 1585b4b55dfdSUros Bizjak return; 15868843e06fSMukesh Ojha 15873e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 15888843e06fSMukesh Ojha rb_list_head(rb_list_head(head->prev)->next) != head)) 1589b4b55dfdSUros Bizjak return; 15908843e06fSMukesh Ojha 15918843e06fSMukesh Ojha for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) { 15928843e06fSMukesh Ojha if (RB_WARN_ON(cpu_buffer, 15938843e06fSMukesh Ojha rb_list_head(rb_list_head(tmp->next)->prev) != tmp)) 1594b4b55dfdSUros Bizjak return; 15958843e06fSMukesh Ojha 15968843e06fSMukesh Ojha if (RB_WARN_ON(cpu_buffer, 15978843e06fSMukesh Ojha rb_list_head(rb_list_head(tmp->prev)->next) != tmp)) 1598b4b55dfdSUros Bizjak return; 15997a8e76a3SSteven Rostedt } 16007a8e76a3SSteven Rostedt } 16017a8e76a3SSteven Rostedt 160274e2afc6SQiujun Huang static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, 160374e2afc6SQiujun Huang long nr_pages, struct list_head *pages) 16047a8e76a3SSteven Rostedt { 1605044fa782SSteven Rostedt struct buffer_page *bpage, *tmp; 1606927e56dbSSteven Rostedt (VMware) bool user_thread = current->mm != NULL; 1607927e56dbSSteven Rostedt (VMware) gfp_t mflags; 16089b94a8fbSSteven Rostedt (Red Hat) long i; 16093adc54faSSteven Rostedt 1610927e56dbSSteven Rostedt (VMware) /* 1611927e56dbSSteven Rostedt (VMware) * Check if the available memory is there first. 1612927e56dbSSteven Rostedt (VMware) * Note, si_mem_available() only gives us a rough estimate of available 1613927e56dbSSteven Rostedt (VMware) * memory. It may not be accurate. But we don't care, we just want 1614927e56dbSSteven Rostedt (VMware) * to prevent doing any allocation when it is obvious that it is 1615927e56dbSSteven Rostedt (VMware) * not going to succeed. 1616927e56dbSSteven Rostedt (VMware) */ 16172a872fa4SSteven Rostedt (VMware) i = si_mem_available(); 16182a872fa4SSteven Rostedt (VMware) if (i < nr_pages) 16192a872fa4SSteven Rostedt (VMware) return -ENOMEM; 16202a872fa4SSteven Rostedt (VMware) 1621d7ec4bfeSVaibhav Nagarnaik /* 162284861885SJoel Fernandes * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails 162384861885SJoel Fernandes * gracefully without invoking oom-killer and the system is not 162484861885SJoel Fernandes * destabilized. 1625d7ec4bfeSVaibhav Nagarnaik */ 1626927e56dbSSteven Rostedt (VMware) mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL; 1627927e56dbSSteven Rostedt (VMware) 1628927e56dbSSteven Rostedt (VMware) /* 1629927e56dbSSteven Rostedt (VMware) * A user thread may allocate too much while si_mem_available() 1630927e56dbSSteven Rostedt (VMware) * reports that there is enough memory, even though there is not. 1631927e56dbSSteven Rostedt (VMware) * Make sure the OOM killer kills this thread. This can happen 1632927e56dbSSteven Rostedt (VMware) * even with RETRY_MAYFAIL because another task may be doing 1633927e56dbSSteven Rostedt (VMware) * an allocation after this task has taken all memory. 
1634927e56dbSSteven Rostedt (VMware) * This is the task the OOM killer needs to take out during this 1635927e56dbSSteven Rostedt (VMware) * loop, even if it was triggered by an allocation somewhere else. 1636927e56dbSSteven Rostedt (VMware) */ 1637927e56dbSSteven Rostedt (VMware) if (user_thread) 1638927e56dbSSteven Rostedt (VMware) set_current_oom_origin(); 1639927e56dbSSteven Rostedt (VMware) for (i = 0; i < nr_pages; i++) { 1640927e56dbSSteven Rostedt (VMware) struct page *page; 1641927e56dbSSteven Rostedt (VMware) 1642044fa782SSteven Rostedt bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 164374e2afc6SQiujun Huang mflags, cpu_to_node(cpu_buffer->cpu)); 1644044fa782SSteven Rostedt if (!bpage) 1645e4c2ce82SSteven Rostedt goto free_pages; 164677ae365eSSteven Rostedt 164774e2afc6SQiujun Huang rb_check_bpage(cpu_buffer, bpage); 164874e2afc6SQiujun Huang 1649438ced17SVaibhav Nagarnaik list_add(&bpage->list, pages); 165077ae365eSSteven Rostedt 165174e2afc6SQiujun Huang page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0); 16527ea59064SVaibhav Nagarnaik if (!page) 16537a8e76a3SSteven Rostedt goto free_pages; 16547ea59064SVaibhav Nagarnaik bpage->page = page_address(page); 1655044fa782SSteven Rostedt rb_init_page(bpage->page); 1656927e56dbSSteven Rostedt (VMware) 1657927e56dbSSteven Rostedt (VMware) if (user_thread && fatal_signal_pending(current)) 1658927e56dbSSteven Rostedt (VMware) goto free_pages; 16597a8e76a3SSteven Rostedt } 1660927e56dbSSteven Rostedt (VMware) if (user_thread) 1661927e56dbSSteven Rostedt (VMware) clear_current_oom_origin(); 16627a8e76a3SSteven Rostedt 1663438ced17SVaibhav Nagarnaik return 0; 1664438ced17SVaibhav Nagarnaik 1665438ced17SVaibhav Nagarnaik free_pages: 1666438ced17SVaibhav Nagarnaik list_for_each_entry_safe(bpage, tmp, pages, list) { 1667438ced17SVaibhav Nagarnaik list_del_init(&bpage->list); 1668438ced17SVaibhav Nagarnaik free_buffer_page(bpage); 1669438ced17SVaibhav Nagarnaik } 1670927e56dbSSteven Rostedt (VMware) if (user_thread) 1671927e56dbSSteven Rostedt (VMware) clear_current_oom_origin(); 1672438ced17SVaibhav Nagarnaik 1673438ced17SVaibhav Nagarnaik return -ENOMEM; 1674438ced17SVaibhav Nagarnaik } 1675438ced17SVaibhav Nagarnaik 1676438ced17SVaibhav Nagarnaik static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, 16779b94a8fbSSteven Rostedt (Red Hat) unsigned long nr_pages) 1678438ced17SVaibhav Nagarnaik { 1679438ced17SVaibhav Nagarnaik LIST_HEAD(pages); 1680438ced17SVaibhav Nagarnaik 1681438ced17SVaibhav Nagarnaik WARN_ON(!nr_pages); 1682438ced17SVaibhav Nagarnaik 168374e2afc6SQiujun Huang if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages)) 1684438ced17SVaibhav Nagarnaik return -ENOMEM; 1685438ced17SVaibhav Nagarnaik 16863adc54faSSteven Rostedt /* 16873adc54faSSteven Rostedt * The ring buffer page list is a circular list that does not 16883adc54faSSteven Rostedt * start and end with a list head. All page list items point to 16893adc54faSSteven Rostedt * other pages. 
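 * For example, with three pages A, B and C the result is the cycle
 * A->B->C->A (and back again via ->prev); cpu_buffer->pages simply
 * points at one of the elements, so a traversal walks ->next until it
 * gets back to the page it started from, rather than looking for a
 * list head.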
16903adc54faSSteven Rostedt */ 16913adc54faSSteven Rostedt cpu_buffer->pages = pages.next; 16923adc54faSSteven Rostedt list_del(&pages); 16937a8e76a3SSteven Rostedt 1694438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages = nr_pages; 1695438ced17SVaibhav Nagarnaik 16967a8e76a3SSteven Rostedt rb_check_pages(cpu_buffer); 16977a8e76a3SSteven Rostedt 16987a8e76a3SSteven Rostedt return 0; 16997a8e76a3SSteven Rostedt } 17007a8e76a3SSteven Rostedt 17017a8e76a3SSteven Rostedt static struct ring_buffer_per_cpu * 170213292494SSteven Rostedt (VMware) rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu) 17037a8e76a3SSteven Rostedt { 17047a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 1705044fa782SSteven Rostedt struct buffer_page *bpage; 17067ea59064SVaibhav Nagarnaik struct page *page; 17077a8e76a3SSteven Rostedt int ret; 17087a8e76a3SSteven Rostedt 17097a8e76a3SSteven Rostedt cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), 17107a8e76a3SSteven Rostedt GFP_KERNEL, cpu_to_node(cpu)); 17117a8e76a3SSteven Rostedt if (!cpu_buffer) 17127a8e76a3SSteven Rostedt return NULL; 17137a8e76a3SSteven Rostedt 17147a8e76a3SSteven Rostedt cpu_buffer->cpu = cpu; 17157a8e76a3SSteven Rostedt cpu_buffer->buffer = buffer; 17165389f6faSThomas Gleixner raw_spin_lock_init(&cpu_buffer->reader_lock); 17171f8a6a10SPeter Zijlstra lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); 1718edc35bd7SThomas Gleixner cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 171983f40318SVaibhav Nagarnaik INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); 172005fdd70dSVaibhav Nagarnaik init_completion(&cpu_buffer->update_done); 172115693458SSteven Rostedt (Red Hat) init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); 1722f1dc6725SSteven Rostedt (Red Hat) init_waitqueue_head(&cpu_buffer->irq_work.waiters); 17231e0d6714SSteven Rostedt (Red Hat) init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); 17247a8e76a3SSteven Rostedt 1725044fa782SSteven Rostedt bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 1726e4c2ce82SSteven Rostedt GFP_KERNEL, cpu_to_node(cpu)); 1727044fa782SSteven Rostedt if (!bpage) 1728e4c2ce82SSteven Rostedt goto fail_free_buffer; 1729e4c2ce82SSteven Rostedt 173077ae365eSSteven Rostedt rb_check_bpage(cpu_buffer, bpage); 173177ae365eSSteven Rostedt 1732044fa782SSteven Rostedt cpu_buffer->reader_page = bpage; 17337ea59064SVaibhav Nagarnaik page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0); 17347ea59064SVaibhav Nagarnaik if (!page) 1735e4c2ce82SSteven Rostedt goto fail_free_reader; 17367ea59064SVaibhav Nagarnaik bpage->page = page_address(page); 1737044fa782SSteven Rostedt rb_init_page(bpage->page); 1738e4c2ce82SSteven Rostedt 1739d769041fSSteven Rostedt INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 174044b99462SVaibhav Nagarnaik INIT_LIST_HEAD(&cpu_buffer->new_pages); 1741d769041fSSteven Rostedt 1742438ced17SVaibhav Nagarnaik ret = rb_allocate_pages(cpu_buffer, nr_pages); 17437a8e76a3SSteven Rostedt if (ret < 0) 1744d769041fSSteven Rostedt goto fail_free_reader; 17457a8e76a3SSteven Rostedt 17467a8e76a3SSteven Rostedt cpu_buffer->head_page 17473adc54faSSteven Rostedt = list_entry(cpu_buffer->pages, struct buffer_page, list); 1748bf41a158SSteven Rostedt cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; 17497a8e76a3SSteven Rostedt 175077ae365eSSteven Rostedt rb_head_page_activate(cpu_buffer); 175177ae365eSSteven Rostedt 17527a8e76a3SSteven Rostedt return cpu_buffer; 
17537a8e76a3SSteven Rostedt 1754d769041fSSteven Rostedt fail_free_reader: 1755d769041fSSteven Rostedt free_buffer_page(cpu_buffer->reader_page); 1756d769041fSSteven Rostedt 17577a8e76a3SSteven Rostedt fail_free_buffer: 17587a8e76a3SSteven Rostedt kfree(cpu_buffer); 17597a8e76a3SSteven Rostedt return NULL; 17607a8e76a3SSteven Rostedt } 17617a8e76a3SSteven Rostedt 17627a8e76a3SSteven Rostedt static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 17637a8e76a3SSteven Rostedt { 17643adc54faSSteven Rostedt struct list_head *head = cpu_buffer->pages; 1765044fa782SSteven Rostedt struct buffer_page *bpage, *tmp; 17667a8e76a3SSteven Rostedt 1767675751bbSJohannes Berg irq_work_sync(&cpu_buffer->irq_work.work); 1768675751bbSJohannes Berg 1769d769041fSSteven Rostedt free_buffer_page(cpu_buffer->reader_page); 1770d769041fSSteven Rostedt 177156f4ca0aSDaniil Tatianin if (head) { 177277ae365eSSteven Rostedt rb_head_page_deactivate(cpu_buffer); 177377ae365eSSteven Rostedt 1774044fa782SSteven Rostedt list_for_each_entry_safe(bpage, tmp, head, list) { 1775044fa782SSteven Rostedt list_del_init(&bpage->list); 1776044fa782SSteven Rostedt free_buffer_page(bpage); 17777a8e76a3SSteven Rostedt } 17783adc54faSSteven Rostedt bpage = list_entry(head, struct buffer_page, list); 17793adc54faSSteven Rostedt free_buffer_page(bpage); 17803adc54faSSteven Rostedt } 17813adc54faSSteven Rostedt 17827a8e76a3SSteven Rostedt kfree(cpu_buffer); 17837a8e76a3SSteven Rostedt } 17847a8e76a3SSteven Rostedt 17857a8e76a3SSteven Rostedt /** 1786d611851bSzhangwei(Jovi) * __ring_buffer_alloc - allocate a new ring_buffer 178768814b58SRobert Richter * @size: the size in bytes per cpu that is needed. 17887a8e76a3SSteven Rostedt * @flags: attributes to set for the ring buffer. 178959e7cffeSFabian Frederick * @key: ring buffer reader_lock_key. 17907a8e76a3SSteven Rostedt * 17917a8e76a3SSteven Rostedt * Currently the only flag that is available is the RB_FL_OVERWRITE 17927a8e76a3SSteven Rostedt * flag. This flag means that the buffer will overwrite old data 17937a8e76a3SSteven Rostedt * when the buffer wraps. If this flag is not set, the buffer will 17947a8e76a3SSteven Rostedt * drop data when the tail hits the head. 
17957a8e76a3SSteven Rostedt */ 179613292494SSteven Rostedt (VMware) struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, 17971f8a6a10SPeter Zijlstra struct lock_class_key *key) 17987a8e76a3SSteven Rostedt { 179913292494SSteven Rostedt (VMware) struct trace_buffer *buffer; 18009b94a8fbSSteven Rostedt (Red Hat) long nr_pages; 18017a8e76a3SSteven Rostedt int bsize; 18029b94a8fbSSteven Rostedt (Red Hat) int cpu; 1803b32614c0SSebastian Andrzej Siewior int ret; 18047a8e76a3SSteven Rostedt 18057a8e76a3SSteven Rostedt /* keep it in its own cache line */ 18067a8e76a3SSteven Rostedt buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), 18077a8e76a3SSteven Rostedt GFP_KERNEL); 18087a8e76a3SSteven Rostedt if (!buffer) 18097a8e76a3SSteven Rostedt return NULL; 18107a8e76a3SSteven Rostedt 1811b18cc3deSSebastian Andrzej Siewior if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) 18129e01c1b7SRusty Russell goto fail_free_buffer; 18139e01c1b7SRusty Russell 1814438ced17SVaibhav Nagarnaik nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 18157a8e76a3SSteven Rostedt buffer->flags = flags; 181637886f6aSSteven Rostedt buffer->clock = trace_clock_local; 18171f8a6a10SPeter Zijlstra buffer->reader_lock_key = key; 18187a8e76a3SSteven Rostedt 181915693458SSteven Rostedt (Red Hat) init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters); 1820f1dc6725SSteven Rostedt (Red Hat) init_waitqueue_head(&buffer->irq_work.waiters); 182115693458SSteven Rostedt (Red Hat) 18227a8e76a3SSteven Rostedt /* need at least two pages */ 1823438ced17SVaibhav Nagarnaik if (nr_pages < 2) 1824438ced17SVaibhav Nagarnaik nr_pages = 2; 18257a8e76a3SSteven Rostedt 18267a8e76a3SSteven Rostedt buffer->cpus = nr_cpu_ids; 18277a8e76a3SSteven Rostedt 18287a8e76a3SSteven Rostedt bsize = sizeof(void *) * nr_cpu_ids; 18297a8e76a3SSteven Rostedt buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), 18307a8e76a3SSteven Rostedt GFP_KERNEL); 18317a8e76a3SSteven Rostedt if (!buffer->buffers) 18329e01c1b7SRusty Russell goto fail_free_cpumask; 18337a8e76a3SSteven Rostedt 1834b32614c0SSebastian Andrzej Siewior cpu = raw_smp_processor_id(); 1835b32614c0SSebastian Andrzej Siewior cpumask_set_cpu(cpu, buffer->cpumask); 1836b32614c0SSebastian Andrzej Siewior buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); 18377a8e76a3SSteven Rostedt if (!buffer->buffers[cpu]) 18387a8e76a3SSteven Rostedt goto fail_free_buffers; 18397a8e76a3SSteven Rostedt 1840b32614c0SSebastian Andrzej Siewior ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); 1841b32614c0SSebastian Andrzej Siewior if (ret < 0) 1842b32614c0SSebastian Andrzej Siewior goto fail_free_buffers; 1843554f786eSSteven Rostedt 18447a8e76a3SSteven Rostedt mutex_init(&buffer->mutex); 18457a8e76a3SSteven Rostedt 18467a8e76a3SSteven Rostedt return buffer; 18477a8e76a3SSteven Rostedt 18487a8e76a3SSteven Rostedt fail_free_buffers: 18497a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 18507a8e76a3SSteven Rostedt if (buffer->buffers[cpu]) 18517a8e76a3SSteven Rostedt rb_free_cpu_buffer(buffer->buffers[cpu]); 18527a8e76a3SSteven Rostedt } 18537a8e76a3SSteven Rostedt kfree(buffer->buffers); 18547a8e76a3SSteven Rostedt 18559e01c1b7SRusty Russell fail_free_cpumask: 18569e01c1b7SRusty Russell free_cpumask_var(buffer->cpumask); 18579e01c1b7SRusty Russell 18587a8e76a3SSteven Rostedt fail_free_buffer: 18597a8e76a3SSteven Rostedt kfree(buffer); 18607a8e76a3SSteven Rostedt return NULL; 18617a8e76a3SSteven Rostedt } 18621f8a6a10SPeter Zijlstra 
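/*
 * Example usage (a sketch, not taken from this file): callers normally
 * go through the ring_buffer_alloc() wrapper in <linux/ring_buffer.h>,
 * which supplies the lock_class_key:
 *
 *	struct trace_buffer *buf;
 *
 *	buf = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buf);
 *
 * With 4K pages, BUF_PAGE_SIZE is PAGE_SIZE minus the buffer page
 * header (roughly 4080 bytes), so the 1 MiB request above maps to
 * DIV_ROUND_UP(1048576, 4080) = 258 pages per CPU (illustrative
 * numbers, assuming a 16-byte page header).
 */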
EXPORT_SYMBOL_GPL(__ring_buffer_alloc); 18637a8e76a3SSteven Rostedt 18647a8e76a3SSteven Rostedt /** 18657a8e76a3SSteven Rostedt * ring_buffer_free - free a ring buffer. 18667a8e76a3SSteven Rostedt * @buffer: the buffer to free. 18677a8e76a3SSteven Rostedt */ 18687a8e76a3SSteven Rostedt void 186913292494SSteven Rostedt (VMware) ring_buffer_free(struct trace_buffer *buffer) 18707a8e76a3SSteven Rostedt { 18717a8e76a3SSteven Rostedt int cpu; 18727a8e76a3SSteven Rostedt 1873b32614c0SSebastian Andrzej Siewior cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node); 1874554f786eSSteven Rostedt 1875675751bbSJohannes Berg irq_work_sync(&buffer->irq_work.work); 1876675751bbSJohannes Berg 18777a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) 18787a8e76a3SSteven Rostedt rb_free_cpu_buffer(buffer->buffers[cpu]); 18797a8e76a3SSteven Rostedt 1880bd3f0221SEric Dumazet kfree(buffer->buffers); 18819e01c1b7SRusty Russell free_cpumask_var(buffer->cpumask); 18829e01c1b7SRusty Russell 18837a8e76a3SSteven Rostedt kfree(buffer); 18847a8e76a3SSteven Rostedt } 1885c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_free); 18867a8e76a3SSteven Rostedt 188713292494SSteven Rostedt (VMware) void ring_buffer_set_clock(struct trace_buffer *buffer, 188837886f6aSSteven Rostedt u64 (*clock)(void)) 188937886f6aSSteven Rostedt { 189037886f6aSSteven Rostedt buffer->clock = clock; 189137886f6aSSteven Rostedt } 189237886f6aSSteven Rostedt 189313292494SSteven Rostedt (VMware) void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs) 189400b41452STom Zanussi { 189500b41452STom Zanussi buffer->time_stamp_abs = abs; 189600b41452STom Zanussi } 189700b41452STom Zanussi 189813292494SSteven Rostedt (VMware) bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer) 189900b41452STom Zanussi { 190000b41452STom Zanussi return buffer->time_stamp_abs; 190100b41452STom Zanussi } 190200b41452STom Zanussi 19037a8e76a3SSteven Rostedt static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); 19047a8e76a3SSteven Rostedt 190583f40318SVaibhav Nagarnaik static inline unsigned long rb_page_entries(struct buffer_page *bpage) 19067a8e76a3SSteven Rostedt { 190783f40318SVaibhav Nagarnaik return local_read(&bpage->entries) & RB_WRITE_MASK; 190883f40318SVaibhav Nagarnaik } 190983f40318SVaibhav Nagarnaik 191083f40318SVaibhav Nagarnaik static inline unsigned long rb_page_write(struct buffer_page *bpage) 191183f40318SVaibhav Nagarnaik { 191283f40318SVaibhav Nagarnaik return local_read(&bpage->write) & RB_WRITE_MASK; 191383f40318SVaibhav Nagarnaik } 191483f40318SVaibhav Nagarnaik 1915bc92b956SUros Bizjak static bool 19169b94a8fbSSteven Rostedt (Red Hat) rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) 191783f40318SVaibhav Nagarnaik { 191883f40318SVaibhav Nagarnaik struct list_head *tail_page, *to_remove, *next_page; 191983f40318SVaibhav Nagarnaik struct buffer_page *to_remove_page, *tmp_iter_page; 192083f40318SVaibhav Nagarnaik struct buffer_page *last_page, *first_page; 19219b94a8fbSSteven Rostedt (Red Hat) unsigned long nr_removed; 192283f40318SVaibhav Nagarnaik unsigned long head_bit; 192383f40318SVaibhav Nagarnaik int page_entries; 192483f40318SVaibhav Nagarnaik 192583f40318SVaibhav Nagarnaik head_bit = 0; 19267a8e76a3SSteven Rostedt 19275389f6faSThomas Gleixner raw_spin_lock_irq(&cpu_buffer->reader_lock); 192883f40318SVaibhav Nagarnaik atomic_inc(&cpu_buffer->record_disabled); 192983f40318SVaibhav Nagarnaik /* 193083f40318SVaibhav Nagarnaik * We don't race with the readers since 
we have acquired the reader 193183f40318SVaibhav Nagarnaik * lock. We also don't race with writers after disabling recording. 193283f40318SVaibhav Nagarnaik * This makes it easy to figure out the first and the last page to be 193383f40318SVaibhav Nagarnaik * removed from the list. We unlink all the pages in between including 193483f40318SVaibhav Nagarnaik * the first and last pages. This is done in a busy loop so that we 193583f40318SVaibhav Nagarnaik * lose the least number of traces. 193683f40318SVaibhav Nagarnaik * The pages are freed after we restart recording and unlock readers. 193783f40318SVaibhav Nagarnaik */ 193883f40318SVaibhav Nagarnaik tail_page = &cpu_buffer->tail_page->list; 193977ae365eSSteven Rostedt 194083f40318SVaibhav Nagarnaik /* 194183f40318SVaibhav Nagarnaik * The tail page might be on the reader page; in that case we remove 194283f40318SVaibhav Nagarnaik * the next page from the ring buffer instead 194383f40318SVaibhav Nagarnaik */ 194483f40318SVaibhav Nagarnaik if (cpu_buffer->tail_page == cpu_buffer->reader_page) 194583f40318SVaibhav Nagarnaik tail_page = rb_list_head(tail_page->next); 194683f40318SVaibhav Nagarnaik to_remove = tail_page; 194783f40318SVaibhav Nagarnaik 194883f40318SVaibhav Nagarnaik /* start of pages to remove */ 194983f40318SVaibhav Nagarnaik first_page = list_entry(rb_list_head(to_remove->next), 195083f40318SVaibhav Nagarnaik struct buffer_page, list); 195183f40318SVaibhav Nagarnaik 195283f40318SVaibhav Nagarnaik for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) { 195383f40318SVaibhav Nagarnaik to_remove = rb_list_head(to_remove)->next; 195483f40318SVaibhav Nagarnaik head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD; 19557a8e76a3SSteven Rostedt } 19562d093282SZheng Yejian /* Read iterators need to reset themselves when some pages are removed */ 19572d093282SZheng Yejian cpu_buffer->pages_removed += nr_removed; 19587a8e76a3SSteven Rostedt 195983f40318SVaibhav Nagarnaik next_page = rb_list_head(to_remove)->next; 19607a8e76a3SSteven Rostedt 196183f40318SVaibhav Nagarnaik /* 196283f40318SVaibhav Nagarnaik * Now we remove all pages between tail_page and next_page. 
196383f40318SVaibhav Nagarnaik * Make sure that we have head_bit value preserved for the 196483f40318SVaibhav Nagarnaik * next page 196583f40318SVaibhav Nagarnaik */ 196683f40318SVaibhav Nagarnaik tail_page->next = (struct list_head *)((unsigned long)next_page | 196783f40318SVaibhav Nagarnaik head_bit); 196883f40318SVaibhav Nagarnaik next_page = rb_list_head(next_page); 196983f40318SVaibhav Nagarnaik next_page->prev = tail_page; 197083f40318SVaibhav Nagarnaik 197183f40318SVaibhav Nagarnaik /* make sure pages points to a valid page in the ring buffer */ 197283f40318SVaibhav Nagarnaik cpu_buffer->pages = next_page; 197383f40318SVaibhav Nagarnaik 197483f40318SVaibhav Nagarnaik /* update head page */ 197583f40318SVaibhav Nagarnaik if (head_bit) 197683f40318SVaibhav Nagarnaik cpu_buffer->head_page = list_entry(next_page, 197783f40318SVaibhav Nagarnaik struct buffer_page, list); 197883f40318SVaibhav Nagarnaik 197983f40318SVaibhav Nagarnaik /* pages are removed, resume tracing and then free the pages */ 198083f40318SVaibhav Nagarnaik atomic_dec(&cpu_buffer->record_disabled); 19815389f6faSThomas Gleixner raw_spin_unlock_irq(&cpu_buffer->reader_lock); 198283f40318SVaibhav Nagarnaik 198383f40318SVaibhav Nagarnaik RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); 198483f40318SVaibhav Nagarnaik 198583f40318SVaibhav Nagarnaik /* last buffer page to remove */ 198683f40318SVaibhav Nagarnaik last_page = list_entry(rb_list_head(to_remove), struct buffer_page, 198783f40318SVaibhav Nagarnaik list); 198883f40318SVaibhav Nagarnaik tmp_iter_page = first_page; 198983f40318SVaibhav Nagarnaik 199083f40318SVaibhav Nagarnaik do { 199183f36555SVaibhav Nagarnaik cond_resched(); 199283f36555SVaibhav Nagarnaik 199383f40318SVaibhav Nagarnaik to_remove_page = tmp_iter_page; 19946689bed3SQiujun Huang rb_inc_page(&tmp_iter_page); 199583f40318SVaibhav Nagarnaik 199683f40318SVaibhav Nagarnaik /* update the counters */ 199783f40318SVaibhav Nagarnaik page_entries = rb_page_entries(to_remove_page); 199883f40318SVaibhav Nagarnaik if (page_entries) { 199983f40318SVaibhav Nagarnaik /* 200083f40318SVaibhav Nagarnaik * If something was added to this page, it was full 200183f40318SVaibhav Nagarnaik * since it is not the tail page. So we deduct the 200283f40318SVaibhav Nagarnaik * bytes consumed in ring buffer from here. 200348fdc72fSVaibhav Nagarnaik * Increment overrun to account for the lost events. 
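 * As a concrete example: a removed page (other than the tail page)
 * holding 12 events means overrun gains 12, entries_bytes drops by
 * BUF_PAGE_SIZE, and pages_lost is bumped by one.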
200483f40318SVaibhav Nagarnaik */ 200548fdc72fSVaibhav Nagarnaik local_add(page_entries, &cpu_buffer->overrun); 200683f40318SVaibhav Nagarnaik local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 200731029a8bSSteven Rostedt (Google) local_inc(&cpu_buffer->pages_lost); 200883f40318SVaibhav Nagarnaik } 200983f40318SVaibhav Nagarnaik 201083f40318SVaibhav Nagarnaik /* 201183f40318SVaibhav Nagarnaik * We have already removed references to this list item, just 201283f40318SVaibhav Nagarnaik * free up the buffer_page and its page 201383f40318SVaibhav Nagarnaik */ 201483f40318SVaibhav Nagarnaik free_buffer_page(to_remove_page); 201583f40318SVaibhav Nagarnaik nr_removed--; 201683f40318SVaibhav Nagarnaik 201783f40318SVaibhav Nagarnaik } while (to_remove_page != last_page); 201883f40318SVaibhav Nagarnaik 201983f40318SVaibhav Nagarnaik RB_WARN_ON(cpu_buffer, nr_removed); 20205040b4b7SVaibhav Nagarnaik 20215040b4b7SVaibhav Nagarnaik return nr_removed == 0; 20227a8e76a3SSteven Rostedt } 20237a8e76a3SSteven Rostedt 2024bc92b956SUros Bizjak static bool 20255040b4b7SVaibhav Nagarnaik rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) 20267a8e76a3SSteven Rostedt { 20275040b4b7SVaibhav Nagarnaik struct list_head *pages = &cpu_buffer->new_pages; 202888ca6a71SSteven Rostedt unsigned long flags; 2029bc92b956SUros Bizjak bool success; 2030bc92b956SUros Bizjak int retries; 20317a8e76a3SSteven Rostedt 203288ca6a71SSteven Rostedt /* Can be called at early boot up, where interrupts must not been enabled */ 203388ca6a71SSteven Rostedt raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 20345040b4b7SVaibhav Nagarnaik /* 20355040b4b7SVaibhav Nagarnaik * We are holding the reader lock, so the reader page won't be swapped 20365040b4b7SVaibhav Nagarnaik * in the ring buffer. Now we are racing with the writer trying to 20375040b4b7SVaibhav Nagarnaik * move head page and the tail page. 20385040b4b7SVaibhav Nagarnaik * We are going to adapt the reader page update process where: 20395040b4b7SVaibhav Nagarnaik * 1. We first splice the start and end of list of new pages between 20405040b4b7SVaibhav Nagarnaik * the head page and its previous page. 20415040b4b7SVaibhav Nagarnaik * 2. We cmpxchg the prev_page->next to point from head page to the 20425040b4b7SVaibhav Nagarnaik * start of new pages list. 20435040b4b7SVaibhav Nagarnaik * 3. Finally, we update the head->prev to the end of new list. 20445040b4b7SVaibhav Nagarnaik * 20455040b4b7SVaibhav Nagarnaik * We will try this process 10 times, to make sure that we don't keep 20465040b4b7SVaibhav Nagarnaik * spinning. 
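 * Pictorially, a successful splice turns
 *
 *	prev -H-> head
 *
 * into
 *
 *	prev ---> first_new -> ... -> last_new -H-> head
 *
 * where -H-> marks the ->next pointer carrying the RB_PAGE_HEAD bit:
 * the new pages are linked in just before the head page, and the HEAD
 * flag stays on the pointer that targets the head page.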
20475040b4b7SVaibhav Nagarnaik */ 20485040b4b7SVaibhav Nagarnaik retries = 10; 2049bc92b956SUros Bizjak success = false; 20505040b4b7SVaibhav Nagarnaik while (retries--) { 20515040b4b7SVaibhav Nagarnaik struct list_head *head_page, *prev_page, *r; 20525040b4b7SVaibhav Nagarnaik struct list_head *last_page, *first_page; 20535040b4b7SVaibhav Nagarnaik struct list_head *head_page_with_bit; 2054625ed527SZheng Yejian struct buffer_page *hpage = rb_set_head_page(cpu_buffer); 205577ae365eSSteven Rostedt 2056625ed527SZheng Yejian if (!hpage) 205754f7be5bSSteven Rostedt break; 2058625ed527SZheng Yejian head_page = &hpage->list; 20595040b4b7SVaibhav Nagarnaik prev_page = head_page->prev; 20605040b4b7SVaibhav Nagarnaik 20615040b4b7SVaibhav Nagarnaik first_page = pages->next; 20625040b4b7SVaibhav Nagarnaik last_page = pages->prev; 20635040b4b7SVaibhav Nagarnaik 20645040b4b7SVaibhav Nagarnaik head_page_with_bit = (struct list_head *) 20655040b4b7SVaibhav Nagarnaik ((unsigned long)head_page | RB_PAGE_HEAD); 20665040b4b7SVaibhav Nagarnaik 20675040b4b7SVaibhav Nagarnaik last_page->next = head_page_with_bit; 20685040b4b7SVaibhav Nagarnaik first_page->prev = prev_page; 20695040b4b7SVaibhav Nagarnaik 20705040b4b7SVaibhav Nagarnaik r = cmpxchg(&prev_page->next, head_page_with_bit, first_page); 20715040b4b7SVaibhav Nagarnaik 20725040b4b7SVaibhav Nagarnaik if (r == head_page_with_bit) { 20735040b4b7SVaibhav Nagarnaik /* 20745040b4b7SVaibhav Nagarnaik * Yay, we replaced the page pointer to our new list; 20755040b4b7SVaibhav Nagarnaik * now we just have to update the head page's prev 20765040b4b7SVaibhav Nagarnaik * pointer to point to the end of the list. 20775040b4b7SVaibhav Nagarnaik */ 20785040b4b7SVaibhav Nagarnaik head_page->prev = last_page; 2079bc92b956SUros Bizjak success = true; 20805040b4b7SVaibhav Nagarnaik break; 20817a8e76a3SSteven Rostedt } 20825040b4b7SVaibhav Nagarnaik } 20837a8e76a3SSteven Rostedt 20845040b4b7SVaibhav Nagarnaik if (success) 20855040b4b7SVaibhav Nagarnaik INIT_LIST_HEAD(pages); 20865040b4b7SVaibhav Nagarnaik /* 20875040b4b7SVaibhav Nagarnaik * If we weren't successful in adding the new pages, warn and stop 20885040b4b7SVaibhav Nagarnaik * tracing 20895040b4b7SVaibhav Nagarnaik */ 20905040b4b7SVaibhav Nagarnaik RB_WARN_ON(cpu_buffer, !success); 209188ca6a71SSteven Rostedt raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 20925040b4b7SVaibhav Nagarnaik 20935040b4b7SVaibhav Nagarnaik /* free pages if they weren't inserted */ 20945040b4b7SVaibhav Nagarnaik if (!success) { 20955040b4b7SVaibhav Nagarnaik struct buffer_page *bpage, *tmp; 20965040b4b7SVaibhav Nagarnaik list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 20975040b4b7SVaibhav Nagarnaik list) { 20985040b4b7SVaibhav Nagarnaik list_del_init(&bpage->list); 20995040b4b7SVaibhav Nagarnaik free_buffer_page(bpage); 21005040b4b7SVaibhav Nagarnaik } 21015040b4b7SVaibhav Nagarnaik } 21025040b4b7SVaibhav Nagarnaik return success; 21037a8e76a3SSteven Rostedt } 21047a8e76a3SSteven Rostedt 210583f40318SVaibhav Nagarnaik static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) 2106438ced17SVaibhav Nagarnaik { 2107bc92b956SUros Bizjak bool success; 210883f40318SVaibhav Nagarnaik 21095040b4b7SVaibhav Nagarnaik if (cpu_buffer->nr_pages_to_update > 0) 21105040b4b7SVaibhav Nagarnaik success = rb_insert_pages(cpu_buffer); 21115040b4b7SVaibhav Nagarnaik else 21125040b4b7SVaibhav Nagarnaik success = rb_remove_pages(cpu_buffer, 21135040b4b7SVaibhav Nagarnaik -cpu_buffer->nr_pages_to_update); 21145040b4b7SVaibhav Nagarnaik
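	/*
	 * For illustration: nr_pages_to_update is signed relative to the
	 * current size. E.g. (hypothetical sizes) growing a 4 page buffer
	 * to 7 pages gives +3 and takes the insert path above; shrinking
	 * it to 2 pages gives -2, so rb_remove_pages() is called with 2.
	 */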
21155040b4b7SVaibhav Nagarnaik if (success) 2116438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; 211783f40318SVaibhav Nagarnaik } 211883f40318SVaibhav Nagarnaik 211983f40318SVaibhav Nagarnaik static void update_pages_handler(struct work_struct *work) 212083f40318SVaibhav Nagarnaik { 212183f40318SVaibhav Nagarnaik struct ring_buffer_per_cpu *cpu_buffer = container_of(work, 212283f40318SVaibhav Nagarnaik struct ring_buffer_per_cpu, update_pages_work); 212383f40318SVaibhav Nagarnaik rb_update_pages(cpu_buffer); 212405fdd70dSVaibhav Nagarnaik complete(&cpu_buffer->update_done); 2125438ced17SVaibhav Nagarnaik } 2126438ced17SVaibhav Nagarnaik 21277a8e76a3SSteven Rostedt /** 21287a8e76a3SSteven Rostedt * ring_buffer_resize - resize the ring buffer 21297a8e76a3SSteven Rostedt * @buffer: the buffer to resize. 21307a8e76a3SSteven Rostedt * @size: the new size. 2131d611851bSzhangwei(Jovi) * @cpu_id: the cpu buffer to resize 21327a8e76a3SSteven Rostedt * 21337a8e76a3SSteven Rostedt * Minimum size is 2 * BUF_PAGE_SIZE. 21347a8e76a3SSteven Rostedt * 213583f40318SVaibhav Nagarnaik * Returns 0 on success and < 0 on failure. 21367a8e76a3SSteven Rostedt */ 213713292494SSteven Rostedt (VMware) int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, 2138438ced17SVaibhav Nagarnaik int cpu_id) 21397a8e76a3SSteven Rostedt { 21407a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 21419b94a8fbSSteven Rostedt (Red Hat) unsigned long nr_pages; 21420a1754b2SQiujun Huang int cpu, err; 21437a8e76a3SSteven Rostedt 2144ee51a1deSIngo Molnar /* 2145ee51a1deSIngo Molnar * Always succeed at resizing a non-existent buffer: 2146ee51a1deSIngo Molnar */ 2147ee51a1deSIngo Molnar if (!buffer) 21480a1754b2SQiujun Huang return 0; 2149ee51a1deSIngo Molnar 21506a31e1f1SSteven Rostedt /* Make sure the requested buffer exists */ 21516a31e1f1SSteven Rostedt if (cpu_id != RING_BUFFER_ALL_CPUS && 21526a31e1f1SSteven Rostedt !cpumask_test_cpu(cpu_id, buffer->cpumask)) 21530a1754b2SQiujun Huang return 0; 21546a31e1f1SSteven Rostedt 215559643d15SSteven Rostedt (Red Hat) nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 21567a8e76a3SSteven Rostedt 21577a8e76a3SSteven Rostedt /* we need a minimum of two pages */ 215859643d15SSteven Rostedt (Red Hat) if (nr_pages < 2) 215959643d15SSteven Rostedt (Red Hat) nr_pages = 2; 21607a8e76a3SSteven Rostedt 216107b8b10eSSteven Rostedt (VMware) /* prevent another thread from changing buffer sizes */ 216207b8b10eSSteven Rostedt (VMware) mutex_lock(&buffer->mutex); 21638a96c028SChen Lin atomic_inc(&buffer->resizing); 216407b8b10eSSteven Rostedt (VMware) 216507b8b10eSSteven Rostedt (VMware) if (cpu_id == RING_BUFFER_ALL_CPUS) { 216683f40318SVaibhav Nagarnaik /* 216783f40318SVaibhav Nagarnaik * Don't succeed if resizing is disabled, as a reader might be 216883f40318SVaibhav Nagarnaik * manipulating the ring buffer and is expecting a sane state while 216983f40318SVaibhav Nagarnaik * this is true. 
217083f40318SVaibhav Nagarnaik */ 217107b8b10eSSteven Rostedt (VMware) for_each_buffer_cpu(buffer, cpu) { 217207b8b10eSSteven Rostedt (VMware) cpu_buffer = buffer->buffers[cpu]; 217307b8b10eSSteven Rostedt (VMware) if (atomic_read(&cpu_buffer->resize_disabled)) { 217407b8b10eSSteven Rostedt (VMware) err = -EBUSY; 217507b8b10eSSteven Rostedt (VMware) goto out_err_unlock; 217607b8b10eSSteven Rostedt (VMware) } 217707b8b10eSSteven Rostedt (VMware) } 217883f40318SVaibhav Nagarnaik 2179438ced17SVaibhav Nagarnaik /* calculate the pages to update */ 21807a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 21817a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 2182438ced17SVaibhav Nagarnaik 2183438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = nr_pages - 2184438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages; 2185438ced17SVaibhav Nagarnaik /* 2186438ced17SVaibhav Nagarnaik * nothing more to do for removing pages or no update 2187438ced17SVaibhav Nagarnaik */ 2188438ced17SVaibhav Nagarnaik if (cpu_buffer->nr_pages_to_update <= 0) 2189438ced17SVaibhav Nagarnaik continue; 2190438ced17SVaibhav Nagarnaik /* 2191438ced17SVaibhav Nagarnaik * to add pages, make sure all new pages can be 2192438ced17SVaibhav Nagarnaik * allocated without receiving ENOMEM 2193438ced17SVaibhav Nagarnaik */ 2194438ced17SVaibhav Nagarnaik INIT_LIST_HEAD(&cpu_buffer->new_pages); 219574e2afc6SQiujun Huang if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 219674e2afc6SQiujun Huang &cpu_buffer->new_pages)) { 2197438ced17SVaibhav Nagarnaik /* not enough memory for new pages */ 219883f40318SVaibhav Nagarnaik err = -ENOMEM; 219983f40318SVaibhav Nagarnaik goto out_err; 220083f40318SVaibhav Nagarnaik } 220183f40318SVaibhav Nagarnaik } 220283f40318SVaibhav Nagarnaik 220399c37d1aSSebastian Andrzej Siewior cpus_read_lock(); 220483f40318SVaibhav Nagarnaik /* 220583f40318SVaibhav Nagarnaik * Fire off all the required work handlers 220605fdd70dSVaibhav Nagarnaik * We can't schedule on offline CPUs, but it's not necessary 220783f40318SVaibhav Nagarnaik * since we can change their buffer sizes without any race. 220883f40318SVaibhav Nagarnaik */ 220983f40318SVaibhav Nagarnaik for_each_buffer_cpu(buffer, cpu) { 221083f40318SVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu]; 221105fdd70dSVaibhav Nagarnaik if (!cpu_buffer->nr_pages_to_update) 221283f40318SVaibhav Nagarnaik continue; 221383f40318SVaibhav Nagarnaik 2214021c5b34SCorey Minyard /* Can't run something on an offline CPU. */ 2215021c5b34SCorey Minyard if (!cpu_online(cpu)) { 2216f5eb5588SSteven Rostedt (Red Hat) rb_update_pages(cpu_buffer); 2217f5eb5588SSteven Rostedt (Red Hat) cpu_buffer->nr_pages_to_update = 0; 2218f5eb5588SSteven Rostedt (Red Hat) } else { 221988ca6a71SSteven Rostedt /* Run directly if possible. 
*/ 222088ca6a71SSteven Rostedt migrate_disable(); 222188ca6a71SSteven Rostedt if (cpu != smp_processor_id()) { 222288ca6a71SSteven Rostedt migrate_enable(); 222305fdd70dSVaibhav Nagarnaik schedule_work_on(cpu, 222405fdd70dSVaibhav Nagarnaik &cpu_buffer->update_pages_work); 222588ca6a71SSteven Rostedt } else { 222688ca6a71SSteven Rostedt update_pages_handler(&cpu_buffer->update_pages_work); 222788ca6a71SSteven Rostedt migrate_enable(); 222888ca6a71SSteven Rostedt } 2229f5eb5588SSteven Rostedt (Red Hat) } 22307a8e76a3SSteven Rostedt } 2231438ced17SVaibhav Nagarnaik 2232438ced17SVaibhav Nagarnaik /* wait for all the updates to complete */ 2233438ced17SVaibhav Nagarnaik for_each_buffer_cpu(buffer, cpu) { 2234438ced17SVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu]; 223505fdd70dSVaibhav Nagarnaik if (!cpu_buffer->nr_pages_to_update) 223683f40318SVaibhav Nagarnaik continue; 223783f40318SVaibhav Nagarnaik 223805fdd70dSVaibhav Nagarnaik if (cpu_online(cpu)) 223905fdd70dSVaibhav Nagarnaik wait_for_completion(&cpu_buffer->update_done); 224083f40318SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = 0; 2241438ced17SVaibhav Nagarnaik } 224283f40318SVaibhav Nagarnaik 224399c37d1aSSebastian Andrzej Siewior cpus_read_unlock(); 2244438ced17SVaibhav Nagarnaik } else { 2245438ced17SVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu_id]; 224683f40318SVaibhav Nagarnaik 2247438ced17SVaibhav Nagarnaik if (nr_pages == cpu_buffer->nr_pages) 22487a8e76a3SSteven Rostedt goto out; 2249438ced17SVaibhav Nagarnaik 225007b8b10eSSteven Rostedt (VMware) /* 225107b8b10eSSteven Rostedt (VMware) * Don't succeed if resizing is disabled, as a reader might be 225207b8b10eSSteven Rostedt (VMware) * manipulating the ring buffer and is expecting a sane state while 225307b8b10eSSteven Rostedt (VMware) * this is true. 225407b8b10eSSteven Rostedt (VMware) */ 225507b8b10eSSteven Rostedt (VMware) if (atomic_read(&cpu_buffer->resize_disabled)) { 225607b8b10eSSteven Rostedt (VMware) err = -EBUSY; 225707b8b10eSSteven Rostedt (VMware) goto out_err_unlock; 225807b8b10eSSteven Rostedt (VMware) } 225907b8b10eSSteven Rostedt (VMware) 2260438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = nr_pages - 2261438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages; 2262438ced17SVaibhav Nagarnaik 2263438ced17SVaibhav Nagarnaik INIT_LIST_HEAD(&cpu_buffer->new_pages); 2264438ced17SVaibhav Nagarnaik if (cpu_buffer->nr_pages_to_update > 0 && 226574e2afc6SQiujun Huang __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, 226674e2afc6SQiujun Huang &cpu_buffer->new_pages)) { 226783f40318SVaibhav Nagarnaik err = -ENOMEM; 226883f40318SVaibhav Nagarnaik goto out_err; 226983f40318SVaibhav Nagarnaik } 2270438ced17SVaibhav Nagarnaik 227199c37d1aSSebastian Andrzej Siewior cpus_read_lock(); 227283f40318SVaibhav Nagarnaik 2273021c5b34SCorey Minyard /* Can't run something on an offline CPU. */ 2274021c5b34SCorey Minyard if (!cpu_online(cpu_id)) 2275f5eb5588SSteven Rostedt (Red Hat) rb_update_pages(cpu_buffer); 2276f5eb5588SSteven Rostedt (Red Hat) else { 227788ca6a71SSteven Rostedt /* Run directly if possible. 
*/ 227888ca6a71SSteven Rostedt migrate_disable(); 227988ca6a71SSteven Rostedt if (cpu_id == smp_processor_id()) { 228088ca6a71SSteven Rostedt rb_update_pages(cpu_buffer); 228188ca6a71SSteven Rostedt migrate_enable(); 228288ca6a71SSteven Rostedt } else { 228388ca6a71SSteven Rostedt migrate_enable(); 228483f40318SVaibhav Nagarnaik schedule_work_on(cpu_id, 228583f40318SVaibhav Nagarnaik &cpu_buffer->update_pages_work); 228605fdd70dSVaibhav Nagarnaik wait_for_completion(&cpu_buffer->update_done); 2287f5eb5588SSteven Rostedt (Red Hat) } 228888ca6a71SSteven Rostedt } 228983f40318SVaibhav Nagarnaik 229083f40318SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = 0; 229199c37d1aSSebastian Andrzej Siewior cpus_read_unlock(); 22927a8e76a3SSteven Rostedt } 22937a8e76a3SSteven Rostedt 22947a8e76a3SSteven Rostedt out: 2295659f451fSSteven Rostedt /* 2296659f451fSSteven Rostedt * The ring buffer resize can happen with the ring buffer 2297659f451fSSteven Rostedt * enabled, so that the update disturbs the tracing as little 2298659f451fSSteven Rostedt * as possible. But if the buffer is disabled, we do not need 2299659f451fSSteven Rostedt * to worry about that, and we can take the time to verify 2300659f451fSSteven Rostedt * that the buffer is not corrupt. 2301659f451fSSteven Rostedt */ 2302659f451fSSteven Rostedt if (atomic_read(&buffer->record_disabled)) { 2303659f451fSSteven Rostedt atomic_inc(&buffer->record_disabled); 2304659f451fSSteven Rostedt /* 2305659f451fSSteven Rostedt * Even though the buffer was disabled, we must make sure 2306659f451fSSteven Rostedt * that it is truly disabled before calling rb_check_pages. 2307659f451fSSteven Rostedt * There could have been a race between checking 2308659f451fSSteven Rostedt * record_disable and incrementing it. 2309659f451fSSteven Rostedt */ 231074401729SPaul E. 
McKenney synchronize_rcu(); 2311659f451fSSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 2312659f451fSSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 2313659f451fSSteven Rostedt rb_check_pages(cpu_buffer); 2314659f451fSSteven Rostedt } 2315659f451fSSteven Rostedt atomic_dec(&buffer->record_disabled); 2316659f451fSSteven Rostedt } 2317659f451fSSteven Rostedt 23188a96c028SChen Lin atomic_dec(&buffer->resizing); 23197a8e76a3SSteven Rostedt mutex_unlock(&buffer->mutex); 23200a1754b2SQiujun Huang return 0; 23217a8e76a3SSteven Rostedt 232283f40318SVaibhav Nagarnaik out_err: 2323438ced17SVaibhav Nagarnaik for_each_buffer_cpu(buffer, cpu) { 2324438ced17SVaibhav Nagarnaik struct buffer_page *bpage, *tmp; 232583f40318SVaibhav Nagarnaik 2326438ced17SVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu]; 2327438ced17SVaibhav Nagarnaik cpu_buffer->nr_pages_to_update = 0; 232883f40318SVaibhav Nagarnaik 2329438ced17SVaibhav Nagarnaik if (list_empty(&cpu_buffer->new_pages)) 2330438ced17SVaibhav Nagarnaik continue; 233183f40318SVaibhav Nagarnaik 2332438ced17SVaibhav Nagarnaik list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, 2333438ced17SVaibhav Nagarnaik list) { 2334044fa782SSteven Rostedt list_del_init(&bpage->list); 2335044fa782SSteven Rostedt free_buffer_page(bpage); 23367a8e76a3SSteven Rostedt } 2337438ced17SVaibhav Nagarnaik } 233807b8b10eSSteven Rostedt (VMware) out_err_unlock: 23398a96c028SChen Lin atomic_dec(&buffer->resizing); 2340641d2f63SVegard Nossum mutex_unlock(&buffer->mutex); 234183f40318SVaibhav Nagarnaik return err; 23427a8e76a3SSteven Rostedt } 2343c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_resize); 23447a8e76a3SSteven Rostedt 234513292494SSteven Rostedt (VMware) void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val) 2346750912faSDavid Sharp { 2347750912faSDavid Sharp mutex_lock(&buffer->mutex); 2348750912faSDavid Sharp if (val) 2349750912faSDavid Sharp buffer->flags |= RB_FL_OVERWRITE; 2350750912faSDavid Sharp else 2351750912faSDavid Sharp buffer->flags &= ~RB_FL_OVERWRITE; 2352750912faSDavid Sharp mutex_unlock(&buffer->mutex); 2353750912faSDavid Sharp } 2354750912faSDavid Sharp EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); 2355750912faSDavid Sharp 23562289d567SSteven Rostedt (Red Hat) static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 23577a8e76a3SSteven Rostedt { 2358044fa782SSteven Rostedt return bpage->page->data + index; 23597a8e76a3SSteven Rostedt } 23607a8e76a3SSteven Rostedt 23612289d567SSteven Rostedt (Red Hat) static __always_inline struct ring_buffer_event * 2362d769041fSSteven Rostedt rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) 23637a8e76a3SSteven Rostedt { 23646f807acdSSteven Rostedt return __rb_page_index(cpu_buffer->reader_page, 23656f807acdSSteven Rostedt cpu_buffer->reader_page->read); 23666f807acdSSteven Rostedt } 23676f807acdSSteven Rostedt 23682289d567SSteven Rostedt (Red Hat) static __always_inline unsigned rb_page_commit(struct buffer_page *bpage) 2369bf41a158SSteven Rostedt { 2370abc9b56dSSteven Rostedt return local_read(&bpage->page->commit); 2371bf41a158SSteven Rostedt } 2372bf41a158SSteven Rostedt 2373785888c5SSteven Rostedt (VMware) static struct ring_buffer_event * 2374785888c5SSteven Rostedt (VMware) rb_iter_head_event(struct ring_buffer_iter *iter) 2375785888c5SSteven Rostedt (VMware) { 2376785888c5SSteven Rostedt (VMware) struct ring_buffer_event *event; 2377785888c5SSteven Rostedt (VMware) struct buffer_page *iter_head_page = iter->head_page; 2378785888c5SSteven 
Rostedt (VMware) unsigned long commit; 2379785888c5SSteven Rostedt (VMware) unsigned length; 2380785888c5SSteven Rostedt (VMware) 2381153368ceSSteven Rostedt (VMware) if (iter->head != iter->next_event) 2382153368ceSSteven Rostedt (VMware) return iter->event; 2383153368ceSSteven Rostedt (VMware) 2384785888c5SSteven Rostedt (VMware) /* 2385785888c5SSteven Rostedt (VMware) * When the writer goes across pages, it issues a cmpxchg which 2386785888c5SSteven Rostedt (VMware) * is a mb(), which will synchronize with the rmb here. 2387785888c5SSteven Rostedt (VMware) * (see rb_tail_page_update() and __rb_reserve_next()) 2388785888c5SSteven Rostedt (VMware) */ 2389785888c5SSteven Rostedt (VMware) commit = rb_page_commit(iter_head_page); 2390785888c5SSteven Rostedt (VMware) smp_rmb(); 2391785888c5SSteven Rostedt (VMware) event = __rb_page_index(iter_head_page, iter->head); 2392785888c5SSteven Rostedt (VMware) length = rb_event_length(event); 2393785888c5SSteven Rostedt (VMware) 2394785888c5SSteven Rostedt (VMware) /* 2395785888c5SSteven Rostedt (VMware) * READ_ONCE() doesn't work on functions and we don't want the 2396785888c5SSteven Rostedt (VMware) * compiler doing any crazy optimizations with length. 2397785888c5SSteven Rostedt (VMware) */ 2398785888c5SSteven Rostedt (VMware) barrier(); 2399785888c5SSteven Rostedt (VMware) 2400785888c5SSteven Rostedt (VMware) if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE) 2401785888c5SSteven Rostedt (VMware) /* Writer corrupted the read? */ 2402785888c5SSteven Rostedt (VMware) goto reset; 2403785888c5SSteven Rostedt (VMware) 2404785888c5SSteven Rostedt (VMware) memcpy(iter->event, event, length); 2405785888c5SSteven Rostedt (VMware) /* 2406785888c5SSteven Rostedt (VMware) * If the page stamp is still the same after this rmb() then the 2407785888c5SSteven Rostedt (VMware) * event was safely copied without the writer entering the page. 
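 *
 * In isolation, the copy/re-check pattern used here looks like
 * this (pseudocode; stamp_changed() is a hypothetical helper
 * standing in for the page_stamp comparison below):
 *
 *	commit = rb_page_commit(page);
 *	smp_rmb();
 *	memcpy(copy, event, length);     (speculative copy)
 *	smp_rmb();
 *	if (stamp_changed(page) || commit > rb_page_commit(page))
 *		(writer raced us: discard the copy and reset)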
2408785888c5SSteven Rostedt (VMware) */ 2409785888c5SSteven Rostedt (VMware) smp_rmb(); 2410785888c5SSteven Rostedt (VMware) 2411785888c5SSteven Rostedt (VMware) /* Make sure the page didn't change since we read this */ 2412785888c5SSteven Rostedt (VMware) if (iter->page_stamp != iter_head_page->page->time_stamp || 2413785888c5SSteven Rostedt (VMware) commit > rb_page_commit(iter_head_page)) 2414785888c5SSteven Rostedt (VMware) goto reset; 2415785888c5SSteven Rostedt (VMware) 2416785888c5SSteven Rostedt (VMware) iter->next_event = iter->head + length; 2417785888c5SSteven Rostedt (VMware) return iter->event; 2418785888c5SSteven Rostedt (VMware) reset: 2419785888c5SSteven Rostedt (VMware) /* Reset to the beginning */ 2420785888c5SSteven Rostedt (VMware) iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 2421785888c5SSteven Rostedt (VMware) iter->head = 0; 2422785888c5SSteven Rostedt (VMware) iter->next_event = 0; 2423c9b7a4a7SSteven Rostedt (VMware) iter->missed_events = 1; 2424785888c5SSteven Rostedt (VMware) return NULL; 2425785888c5SSteven Rostedt (VMware) } 2426785888c5SSteven Rostedt (VMware) 242725985edcSLucas De Marchi /* Size is determined by what has been committed */ 24282289d567SSteven Rostedt (Red Hat) static __always_inline unsigned rb_page_size(struct buffer_page *bpage) 2429bf41a158SSteven Rostedt { 2430bf41a158SSteven Rostedt return rb_page_commit(bpage); 2431bf41a158SSteven Rostedt } 2432bf41a158SSteven Rostedt 24332289d567SSteven Rostedt (Red Hat) static __always_inline unsigned 2434bf41a158SSteven Rostedt rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) 2435bf41a158SSteven Rostedt { 2436bf41a158SSteven Rostedt return rb_page_commit(cpu_buffer->commit_page); 2437bf41a158SSteven Rostedt } 2438bf41a158SSteven Rostedt 24392289d567SSteven Rostedt (Red Hat) static __always_inline unsigned 2440bf41a158SSteven Rostedt rb_event_index(struct ring_buffer_event *event) 24417a8e76a3SSteven Rostedt { 2442bf41a158SSteven Rostedt unsigned long addr = (unsigned long)event; 2443bf41a158SSteven Rostedt 244422f470f8SSteven Rostedt return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; 24457a8e76a3SSteven Rostedt } 24467a8e76a3SSteven Rostedt 244734a148bfSAndrew Morton static void rb_inc_iter(struct ring_buffer_iter *iter) 2448d769041fSSteven Rostedt { 2449d769041fSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 2450d769041fSSteven Rostedt 2451d769041fSSteven Rostedt /* 2452d769041fSSteven Rostedt * The iterator could be on the reader page (it starts there). 2453d769041fSSteven Rostedt * But the head could have moved, since the reader was 2454d769041fSSteven Rostedt * found. Check for this case and assign the iterator 2455d769041fSSteven Rostedt * to the head page instead of next. 
2456d769041fSSteven Rostedt */ 2457d769041fSSteven Rostedt if (iter->head_page == cpu_buffer->reader_page) 245877ae365eSSteven Rostedt iter->head_page = rb_set_head_page(cpu_buffer); 2459d769041fSSteven Rostedt else 24606689bed3SQiujun Huang rb_inc_page(&iter->head_page); 2461d769041fSSteven Rostedt 246228e3fc56SSteven Rostedt (VMware) iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp; 24637a8e76a3SSteven Rostedt iter->head = 0; 2464785888c5SSteven Rostedt (VMware) iter->next_event = 0; 24657a8e76a3SSteven Rostedt } 24667a8e76a3SSteven Rostedt 246777ae365eSSteven Rostedt /* 246877ae365eSSteven Rostedt * rb_handle_head_page - writer hit the head page 246977ae365eSSteven Rostedt * 247077ae365eSSteven Rostedt * Returns: +1 to retry page 247177ae365eSSteven Rostedt * 0 to continue 247277ae365eSSteven Rostedt * -1 on error 247377ae365eSSteven Rostedt */ 247477ae365eSSteven Rostedt static int 247577ae365eSSteven Rostedt rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, 247677ae365eSSteven Rostedt struct buffer_page *tail_page, 247777ae365eSSteven Rostedt struct buffer_page *next_page) 247877ae365eSSteven Rostedt { 247977ae365eSSteven Rostedt struct buffer_page *new_head; 248077ae365eSSteven Rostedt int entries; 248177ae365eSSteven Rostedt int type; 248277ae365eSSteven Rostedt int ret; 248377ae365eSSteven Rostedt 248477ae365eSSteven Rostedt entries = rb_page_entries(next_page); 248577ae365eSSteven Rostedt 248677ae365eSSteven Rostedt /* 248777ae365eSSteven Rostedt * The hard part is here. We need to move the head 248877ae365eSSteven Rostedt * forward, and protect against both readers on 248977ae365eSSteven Rostedt * other CPUs and writers coming in via interrupts. 249077ae365eSSteven Rostedt */ 249177ae365eSSteven Rostedt type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, 249277ae365eSSteven Rostedt RB_PAGE_HEAD); 249377ae365eSSteven Rostedt 249477ae365eSSteven Rostedt /* 249577ae365eSSteven Rostedt * type can be one of four: 249677ae365eSSteven Rostedt * NORMAL - an interrupt already moved it for us 249777ae365eSSteven Rostedt * HEAD - we are the first to get here. 249877ae365eSSteven Rostedt * UPDATE - we are the interrupt interrupting 249977ae365eSSteven Rostedt * a current move. 250077ae365eSSteven Rostedt * MOVED - a reader on another CPU moved the next 250177ae365eSSteven Rostedt * pointer to its reader page. Give up 250277ae365eSSteven Rostedt * and try again. 250377ae365eSSteven Rostedt */ 250477ae365eSSteven Rostedt 250577ae365eSSteven Rostedt switch (type) { 250677ae365eSSteven Rostedt case RB_PAGE_HEAD: 250777ae365eSSteven Rostedt /* 250877ae365eSSteven Rostedt * We changed the head to UPDATE, thus 250977ae365eSSteven Rostedt * it is our responsibility to update 251077ae365eSSteven Rostedt * the counters. 251177ae365eSSteven Rostedt */ 251277ae365eSSteven Rostedt local_add(entries, &cpu_buffer->overrun); 2513c64e148aSVaibhav Nagarnaik local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes); 251431029a8bSSteven Rostedt (Google) local_inc(&cpu_buffer->pages_lost); 251577ae365eSSteven Rostedt 251677ae365eSSteven Rostedt /* 251777ae365eSSteven Rostedt * The entries will be zeroed out when we move the 251877ae365eSSteven Rostedt * tail page. 
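 *
 * As an aside, the NORMAL/HEAD/UPDATE states used here are
 * conceptually tag bits stored in the low bits of the ->next
 * pointer, along the lines of:
 *
 *	next = (struct list_head *)
 *		((unsigned long)page | RB_PAGE_HEAD);
 *	page = (struct list_head *)
 *		((unsigned long)next & ~RB_FLAG_MASK);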
251977ae365eSSteven Rostedt */ 252077ae365eSSteven Rostedt 252177ae365eSSteven Rostedt /* still more to do */ 252277ae365eSSteven Rostedt break; 252377ae365eSSteven Rostedt 252477ae365eSSteven Rostedt case RB_PAGE_UPDATE: 252577ae365eSSteven Rostedt /* 252677ae365eSSteven Rostedt * This is an interrupt that interrupted the 252777ae365eSSteven Rostedt * previous update. Still more to do. 252877ae365eSSteven Rostedt */ 252977ae365eSSteven Rostedt break; 253077ae365eSSteven Rostedt case RB_PAGE_NORMAL: 253177ae365eSSteven Rostedt /* 253277ae365eSSteven Rostedt * An interrupt came in before the update 253377ae365eSSteven Rostedt * and processed this for us. 253477ae365eSSteven Rostedt * Nothing left to do. 253577ae365eSSteven Rostedt */ 253677ae365eSSteven Rostedt return 1; 253777ae365eSSteven Rostedt case RB_PAGE_MOVED: 253877ae365eSSteven Rostedt /* 253977ae365eSSteven Rostedt * The reader is on another CPU and just did 254077ae365eSSteven Rostedt * a swap with our next_page. 254177ae365eSSteven Rostedt * Try again. 254277ae365eSSteven Rostedt */ 254377ae365eSSteven Rostedt return 1; 254477ae365eSSteven Rostedt default: 254577ae365eSSteven Rostedt RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ 254677ae365eSSteven Rostedt return -1; 254777ae365eSSteven Rostedt } 254877ae365eSSteven Rostedt 254977ae365eSSteven Rostedt /* 255077ae365eSSteven Rostedt * Now that we are here, the old head pointer is 255177ae365eSSteven Rostedt * set to UPDATE. This will keep the reader from 255277ae365eSSteven Rostedt * swapping the head page with the reader page. 255377ae365eSSteven Rostedt * The reader (on another CPU) will spin till 255477ae365eSSteven Rostedt * we are finished. 255577ae365eSSteven Rostedt * 255677ae365eSSteven Rostedt * We just need to protect against interrupts 255777ae365eSSteven Rostedt * doing the job. We will set the next pointer 255877ae365eSSteven Rostedt * to HEAD. After that, we set the old pointer 255977ae365eSSteven Rostedt * to NORMAL, but only if it was HEAD before. 256077ae365eSSteven Rostedt * Otherwise we are an interrupt, and only 256177ae365eSSteven Rostedt * want the outermost commit to reset it. 256277ae365eSSteven Rostedt */ 256377ae365eSSteven Rostedt new_head = next_page; 25646689bed3SQiujun Huang rb_inc_page(&new_head); 256577ae365eSSteven Rostedt 256677ae365eSSteven Rostedt ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, 256777ae365eSSteven Rostedt RB_PAGE_NORMAL); 256877ae365eSSteven Rostedt 256977ae365eSSteven Rostedt /* 257077ae365eSSteven Rostedt * Valid returns are: 257177ae365eSSteven Rostedt * HEAD - an interrupt came in and already set it. 257277ae365eSSteven Rostedt * NORMAL - One of two things: 257377ae365eSSteven Rostedt * 1) We really set it. 257477ae365eSSteven Rostedt * 2) A bunch of interrupts came in and moved 257577ae365eSSteven Rostedt * the page forward again. 257677ae365eSSteven Rostedt */ 257777ae365eSSteven Rostedt switch (ret) { 257877ae365eSSteven Rostedt case RB_PAGE_HEAD: 257977ae365eSSteven Rostedt case RB_PAGE_NORMAL: 258077ae365eSSteven Rostedt /* OK */ 258177ae365eSSteven Rostedt break; 258277ae365eSSteven Rostedt default: 258377ae365eSSteven Rostedt RB_WARN_ON(cpu_buffer, 1); 258477ae365eSSteven Rostedt return -1; 258577ae365eSSteven Rostedt } 258677ae365eSSteven Rostedt 258777ae365eSSteven Rostedt /* 258877ae365eSSteven Rostedt * It is possible that an interrupt came in, 258977ae365eSSteven Rostedt * set the head up, then more interrupts came in 259077ae365eSSteven Rostedt * and moved it again. 
When we get back here, 259177ae365eSSteven Rostedt * the page would have been set to NORMAL but we 259277ae365eSSteven Rostedt * just set it back to HEAD. 259377ae365eSSteven Rostedt * 259477ae365eSSteven Rostedt * How do you detect this? Well, if that happened 259577ae365eSSteven Rostedt * the tail page would have moved. 259677ae365eSSteven Rostedt */ 259777ae365eSSteven Rostedt if (ret == RB_PAGE_NORMAL) { 25988573636eSSteven Rostedt (Red Hat) struct buffer_page *buffer_tail_page; 25998573636eSSteven Rostedt (Red Hat) 26008573636eSSteven Rostedt (Red Hat) buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); 260177ae365eSSteven Rostedt /* 260277ae365eSSteven Rostedt * If the tail had moved passed next, then we need 260377ae365eSSteven Rostedt * to reset the pointer. 260477ae365eSSteven Rostedt */ 26058573636eSSteven Rostedt (Red Hat) if (buffer_tail_page != tail_page && 26068573636eSSteven Rostedt (Red Hat) buffer_tail_page != next_page) 260777ae365eSSteven Rostedt rb_head_page_set_normal(cpu_buffer, new_head, 260877ae365eSSteven Rostedt next_page, 260977ae365eSSteven Rostedt RB_PAGE_HEAD); 261077ae365eSSteven Rostedt } 261177ae365eSSteven Rostedt 261277ae365eSSteven Rostedt /* 261377ae365eSSteven Rostedt * If this was the outer most commit (the one that 261477ae365eSSteven Rostedt * changed the original pointer from HEAD to UPDATE), 261577ae365eSSteven Rostedt * then it is up to us to reset it to NORMAL. 261677ae365eSSteven Rostedt */ 261777ae365eSSteven Rostedt if (type == RB_PAGE_HEAD) { 261877ae365eSSteven Rostedt ret = rb_head_page_set_normal(cpu_buffer, next_page, 261977ae365eSSteven Rostedt tail_page, 262077ae365eSSteven Rostedt RB_PAGE_UPDATE); 262177ae365eSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 262277ae365eSSteven Rostedt ret != RB_PAGE_UPDATE)) 262377ae365eSSteven Rostedt return -1; 262477ae365eSSteven Rostedt } 262577ae365eSSteven Rostedt 262677ae365eSSteven Rostedt return 0; 262777ae365eSSteven Rostedt } 262877ae365eSSteven Rostedt 2629c7b09308SSteven Rostedt static inline void 2630c7b09308SSteven Rostedt rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, 2631fcc742eaSSteven Rostedt (Red Hat) unsigned long tail, struct rb_event_info *info) 2632c7b09308SSteven Rostedt { 2633fcc742eaSSteven Rostedt (Red Hat) struct buffer_page *tail_page = info->tail_page; 2634c7b09308SSteven Rostedt struct ring_buffer_event *event; 2635fcc742eaSSteven Rostedt (Red Hat) unsigned long length = info->length; 2636c7b09308SSteven Rostedt 2637c7b09308SSteven Rostedt /* 2638c7b09308SSteven Rostedt * Only the event that crossed the page boundary 2639c7b09308SSteven Rostedt * must fill the old tail_page with padding. 2640c7b09308SSteven Rostedt */ 2641c7b09308SSteven Rostedt if (tail >= BUF_PAGE_SIZE) { 2642b3230c8bSSteven Rostedt /* 2643b3230c8bSSteven Rostedt * If the page was filled, then we still need 2644b3230c8bSSteven Rostedt * to update the real_end. Reset it to zero 2645b3230c8bSSteven Rostedt * and the reader will ignore it. 
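 *
 * For illustration with round numbers: if BUF_PAGE_SIZE were 4096
 * and the colliding write started exactly at tail == 4096, the
 * page was completely filled, so there is no partial tail to
 * record: real_end is reset to zero, the reader ignores it, and
 * the write below is simply rolled back.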
2646b3230c8bSSteven Rostedt */ 2647b3230c8bSSteven Rostedt if (tail == BUF_PAGE_SIZE) 2648b3230c8bSSteven Rostedt tail_page->real_end = 0; 2649b3230c8bSSteven Rostedt 2650c7b09308SSteven Rostedt local_sub(length, &tail_page->write); 2651c7b09308SSteven Rostedt return; 2652c7b09308SSteven Rostedt } 2653c7b09308SSteven Rostedt 2654c7b09308SSteven Rostedt event = __rb_page_index(tail_page, tail); 2655c7b09308SSteven Rostedt 2656c64e148aSVaibhav Nagarnaik /* account for padding bytes */ 2657c64e148aSVaibhav Nagarnaik local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); 2658c64e148aSVaibhav Nagarnaik 2659c7b09308SSteven Rostedt /* 2660ff0ff84aSSteven Rostedt * Save the original length to the meta data. 2661ff0ff84aSSteven Rostedt * This will be used by the reader to add lost event 2662ff0ff84aSSteven Rostedt * counter. 2663ff0ff84aSSteven Rostedt */ 2664ff0ff84aSSteven Rostedt tail_page->real_end = tail; 2665ff0ff84aSSteven Rostedt 2666ff0ff84aSSteven Rostedt /* 2667c7b09308SSteven Rostedt * If this event is bigger than the minimum size, then 2668c7b09308SSteven Rostedt * we need to be careful that we don't subtract the 2669c7b09308SSteven Rostedt * write counter enough to allow another writer to slip 2670c7b09308SSteven Rostedt * in on this page. 2671c7b09308SSteven Rostedt * We put in a discarded commit instead, to make sure 2672c7b09308SSteven Rostedt * that this space is not used again. 2673c7b09308SSteven Rostedt * 2674c7b09308SSteven Rostedt * If we are less than the minimum size, we don't need to 2675c7b09308SSteven Rostedt * worry about it. 2676c7b09308SSteven Rostedt */ 2677c7b09308SSteven Rostedt if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) { 2678c7b09308SSteven Rostedt /* No room for any events */ 2679c7b09308SSteven Rostedt 2680c7b09308SSteven Rostedt /* Mark the rest of the page with padding */ 2681c7b09308SSteven Rostedt rb_event_set_padding(event); 2682c7b09308SSteven Rostedt 2683a0fcaaedSSteven Rostedt (Google) /* Make sure the padding is visible before the write update */ 2684a0fcaaedSSteven Rostedt (Google) smp_wmb(); 2685a0fcaaedSSteven Rostedt (Google) 2686c7b09308SSteven Rostedt /* Set the write back to the previous setting */ 2687c7b09308SSteven Rostedt local_sub(length, &tail_page->write); 2688c7b09308SSteven Rostedt return; 2689c7b09308SSteven Rostedt } 2690c7b09308SSteven Rostedt 2691c7b09308SSteven Rostedt /* Put in a discarded event */ 2692c7b09308SSteven Rostedt event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE; 2693c7b09308SSteven Rostedt event->type_len = RINGBUF_TYPE_PADDING; 2694c7b09308SSteven Rostedt /* time delta must be non zero */ 2695c7b09308SSteven Rostedt event->time_delta = 1; 2696c7b09308SSteven Rostedt 2697a0fcaaedSSteven Rostedt (Google) /* Make sure the padding is visible before the tail_page->write update */ 2698a0fcaaedSSteven Rostedt (Google) smp_wmb(); 2699a0fcaaedSSteven Rostedt (Google) 2700c7b09308SSteven Rostedt /* Set write to end of buffer */ 2701c7b09308SSteven Rostedt length = (tail + length) - BUF_PAGE_SIZE; 2702c7b09308SSteven Rostedt local_sub(length, &tail_page->write); 2703c7b09308SSteven Rostedt } 27046634ff26SSteven Rostedt 27054239c38fSSteven Rostedt (Red Hat) static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer); 27064239c38fSSteven Rostedt (Red Hat) 2707747e94aeSSteven Rostedt /* 2708747e94aeSSteven Rostedt * This is the slow path, force gcc not to inline it. 
2709747e94aeSSteven Rostedt */ 2710747e94aeSSteven Rostedt static noinline struct ring_buffer_event * 27116634ff26SSteven Rostedt rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 2712fcc742eaSSteven Rostedt (Red Hat) unsigned long tail, struct rb_event_info *info) 27137a8e76a3SSteven Rostedt { 2714fcc742eaSSteven Rostedt (Red Hat) struct buffer_page *tail_page = info->tail_page; 27155a50e33cSSteven Rostedt struct buffer_page *commit_page = cpu_buffer->commit_page; 271613292494SSteven Rostedt (VMware) struct trace_buffer *buffer = cpu_buffer->buffer; 271777ae365eSSteven Rostedt struct buffer_page *next_page; 271877ae365eSSteven Rostedt int ret; 2719aa20ae84SSteven Rostedt 2720aa20ae84SSteven Rostedt next_page = tail_page; 27217a8e76a3SSteven Rostedt 27226689bed3SQiujun Huang rb_inc_page(&next_page); 27237a8e76a3SSteven Rostedt 2724bf41a158SSteven Rostedt /* 2725bf41a158SSteven Rostedt * If for some reason, we had an interrupt storm that made 2726bf41a158SSteven Rostedt * it all the way around the buffer, bail, and warn 2727bf41a158SSteven Rostedt * about it. 2728bf41a158SSteven Rostedt */ 272998db8df7SSteven Rostedt if (unlikely(next_page == commit_page)) { 273077ae365eSSteven Rostedt local_inc(&cpu_buffer->commit_overrun); 273145141d46SSteven Rostedt goto out_reset; 2732bf41a158SSteven Rostedt } 2733d769041fSSteven Rostedt 2734bf41a158SSteven Rostedt /* 273577ae365eSSteven Rostedt * This is where the fun begins! 273677ae365eSSteven Rostedt * 273777ae365eSSteven Rostedt * We are fighting against races between a reader that 273877ae365eSSteven Rostedt * could be on another CPU trying to swap its reader 273977ae365eSSteven Rostedt * page with the buffer head. 274077ae365eSSteven Rostedt * 274177ae365eSSteven Rostedt * We are also fighting against interrupts coming in and 274277ae365eSSteven Rostedt * moving the head or tail on us as well. 274377ae365eSSteven Rostedt * 274477ae365eSSteven Rostedt * If the next page is the head page then we have filled 274577ae365eSSteven Rostedt * the buffer, unless the commit page is still on the 274677ae365eSSteven Rostedt * reader page. 2747bf41a158SSteven Rostedt */ 27486689bed3SQiujun Huang if (rb_is_head_page(next_page, &tail_page->list)) { 2749bf41a158SSteven Rostedt 275077ae365eSSteven Rostedt /* 275177ae365eSSteven Rostedt * If the commit is not on the reader page, then 275277ae365eSSteven Rostedt * move the header page. 275377ae365eSSteven Rostedt */ 275477ae365eSSteven Rostedt if (!rb_is_reader_page(cpu_buffer->commit_page)) { 275577ae365eSSteven Rostedt /* 275677ae365eSSteven Rostedt * If we are not in overwrite mode, 275777ae365eSSteven Rostedt * this is easy, just stop here. 275877ae365eSSteven Rostedt */ 2759884bfe89SSlava Pestov if (!(buffer->flags & RB_FL_OVERWRITE)) { 2760884bfe89SSlava Pestov local_inc(&cpu_buffer->dropped_events); 276177ae365eSSteven Rostedt goto out_reset; 2762884bfe89SSlava Pestov } 276377ae365eSSteven Rostedt 276477ae365eSSteven Rostedt ret = rb_handle_head_page(cpu_buffer, 276577ae365eSSteven Rostedt tail_page, 276677ae365eSSteven Rostedt next_page); 276777ae365eSSteven Rostedt if (ret < 0) 276877ae365eSSteven Rostedt goto out_reset; 276977ae365eSSteven Rostedt if (ret) 277077ae365eSSteven Rostedt goto out_again; 277177ae365eSSteven Rostedt } else { 277277ae365eSSteven Rostedt /* 277377ae365eSSteven Rostedt * We need to be careful here too. The 277477ae365eSSteven Rostedt * commit page could still be on the reader 277577ae365eSSteven Rostedt * page. 
We could have a small buffer, and 277677ae365eSSteven Rostedt * have filled up the buffer with events 277777ae365eSSteven Rostedt * from interrupts and such, and wrapped. 277877ae365eSSteven Rostedt * 2779c6358bacSQiujun Huang * Note, if the tail page is also on the 278077ae365eSSteven Rostedt * reader_page, we let it move out. 278177ae365eSSteven Rostedt */ 278277ae365eSSteven Rostedt if (unlikely((cpu_buffer->commit_page != 278377ae365eSSteven Rostedt cpu_buffer->tail_page) && 278477ae365eSSteven Rostedt (cpu_buffer->commit_page == 278577ae365eSSteven Rostedt cpu_buffer->reader_page))) { 278677ae365eSSteven Rostedt local_inc(&cpu_buffer->commit_overrun); 278777ae365eSSteven Rostedt goto out_reset; 278877ae365eSSteven Rostedt } 278977ae365eSSteven Rostedt } 2790bf41a158SSteven Rostedt } 2791bf41a158SSteven Rostedt 279270004986SSteven Rostedt (Red Hat) rb_tail_page_update(cpu_buffer, tail_page, next_page); 27937a8e76a3SSteven Rostedt 279477ae365eSSteven Rostedt out_again: 279577ae365eSSteven Rostedt 2796fcc742eaSSteven Rostedt (Red Hat) rb_reset_tail(cpu_buffer, tail, info); 2797bf41a158SSteven Rostedt 27984239c38fSSteven Rostedt (Red Hat) /* Commit what we have for now. */ 27994239c38fSSteven Rostedt (Red Hat) rb_end_commit(cpu_buffer); 28004239c38fSSteven Rostedt (Red Hat) /* rb_end_commit() decs committing */ 28014239c38fSSteven Rostedt (Red Hat) local_inc(&cpu_buffer->committing); 28024239c38fSSteven Rostedt (Red Hat) 2803bf41a158SSteven Rostedt /* fail and let the caller try again */ 2804bf41a158SSteven Rostedt return ERR_PTR(-EAGAIN); 2805bf41a158SSteven Rostedt 280645141d46SSteven Rostedt out_reset: 28076f3b3440SLai Jiangshan /* reset write */ 2808fcc742eaSSteven Rostedt (Red Hat) rb_reset_tail(cpu_buffer, tail, info); 28096f3b3440SLai Jiangshan 2810bf41a158SSteven Rostedt return NULL; 28117a8e76a3SSteven Rostedt } 28127a8e76a3SSteven Rostedt 281374e87937SSteven Rostedt (VMware) /* Slow path */ 281474e87937SSteven Rostedt (VMware) static struct ring_buffer_event * 2815dc4e2801STom Zanussi rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs) 2816d90fd774SSteven Rostedt (Red Hat) { 2817dc4e2801STom Zanussi if (abs) 2818dc4e2801STom Zanussi event->type_len = RINGBUF_TYPE_TIME_STAMP; 2819dc4e2801STom Zanussi else 2820d90fd774SSteven Rostedt (Red Hat) event->type_len = RINGBUF_TYPE_TIME_EXTEND; 2821d90fd774SSteven Rostedt (Red Hat) 2822dc4e2801STom Zanussi /* Not the first event on the page, or not delta? 
*/ 2823dc4e2801STom Zanussi if (abs || rb_event_index(event)) { 2824d90fd774SSteven Rostedt (Red Hat) event->time_delta = delta & TS_MASK; 2825d90fd774SSteven Rostedt (Red Hat) event->array[0] = delta >> TS_SHIFT; 2826d90fd774SSteven Rostedt (Red Hat) } else { 2827d90fd774SSteven Rostedt (Red Hat) /* nope, just zero it */ 2828d90fd774SSteven Rostedt (Red Hat) event->time_delta = 0; 2829d90fd774SSteven Rostedt (Red Hat) event->array[0] = 0; 2830d90fd774SSteven Rostedt (Red Hat) } 2831d90fd774SSteven Rostedt (Red Hat) 2832d90fd774SSteven Rostedt (Red Hat) return skip_time_extend(event); 2833d90fd774SSteven Rostedt (Red Hat) } 2834d90fd774SSteven Rostedt (Red Hat) 283558fbc3c6SSteven Rostedt (VMware) #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 283658fbc3c6SSteven Rostedt (VMware) static inline bool sched_clock_stable(void) 283758fbc3c6SSteven Rostedt (VMware) { 283858fbc3c6SSteven Rostedt (VMware) return true; 283958fbc3c6SSteven Rostedt (VMware) } 284058fbc3c6SSteven Rostedt (VMware) #endif 284158fbc3c6SSteven Rostedt (VMware) 284274e87937SSteven Rostedt (VMware) static void 284358fbc3c6SSteven Rostedt (VMware) rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 284458fbc3c6SSteven Rostedt (VMware) struct rb_event_info *info) 284558fbc3c6SSteven Rostedt (VMware) { 284658fbc3c6SSteven Rostedt (VMware) u64 write_stamp; 284758fbc3c6SSteven Rostedt (VMware) 284829ce2451SSteven Rostedt (VMware) WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s", 284958fbc3c6SSteven Rostedt (VMware) (unsigned long long)info->delta, 285058fbc3c6SSteven Rostedt (VMware) (unsigned long long)info->ts, 285158fbc3c6SSteven Rostedt (VMware) (unsigned long long)info->before, 285258fbc3c6SSteven Rostedt (VMware) (unsigned long long)info->after, 285358fbc3c6SSteven Rostedt (VMware) (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0), 285458fbc3c6SSteven Rostedt (VMware) sched_clock_stable() ? "" : 285558fbc3c6SSteven Rostedt (VMware) "If you just came from a suspend/resume,\n" 285658fbc3c6SSteven Rostedt (VMware) "please switch to the trace global clock:\n" 28572455f0e1SRoss Zwisler " echo global > /sys/kernel/tracing/trace_clock\n" 285858fbc3c6SSteven Rostedt (VMware) "or add trace_clock=global to the kernel command line\n"); 285958fbc3c6SSteven Rostedt (VMware) } 286058fbc3c6SSteven Rostedt (VMware) 286174e87937SSteven Rostedt (VMware) static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, 286274e87937SSteven Rostedt (VMware) struct ring_buffer_event **event, 286374e87937SSteven Rostedt (VMware) struct rb_event_info *info, 286474e87937SSteven Rostedt (VMware) u64 *delta, 286574e87937SSteven Rostedt (VMware) unsigned int *length) 286674e87937SSteven Rostedt (VMware) { 286774e87937SSteven Rostedt (VMware) bool abs = info->add_timestamp & 286874e87937SSteven Rostedt (VMware) (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE); 286974e87937SSteven Rostedt (VMware) 287029ce2451SSteven Rostedt (VMware) if (unlikely(info->delta > (1ULL << 59))) { 28716695da58SSteven Rostedt (Google) /* 28726695da58SSteven Rostedt (Google) * Some timers can use more than 59 bits, and when a timestamp 28736695da58SSteven Rostedt (Google) * is added to the buffer, it will lose those bits. 
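 *
 * For illustration: with TS_MSB == (0xf8ULL << 56), a clock value
 * such as ts == 0x0900000000000123 has a bit of TS_MSB set, and
 * the masking below keeps only the low 59 bits:
 *
 *	delta & ABS_TS_MASK == 0x0100000000000123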
28746695da58SSteven Rostedt (Google) */ 28756695da58SSteven Rostedt (Google) if (abs && (info->ts & TS_MSB)) { 28766695da58SSteven Rostedt (Google) info->delta &= ABS_TS_MASK; 28776695da58SSteven Rostedt (Google) 287829ce2451SSteven Rostedt (VMware) /* did the clock go backwards */ 28796695da58SSteven Rostedt (Google) } else if (info->before == info->after && info->before > info->ts) { 288029ce2451SSteven Rostedt (VMware) /* not interrupted */ 288129ce2451SSteven Rostedt (VMware) static int once; 288229ce2451SSteven Rostedt (VMware) 288329ce2451SSteven Rostedt (VMware) /* 288429ce2451SSteven Rostedt (VMware) * This is possible with a recalibrating of the TSC. 288529ce2451SSteven Rostedt (VMware) * Do not produce a call stack, but just report it. 288629ce2451SSteven Rostedt (VMware) */ 288729ce2451SSteven Rostedt (VMware) if (!once) { 288829ce2451SSteven Rostedt (VMware) once++; 288929ce2451SSteven Rostedt (VMware) pr_warn("Ring buffer clock went backwards: %llu -> %llu\n", 289029ce2451SSteven Rostedt (VMware) info->before, info->ts); 289129ce2451SSteven Rostedt (VMware) } 289229ce2451SSteven Rostedt (VMware) } else 289374e87937SSteven Rostedt (VMware) rb_check_timestamp(cpu_buffer, info); 289429ce2451SSteven Rostedt (VMware) if (!abs) 289529ce2451SSteven Rostedt (VMware) info->delta = 0; 289629ce2451SSteven Rostedt (VMware) } 289774e87937SSteven Rostedt (VMware) *event = rb_add_time_stamp(*event, info->delta, abs); 289874e87937SSteven Rostedt (VMware) *length -= RB_LEN_TIME_EXTEND; 289974e87937SSteven Rostedt (VMware) *delta = 0; 290074e87937SSteven Rostedt (VMware) } 290174e87937SSteven Rostedt (VMware) 2902d90fd774SSteven Rostedt (Red Hat) /** 2903d90fd774SSteven Rostedt (Red Hat) * rb_update_event - update event type and data 2904cfc585a4SSteven Rostedt (VMware) * @cpu_buffer: The per cpu buffer of the @event 2905d90fd774SSteven Rostedt (Red Hat) * @event: the event to update 2906cfc585a4SSteven Rostedt (VMware) * @info: The info to update the @event with (contains length and delta) 2907d90fd774SSteven Rostedt (Red Hat) * 2908cfc585a4SSteven Rostedt (VMware) * Update the type and data fields of the @event. The length 2909d90fd774SSteven Rostedt (Red Hat) * is the actual size that is written to the ring buffer, 2910d90fd774SSteven Rostedt (Red Hat) * and with this, we can determine what to place into the 2911d90fd774SSteven Rostedt (Red Hat) * data field. 2912d90fd774SSteven Rostedt (Red Hat) */ 2913b7dc42fdSSteven Rostedt (Red Hat) static void 2914d90fd774SSteven Rostedt (Red Hat) rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, 2915d90fd774SSteven Rostedt (Red Hat) struct ring_buffer_event *event, 2916d90fd774SSteven Rostedt (Red Hat) struct rb_event_info *info) 2917d90fd774SSteven Rostedt (Red Hat) { 2918d90fd774SSteven Rostedt (Red Hat) unsigned length = info->length; 2919d90fd774SSteven Rostedt (Red Hat) u64 delta = info->delta; 29208672e494SSteven Rostedt (VMware) unsigned int nest = local_read(&cpu_buffer->committing) - 1; 29218672e494SSteven Rostedt (VMware) 2922a948c69dSSteven Rostedt (VMware) if (!WARN_ON_ONCE(nest >= MAX_NEST)) 29238672e494SSteven Rostedt (VMware) cpu_buffer->event_stamp[nest] = info->ts; 2924d90fd774SSteven Rostedt (Red Hat) 2925d90fd774SSteven Rostedt (Red Hat) /* 2926d90fd774SSteven Rostedt (Red Hat) * If we need to add a timestamp, then we 29276167c205SSteven Rostedt (VMware) * add it to the start of the reserved space. 
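 *
 * For illustration, the reserved space then begins with the
 * extend, per the entry header format printed at the top of this
 * file (5 bit type_len, 27 bit time_delta):
 *
 *	| TIME_EXTEND (8 bytes) | event header | event data ... |
 *
 * with the extend carrying the delta bits that do not fit in the
 * 27 bit time_delta field.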
2928d90fd774SSteven Rostedt (Red Hat) */ 292974e87937SSteven Rostedt (VMware) if (unlikely(info->add_timestamp)) 293074e87937SSteven Rostedt (VMware) rb_add_timestamp(cpu_buffer, &event, info, &delta, &length); 2931d90fd774SSteven Rostedt (Red Hat) 2932d90fd774SSteven Rostedt (Red Hat) event->time_delta = delta; 2933d90fd774SSteven Rostedt (Red Hat) length -= RB_EVNT_HDR_SIZE; 2934adab66b7SSteven Rostedt (VMware) if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) { 2935d90fd774SSteven Rostedt (Red Hat) event->type_len = 0; 2936d90fd774SSteven Rostedt (Red Hat) event->array[0] = length; 2937d90fd774SSteven Rostedt (Red Hat) } else 2938d90fd774SSteven Rostedt (Red Hat) event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); 2939d90fd774SSteven Rostedt (Red Hat) } 2940d90fd774SSteven Rostedt (Red Hat) 2941d90fd774SSteven Rostedt (Red Hat) static unsigned rb_calculate_event_length(unsigned length) 2942d90fd774SSteven Rostedt (Red Hat) { 2943d90fd774SSteven Rostedt (Red Hat) struct ring_buffer_event event; /* Used only for sizeof array */ 2944d90fd774SSteven Rostedt (Red Hat) 2945d90fd774SSteven Rostedt (Red Hat) /* zero length can cause confusions */ 2946d90fd774SSteven Rostedt (Red Hat) if (!length) 2947d90fd774SSteven Rostedt (Red Hat) length++; 2948d90fd774SSteven Rostedt (Red Hat) 2949adab66b7SSteven Rostedt (VMware) if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) 2950d90fd774SSteven Rostedt (Red Hat) length += sizeof(event.array[0]); 2951d90fd774SSteven Rostedt (Red Hat) 2952d90fd774SSteven Rostedt (Red Hat) length += RB_EVNT_HDR_SIZE; 2953adab66b7SSteven Rostedt (VMware) length = ALIGN(length, RB_ARCH_ALIGNMENT); 2954d90fd774SSteven Rostedt (Red Hat) 2955d90fd774SSteven Rostedt (Red Hat) /* 2956d90fd774SSteven Rostedt (Red Hat) * In case the time delta is larger than the 27 bits for it 2957d90fd774SSteven Rostedt (Red Hat) * in the header, we need to add a timestamp. If another 2958d90fd774SSteven Rostedt (Red Hat) * event comes in when trying to discard this one to increase 2959d90fd774SSteven Rostedt (Red Hat) * the length, then the timestamp will be added in the allocated 2960d90fd774SSteven Rostedt (Red Hat) * space of this event. If length is bigger than the size needed 2961d90fd774SSteven Rostedt (Red Hat) * for the TIME_EXTEND, then padding has to be used. The events 2962d90fd774SSteven Rostedt (Red Hat) * length must be either RB_LEN_TIME_EXTEND, or greater than or equal 2963d90fd774SSteven Rostedt (Red Hat) * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding. 2964d90fd774SSteven Rostedt (Red Hat) * As length is a multiple of 4, we only need to worry if it 2965d90fd774SSteven Rostedt (Red Hat) * is 12 (RB_LEN_TIME_EXTEND + 4). 
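 *
 * Worked example, assuming RB_ARCH_ALIGNMENT == 4 and the 4 byte
 * event header: a 5 byte payload gives
 *
 *	ALIGN(5 + RB_EVNT_HDR_SIZE, 4) == 12
 *
 * which is exactly RB_LEN_TIME_EXTEND + 4, so the length is
 * bumped to 16 to leave room for valid padding.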
2966d90fd774SSteven Rostedt (Red Hat) */ 2967d90fd774SSteven Rostedt (Red Hat) if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT) 2968d90fd774SSteven Rostedt (Red Hat) length += RB_ALIGNMENT; 2969d90fd774SSteven Rostedt (Red Hat) 2970d90fd774SSteven Rostedt (Red Hat) return length; 2971d90fd774SSteven Rostedt (Red Hat) } 2972d90fd774SSteven Rostedt (Red Hat) 2973a389d86fSSteven Rostedt (VMware) static u64 rb_time_delta(struct ring_buffer_event *event) 2974a389d86fSSteven Rostedt (VMware) { 2975a389d86fSSteven Rostedt (VMware) switch (event->type_len) { 2976a389d86fSSteven Rostedt (VMware) case RINGBUF_TYPE_PADDING: 2977a389d86fSSteven Rostedt (VMware) return 0; 2978a389d86fSSteven Rostedt (VMware) 2979a389d86fSSteven Rostedt (VMware) case RINGBUF_TYPE_TIME_EXTEND: 2980e20044f7SSteven Rostedt (VMware) return rb_event_time_stamp(event); 2981a389d86fSSteven Rostedt (VMware) 2982a389d86fSSteven Rostedt (VMware) case RINGBUF_TYPE_TIME_STAMP: 2983a389d86fSSteven Rostedt (VMware) return 0; 2984a389d86fSSteven Rostedt (VMware) 2985a389d86fSSteven Rostedt (VMware) case RINGBUF_TYPE_DATA: 2986a389d86fSSteven Rostedt (VMware) return event->time_delta; 2987a389d86fSSteven Rostedt (VMware) default: 2988a389d86fSSteven Rostedt (VMware) return 0; 2989a389d86fSSteven Rostedt (VMware) } 2990a389d86fSSteven Rostedt (VMware) } 29919826b273SSteven Rostedt (Red Hat) 2992bc92b956SUros Bizjak static inline bool 2993a4543a2fSSteven Rostedt (Red Hat) rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, 2994d90fd774SSteven Rostedt (Red Hat) struct ring_buffer_event *event) 2995d90fd774SSteven Rostedt (Red Hat) { 2996d90fd774SSteven Rostedt (Red Hat) unsigned long new_index, old_index; 2997d90fd774SSteven Rostedt (Red Hat) struct buffer_page *bpage; 2998d90fd774SSteven Rostedt (Red Hat) unsigned long addr; 2999a389d86fSSteven Rostedt (VMware) u64 write_stamp; 3000a389d86fSSteven Rostedt (VMware) u64 delta; 3001d90fd774SSteven Rostedt (Red Hat) 3002d90fd774SSteven Rostedt (Red Hat) new_index = rb_event_index(event); 3003d90fd774SSteven Rostedt (Red Hat) old_index = new_index + rb_event_ts_length(event); 3004d90fd774SSteven Rostedt (Red Hat) addr = (unsigned long)event; 3005d90fd774SSteven Rostedt (Red Hat) addr &= PAGE_MASK; 3006d90fd774SSteven Rostedt (Red Hat) 30078573636eSSteven Rostedt (Red Hat) bpage = READ_ONCE(cpu_buffer->tail_page); 3008d90fd774SSteven Rostedt (Red Hat) 3009a389d86fSSteven Rostedt (VMware) delta = rb_time_delta(event); 3010a389d86fSSteven Rostedt (VMware) 301110464b4aSSteven Rostedt (VMware) if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp)) 3012bc92b956SUros Bizjak return false; 3013a389d86fSSteven Rostedt (VMware) 3014a389d86fSSteven Rostedt (VMware) /* Make sure the write stamp is read before testing the location */ 3015a389d86fSSteven Rostedt (VMware) barrier(); 3016a389d86fSSteven Rostedt (VMware) 3017d90fd774SSteven Rostedt (Red Hat) if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { 3018d90fd774SSteven Rostedt (Red Hat) unsigned long write_mask = 3019d90fd774SSteven Rostedt (Red Hat) local_read(&bpage->write) & ~RB_WRITE_MASK; 3020d90fd774SSteven Rostedt (Red Hat) unsigned long event_length = rb_event_length(event); 3021a389d86fSSteven Rostedt (VMware) 3022a389d86fSSteven Rostedt (VMware) /* Something came in, can't discard */ 302310464b4aSSteven Rostedt (VMware) if (!rb_time_cmpxchg(&cpu_buffer->write_stamp, 302410464b4aSSteven Rostedt (VMware) write_stamp, write_stamp - delta)) 3025bc92b956SUros Bizjak return false; 3026a389d86fSSteven Rostedt (VMware) 
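		/*
		 * Note that the cmpxchg above not only detects a racing
		 * writer but also rewinds write_stamp by this event's
		 * delta. For illustration (hypothetical values): a
		 * write_stamp of 1000 with a delta of 7 becomes 993, as
		 * if the event had never been written.
		 */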
3027a389d86fSSteven Rostedt (VMware) /* 30286f6be606SSteven Rostedt (VMware) * It's possible that the event time delta is zero 30296f6be606SSteven Rostedt (VMware) * (has the same time stamp as the previous event) 30306f6be606SSteven Rostedt (VMware) * in which case write_stamp and before_stamp could 30316f6be606SSteven Rostedt (VMware) * be the same. In such a case, force before_stamp 30326f6be606SSteven Rostedt (VMware) * to be different than write_stamp. It doesn't 30336f6be606SSteven Rostedt (VMware) * matter what it is, as long as its different. 30346f6be606SSteven Rostedt (VMware) */ 30356f6be606SSteven Rostedt (VMware) if (!delta) 30366f6be606SSteven Rostedt (VMware) rb_time_set(&cpu_buffer->before_stamp, 0); 30376f6be606SSteven Rostedt (VMware) 30386f6be606SSteven Rostedt (VMware) /* 3039a389d86fSSteven Rostedt (VMware) * If an event were to come in now, it would see that the 3040a389d86fSSteven Rostedt (VMware) * write_stamp and the before_stamp are different, and assume 3041a389d86fSSteven Rostedt (VMware) * that this event just added itself before updating 3042a389d86fSSteven Rostedt (VMware) * the write stamp. The interrupting event will fix the 3043a389d86fSSteven Rostedt (VMware) * write stamp for us, and use the before stamp as its delta. 3044a389d86fSSteven Rostedt (VMware) */ 3045a389d86fSSteven Rostedt (VMware) 3046d90fd774SSteven Rostedt (Red Hat) /* 3047d90fd774SSteven Rostedt (Red Hat) * This is on the tail page. It is possible that 3048d90fd774SSteven Rostedt (Red Hat) * a write could come in and move the tail page 3049d90fd774SSteven Rostedt (Red Hat) * and write to the next page. That is fine 3050d90fd774SSteven Rostedt (Red Hat) * because we just shorten what is on this page. 3051d90fd774SSteven Rostedt (Red Hat) */ 3052d90fd774SSteven Rostedt (Red Hat) old_index += write_mask; 3053d90fd774SSteven Rostedt (Red Hat) new_index += write_mask; 3054*00a8478fSUros Bizjak 3055*00a8478fSUros Bizjak /* caution: old_index gets updated on cmpxchg failure */ 3056*00a8478fSUros Bizjak if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) { 3057d90fd774SSteven Rostedt (Red Hat) /* update counters */ 3058d90fd774SSteven Rostedt (Red Hat) local_sub(event_length, &cpu_buffer->entries_bytes); 3059bc92b956SUros Bizjak return true; 3060d90fd774SSteven Rostedt (Red Hat) } 3061d90fd774SSteven Rostedt (Red Hat) } 3062d90fd774SSteven Rostedt (Red Hat) 3063d90fd774SSteven Rostedt (Red Hat) /* could not discard */ 3064bc92b956SUros Bizjak return false; 3065d90fd774SSteven Rostedt (Red Hat) } 3066d90fd774SSteven Rostedt (Red Hat) 3067d90fd774SSteven Rostedt (Red Hat) static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) 3068d90fd774SSteven Rostedt (Red Hat) { 3069d90fd774SSteven Rostedt (Red Hat) local_inc(&cpu_buffer->committing); 3070d90fd774SSteven Rostedt (Red Hat) local_inc(&cpu_buffer->commits); 3071d90fd774SSteven Rostedt (Red Hat) } 3072d90fd774SSteven Rostedt (Red Hat) 307338e11df1SSteven Rostedt (Red Hat) static __always_inline void 3074d90fd774SSteven Rostedt (Red Hat) rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) 3075d90fd774SSteven Rostedt (Red Hat) { 3076d90fd774SSteven Rostedt (Red Hat) unsigned long max_count; 3077d90fd774SSteven Rostedt (Red Hat) 3078d90fd774SSteven Rostedt (Red Hat) /* 3079d90fd774SSteven Rostedt (Red Hat) * We only race with interrupts and NMIs on this CPU. 
3080d90fd774SSteven Rostedt (Red Hat) * If we own the commit event, then we can commit 3081d90fd774SSteven Rostedt (Red Hat) * all others that interrupted us, since the interruptions 3082d90fd774SSteven Rostedt (Red Hat) * are in stack format (they finish before they come 3083d90fd774SSteven Rostedt (Red Hat) * back to us). This allows us to do a simple loop to 3084d90fd774SSteven Rostedt (Red Hat) * assign the commit to the tail. 3085d90fd774SSteven Rostedt (Red Hat) */ 3086d90fd774SSteven Rostedt (Red Hat) again: 3087d90fd774SSteven Rostedt (Red Hat) max_count = cpu_buffer->nr_pages * 100; 3088d90fd774SSteven Rostedt (Red Hat) 30898573636eSSteven Rostedt (Red Hat) while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { 3090d90fd774SSteven Rostedt (Red Hat) if (RB_WARN_ON(cpu_buffer, !(--max_count))) 3091d90fd774SSteven Rostedt (Red Hat) return; 3092d90fd774SSteven Rostedt (Red Hat) if (RB_WARN_ON(cpu_buffer, 3093d90fd774SSteven Rostedt (Red Hat) rb_is_reader_page(cpu_buffer->tail_page))) 3094d90fd774SSteven Rostedt (Red Hat) return; 30956455b616SZheng Yejian /* 30966455b616SZheng Yejian * No need for a memory barrier here, as the update 30976455b616SZheng Yejian * of the tail_page did it for this page. 30986455b616SZheng Yejian */ 3099d90fd774SSteven Rostedt (Red Hat) local_set(&cpu_buffer->commit_page->page->commit, 3100d90fd774SSteven Rostedt (Red Hat) rb_page_write(cpu_buffer->commit_page)); 31016689bed3SQiujun Huang rb_inc_page(&cpu_buffer->commit_page); 3102d90fd774SSteven Rostedt (Red Hat) /* add barrier to keep gcc from optimizing too much */ 3103d90fd774SSteven Rostedt (Red Hat) barrier(); 3104d90fd774SSteven Rostedt (Red Hat) } 3105d90fd774SSteven Rostedt (Red Hat) while (rb_commit_index(cpu_buffer) != 3106d90fd774SSteven Rostedt (Red Hat) rb_page_write(cpu_buffer->commit_page)) { 3107d90fd774SSteven Rostedt (Red Hat) 31086455b616SZheng Yejian /* Make sure the readers see the content of what is committed. */ 31096455b616SZheng Yejian smp_wmb(); 3110d90fd774SSteven Rostedt (Red Hat) local_set(&cpu_buffer->commit_page->page->commit, 3111d90fd774SSteven Rostedt (Red Hat) rb_page_write(cpu_buffer->commit_page)); 3112d90fd774SSteven Rostedt (Red Hat) RB_WARN_ON(cpu_buffer, 3113d90fd774SSteven Rostedt (Red Hat) local_read(&cpu_buffer->commit_page->page->commit) & 3114d90fd774SSteven Rostedt (Red Hat) ~RB_WRITE_MASK); 3115d90fd774SSteven Rostedt (Red Hat) barrier(); 3116d90fd774SSteven Rostedt (Red Hat) } 3117d90fd774SSteven Rostedt (Red Hat) 3118d90fd774SSteven Rostedt (Red Hat) /* again, keep gcc from optimizing */ 3119d90fd774SSteven Rostedt (Red Hat) barrier(); 3120d90fd774SSteven Rostedt (Red Hat) 3121d90fd774SSteven Rostedt (Red Hat) /* 3122d90fd774SSteven Rostedt (Red Hat) * If an interrupt came in just after the first while loop 3123d90fd774SSteven Rostedt (Red Hat) * and pushed the tail page forward, we will be left with 3124d90fd774SSteven Rostedt (Red Hat) * a dangling commit that will never go forward. 
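 *
 * An illustrative interleaving of that race (a sketch, not an
 * exhaustive trace; W = this writer, I = an interrupt on the
 * same CPU):
 *
 *	W: sees commit_page == tail_page, exits the first loop
 *	I: reserves an event and moves tail_page forward
 *	I: commits, but sees committing > 1, so leaves the work to W
 *	W: without the re-check at the end of this function,
 *	   commit_page would never be advanced to the new tail_page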
3125d90fd774SSteven Rostedt (Red Hat) */ 31268573636eSSteven Rostedt (Red Hat) if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) 3127d90fd774SSteven Rostedt (Red Hat) goto again; 3128d90fd774SSteven Rostedt (Red Hat) } 3129d90fd774SSteven Rostedt (Red Hat) 313038e11df1SSteven Rostedt (Red Hat) static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) 3131d90fd774SSteven Rostedt (Red Hat) { 3132d90fd774SSteven Rostedt (Red Hat) unsigned long commits; 3133d90fd774SSteven Rostedt (Red Hat) 3134d90fd774SSteven Rostedt (Red Hat) if (RB_WARN_ON(cpu_buffer, 3135d90fd774SSteven Rostedt (Red Hat) !local_read(&cpu_buffer->committing))) 3136d90fd774SSteven Rostedt (Red Hat) return; 3137d90fd774SSteven Rostedt (Red Hat) 3138d90fd774SSteven Rostedt (Red Hat) again: 3139d90fd774SSteven Rostedt (Red Hat) commits = local_read(&cpu_buffer->commits); 3140d90fd774SSteven Rostedt (Red Hat) /* synchronize with interrupts */ 3141d90fd774SSteven Rostedt (Red Hat) barrier(); 3142d90fd774SSteven Rostedt (Red Hat) if (local_read(&cpu_buffer->committing) == 1) 3143d90fd774SSteven Rostedt (Red Hat) rb_set_commit_to_write(cpu_buffer); 3144d90fd774SSteven Rostedt (Red Hat) 3145d90fd774SSteven Rostedt (Red Hat) local_dec(&cpu_buffer->committing); 3146d90fd774SSteven Rostedt (Red Hat) 3147d90fd774SSteven Rostedt (Red Hat) /* synchronize with interrupts */ 3148d90fd774SSteven Rostedt (Red Hat) barrier(); 3149d90fd774SSteven Rostedt (Red Hat) 3150d90fd774SSteven Rostedt (Red Hat) /* 3151d90fd774SSteven Rostedt (Red Hat) * Need to account for interrupts coming in between the 3152d90fd774SSteven Rostedt (Red Hat) * updating of the commit page and the clearing of the 3153d90fd774SSteven Rostedt (Red Hat) * committing counter. 3154d90fd774SSteven Rostedt (Red Hat) */ 3155d90fd774SSteven Rostedt (Red Hat) if (unlikely(local_read(&cpu_buffer->commits) != commits) && 3156d90fd774SSteven Rostedt (Red Hat) !local_read(&cpu_buffer->committing)) { 3157d90fd774SSteven Rostedt (Red Hat) local_inc(&cpu_buffer->committing); 3158d90fd774SSteven Rostedt (Red Hat) goto again; 3159d90fd774SSteven Rostedt (Red Hat) } 3160d90fd774SSteven Rostedt (Red Hat) } 3161d90fd774SSteven Rostedt (Red Hat) 3162d90fd774SSteven Rostedt (Red Hat) static inline void rb_event_discard(struct ring_buffer_event *event) 3163d90fd774SSteven Rostedt (Red Hat) { 3164dc4e2801STom Zanussi if (extended_time(event)) 3165d90fd774SSteven Rostedt (Red Hat) event = skip_time_extend(event); 3166d90fd774SSteven Rostedt (Red Hat) 3167d90fd774SSteven Rostedt (Red Hat) /* array[0] holds the actual length for the discarded event */ 3168d90fd774SSteven Rostedt (Red Hat) event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; 3169d90fd774SSteven Rostedt (Red Hat) event->type_len = RINGBUF_TYPE_PADDING; 3170d90fd774SSteven Rostedt (Red Hat) /* time delta must be non zero */ 3171d90fd774SSteven Rostedt (Red Hat) if (!event->time_delta) 3172d90fd774SSteven Rostedt (Red Hat) event->time_delta = 1; 3173d90fd774SSteven Rostedt (Red Hat) } 3174d90fd774SSteven Rostedt (Red Hat) 317504aabc32SSong Chen static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer) 3176d90fd774SSteven Rostedt (Red Hat) { 3177d90fd774SSteven Rostedt (Red Hat) local_inc(&cpu_buffer->entries); 3178d90fd774SSteven Rostedt (Red Hat) rb_end_commit(cpu_buffer); 3179d90fd774SSteven Rostedt (Red Hat) } 3180d90fd774SSteven Rostedt (Red Hat) 3181d90fd774SSteven Rostedt (Red Hat) static __always_inline void 318213292494SSteven Rostedt (VMware) rb_wakeups(struct 
trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) 3183d90fd774SSteven Rostedt (Red Hat) { 3184d90fd774SSteven Rostedt (Red Hat) if (buffer->irq_work.waiters_pending) { 3185d90fd774SSteven Rostedt (Red Hat) buffer->irq_work.waiters_pending = false; 3186d90fd774SSteven Rostedt (Red Hat) /* irq_work_queue() supplies its own memory barriers */ 3187d90fd774SSteven Rostedt (Red Hat) irq_work_queue(&buffer->irq_work.work); 3188d90fd774SSteven Rostedt (Red Hat) } 3189d90fd774SSteven Rostedt (Red Hat) 3190d90fd774SSteven Rostedt (Red Hat) if (cpu_buffer->irq_work.waiters_pending) { 3191d90fd774SSteven Rostedt (Red Hat) cpu_buffer->irq_work.waiters_pending = false; 3192d90fd774SSteven Rostedt (Red Hat) /* irq_work_queue() supplies its own memory barriers */ 3193d90fd774SSteven Rostedt (Red Hat) irq_work_queue(&cpu_buffer->irq_work.work); 3194d90fd774SSteven Rostedt (Red Hat) } 3195d90fd774SSteven Rostedt (Red Hat) 319603329f99SSteven Rostedt (VMware) if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) 319703329f99SSteven Rostedt (VMware) return; 3198d90fd774SSteven Rostedt (Red Hat) 319903329f99SSteven Rostedt (VMware) if (cpu_buffer->reader_page == cpu_buffer->commit_page) 320003329f99SSteven Rostedt (VMware) return; 320103329f99SSteven Rostedt (VMware) 320203329f99SSteven Rostedt (VMware) if (!cpu_buffer->irq_work.full_waiters_pending) 320303329f99SSteven Rostedt (VMware) return; 320403329f99SSteven Rostedt (VMware) 320503329f99SSteven Rostedt (VMware) cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); 32062c2b0a78SSteven Rostedt (VMware) 320742fb0a1eSSteven Rostedt (Google) if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) 32082c2b0a78SSteven Rostedt (VMware) return; 32092c2b0a78SSteven Rostedt (VMware) 3210d90fd774SSteven Rostedt (Red Hat) cpu_buffer->irq_work.wakeup_full = true; 3211d90fd774SSteven Rostedt (Red Hat) cpu_buffer->irq_work.full_waiters_pending = false; 3212d90fd774SSteven Rostedt (Red Hat) /* irq_work_queue() supplies its own memory barriers */ 3213d90fd774SSteven Rostedt (Red Hat) irq_work_queue(&cpu_buffer->irq_work.work); 3214d90fd774SSteven Rostedt (Red Hat) } 3215d90fd774SSteven Rostedt (Red Hat) 321628575c61SSteven Rostedt (VMware) #ifdef CONFIG_RING_BUFFER_RECORD_RECURSION 321728575c61SSteven Rostedt (VMware) # define do_ring_buffer_record_recursion() \ 321828575c61SSteven Rostedt (VMware) do_ftrace_record_recursion(_THIS_IP_, _RET_IP_) 321928575c61SSteven Rostedt (VMware) #else 322028575c61SSteven Rostedt (VMware) # define do_ring_buffer_record_recursion() do { } while (0) 322128575c61SSteven Rostedt (VMware) #endif 322228575c61SSteven Rostedt (VMware) 3223d90fd774SSteven Rostedt (Red Hat) /* 3224d90fd774SSteven Rostedt (Red Hat) * The lock and unlock are done within a preempt disable section. 3225d90fd774SSteven Rostedt (Red Hat) * The current_context per_cpu variable can only be modified 3226d90fd774SSteven Rostedt (Red Hat) * by the current task between lock and unlock. But it can 3227a0e3a18fSSteven Rostedt (VMware) * be modified more than once via an interrupt. To pass this 3228a0e3a18fSSteven Rostedt (VMware) * information from the lock to the unlock without having to 3229a0e3a18fSSteven Rostedt (VMware) * access the 'in_interrupt()' functions again (which do show 3230a0e3a18fSSteven Rostedt (VMware) * a bit of overhead in something as critical as function tracing), 3231a0e3a18fSSteven Rostedt (VMware) * we use a bitmask trick.
3232d90fd774SSteven Rostedt (Red Hat) * 3233b02414c8SSteven Rostedt (VMware) * bit 1 = NMI context 3234b02414c8SSteven Rostedt (VMware) * bit 2 = IRQ context 3235b02414c8SSteven Rostedt (VMware) * bit 3 = SoftIRQ context 3236b02414c8SSteven Rostedt (VMware) * bit 4 = normal context. 3237d90fd774SSteven Rostedt (Red Hat) * 3238a0e3a18fSSteven Rostedt (VMware) * This works because this is the order of contexts that can 3239a0e3a18fSSteven Rostedt (VMware) * preempt other contexts. A SoftIRQ never preempts an IRQ 3240a0e3a18fSSteven Rostedt (VMware) * context. 3241a0e3a18fSSteven Rostedt (VMware) * 3242a0e3a18fSSteven Rostedt (VMware) * When the context is determined, the corresponding bit is 3243a0e3a18fSSteven Rostedt (VMware) * checked and set (if it was set, then a recursion of that context 3244a0e3a18fSSteven Rostedt (VMware) * happened). 3245a0e3a18fSSteven Rostedt (VMware) * 3246a0e3a18fSSteven Rostedt (VMware) * On unlock, we need to clear this bit. To do so, just subtract 3247a0e3a18fSSteven Rostedt (VMware) * 1 from the current_context and AND it to itself. 3248a0e3a18fSSteven Rostedt (VMware) * 3249a0e3a18fSSteven Rostedt (VMware) * (binary) 3250a0e3a18fSSteven Rostedt (VMware) * 101 - 1 = 100 3251a0e3a18fSSteven Rostedt (VMware) * 101 & 100 = 100 (clearing bit zero) 3252a0e3a18fSSteven Rostedt (VMware) * 3253a0e3a18fSSteven Rostedt (VMware) * 1010 - 1 = 1001 3254a0e3a18fSSteven Rostedt (VMware) * 1010 & 1001 = 1000 (clearing bit 1) 3255a0e3a18fSSteven Rostedt (VMware) * 3256a0e3a18fSSteven Rostedt (VMware) * The least significant bit can be cleared this way, and it 3257a0e3a18fSSteven Rostedt (VMware) * just so happens that it is the same bit corresponding to 3258a0e3a18fSSteven Rostedt (VMware) * the current context. 3259b02414c8SSteven Rostedt (VMware) * 3260b02414c8SSteven Rostedt (VMware) * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit 3261b02414c8SSteven Rostedt (VMware) * is set when a recursion is detected at the current context, and if 3262b02414c8SSteven Rostedt (VMware) * the TRANSITION bit is already set, it will fail the recursion. 3263b02414c8SSteven Rostedt (VMware) * This is needed because there's a lag between the changing of 3264b02414c8SSteven Rostedt (VMware) * interrupt context and updating the preempt count. In this case, 3265b02414c8SSteven Rostedt (VMware) * a false positive will be found. To handle this, one extra recursion 3266b02414c8SSteven Rostedt (VMware) * is allowed, and this is done by the TRANSITION bit. If the TRANSITION 3267b02414c8SSteven Rostedt (VMware) * bit is already set, then it is considered a recursion and the function 3268b02414c8SSteven Rostedt (VMware) * ends. Otherwise, the TRANSITION bit is set, and that bit is returned. 3269b02414c8SSteven Rostedt (VMware) * 3270b02414c8SSteven Rostedt (VMware) * On the trace_recursive_unlock(), the TRANSITION bit will be the first 3271b02414c8SSteven Rostedt (VMware) * to be cleared. Even if it wasn't the context that set it. That is, 3272b02414c8SSteven Rostedt (VMware) * if an interrupt comes in while NORMAL bit is set and the ring buffer 3273b02414c8SSteven Rostedt (VMware) * is called before preempt_count() is updated, since the check will 3274b02414c8SSteven Rostedt (VMware) * be on the NORMAL bit, the TRANSITION bit will then be set. 
If an 3275b02414c8SSteven Rostedt (VMware) * NMI then comes in, it will set the NMI bit, but when the NMI code 3276f2cc020dSIngo Molnar * does the trace_recursive_unlock() it will clear the TRANSITION bit 3277b02414c8SSteven Rostedt (VMware) * and leave the NMI bit set. But this is fine, because the interrupt 3278b02414c8SSteven Rostedt (VMware) * code that set the TRANSITION bit will then clear the NMI bit when it 3279b02414c8SSteven Rostedt (VMware) * calls trace_recursive_unlock(). If another NMI comes in, it will 3280b02414c8SSteven Rostedt (VMware) * set the TRANSITION bit and continue. 3281b02414c8SSteven Rostedt (VMware) * 3282b02414c8SSteven Rostedt (VMware) * Note: The TRANSITION bit only handles a single transition between contexts. 3283d90fd774SSteven Rostedt (Red Hat) */ 3284d90fd774SSteven Rostedt (Red Hat) 3285bc92b956SUros Bizjak static __always_inline bool 3286d90fd774SSteven Rostedt (Red Hat) trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) 3287d90fd774SSteven Rostedt (Red Hat) { 3288a0e3a18fSSteven Rostedt (VMware) unsigned int val = cpu_buffer->current_context; 328991ebe8bcSSteven Rostedt (VMware) int bit = interrupt_context_level(); 32909b84fadcSSteven Rostedt (VMware) 32919b84fadcSSteven Rostedt (VMware) bit = RB_CTX_NORMAL - bit; 3292a0e3a18fSSteven Rostedt (VMware) 3293b02414c8SSteven Rostedt (VMware) if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { 3294b02414c8SSteven Rostedt (VMware) /* 3295b02414c8SSteven Rostedt (VMware) * It is possible that this was called by transitioning 3296b02414c8SSteven Rostedt (VMware) * between interrupt contexts, and preempt_count() has not 3297b02414c8SSteven Rostedt (VMware) * been updated yet. In this case, use the TRANSITION bit. 3298b02414c8SSteven Rostedt (VMware) */ 3299b02414c8SSteven Rostedt (VMware) bit = RB_CTX_TRANSITION; 330028575c61SSteven Rostedt (VMware) if (val & (1 << (bit + cpu_buffer->nest))) { 330128575c61SSteven Rostedt (VMware) do_ring_buffer_record_recursion(); 3302bc92b956SUros Bizjak return true; 3303b02414c8SSteven Rostedt (VMware) } 330428575c61SSteven Rostedt (VMware) } 3305d90fd774SSteven Rostedt (Red Hat) 33068e012066SSteven Rostedt (VMware) val |= (1 << (bit + cpu_buffer->nest)); 3307a0e3a18fSSteven Rostedt (VMware) cpu_buffer->current_context = val; 3308d90fd774SSteven Rostedt (Red Hat) 3309bc92b956SUros Bizjak return false; 3310d90fd774SSteven Rostedt (Red Hat) } 3311d90fd774SSteven Rostedt (Red Hat) 3312d90fd774SSteven Rostedt (Red Hat) static __always_inline void 3313d90fd774SSteven Rostedt (Red Hat) trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) 3314d90fd774SSteven Rostedt (Red Hat) { 33158e012066SSteven Rostedt (VMware) cpu_buffer->current_context &= 33168e012066SSteven Rostedt (VMware) cpu_buffer->current_context - (1 << cpu_buffer->nest); 33178e012066SSteven Rostedt (VMware) } 33188e012066SSteven Rostedt (VMware) 3319b02414c8SSteven Rostedt (VMware) /* The recursive locking above uses 5 bits */ 3320b02414c8SSteven Rostedt (VMware) #define NESTED_BITS 5 33218e012066SSteven Rostedt (VMware) 33228e012066SSteven Rostedt (VMware) /** 33238e012066SSteven Rostedt (VMware) * ring_buffer_nest_start - Allow tracing while nested 33248e012066SSteven Rostedt (VMware) * @buffer: The ring buffer to modify 33258e012066SSteven Rostedt (VMware) * 33266167c205SSteven Rostedt (VMware) * The ring buffer has a safety mechanism to prevent recursion.
33278e012066SSteven Rostedt (VMware) * But there may be a case where a trace needs to be done while 33288e012066SSteven Rostedt (VMware) * tracing something else. In this case, calling this function 33298e012066SSteven Rostedt (VMware) * allows the new trace to nest within a currently active 33308e012066SSteven Rostedt (VMware) * ring_buffer_lock_reserve(). 33318e012066SSteven Rostedt (VMware) * 33328e012066SSteven Rostedt (VMware) * Call this function before calling another ring_buffer_lock_reserve() and 33338e012066SSteven Rostedt (VMware) * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit(). 33348e012066SSteven Rostedt (VMware) */ 333513292494SSteven Rostedt (VMware) void ring_buffer_nest_start(struct trace_buffer *buffer) 33368e012066SSteven Rostedt (VMware) { 33378e012066SSteven Rostedt (VMware) struct ring_buffer_per_cpu *cpu_buffer; 33388e012066SSteven Rostedt (VMware) int cpu; 33398e012066SSteven Rostedt (VMware) 33408e012066SSteven Rostedt (VMware) /* Enabled by ring_buffer_nest_end() */ 33418e012066SSteven Rostedt (VMware) preempt_disable_notrace(); 33428e012066SSteven Rostedt (VMware) cpu = raw_smp_processor_id(); 33438e012066SSteven Rostedt (VMware) cpu_buffer = buffer->buffers[cpu]; 33446167c205SSteven Rostedt (VMware) /* This is the shift value for the above recursive locking */ 33458e012066SSteven Rostedt (VMware) cpu_buffer->nest += NESTED_BITS; 33468e012066SSteven Rostedt (VMware) } 33478e012066SSteven Rostedt (VMware) 33488e012066SSteven Rostedt (VMware) /** 33498e012066SSteven Rostedt (VMware) * ring_buffer_nest_end - End allowing tracing while nested 33508e012066SSteven Rostedt (VMware) * @buffer: The ring buffer to modify 33518e012066SSteven Rostedt (VMware) * 33528e012066SSteven Rostedt (VMware) * Must be called after ring_buffer_nest_start() and after the 33538e012066SSteven Rostedt (VMware) * ring_buffer_unlock_commit(). 33548e012066SSteven Rostedt (VMware) */ 335513292494SSteven Rostedt (VMware) void ring_buffer_nest_end(struct trace_buffer *buffer) 33568e012066SSteven Rostedt (VMware) { 33578e012066SSteven Rostedt (VMware) struct ring_buffer_per_cpu *cpu_buffer; 33588e012066SSteven Rostedt (VMware) int cpu; 33598e012066SSteven Rostedt (VMware) 33608e012066SSteven Rostedt (VMware) /* disabled by ring_buffer_nest_start() */ 33618e012066SSteven Rostedt (VMware) cpu = raw_smp_processor_id(); 33628e012066SSteven Rostedt (VMware) cpu_buffer = buffer->buffers[cpu]; 33636167c205SSteven Rostedt (VMware) /* This is the shift value for the above recursive locking */ 33648e012066SSteven Rostedt (VMware) cpu_buffer->nest -= NESTED_BITS; 33658e012066SSteven Rostedt (VMware) preempt_enable_notrace(); 3366d90fd774SSteven Rostedt (Red Hat) } 3367d90fd774SSteven Rostedt (Red Hat) 3368d90fd774SSteven Rostedt (Red Hat) /** 3369d90fd774SSteven Rostedt (Red Hat) * ring_buffer_unlock_commit - commit a reserved event 3370d90fd774SSteven Rostedt (Red Hat) * @buffer: The buffer to commit to 3371d90fd774SSteven Rostedt (Red Hat) * 3372d90fd774SSteven Rostedt (Red Hat) * This commits the data to the ring buffer, and releases any locks held. 3373d90fd774SSteven Rostedt (Red Hat) * 3374d90fd774SSteven Rostedt (Red Hat) * Must be paired with ring_buffer_lock_reserve.
3375d90fd774SSteven Rostedt (Red Hat) */ 337604aabc32SSong Chen int ring_buffer_unlock_commit(struct trace_buffer *buffer) 3377d90fd774SSteven Rostedt (Red Hat) { 3378d90fd774SSteven Rostedt (Red Hat) struct ring_buffer_per_cpu *cpu_buffer; 3379d90fd774SSteven Rostedt (Red Hat) int cpu = raw_smp_processor_id(); 3380d90fd774SSteven Rostedt (Red Hat) 3381d90fd774SSteven Rostedt (Red Hat) cpu_buffer = buffer->buffers[cpu]; 3382d90fd774SSteven Rostedt (Red Hat) 338304aabc32SSong Chen rb_commit(cpu_buffer); 3384d90fd774SSteven Rostedt (Red Hat) 3385d90fd774SSteven Rostedt (Red Hat) rb_wakeups(buffer, cpu_buffer); 3386d90fd774SSteven Rostedt (Red Hat) 3387d90fd774SSteven Rostedt (Red Hat) trace_recursive_unlock(cpu_buffer); 3388d90fd774SSteven Rostedt (Red Hat) 3389d90fd774SSteven Rostedt (Red Hat) preempt_enable_notrace(); 3390d90fd774SSteven Rostedt (Red Hat) 3391d90fd774SSteven Rostedt (Red Hat) return 0; 3392d90fd774SSteven Rostedt (Red Hat) } 3393d90fd774SSteven Rostedt (Red Hat) EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); 3394a4543a2fSSteven Rostedt (Red Hat) 33955b7be9c7SSteven Rostedt (VMware) /* Special value to validate all deltas on a page. */ 33965b7be9c7SSteven Rostedt (VMware) #define CHECK_FULL_PAGE 1L 33975b7be9c7SSteven Rostedt (VMware) 33985b7be9c7SSteven Rostedt (VMware) #ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS 33995b7be9c7SSteven Rostedt (VMware) static void dump_buffer_page(struct buffer_data_page *bpage, 34005b7be9c7SSteven Rostedt (VMware) struct rb_event_info *info, 34015b7be9c7SSteven Rostedt (VMware) unsigned long tail) 34025b7be9c7SSteven Rostedt (VMware) { 34035b7be9c7SSteven Rostedt (VMware) struct ring_buffer_event *event; 34045b7be9c7SSteven Rostedt (VMware) u64 ts, delta; 34055b7be9c7SSteven Rostedt (VMware) int e; 34065b7be9c7SSteven Rostedt (VMware) 34075b7be9c7SSteven Rostedt (VMware) ts = bpage->time_stamp; 34085b7be9c7SSteven Rostedt (VMware) pr_warn(" [%lld] PAGE TIME STAMP\n", ts); 34095b7be9c7SSteven Rostedt (VMware) 34105b7be9c7SSteven Rostedt (VMware) for (e = 0; e < tail; e += rb_event_length(event)) { 34115b7be9c7SSteven Rostedt (VMware) 34125b7be9c7SSteven Rostedt (VMware) event = (struct ring_buffer_event *)(bpage->data + e); 34135b7be9c7SSteven Rostedt (VMware) 34145b7be9c7SSteven Rostedt (VMware) switch (event->type_len) { 34155b7be9c7SSteven Rostedt (VMware) 34165b7be9c7SSteven Rostedt (VMware) case RINGBUF_TYPE_TIME_EXTEND: 3417e20044f7SSteven Rostedt (VMware) delta = rb_event_time_stamp(event); 34185b7be9c7SSteven Rostedt (VMware) ts += delta; 34195b7be9c7SSteven Rostedt (VMware) pr_warn(" [%lld] delta:%lld TIME EXTEND\n", ts, delta); 34205b7be9c7SSteven Rostedt (VMware) break; 34215b7be9c7SSteven Rostedt (VMware) 34225b7be9c7SSteven Rostedt (VMware) case RINGBUF_TYPE_TIME_STAMP: 3423e20044f7SSteven Rostedt (VMware) delta = rb_event_time_stamp(event); 34246695da58SSteven Rostedt (Google) ts = rb_fix_abs_ts(delta, ts); 34255b7be9c7SSteven Rostedt (VMware) pr_warn(" [%lld] absolute:%lld TIME STAMP\n", ts, delta); 34265b7be9c7SSteven Rostedt (VMware) break; 34275b7be9c7SSteven Rostedt (VMware) 34285b7be9c7SSteven Rostedt (VMware) case RINGBUF_TYPE_PADDING: 34295b7be9c7SSteven Rostedt (VMware) ts += event->time_delta; 34305b7be9c7SSteven Rostedt (VMware) pr_warn(" [%lld] delta:%d PADDING\n", ts, event->time_delta); 34315b7be9c7SSteven Rostedt (VMware) break; 34325b7be9c7SSteven Rostedt (VMware) 34335b7be9c7SSteven Rostedt (VMware) case RINGBUF_TYPE_DATA: 34345b7be9c7SSteven Rostedt (VMware) ts += event->time_delta; 34355b7be9c7SSteven 
Rostedt (VMware) pr_warn(" [%lld] delta:%d\n", ts, event->time_delta); 34365b7be9c7SSteven Rostedt (VMware) break; 34375b7be9c7SSteven Rostedt (VMware) 34385b7be9c7SSteven Rostedt (VMware) default: 34395b7be9c7SSteven Rostedt (VMware) break; 34405b7be9c7SSteven Rostedt (VMware) } 34415b7be9c7SSteven Rostedt (VMware) } 34425b7be9c7SSteven Rostedt (VMware) } 34435b7be9c7SSteven Rostedt (VMware) 34445b7be9c7SSteven Rostedt (VMware) static DEFINE_PER_CPU(atomic_t, checking); 34455b7be9c7SSteven Rostedt (VMware) static atomic_t ts_dump; 34465b7be9c7SSteven Rostedt (VMware) 34475b7be9c7SSteven Rostedt (VMware) /* 34485b7be9c7SSteven Rostedt (VMware) * Check if the current event time stamp matches the deltas on 34495b7be9c7SSteven Rostedt (VMware) * the buffer page. 34505b7be9c7SSteven Rostedt (VMware) */ 34515b7be9c7SSteven Rostedt (VMware) static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, 34525b7be9c7SSteven Rostedt (VMware) struct rb_event_info *info, 34535b7be9c7SSteven Rostedt (VMware) unsigned long tail) 34545b7be9c7SSteven Rostedt (VMware) { 34555b7be9c7SSteven Rostedt (VMware) struct ring_buffer_event *event; 34565b7be9c7SSteven Rostedt (VMware) struct buffer_data_page *bpage; 34575b7be9c7SSteven Rostedt (VMware) u64 ts, delta; 34585b7be9c7SSteven Rostedt (VMware) bool full = false; 34595b7be9c7SSteven Rostedt (VMware) int e; 34605b7be9c7SSteven Rostedt (VMware) 34615b7be9c7SSteven Rostedt (VMware) bpage = info->tail_page->page; 34625b7be9c7SSteven Rostedt (VMware) 34635b7be9c7SSteven Rostedt (VMware) if (tail == CHECK_FULL_PAGE) { 34645b7be9c7SSteven Rostedt (VMware) full = true; 34655b7be9c7SSteven Rostedt (VMware) tail = local_read(&bpage->commit); 34665b7be9c7SSteven Rostedt (VMware) } else if (info->add_timestamp & 34675b7be9c7SSteven Rostedt (VMware) (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) { 34685b7be9c7SSteven Rostedt (VMware) /* Ignore events with absolute time stamps */ 34695b7be9c7SSteven Rostedt (VMware) return; 34705b7be9c7SSteven Rostedt (VMware) } 34715b7be9c7SSteven Rostedt (VMware) 34725b7be9c7SSteven Rostedt (VMware) /* 34735b7be9c7SSteven Rostedt (VMware) * Do not check the first event (skip possible extends too). 34745b7be9c7SSteven Rostedt (VMware) * Also do not check if previous events have not been committed. 
34755b7be9c7SSteven Rostedt (VMware) */ 34765b7be9c7SSteven Rostedt (VMware) if (tail <= 8 || tail > local_read(&bpage->commit)) 34775b7be9c7SSteven Rostedt (VMware) return; 34785b7be9c7SSteven Rostedt (VMware) 34795b7be9c7SSteven Rostedt (VMware) /* 34805b7be9c7SSteven Rostedt (VMware) * If this interrupted another event's check, do not check this one. 34815b7be9c7SSteven Rostedt (VMware) */ 34825b7be9c7SSteven Rostedt (VMware) if (atomic_inc_return(this_cpu_ptr(&checking)) != 1) 34835b7be9c7SSteven Rostedt (VMware) goto out; 34845b7be9c7SSteven Rostedt (VMware) 34855b7be9c7SSteven Rostedt (VMware) ts = bpage->time_stamp; 34865b7be9c7SSteven Rostedt (VMware) 34875b7be9c7SSteven Rostedt (VMware) for (e = 0; e < tail; e += rb_event_length(event)) { 34885b7be9c7SSteven Rostedt (VMware) 34895b7be9c7SSteven Rostedt (VMware) event = (struct ring_buffer_event *)(bpage->data + e); 34905b7be9c7SSteven Rostedt (VMware) 34915b7be9c7SSteven Rostedt (VMware) switch (event->type_len) { 34925b7be9c7SSteven Rostedt (VMware) 34935b7be9c7SSteven Rostedt (VMware) case RINGBUF_TYPE_TIME_EXTEND: 3494e20044f7SSteven Rostedt (VMware) delta = rb_event_time_stamp(event); 34955b7be9c7SSteven Rostedt (VMware) ts += delta; 34965b7be9c7SSteven Rostedt (VMware) break; 34975b7be9c7SSteven Rostedt (VMware) 34985b7be9c7SSteven Rostedt (VMware) case RINGBUF_TYPE_TIME_STAMP: 3499e20044f7SSteven Rostedt (VMware) delta = rb_event_time_stamp(event); 35006695da58SSteven Rostedt (Google) ts = rb_fix_abs_ts(delta, ts); 35015b7be9c7SSteven Rostedt (VMware) break; 35025b7be9c7SSteven Rostedt (VMware) 35035b7be9c7SSteven Rostedt (VMware) case RINGBUF_TYPE_PADDING: 35045b7be9c7SSteven Rostedt (VMware) if (event->time_delta == 1) 35055b7be9c7SSteven Rostedt (VMware) break; 3506957cdcd9SWei Ming Chen fallthrough; 35075b7be9c7SSteven Rostedt (VMware) case RINGBUF_TYPE_DATA: 35085b7be9c7SSteven Rostedt (VMware) ts += event->time_delta; 35095b7be9c7SSteven Rostedt (VMware) break; 35105b7be9c7SSteven Rostedt (VMware) 35115b7be9c7SSteven Rostedt (VMware) default: 35125b7be9c7SSteven Rostedt (VMware) RB_WARN_ON(cpu_buffer, 1); 35135b7be9c7SSteven Rostedt (VMware) } 35145b7be9c7SSteven Rostedt (VMware) } 35155b7be9c7SSteven Rostedt (VMware) if ((full && ts > info->ts) || 35165b7be9c7SSteven Rostedt (VMware) (!full && ts + info->delta != info->ts)) { 35175b7be9c7SSteven Rostedt (VMware) /* If another report is happening, ignore this one */ 35185b7be9c7SSteven Rostedt (VMware) if (atomic_inc_return(&ts_dump) != 1) { 35195b7be9c7SSteven Rostedt (VMware) atomic_dec(&ts_dump); 35205b7be9c7SSteven Rostedt (VMware) goto out; 35215b7be9c7SSteven Rostedt (VMware) } 35225b7be9c7SSteven Rostedt (VMware) atomic_inc(&cpu_buffer->record_disabled); 35236549de1fSSteven Rostedt (VMware) /* There are some cases during boot up when this can happen */ 35246549de1fSSteven Rostedt (VMware) WARN_ON_ONCE(system_state != SYSTEM_BOOTING); 35256549de1fSSteven Rostedt (VMware) pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s\n", 35265b7be9c7SSteven Rostedt (VMware) cpu_buffer->cpu, 35276549de1fSSteven Rostedt (VMware) ts + info->delta, info->ts, info->delta, 35286549de1fSSteven Rostedt (VMware) info->before, info->after, 35296549de1fSSteven Rostedt (VMware) full ?
" (full)" : ""); 35305b7be9c7SSteven Rostedt (VMware) dump_buffer_page(bpage, info, tail); 35315b7be9c7SSteven Rostedt (VMware) atomic_dec(&ts_dump); 35325b7be9c7SSteven Rostedt (VMware) /* Do not re-enable checking */ 35335b7be9c7SSteven Rostedt (VMware) return; 35345b7be9c7SSteven Rostedt (VMware) } 35355b7be9c7SSteven Rostedt (VMware) out: 35365b7be9c7SSteven Rostedt (VMware) atomic_dec(this_cpu_ptr(&checking)); 35375b7be9c7SSteven Rostedt (VMware) } 35385b7be9c7SSteven Rostedt (VMware) #else 35395b7be9c7SSteven Rostedt (VMware) static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, 35405b7be9c7SSteven Rostedt (VMware) struct rb_event_info *info, 35415b7be9c7SSteven Rostedt (VMware) unsigned long tail) 35425b7be9c7SSteven Rostedt (VMware) { 35435b7be9c7SSteven Rostedt (VMware) } 35445b7be9c7SSteven Rostedt (VMware) #endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */ 35455b7be9c7SSteven Rostedt (VMware) 35466634ff26SSteven Rostedt static struct ring_buffer_event * 35476634ff26SSteven Rostedt __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 3548fcc742eaSSteven Rostedt (Red Hat) struct rb_event_info *info) 35496634ff26SSteven Rostedt { 35506634ff26SSteven Rostedt struct ring_buffer_event *event; 3551fcc742eaSSteven Rostedt (Red Hat) struct buffer_page *tail_page; 3552a389d86fSSteven Rostedt (VMware) unsigned long tail, write, w; 355310464b4aSSteven Rostedt (VMware) bool a_ok; 355410464b4aSSteven Rostedt (VMware) bool b_ok; 355569d1b839SSteven Rostedt 35568573636eSSteven Rostedt (Red Hat) /* Don't let the compiler play games with cpu_buffer->tail_page */ 35578573636eSSteven Rostedt (Red Hat) tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); 3558a389d86fSSteven Rostedt (VMware) 3559a389d86fSSteven Rostedt (VMware) /*A*/ w = local_read(&tail_page->write) & RB_WRITE_MASK; 3560a389d86fSSteven Rostedt (VMware) barrier(); 356158fbc3c6SSteven Rostedt (VMware) b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); 356258fbc3c6SSteven Rostedt (VMware) a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 3563a389d86fSSteven Rostedt (VMware) barrier(); 3564a389d86fSSteven Rostedt (VMware) info->ts = rb_time_stamp(cpu_buffer->buffer); 3565a389d86fSSteven Rostedt (VMware) 356658fbc3c6SSteven Rostedt (VMware) if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) { 3567a389d86fSSteven Rostedt (VMware) info->delta = info->ts; 3568a389d86fSSteven Rostedt (VMware) } else { 3569a389d86fSSteven Rostedt (VMware) /* 357058fbc3c6SSteven Rostedt (VMware) * If interrupting an event time update, we may need an 357158fbc3c6SSteven Rostedt (VMware) * absolute timestamp. 3572a389d86fSSteven Rostedt (VMware) * Don't bother if this is the start of a new page (w == 0). 
3573a389d86fSSteven Rostedt (VMware) */ 357458fbc3c6SSteven Rostedt (VMware) if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) { 35757c4b4a51SSteven Rostedt (VMware) info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND; 3576fcc742eaSSteven Rostedt (Red Hat) info->length += RB_LEN_TIME_EXTEND; 357758fbc3c6SSteven Rostedt (VMware) } else { 357858fbc3c6SSteven Rostedt (VMware) info->delta = info->ts - info->after; 357958fbc3c6SSteven Rostedt (VMware) if (unlikely(test_time_stamp(info->delta))) { 358058fbc3c6SSteven Rostedt (VMware) info->add_timestamp |= RB_ADD_STAMP_EXTEND; 358158fbc3c6SSteven Rostedt (VMware) info->length += RB_LEN_TIME_EXTEND; 358258fbc3c6SSteven Rostedt (VMware) } 358358fbc3c6SSteven Rostedt (VMware) } 358458fbc3c6SSteven Rostedt (VMware) } 358577ae365eSSteven Rostedt 358610464b4aSSteven Rostedt (VMware) /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); 3587a389d86fSSteven Rostedt (VMware) 3588a389d86fSSteven Rostedt (VMware) /*C*/ write = local_add_return(info->length, &tail_page->write); 358977ae365eSSteven Rostedt 359077ae365eSSteven Rostedt /* set write to only the index of the write */ 359177ae365eSSteven Rostedt write &= RB_WRITE_MASK; 3592a389d86fSSteven Rostedt (VMware) 3593fcc742eaSSteven Rostedt (Red Hat) tail = write - info->length; 35946634ff26SSteven Rostedt 3595a389d86fSSteven Rostedt (VMware) /* See if we shot past the end of this buffer page */ 3596a389d86fSSteven Rostedt (VMware) if (unlikely(write > BUF_PAGE_SIZE)) { 3597a389d86fSSteven Rostedt (VMware) /* before and after may now be different, fix it up */ 359858fbc3c6SSteven Rostedt (VMware) b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); 359958fbc3c6SSteven Rostedt (VMware) a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 360058fbc3c6SSteven Rostedt (VMware) if (a_ok && b_ok && info->before != info->after) 360158fbc3c6SSteven Rostedt (VMware) (void)rb_time_cmpxchg(&cpu_buffer->before_stamp, 360258fbc3c6SSteven Rostedt (VMware) info->before, info->after); 36035b7be9c7SSteven Rostedt (VMware) if (a_ok && b_ok) 36045b7be9c7SSteven Rostedt (VMware) check_buffer(cpu_buffer, info, CHECK_FULL_PAGE); 3605a389d86fSSteven Rostedt (VMware) return rb_move_tail(cpu_buffer, tail, info); 3606a389d86fSSteven Rostedt (VMware) } 3607a389d86fSSteven Rostedt (VMware) 3608a389d86fSSteven Rostedt (VMware) if (likely(tail == w)) { 3609a389d86fSSteven Rostedt (VMware) u64 save_before; 361010464b4aSSteven Rostedt (VMware) bool s_ok; 3611a389d86fSSteven Rostedt (VMware) 3612a389d86fSSteven Rostedt (VMware) /* Nothing interrupted us between A and C */ 361310464b4aSSteven Rostedt (VMware) /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); 3614a389d86fSSteven Rostedt (VMware) barrier(); 361510464b4aSSteven Rostedt (VMware) /*E*/ s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before); 361610464b4aSSteven Rostedt (VMware) RB_WARN_ON(cpu_buffer, !s_ok); 36177c4b4a51SSteven Rostedt (VMware) if (likely(!(info->add_timestamp & 36187c4b4a51SSteven Rostedt (VMware) (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)))) 3619a389d86fSSteven Rostedt (VMware) /* This did not interrupt any time update */ 362058fbc3c6SSteven Rostedt (VMware) info->delta = info->ts - info->after; 3621a389d86fSSteven Rostedt (VMware) else 362282db909eSQiujun Huang /* Just use full timestamp for interrupting event */ 3623a389d86fSSteven Rostedt (VMware) info->delta = info->ts; 3624a389d86fSSteven Rostedt (VMware) barrier(); 36255b7be9c7SSteven Rostedt (VMware) check_buffer(cpu_buffer, info, tail);
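		/*
		 * Sketch of the race handled below: between C (reserving
		 * this event's space) and E (re-reading before_stamp), an
		 * interrupting event may have run to completion and stored
		 * its newer timestamp in before_stamp. In that case
		 * save_before no longer matches info->ts, and write_stamp
		 * may need to be pushed forward.
		 */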
3626a389d86fSSteven Rostedt (VMware) if (unlikely(info->ts != save_before)) { 3627a389d86fSSteven Rostedt (VMware) /* SLOW PATH - Interrupted between C and E */ 3628a389d86fSSteven Rostedt (VMware) 362958fbc3c6SSteven Rostedt (VMware) a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 363010464b4aSSteven Rostedt (VMware) RB_WARN_ON(cpu_buffer, !a_ok); 363110464b4aSSteven Rostedt (VMware) 3632a389d86fSSteven Rostedt (VMware) /* Write stamp must only go forward */ 363358fbc3c6SSteven Rostedt (VMware) if (save_before > info->after) { 3634a389d86fSSteven Rostedt (VMware) /* 3635a389d86fSSteven Rostedt (VMware) * We do not care about the result, only that 3636a389d86fSSteven Rostedt (VMware) * it gets updated atomically. 3637a389d86fSSteven Rostedt (VMware) */ 363858fbc3c6SSteven Rostedt (VMware) (void)rb_time_cmpxchg(&cpu_buffer->write_stamp, 363958fbc3c6SSteven Rostedt (VMware) info->after, save_before); 3640a389d86fSSteven Rostedt (VMware) } 3641a389d86fSSteven Rostedt (VMware) } 3642a389d86fSSteven Rostedt (VMware) } else { 3643a389d86fSSteven Rostedt (VMware) u64 ts; 3644a389d86fSSteven Rostedt (VMware) /* SLOW PATH - Interrupted between A and C */ 364558fbc3c6SSteven Rostedt (VMware) a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); 364610464b4aSSteven Rostedt (VMware) /* Was interrupted before here, write_stamp must be valid */ 364710464b4aSSteven Rostedt (VMware) RB_WARN_ON(cpu_buffer, !a_ok); 3648a389d86fSSteven Rostedt (VMware) ts = rb_time_stamp(cpu_buffer->buffer); 3649a389d86fSSteven Rostedt (VMware) barrier(); 3650a389d86fSSteven Rostedt (VMware) /*E*/ if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) && 36518785f51aSAndrea Righi info->after < ts && 36528785f51aSAndrea Righi rb_time_cmpxchg(&cpu_buffer->write_stamp, 36538785f51aSAndrea Righi info->after, ts)) { 3654a389d86fSSteven Rostedt (VMware) /* Nothing came after this event between C and E */ 365558fbc3c6SSteven Rostedt (VMware) info->delta = ts - info->after; 3656a389d86fSSteven Rostedt (VMware) } else { 3657a389d86fSSteven Rostedt (VMware) /* 365882db909eSQiujun Huang * Interrupted between C and E: 3659a389d86fSSteven Rostedt (VMware) * Lost the previous event's time stamp. Just set the 3660a389d86fSSteven Rostedt (VMware) * delta to zero, and this will be the same time as 3661a389d86fSSteven Rostedt (VMware) * the event this event interrupted. And the events that 3662a389d86fSSteven Rostedt (VMware) * came after this will still be correct (as they would 3663a389d86fSSteven Rostedt (VMware) * have built their delta on the previous event). 3664a389d86fSSteven Rostedt (VMware) */ 3665a389d86fSSteven Rostedt (VMware) info->delta = 0; 3666a389d86fSSteven Rostedt (VMware) } 36678672e494SSteven Rostedt (VMware) info->ts = ts; 36687c4b4a51SSteven Rostedt (VMware) info->add_timestamp &= ~RB_ADD_STAMP_FORCE; 3669a389d86fSSteven Rostedt (VMware) } 3670a389d86fSSteven Rostedt (VMware) 3671b7dc42fdSSteven Rostedt (Red Hat) /* 3672b7dc42fdSSteven Rostedt (Red Hat) * If this is the first commit on the page, then it has the same 3673b7dc42fdSSteven Rostedt (Red Hat) * timestamp as the page itself.
3674b7dc42fdSSteven Rostedt (Red Hat) */ 36757c4b4a51SSteven Rostedt (VMware) if (unlikely(!tail && !(info->add_timestamp & 36767c4b4a51SSteven Rostedt (VMware) (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)))) 3677b7dc42fdSSteven Rostedt (Red Hat) info->delta = 0; 3678b7dc42fdSSteven Rostedt (Red Hat) 36796634ff26SSteven Rostedt /* We reserved something on the buffer */ 3680b7dc42fdSSteven Rostedt (Red Hat) 36816634ff26SSteven Rostedt event = __rb_page_index(tail_page, tail); 3682fcc742eaSSteven Rostedt (Red Hat) rb_update_event(cpu_buffer, event, info); 36836634ff26SSteven Rostedt 36846634ff26SSteven Rostedt local_inc(&tail_page->entries); 36856634ff26SSteven Rostedt 3686b7dc42fdSSteven Rostedt (Red Hat) /* 3687b7dc42fdSSteven Rostedt (Red Hat) * If this is the first commit on the page, then update 3688b7dc42fdSSteven Rostedt (Red Hat) * its timestamp. 3689b7dc42fdSSteven Rostedt (Red Hat) */ 369075b21c6dSSteven Rostedt (VMware) if (unlikely(!tail)) 3691b7dc42fdSSteven Rostedt (Red Hat) tail_page->page->time_stamp = info->ts; 3692b7dc42fdSSteven Rostedt (Red Hat) 3693c64e148aSVaibhav Nagarnaik /* account for these added bytes */ 3694fcc742eaSSteven Rostedt (Red Hat) local_add(info->length, &cpu_buffer->entries_bytes); 3695c64e148aSVaibhav Nagarnaik 36966634ff26SSteven Rostedt return event; 36976634ff26SSteven Rostedt } 36986634ff26SSteven Rostedt 3699fa7ffb39SSteven Rostedt (Red Hat) static __always_inline struct ring_buffer_event * 370013292494SSteven Rostedt (VMware) rb_reserve_next_event(struct trace_buffer *buffer, 370162f0b3ebSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer, 37021cd8d735SSteven Rostedt unsigned long length) 37037a8e76a3SSteven Rostedt { 37047a8e76a3SSteven Rostedt struct ring_buffer_event *event; 3705fcc742eaSSteven Rostedt (Red Hat) struct rb_event_info info; 3706818e3dd3SSteven Rostedt int nr_loops = 0; 370758fbc3c6SSteven Rostedt (VMware) int add_ts_default; 37087a8e76a3SSteven Rostedt 3709fa743953SSteven Rostedt rb_start_commit(cpu_buffer); 3710a389d86fSSteven Rostedt (VMware) /* The commit page cannot change after this */ 3711fa743953SSteven Rostedt 371285bac32cSSteven Rostedt #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 371362f0b3ebSSteven Rostedt /* 371362f0b3ebSSteven Rostedt * Due to the ability to swap a cpu buffer out of a buffer, 371562f0b3ebSSteven Rostedt * it is possible it was swapped before we committed. 371662f0b3ebSSteven Rostedt * (committing stops a swap). We check for it here and 371762f0b3ebSSteven Rostedt * if it happened, we have to fail the write.
371862f0b3ebSSteven Rostedt */ 371962f0b3ebSSteven Rostedt barrier(); 37206aa7de05SMark Rutland if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { 372162f0b3ebSSteven Rostedt local_dec(&cpu_buffer->committing); 372262f0b3ebSSteven Rostedt local_dec(&cpu_buffer->commits); 372362f0b3ebSSteven Rostedt return NULL; 372462f0b3ebSSteven Rostedt } 372585bac32cSSteven Rostedt #endif 3726b7dc42fdSSteven Rostedt (Red Hat) 3727fcc742eaSSteven Rostedt (Red Hat) info.length = rb_calculate_event_length(length); 372858fbc3c6SSteven Rostedt (VMware) 372958fbc3c6SSteven Rostedt (VMware) if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { 373058fbc3c6SSteven Rostedt (VMware) add_ts_default = RB_ADD_STAMP_ABSOLUTE; 373158fbc3c6SSteven Rostedt (VMware) info.length += RB_LEN_TIME_EXTEND; 373258fbc3c6SSteven Rostedt (VMware) } else { 373358fbc3c6SSteven Rostedt (VMware) add_ts_default = RB_ADD_STAMP_NONE; 373458fbc3c6SSteven Rostedt (VMware) } 373558fbc3c6SSteven Rostedt (VMware) 3736a4543a2fSSteven Rostedt (Red Hat) again: 373758fbc3c6SSteven Rostedt (VMware) info.add_timestamp = add_ts_default; 3738b7dc42fdSSteven Rostedt (Red Hat) info.delta = 0; 3739b7dc42fdSSteven Rostedt (Red Hat) 3740818e3dd3SSteven Rostedt /* 3741818e3dd3SSteven Rostedt * We allow for interrupts to reenter here and do a trace. 3742818e3dd3SSteven Rostedt * If one does, it will cause this original code to loop 3743818e3dd3SSteven Rostedt * back here. Even with heavy interrupts happening, this 3744818e3dd3SSteven Rostedt * should only happen a few times in a row. If this happens 3745818e3dd3SSteven Rostedt * 1000 times in a row, there must be either an interrupt 3746818e3dd3SSteven Rostedt * storm or we have something buggy. 3747818e3dd3SSteven Rostedt * Bail! 3748818e3dd3SSteven Rostedt */ 37493e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 3750fa743953SSteven Rostedt goto out_fail; 3751818e3dd3SSteven Rostedt 3752fcc742eaSSteven Rostedt (Red Hat) event = __rb_reserve_next(cpu_buffer, &info); 3753fcc742eaSSteven Rostedt (Red Hat) 3754bd1b7cd3SSteven Rostedt (Red Hat) if (unlikely(PTR_ERR(event) == -EAGAIN)) { 375558fbc3c6SSteven Rostedt (VMware) if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND)) 3756bd1b7cd3SSteven Rostedt (Red Hat) info.length -= RB_LEN_TIME_EXTEND; 3757bf41a158SSteven Rostedt goto again; 3758bd1b7cd3SSteven Rostedt (Red Hat) } 37597a8e76a3SSteven Rostedt 3760a389d86fSSteven Rostedt (VMware) if (likely(event)) 37617a8e76a3SSteven Rostedt return event; 3762fa743953SSteven Rostedt out_fail: 3763fa743953SSteven Rostedt rb_end_commit(cpu_buffer); 3764fa743953SSteven Rostedt return NULL; 37657a8e76a3SSteven Rostedt } 37667a8e76a3SSteven Rostedt 37677a8e76a3SSteven Rostedt /** 37687a8e76a3SSteven Rostedt * ring_buffer_lock_reserve - reserve a part of the buffer 37697a8e76a3SSteven Rostedt * @buffer: the ring buffer to reserve from 37707a8e76a3SSteven Rostedt * @length: the length of the data to reserve (excluding event header) 37717a8e76a3SSteven Rostedt * 37726167c205SSteven Rostedt (VMware) * Returns a reserved event on the ring buffer to copy directly to. 37737a8e76a3SSteven Rostedt * The user of this interface will need to get the body to write into 37747a8e76a3SSteven Rostedt * and can use the ring_buffer_event_data() interface. 37757a8e76a3SSteven Rostedt * 37767a8e76a3SSteven Rostedt * The length is the length of the data needed, not the event length 37777a8e76a3SSteven Rostedt * which also includes the event header. 
37787a8e76a3SSteven Rostedt * 37797a8e76a3SSteven Rostedt * Must be paired with ring_buffer_unlock_commit, unless NULL is returned. 37807a8e76a3SSteven Rostedt * If NULL is returned, then nothing has been allocated or locked. 37817a8e76a3SSteven Rostedt */ 37827a8e76a3SSteven Rostedt struct ring_buffer_event * 378313292494SSteven Rostedt (VMware) ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length) 37847a8e76a3SSteven Rostedt { 37857a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 37867a8e76a3SSteven Rostedt struct ring_buffer_event *event; 37875168ae50SSteven Rostedt int cpu; 37887a8e76a3SSteven Rostedt 3789bf41a158SSteven Rostedt /* If we are tracing schedule, we don't want to recurse */ 37905168ae50SSteven Rostedt preempt_disable_notrace(); 3791bf41a158SSteven Rostedt 37923205f806SSteven Rostedt (Red Hat) if (unlikely(atomic_read(&buffer->record_disabled))) 379358a09ec6SSteven Rostedt (Red Hat) goto out; 3794261842b7SSteven Rostedt 37957a8e76a3SSteven Rostedt cpu = raw_smp_processor_id(); 37967a8e76a3SSteven Rostedt 37973205f806SSteven Rostedt (Red Hat) if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) 3798d769041fSSteven Rostedt goto out; 37997a8e76a3SSteven Rostedt 38007a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 38017a8e76a3SSteven Rostedt 38023205f806SSteven Rostedt (Red Hat) if (unlikely(atomic_read(&cpu_buffer->record_disabled))) 3803d769041fSSteven Rostedt goto out; 38047a8e76a3SSteven Rostedt 38053205f806SSteven Rostedt (Red Hat) if (unlikely(length > BUF_MAX_DATA_SIZE)) 3806bf41a158SSteven Rostedt goto out; 38077a8e76a3SSteven Rostedt 380858a09ec6SSteven Rostedt (Red Hat) if (unlikely(trace_recursive_lock(cpu_buffer))) 380958a09ec6SSteven Rostedt (Red Hat) goto out; 381058a09ec6SSteven Rostedt (Red Hat) 381162f0b3ebSSteven Rostedt event = rb_reserve_next_event(buffer, cpu_buffer, length); 38127a8e76a3SSteven Rostedt if (!event) 381358a09ec6SSteven Rostedt (Red Hat) goto out_unlock; 38147a8e76a3SSteven Rostedt 38157a8e76a3SSteven Rostedt return event; 38167a8e76a3SSteven Rostedt 381758a09ec6SSteven Rostedt (Red Hat) out_unlock: 381858a09ec6SSteven Rostedt (Red Hat) trace_recursive_unlock(cpu_buffer); 3819d769041fSSteven Rostedt out: 38205168ae50SSteven Rostedt preempt_enable_notrace(); 38217a8e76a3SSteven Rostedt return NULL; 38227a8e76a3SSteven Rostedt } 3823c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); 38247a8e76a3SSteven Rostedt 3825a1863c21SSteven Rostedt /* 3826a1863c21SSteven Rostedt * Decrement the entries to the page that an event is on. 3827a1863c21SSteven Rostedt * The event does not even need to exist, only the pointer 3828a1863c21SSteven Rostedt * to the page it is on. This may only be called before the commit 3829a1863c21SSteven Rostedt * takes place. 
3830a1863c21SSteven Rostedt */ 3831a1863c21SSteven Rostedt static inline void 3832a1863c21SSteven Rostedt rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, 3833a1863c21SSteven Rostedt struct ring_buffer_event *event) 3834a1863c21SSteven Rostedt { 3835a1863c21SSteven Rostedt unsigned long addr = (unsigned long)event; 3836a1863c21SSteven Rostedt struct buffer_page *bpage = cpu_buffer->commit_page; 3837a1863c21SSteven Rostedt struct buffer_page *start; 3838a1863c21SSteven Rostedt 3839a1863c21SSteven Rostedt addr &= PAGE_MASK; 3840a1863c21SSteven Rostedt 3841a1863c21SSteven Rostedt /* Do the likely case first */ 3842a1863c21SSteven Rostedt if (likely(bpage->page == (void *)addr)) { 3843a1863c21SSteven Rostedt local_dec(&bpage->entries); 3844a1863c21SSteven Rostedt return; 3845a1863c21SSteven Rostedt } 3846a1863c21SSteven Rostedt 3847a1863c21SSteven Rostedt /* 3848a1863c21SSteven Rostedt * Because the commit page may be on the reader page we 3849a1863c21SSteven Rostedt * start with the next page and check the end loop there. 3850a1863c21SSteven Rostedt */ 38516689bed3SQiujun Huang rb_inc_page(&bpage); 3852a1863c21SSteven Rostedt start = bpage; 3853a1863c21SSteven Rostedt do { 3854a1863c21SSteven Rostedt if (bpage->page == (void *)addr) { 3855a1863c21SSteven Rostedt local_dec(&bpage->entries); 3856a1863c21SSteven Rostedt return; 3857a1863c21SSteven Rostedt } 38586689bed3SQiujun Huang rb_inc_page(&bpage); 3859a1863c21SSteven Rostedt } while (bpage != start); 3860a1863c21SSteven Rostedt 3861a1863c21SSteven Rostedt /* commit not part of this buffer?? */ 3862a1863c21SSteven Rostedt RB_WARN_ON(cpu_buffer, 1); 3863a1863c21SSteven Rostedt } 3864a1863c21SSteven Rostedt 38657a8e76a3SSteven Rostedt /** 386688883490SQiujun Huang * ring_buffer_discard_commit - discard an event that has not been committed 3867fa1b47ddSSteven Rostedt * @buffer: the ring buffer 3868fa1b47ddSSteven Rostedt * @event: non committed event to discard 3869fa1b47ddSSteven Rostedt * 3870dc892f73SSteven Rostedt * Sometimes an event that is in the ring buffer needs to be ignored. 3871dc892f73SSteven Rostedt * This function lets the user discard an event in the ring buffer 3872dc892f73SSteven Rostedt * and then that event will not be read later. 3873dc892f73SSteven Rostedt * 38746167c205SSteven Rostedt (VMware) * This function only works if it is called before the item has been 3875dc892f73SSteven Rostedt * committed. It will try to free the event from the ring buffer 3876fa1b47ddSSteven Rostedt * if another event has not been added behind it. 3877fa1b47ddSSteven Rostedt * 3878fa1b47ddSSteven Rostedt * If another event has been added behind it, it will set the event 3879fa1b47ddSSteven Rostedt * up as discarded, and perform the commit. 3880fa1b47ddSSteven Rostedt * 3881fa1b47ddSSteven Rostedt * If this function is called, do not call ring_buffer_unlock_commit on 3882fa1b47ddSSteven Rostedt * the event. 
3883fa1b47ddSSteven Rostedt */ 388413292494SSteven Rostedt (VMware) void ring_buffer_discard_commit(struct trace_buffer *buffer, 3885fa1b47ddSSteven Rostedt struct ring_buffer_event *event) 3886fa1b47ddSSteven Rostedt { 3887fa1b47ddSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 3888fa1b47ddSSteven Rostedt int cpu; 3889fa1b47ddSSteven Rostedt 3890fa1b47ddSSteven Rostedt /* The event is discarded regardless */ 3891f3b9aae1SFrederic Weisbecker rb_event_discard(event); 3892fa1b47ddSSteven Rostedt 3893fa743953SSteven Rostedt cpu = smp_processor_id(); 3894fa743953SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 3895fa743953SSteven Rostedt 3896fa1b47ddSSteven Rostedt /* 3897fa1b47ddSSteven Rostedt * This must only be called if the event has not been 3898fa1b47ddSSteven Rostedt * committed yet. Thus we can assume that preemption 3899fa1b47ddSSteven Rostedt * is still disabled. 3900fa1b47ddSSteven Rostedt */ 3901fa743953SSteven Rostedt RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 3902fa1b47ddSSteven Rostedt 3903a1863c21SSteven Rostedt rb_decrement_entry(cpu_buffer, event); 39040f2541d2SSteven Rostedt if (rb_try_to_discard(cpu_buffer, event)) 3905fa1b47ddSSteven Rostedt goto out; 3906fa1b47ddSSteven Rostedt 3907fa1b47ddSSteven Rostedt out: 3908fa743953SSteven Rostedt rb_end_commit(cpu_buffer); 3909fa1b47ddSSteven Rostedt 391058a09ec6SSteven Rostedt (Red Hat) trace_recursive_unlock(cpu_buffer); 3911f3b9aae1SFrederic Weisbecker 39125168ae50SSteven Rostedt preempt_enable_notrace(); 3913fa1b47ddSSteven Rostedt 3914fa1b47ddSSteven Rostedt } 3915fa1b47ddSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); 3916fa1b47ddSSteven Rostedt 3917fa1b47ddSSteven Rostedt /** 39187a8e76a3SSteven Rostedt * ring_buffer_write - write data to the buffer without reserving 39197a8e76a3SSteven Rostedt * @buffer: The ring buffer to write to. 39207a8e76a3SSteven Rostedt * @length: The length of the data being written (excluding the event header) 39217a8e76a3SSteven Rostedt * @data: The data to write to the buffer. 39227a8e76a3SSteven Rostedt * 39237a8e76a3SSteven Rostedt * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as 39247a8e76a3SSteven Rostedt * one function. If you already have the data to write to the buffer, it 39257a8e76a3SSteven Rostedt * may be easier to simply call this function. 39267a8e76a3SSteven Rostedt * 39277a8e76a3SSteven Rostedt * Note, like ring_buffer_lock_reserve, the length is the length of the data 39287a8e76a3SSteven Rostedt * and not the length of the event which would hold the header. 
39297a8e76a3SSteven Rostedt */ 393013292494SSteven Rostedt (VMware) int ring_buffer_write(struct trace_buffer *buffer, 39317a8e76a3SSteven Rostedt unsigned long length, 39327a8e76a3SSteven Rostedt void *data) 39337a8e76a3SSteven Rostedt { 39347a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 39357a8e76a3SSteven Rostedt struct ring_buffer_event *event; 39367a8e76a3SSteven Rostedt void *body; 39377a8e76a3SSteven Rostedt int ret = -EBUSY; 39385168ae50SSteven Rostedt int cpu; 39397a8e76a3SSteven Rostedt 39405168ae50SSteven Rostedt preempt_disable_notrace(); 3941bf41a158SSteven Rostedt 394252fbe9cdSLai Jiangshan if (atomic_read(&buffer->record_disabled)) 394352fbe9cdSLai Jiangshan goto out; 394452fbe9cdSLai Jiangshan 39457a8e76a3SSteven Rostedt cpu = raw_smp_processor_id(); 39467a8e76a3SSteven Rostedt 39479e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3948d769041fSSteven Rostedt goto out; 39497a8e76a3SSteven Rostedt 39507a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 39517a8e76a3SSteven Rostedt 39527a8e76a3SSteven Rostedt if (atomic_read(&cpu_buffer->record_disabled)) 39537a8e76a3SSteven Rostedt goto out; 39547a8e76a3SSteven Rostedt 3955be957c44SSteven Rostedt if (length > BUF_MAX_DATA_SIZE) 3956be957c44SSteven Rostedt goto out; 3957be957c44SSteven Rostedt 3958985e871bSSteven Rostedt (Red Hat) if (unlikely(trace_recursive_lock(cpu_buffer))) 3959985e871bSSteven Rostedt (Red Hat) goto out; 3960985e871bSSteven Rostedt (Red Hat) 396162f0b3ebSSteven Rostedt event = rb_reserve_next_event(buffer, cpu_buffer, length); 39627a8e76a3SSteven Rostedt if (!event) 3963985e871bSSteven Rostedt (Red Hat) goto out_unlock; 39647a8e76a3SSteven Rostedt 39657a8e76a3SSteven Rostedt body = rb_event_data(event); 39667a8e76a3SSteven Rostedt 39677a8e76a3SSteven Rostedt memcpy(body, data, length); 39687a8e76a3SSteven Rostedt 396904aabc32SSong Chen rb_commit(cpu_buffer); 39707a8e76a3SSteven Rostedt 397115693458SSteven Rostedt (Red Hat) rb_wakeups(buffer, cpu_buffer); 397215693458SSteven Rostedt (Red Hat) 39737a8e76a3SSteven Rostedt ret = 0; 3974985e871bSSteven Rostedt (Red Hat) 3975985e871bSSteven Rostedt (Red Hat) out_unlock: 3976985e871bSSteven Rostedt (Red Hat) trace_recursive_unlock(cpu_buffer); 3977985e871bSSteven Rostedt (Red Hat) 39787a8e76a3SSteven Rostedt out: 39795168ae50SSteven Rostedt preempt_enable_notrace(); 39807a8e76a3SSteven Rostedt 39817a8e76a3SSteven Rostedt return ret; 39827a8e76a3SSteven Rostedt } 3983c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_write); 39847a8e76a3SSteven Rostedt 3985da58834cSYaowei Bai static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 3986bf41a158SSteven Rostedt { 3987bf41a158SSteven Rostedt struct buffer_page *reader = cpu_buffer->reader_page; 398877ae365eSSteven Rostedt struct buffer_page *head = rb_set_head_page(cpu_buffer); 3989bf41a158SSteven Rostedt struct buffer_page *commit = cpu_buffer->commit_page; 3990bf41a158SSteven Rostedt 399177ae365eSSteven Rostedt /* In case of error, head will be NULL */ 399277ae365eSSteven Rostedt if (unlikely(!head)) 3993da58834cSYaowei Bai return true; 399477ae365eSSteven Rostedt 399567f0d6d9SHaoran Luo /* Reader should exhaust content in reader page */ 399667f0d6d9SHaoran Luo if (reader->read != rb_page_commit(reader)) 399767f0d6d9SHaoran Luo return false; 399867f0d6d9SHaoran Luo 399967f0d6d9SHaoran Luo /* 400067f0d6d9SHaoran Luo * If writers are committing on the reader page, knowing all 400167f0d6d9SHaoran Luo * committed content has been read, the ring buffer is empty. 
400267f0d6d9SHaoran Luo */ 400367f0d6d9SHaoran Luo if (commit == reader) 400467f0d6d9SHaoran Luo return true; 400567f0d6d9SHaoran Luo 400667f0d6d9SHaoran Luo /* 400767f0d6d9SHaoran Luo * If writers are committing on a page other than reader page 400867f0d6d9SHaoran Luo * and head page, there should always be content to read. 400967f0d6d9SHaoran Luo */ 401067f0d6d9SHaoran Luo if (commit != head) 401167f0d6d9SHaoran Luo return false; 401267f0d6d9SHaoran Luo 401367f0d6d9SHaoran Luo /* 401467f0d6d9SHaoran Luo * Writers are committing on the head page, we just need 401567f0d6d9SHaoran Luo * to care about whether there is committed data, and the reader will 401667f0d6d9SHaoran Luo * swap the reader page with the head page when it needs to read data. 401767f0d6d9SHaoran Luo */ 401867f0d6d9SHaoran Luo return rb_page_commit(commit) == 0; 4019bf41a158SSteven Rostedt } 4020bf41a158SSteven Rostedt 40217a8e76a3SSteven Rostedt /** 40227a8e76a3SSteven Rostedt * ring_buffer_record_disable - stop all writes into the buffer 40237a8e76a3SSteven Rostedt * @buffer: The ring buffer to stop writes to. 40247a8e76a3SSteven Rostedt * 40257a8e76a3SSteven Rostedt * This prevents all writes to the buffer. Any attempt to write 40267a8e76a3SSteven Rostedt * to the buffer after this will fail and return NULL. 40277a8e76a3SSteven Rostedt * 402874401729SPaul E. McKenney * The caller should call synchronize_rcu() after this. 40297a8e76a3SSteven Rostedt */ 403013292494SSteven Rostedt (VMware) void ring_buffer_record_disable(struct trace_buffer *buffer) 40317a8e76a3SSteven Rostedt { 40327a8e76a3SSteven Rostedt atomic_inc(&buffer->record_disabled); 40337a8e76a3SSteven Rostedt } 4034c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable); 40357a8e76a3SSteven Rostedt 40367a8e76a3SSteven Rostedt /** 40377a8e76a3SSteven Rostedt * ring_buffer_record_enable - enable writes to the buffer 40387a8e76a3SSteven Rostedt * @buffer: The ring buffer to enable writes 40397a8e76a3SSteven Rostedt * 40407a8e76a3SSteven Rostedt * Note, multiple disables will need the same number of enables 4041c41b20e7SAdam Buchbinder * to truly enable the writing (much like preempt_disable). 40427a8e76a3SSteven Rostedt */ 404313292494SSteven Rostedt (VMware) void ring_buffer_record_enable(struct trace_buffer *buffer) 40447a8e76a3SSteven Rostedt { 40457a8e76a3SSteven Rostedt atomic_dec(&buffer->record_disabled); 40467a8e76a3SSteven Rostedt } 4047c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable); 40487a8e76a3SSteven Rostedt 40497a8e76a3SSteven Rostedt /** 4050499e5470SSteven Rostedt * ring_buffer_record_off - stop all writes into the buffer 4051499e5470SSteven Rostedt * @buffer: The ring buffer to stop writes to. 4052499e5470SSteven Rostedt * 4053499e5470SSteven Rostedt * This prevents all writes to the buffer. Any attempt to write 4054499e5470SSteven Rostedt * to the buffer after this will fail and return NULL. 4055499e5470SSteven Rostedt * 4056499e5470SSteven Rostedt * This is different from ring_buffer_record_disable() as 405787abb3b1SWang Tianhong * it works like an on/off switch, whereas the disable() version 4058499e5470SSteven Rostedt * must be paired with an enable().
4059499e5470SSteven Rostedt */ 406013292494SSteven Rostedt (VMware) void ring_buffer_record_off(struct trace_buffer *buffer) 4061499e5470SSteven Rostedt { 4062499e5470SSteven Rostedt unsigned int rd; 4063499e5470SSteven Rostedt unsigned int new_rd; 4064499e5470SSteven Rostedt 4065499e5470SSteven Rostedt rd = atomic_read(&buffer->record_disabled); 40668328e36dSUros Bizjak do { 4067499e5470SSteven Rostedt new_rd = rd | RB_BUFFER_OFF; 40688328e36dSUros Bizjak } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); 4069499e5470SSteven Rostedt } 4070499e5470SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_record_off); 4071499e5470SSteven Rostedt 4072499e5470SSteven Rostedt /** 4073499e5470SSteven Rostedt * ring_buffer_record_on - restart writes into the buffer 4074499e5470SSteven Rostedt * @buffer: The ring buffer to start writes to. 4075499e5470SSteven Rostedt * 4076499e5470SSteven Rostedt * This enables all writes to the buffer that was disabled by 4077499e5470SSteven Rostedt * ring_buffer_record_off(). 4078499e5470SSteven Rostedt * 4079499e5470SSteven Rostedt * This is different from ring_buffer_record_enable() as 408087abb3b1SWang Tianhong * it works like an on/off switch, whereas the enable() version 4081499e5470SSteven Rostedt * must be paired with a disable(). 4082499e5470SSteven Rostedt */ 408313292494SSteven Rostedt (VMware) void ring_buffer_record_on(struct trace_buffer *buffer) 4084499e5470SSteven Rostedt { 4085499e5470SSteven Rostedt unsigned int rd; 4086499e5470SSteven Rostedt unsigned int new_rd; 4087499e5470SSteven Rostedt 4088499e5470SSteven Rostedt rd = atomic_read(&buffer->record_disabled); 40898328e36dSUros Bizjak do { 4090499e5470SSteven Rostedt new_rd = rd & ~RB_BUFFER_OFF; 40918328e36dSUros Bizjak } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd)); 4092499e5470SSteven Rostedt } 4093499e5470SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_record_on); 4094499e5470SSteven Rostedt 4095499e5470SSteven Rostedt /** 4096499e5470SSteven Rostedt * ring_buffer_record_is_on - return true if the ring buffer can write 4097499e5470SSteven Rostedt * @buffer: The ring buffer to see if write is enabled 4098499e5470SSteven Rostedt * 4099499e5470SSteven Rostedt * Returns true if the ring buffer is in a state where it accepts writes. 4100499e5470SSteven Rostedt */ 410113292494SSteven Rostedt (VMware) bool ring_buffer_record_is_on(struct trace_buffer *buffer) 4102499e5470SSteven Rostedt { 4103499e5470SSteven Rostedt return !atomic_read(&buffer->record_disabled); 4104499e5470SSteven Rostedt } 4105499e5470SSteven Rostedt 4106499e5470SSteven Rostedt /** 410773c8d894SMasami Hiramatsu * ring_buffer_record_is_set_on - return true if the ring buffer is set writable 410873c8d894SMasami Hiramatsu * @buffer: The ring buffer to see if write is set enabled 410973c8d894SMasami Hiramatsu * 411073c8d894SMasami Hiramatsu * Returns true if the ring buffer is set writable by ring_buffer_record_on(). 411173c8d894SMasami Hiramatsu * Note that this does NOT mean it is in a writable state. 411273c8d894SMasami Hiramatsu * 411373c8d894SMasami Hiramatsu * It may return true when the ring buffer has been disabled by 411473c8d894SMasami Hiramatsu * ring_buffer_record_disable(), as that is a temporary disabling of 411573c8d894SMasami Hiramatsu * the ring buffer.
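 *
 * For example (an illustrative note, matching the check below): after
 * ring_buffer_record_disable() alone, ring_buffer_record_is_on()
 * returns false while this function still returns true, because only
 * the RB_BUFFER_OFF bit is examined here.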
411673c8d894SMasami Hiramatsu */ 411713292494SSteven Rostedt (VMware) bool ring_buffer_record_is_set_on(struct trace_buffer *buffer) 411873c8d894SMasami Hiramatsu { 411973c8d894SMasami Hiramatsu return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF); 412073c8d894SMasami Hiramatsu } 412173c8d894SMasami Hiramatsu 412273c8d894SMasami Hiramatsu /** 41237a8e76a3SSteven Rostedt * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 41247a8e76a3SSteven Rostedt * @buffer: The ring buffer to stop writes to. 41257a8e76a3SSteven Rostedt * @cpu: The CPU buffer to stop 41267a8e76a3SSteven Rostedt * 41277a8e76a3SSteven Rostedt * This prevents all writes to the buffer. Any attempt to write 41287a8e76a3SSteven Rostedt * to the buffer after this will fail and return NULL. 41297a8e76a3SSteven Rostedt * 413074401729SPaul E. McKenney * The caller should call synchronize_rcu() after this. 41317a8e76a3SSteven Rostedt */ 413213292494SSteven Rostedt (VMware) void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) 41337a8e76a3SSteven Rostedt { 41347a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 41357a8e76a3SSteven Rostedt 41369e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 41378aabee57SSteven Rostedt return; 41387a8e76a3SSteven Rostedt 41397a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 41407a8e76a3SSteven Rostedt atomic_inc(&cpu_buffer->record_disabled); 41417a8e76a3SSteven Rostedt } 4142c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); 41437a8e76a3SSteven Rostedt 41447a8e76a3SSteven Rostedt /** 41457a8e76a3SSteven Rostedt * ring_buffer_record_enable_cpu - enable writes to the buffer 41467a8e76a3SSteven Rostedt * @buffer: The ring buffer to enable writes 41477a8e76a3SSteven Rostedt * @cpu: The CPU to enable. 41487a8e76a3SSteven Rostedt * 41497a8e76a3SSteven Rostedt * Note, multiple disables will need the same number of enables 4150c41b20e7SAdam Buchbinder * to truly enable the writing (much like preempt_disable). 41517a8e76a3SSteven Rostedt */ 415213292494SSteven Rostedt (VMware) void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) 41537a8e76a3SSteven Rostedt { 41547a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 41557a8e76a3SSteven Rostedt 41569e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 41578aabee57SSteven Rostedt return; 41587a8e76a3SSteven Rostedt 41597a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 41607a8e76a3SSteven Rostedt atomic_dec(&cpu_buffer->record_disabled); 41617a8e76a3SSteven Rostedt } 4162c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); 41637a8e76a3SSteven Rostedt 4164f6195aa0SSteven Rostedt /* 4165f6195aa0SSteven Rostedt * The total entries in the ring buffer is the running counter 4166f6195aa0SSteven Rostedt * of entries entered into the ring buffer, minus the sum of 4167f6195aa0SSteven Rostedt * the entries read from the ring buffer and the number of 4168f6195aa0SSteven Rostedt * entries that were overwritten. 
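 *
 * As a sketch, matching the computation below:
 *
 *	rb_num_of_entries() = entries - (overrun + read)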
4169f6195aa0SSteven Rostedt */ 4170f6195aa0SSteven Rostedt static inline unsigned long 4171f6195aa0SSteven Rostedt rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) 4172f6195aa0SSteven Rostedt { 4173f6195aa0SSteven Rostedt return local_read(&cpu_buffer->entries) - 4174f6195aa0SSteven Rostedt (local_read(&cpu_buffer->overrun) + cpu_buffer->read); 4175f6195aa0SSteven Rostedt } 4176f6195aa0SSteven Rostedt 41777a8e76a3SSteven Rostedt /** 4178c64e148aSVaibhav Nagarnaik * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer 4179c64e148aSVaibhav Nagarnaik * @buffer: The ring buffer 4180c64e148aSVaibhav Nagarnaik * @cpu: The per CPU buffer to read from. 4181c64e148aSVaibhav Nagarnaik */ 418213292494SSteven Rostedt (VMware) u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) 4183c64e148aSVaibhav Nagarnaik { 4184c64e148aSVaibhav Nagarnaik unsigned long flags; 4185c64e148aSVaibhav Nagarnaik struct ring_buffer_per_cpu *cpu_buffer; 4186c64e148aSVaibhav Nagarnaik struct buffer_page *bpage; 4187da830e58SLinus Torvalds u64 ret = 0; 4188c64e148aSVaibhav Nagarnaik 4189c64e148aSVaibhav Nagarnaik if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4190c64e148aSVaibhav Nagarnaik return 0; 4191c64e148aSVaibhav Nagarnaik 4192c64e148aSVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu]; 41937115e3fcSLinus Torvalds raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4194c64e148aSVaibhav Nagarnaik /* 4195c64e148aSVaibhav Nagarnaik * if the tail is on reader_page, oldest time stamp is on the reader 4196c64e148aSVaibhav Nagarnaik * page 4197c64e148aSVaibhav Nagarnaik */ 4198c64e148aSVaibhav Nagarnaik if (cpu_buffer->tail_page == cpu_buffer->reader_page) 4199c64e148aSVaibhav Nagarnaik bpage = cpu_buffer->reader_page; 4200c64e148aSVaibhav Nagarnaik else 4201c64e148aSVaibhav Nagarnaik bpage = rb_set_head_page(cpu_buffer); 420254f7be5bSSteven Rostedt if (bpage) 4203c64e148aSVaibhav Nagarnaik ret = bpage->page->time_stamp; 42047115e3fcSLinus Torvalds raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 4205c64e148aSVaibhav Nagarnaik 4206c64e148aSVaibhav Nagarnaik return ret; 4207c64e148aSVaibhav Nagarnaik } 4208c64e148aSVaibhav Nagarnaik EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); 4209c64e148aSVaibhav Nagarnaik 4210c64e148aSVaibhav Nagarnaik /** 4211c64e148aSVaibhav Nagarnaik * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer 4212c64e148aSVaibhav Nagarnaik * @buffer: The ring buffer 4213c64e148aSVaibhav Nagarnaik * @cpu: The per CPU buffer to read from. 
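 *
 * Returns the number of bytes produced on that CPU and not yet
 * consumed, i.e. entries_bytes minus read_bytes (see the body below).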
4214c64e148aSVaibhav Nagarnaik */ 421513292494SSteven Rostedt (VMware) unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) 4216c64e148aSVaibhav Nagarnaik { 4217c64e148aSVaibhav Nagarnaik struct ring_buffer_per_cpu *cpu_buffer; 4218c64e148aSVaibhav Nagarnaik unsigned long ret; 4219c64e148aSVaibhav Nagarnaik 4220c64e148aSVaibhav Nagarnaik if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4221c64e148aSVaibhav Nagarnaik return 0; 4222c64e148aSVaibhav Nagarnaik 4223c64e148aSVaibhav Nagarnaik cpu_buffer = buffer->buffers[cpu]; 4224c64e148aSVaibhav Nagarnaik ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; 4225c64e148aSVaibhav Nagarnaik 4226c64e148aSVaibhav Nagarnaik return ret; 4227c64e148aSVaibhav Nagarnaik } 4228c64e148aSVaibhav Nagarnaik EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); 4229c64e148aSVaibhav Nagarnaik 4230c64e148aSVaibhav Nagarnaik /** 42317a8e76a3SSteven Rostedt * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 42327a8e76a3SSteven Rostedt * @buffer: The ring buffer 42337a8e76a3SSteven Rostedt * @cpu: The per CPU buffer to get the entries from. 42347a8e76a3SSteven Rostedt */ 423513292494SSteven Rostedt (VMware) unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) 42367a8e76a3SSteven Rostedt { 42377a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 42387a8e76a3SSteven Rostedt 42399e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 42408aabee57SSteven Rostedt return 0; 42417a8e76a3SSteven Rostedt 42427a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 4243554f786eSSteven Rostedt 4244f6195aa0SSteven Rostedt return rb_num_of_entries(cpu_buffer); 42457a8e76a3SSteven Rostedt } 4246c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); 42477a8e76a3SSteven Rostedt 42487a8e76a3SSteven Rostedt /** 4249884bfe89SSlava Pestov * ring_buffer_overrun_cpu - get the number of overruns caused by the ring 4250884bfe89SSlava Pestov * buffer wrapping around (only if RB_FL_OVERWRITE is on). 42517a8e76a3SSteven Rostedt * @buffer: The ring buffer 42527a8e76a3SSteven Rostedt * @cpu: The per CPU buffer to get the number of overruns from 42537a8e76a3SSteven Rostedt */ 425413292494SSteven Rostedt (VMware) unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) 42557a8e76a3SSteven Rostedt { 42567a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 42578aabee57SSteven Rostedt unsigned long ret; 42587a8e76a3SSteven Rostedt 42599e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 42608aabee57SSteven Rostedt return 0; 42617a8e76a3SSteven Rostedt 42627a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 426377ae365eSSteven Rostedt ret = local_read(&cpu_buffer->overrun); 4264554f786eSSteven Rostedt 4265554f786eSSteven Rostedt return ret; 42667a8e76a3SSteven Rostedt } 4267c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); 42687a8e76a3SSteven Rostedt 42697a8e76a3SSteven Rostedt /** 4270884bfe89SSlava Pestov * ring_buffer_commit_overrun_cpu - get the number of overruns caused by 4271884bfe89SSlava Pestov * commits failing due to the buffer wrapping around while there are uncommitted 4272884bfe89SSlava Pestov * events, such as during an interrupt storm. 
4273f0d2c681SSteven Rostedt * @buffer: The ring buffer 4274f0d2c681SSteven Rostedt * @cpu: The per CPU buffer to get the number of overruns from 4275f0d2c681SSteven Rostedt */ 4276f0d2c681SSteven Rostedt unsigned long 427713292494SSteven Rostedt (VMware) ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) 4278f0d2c681SSteven Rostedt { 4279f0d2c681SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 4280f0d2c681SSteven Rostedt unsigned long ret; 4281f0d2c681SSteven Rostedt 4282f0d2c681SSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4283f0d2c681SSteven Rostedt return 0; 4284f0d2c681SSteven Rostedt 4285f0d2c681SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 428677ae365eSSteven Rostedt ret = local_read(&cpu_buffer->commit_overrun); 4287f0d2c681SSteven Rostedt 4288f0d2c681SSteven Rostedt return ret; 4289f0d2c681SSteven Rostedt } 4290f0d2c681SSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); 4291f0d2c681SSteven Rostedt 4292f0d2c681SSteven Rostedt /** 4293884bfe89SSlava Pestov * ring_buffer_dropped_events_cpu - get the number of dropped events caused by 4294884bfe89SSlava Pestov * the ring buffer filling up (only if RB_FL_OVERWRITE is off). 4295884bfe89SSlava Pestov * @buffer: The ring buffer 4296884bfe89SSlava Pestov * @cpu: The per CPU buffer to get the number of overruns from 4297884bfe89SSlava Pestov */ 4298884bfe89SSlava Pestov unsigned long 429913292494SSteven Rostedt (VMware) ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) 4300884bfe89SSlava Pestov { 4301884bfe89SSlava Pestov struct ring_buffer_per_cpu *cpu_buffer; 4302884bfe89SSlava Pestov unsigned long ret; 4303884bfe89SSlava Pestov 4304884bfe89SSlava Pestov if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4305884bfe89SSlava Pestov return 0; 4306884bfe89SSlava Pestov 4307884bfe89SSlava Pestov cpu_buffer = buffer->buffers[cpu]; 4308884bfe89SSlava Pestov ret = local_read(&cpu_buffer->dropped_events); 4309884bfe89SSlava Pestov 4310884bfe89SSlava Pestov return ret; 4311884bfe89SSlava Pestov } 4312884bfe89SSlava Pestov EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); 4313884bfe89SSlava Pestov 4314884bfe89SSlava Pestov /** 4315ad964704SSteven Rostedt (Red Hat) * ring_buffer_read_events_cpu - get the number of events successfully read 4316ad964704SSteven Rostedt (Red Hat) * @buffer: The ring buffer 4317ad964704SSteven Rostedt (Red Hat) * @cpu: The per CPU buffer to get the number of events read 4318ad964704SSteven Rostedt (Red Hat) */ 4319ad964704SSteven Rostedt (Red Hat) unsigned long 432013292494SSteven Rostedt (VMware) ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) 4321ad964704SSteven Rostedt (Red Hat) { 4322ad964704SSteven Rostedt (Red Hat) struct ring_buffer_per_cpu *cpu_buffer; 4323ad964704SSteven Rostedt (Red Hat) 4324ad964704SSteven Rostedt (Red Hat) if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4325ad964704SSteven Rostedt (Red Hat) return 0; 4326ad964704SSteven Rostedt (Red Hat) 4327ad964704SSteven Rostedt (Red Hat) cpu_buffer = buffer->buffers[cpu]; 4328ad964704SSteven Rostedt (Red Hat) return cpu_buffer->read; 4329ad964704SSteven Rostedt (Red Hat) } 4330ad964704SSteven Rostedt (Red Hat) EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); 4331ad964704SSteven Rostedt (Red Hat) 4332ad964704SSteven Rostedt (Red Hat) /** 43337a8e76a3SSteven Rostedt * ring_buffer_entries - get the number of entries in a buffer 43347a8e76a3SSteven Rostedt * @buffer: The ring buffer 43357a8e76a3SSteven Rostedt * 43367a8e76a3SSteven Rostedt * Returns the total number of 
entries in the ring buffer 43377a8e76a3SSteven Rostedt * (all CPU entries) 43387a8e76a3SSteven Rostedt */ 433913292494SSteven Rostedt (VMware) unsigned long ring_buffer_entries(struct trace_buffer *buffer) 43407a8e76a3SSteven Rostedt { 43417a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 43427a8e76a3SSteven Rostedt unsigned long entries = 0; 43437a8e76a3SSteven Rostedt int cpu; 43447a8e76a3SSteven Rostedt 43457a8e76a3SSteven Rostedt /* if you care about this being correct, lock the buffer */ 43467a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 43477a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 4348f6195aa0SSteven Rostedt entries += rb_num_of_entries(cpu_buffer); 43497a8e76a3SSteven Rostedt } 43507a8e76a3SSteven Rostedt 43517a8e76a3SSteven Rostedt return entries; 43527a8e76a3SSteven Rostedt } 4353c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_entries); 43547a8e76a3SSteven Rostedt 43557a8e76a3SSteven Rostedt /** 435667b394f7SJiri Olsa * ring_buffer_overruns - get the number of overruns in buffer 43577a8e76a3SSteven Rostedt * @buffer: The ring buffer 43587a8e76a3SSteven Rostedt * 43597a8e76a3SSteven Rostedt * Returns the total number of overruns in the ring buffer 43607a8e76a3SSteven Rostedt * (all CPU entries) 43617a8e76a3SSteven Rostedt */ 436213292494SSteven Rostedt (VMware) unsigned long ring_buffer_overruns(struct trace_buffer *buffer) 43637a8e76a3SSteven Rostedt { 43647a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 43657a8e76a3SSteven Rostedt unsigned long overruns = 0; 43667a8e76a3SSteven Rostedt int cpu; 43677a8e76a3SSteven Rostedt 43687a8e76a3SSteven Rostedt /* if you care about this being correct, lock the buffer */ 43697a8e76a3SSteven Rostedt for_each_buffer_cpu(buffer, cpu) { 43707a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 437177ae365eSSteven Rostedt overruns += local_read(&cpu_buffer->overrun); 43727a8e76a3SSteven Rostedt } 43737a8e76a3SSteven Rostedt 43747a8e76a3SSteven Rostedt return overruns; 43757a8e76a3SSteven Rostedt } 4376c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_overruns); 43777a8e76a3SSteven Rostedt 4378642edba5SSteven Rostedt static void rb_iter_reset(struct ring_buffer_iter *iter) 43797a8e76a3SSteven Rostedt { 43807a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 43817a8e76a3SSteven Rostedt 4382d769041fSSteven Rostedt /* Iterator usage is expected to have record disabled */ 4383d769041fSSteven Rostedt iter->head_page = cpu_buffer->reader_page; 43846f807acdSSteven Rostedt iter->head = cpu_buffer->reader_page->read; 4385785888c5SSteven Rostedt (VMware) iter->next_event = iter->head; 4386651e22f2SSteven Rostedt (Red Hat) 4387651e22f2SSteven Rostedt (Red Hat) iter->cache_reader_page = iter->head_page; 438824607f11SSteven Rostedt (Red Hat) iter->cache_read = cpu_buffer->read; 43892d093282SZheng Yejian iter->cache_pages_removed = cpu_buffer->pages_removed; 4390651e22f2SSteven Rostedt (Red Hat) 439128e3fc56SSteven Rostedt (VMware) if (iter->head) { 4392d769041fSSteven Rostedt iter->read_stamp = cpu_buffer->read_stamp; 439328e3fc56SSteven Rostedt (VMware) iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; 439428e3fc56SSteven Rostedt (VMware) } else { 4395abc9b56dSSteven Rostedt iter->read_stamp = iter->head_page->page->time_stamp; 439628e3fc56SSteven Rostedt (VMware) iter->page_stamp = iter->read_stamp; 439728e3fc56SSteven Rostedt (VMware) } 4398642edba5SSteven Rostedt } 4399f83c9d0fSSteven Rostedt 4400642edba5SSteven Rostedt /** 
4401642edba5SSteven Rostedt * ring_buffer_iter_reset - reset an iterator 4402642edba5SSteven Rostedt * @iter: The iterator to reset 4403642edba5SSteven Rostedt * 4404642edba5SSteven Rostedt * Resets the iterator, so that it will start from the beginning 4405642edba5SSteven Rostedt * again. 4406642edba5SSteven Rostedt */ 4407642edba5SSteven Rostedt void ring_buffer_iter_reset(struct ring_buffer_iter *iter) 4408642edba5SSteven Rostedt { 4409554f786eSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 4410642edba5SSteven Rostedt unsigned long flags; 4411642edba5SSteven Rostedt 4412554f786eSSteven Rostedt if (!iter) 4413554f786eSSteven Rostedt return; 4414554f786eSSteven Rostedt 4415554f786eSSteven Rostedt cpu_buffer = iter->cpu_buffer; 4416554f786eSSteven Rostedt 44175389f6faSThomas Gleixner raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 4418642edba5SSteven Rostedt rb_iter_reset(iter); 44195389f6faSThomas Gleixner raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 44207a8e76a3SSteven Rostedt } 4421c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); 44227a8e76a3SSteven Rostedt 44237a8e76a3SSteven Rostedt /** 44247a8e76a3SSteven Rostedt * ring_buffer_iter_empty - check if an iterator has no more to read 44257a8e76a3SSteven Rostedt * @iter: The iterator to check 44267a8e76a3SSteven Rostedt */ 44277a8e76a3SSteven Rostedt int ring_buffer_iter_empty(struct ring_buffer_iter *iter) 44287a8e76a3SSteven Rostedt { 44297a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 443078f7a45dSSteven Rostedt (VMware) struct buffer_page *reader; 443178f7a45dSSteven Rostedt (VMware) struct buffer_page *head_page; 443278f7a45dSSteven Rostedt (VMware) struct buffer_page *commit_page; 4433ead6ecfdSSteven Rostedt (VMware) struct buffer_page *curr_commit_page; 443478f7a45dSSteven Rostedt (VMware) unsigned commit; 4435ead6ecfdSSteven Rostedt (VMware) u64 curr_commit_ts; 4436ead6ecfdSSteven Rostedt (VMware) u64 commit_ts; 44377a8e76a3SSteven Rostedt 44387a8e76a3SSteven Rostedt cpu_buffer = iter->cpu_buffer; 443978f7a45dSSteven Rostedt (VMware) reader = cpu_buffer->reader_page; 444078f7a45dSSteven Rostedt (VMware) head_page = cpu_buffer->head_page; 444178f7a45dSSteven Rostedt (VMware) commit_page = cpu_buffer->commit_page; 4442ead6ecfdSSteven Rostedt (VMware) commit_ts = commit_page->page->time_stamp; 444378f7a45dSSteven Rostedt (VMware) 4444ead6ecfdSSteven Rostedt (VMware) /* 4445ead6ecfdSSteven Rostedt (VMware) * When the writer goes across pages, it issues a cmpxchg which 4446ead6ecfdSSteven Rostedt (VMware) * is a mb(), which will synchronize with the rmb here. 
4447ead6ecfdSSteven Rostedt (VMware) * (see rb_tail_page_update()) 4448ead6ecfdSSteven Rostedt (VMware) */ 4449ead6ecfdSSteven Rostedt (VMware) smp_rmb(); 4450ead6ecfdSSteven Rostedt (VMware) commit = rb_page_commit(commit_page); 4451ead6ecfdSSteven Rostedt (VMware) /* We want to make sure that the commit page doesn't change */ 4452ead6ecfdSSteven Rostedt (VMware) smp_rmb(); 4453ead6ecfdSSteven Rostedt (VMware) 4454ead6ecfdSSteven Rostedt (VMware) /* Make sure commit page didn't change */ 4455ead6ecfdSSteven Rostedt (VMware) curr_commit_page = READ_ONCE(cpu_buffer->commit_page); 4456ead6ecfdSSteven Rostedt (VMware) curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp); 4457ead6ecfdSSteven Rostedt (VMware) 4458ead6ecfdSSteven Rostedt (VMware) /* If the commit page changed, then there's more data */ 4459ead6ecfdSSteven Rostedt (VMware) if (curr_commit_page != commit_page || 4460ead6ecfdSSteven Rostedt (VMware) curr_commit_ts != commit_ts) 4461ead6ecfdSSteven Rostedt (VMware) return 0; 4462ead6ecfdSSteven Rostedt (VMware) 4463ead6ecfdSSteven Rostedt (VMware) /* Still racy, as it may return a false positive, but that's OK */ 4464785888c5SSteven Rostedt (VMware) return ((iter->head_page == commit_page && iter->head >= commit) || 446578f7a45dSSteven Rostedt (VMware) (iter->head_page == reader && commit_page == head_page && 446678f7a45dSSteven Rostedt (VMware) head_page->read == commit && 446778f7a45dSSteven Rostedt (VMware) iter->head == rb_page_commit(cpu_buffer->reader_page))); 44687a8e76a3SSteven Rostedt } 4469c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); 44707a8e76a3SSteven Rostedt 44717a8e76a3SSteven Rostedt static void 44727a8e76a3SSteven Rostedt rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 44737a8e76a3SSteven Rostedt struct ring_buffer_event *event) 44747a8e76a3SSteven Rostedt { 44757a8e76a3SSteven Rostedt u64 delta; 44767a8e76a3SSteven Rostedt 4477334d4169SLai Jiangshan switch (event->type_len) { 44787a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 44797a8e76a3SSteven Rostedt return; 44807a8e76a3SSteven Rostedt 44817a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 4482e20044f7SSteven Rostedt (VMware) delta = rb_event_time_stamp(event); 44837a8e76a3SSteven Rostedt cpu_buffer->read_stamp += delta; 44847a8e76a3SSteven Rostedt return; 44857a8e76a3SSteven Rostedt 44867a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 4487e20044f7SSteven Rostedt (VMware) delta = rb_event_time_stamp(event); 44886695da58SSteven Rostedt (Google) delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); 4489dc4e2801STom Zanussi cpu_buffer->read_stamp = delta; 44907a8e76a3SSteven Rostedt return; 44917a8e76a3SSteven Rostedt 44927a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 44937a8e76a3SSteven Rostedt cpu_buffer->read_stamp += event->time_delta; 44947a8e76a3SSteven Rostedt return; 44957a8e76a3SSteven Rostedt 44967a8e76a3SSteven Rostedt default: 4497da4d401aSSteven Rostedt (VMware) RB_WARN_ON(cpu_buffer, 1); 44987a8e76a3SSteven Rostedt } 44997a8e76a3SSteven Rostedt } 45007a8e76a3SSteven Rostedt 45017a8e76a3SSteven Rostedt static void 45027a8e76a3SSteven Rostedt rb_update_iter_read_stamp(struct ring_buffer_iter *iter, 45037a8e76a3SSteven Rostedt struct ring_buffer_event *event) 45047a8e76a3SSteven Rostedt { 45057a8e76a3SSteven Rostedt u64 delta; 45067a8e76a3SSteven Rostedt 4507334d4169SLai Jiangshan switch (event->type_len) { 45087a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 45097a8e76a3SSteven Rostedt return; 45107a8e76a3SSteven Rostedt 45117a8e76a3SSteven 
Rostedt case RINGBUF_TYPE_TIME_EXTEND: 4512e20044f7SSteven Rostedt (VMware) delta = rb_event_time_stamp(event); 45137a8e76a3SSteven Rostedt iter->read_stamp += delta; 45147a8e76a3SSteven Rostedt return; 45157a8e76a3SSteven Rostedt 45167a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 4517e20044f7SSteven Rostedt (VMware) delta = rb_event_time_stamp(event); 45186695da58SSteven Rostedt (Google) delta = rb_fix_abs_ts(delta, iter->read_stamp); 4519dc4e2801STom Zanussi iter->read_stamp = delta; 45207a8e76a3SSteven Rostedt return; 45217a8e76a3SSteven Rostedt 45227a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 45237a8e76a3SSteven Rostedt iter->read_stamp += event->time_delta; 45247a8e76a3SSteven Rostedt return; 45257a8e76a3SSteven Rostedt 45267a8e76a3SSteven Rostedt default: 4527da4d401aSSteven Rostedt (VMware) RB_WARN_ON(iter->cpu_buffer, 1); 45287a8e76a3SSteven Rostedt } 45297a8e76a3SSteven Rostedt } 45307a8e76a3SSteven Rostedt 4531d769041fSSteven Rostedt static struct buffer_page * 4532d769041fSSteven Rostedt rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) 45337a8e76a3SSteven Rostedt { 4534d769041fSSteven Rostedt struct buffer_page *reader = NULL; 453566a8cb95SSteven Rostedt unsigned long overwrite; 4536d769041fSSteven Rostedt unsigned long flags; 4537818e3dd3SSteven Rostedt int nr_loops = 0; 4538bc92b956SUros Bizjak bool ret; 4539d769041fSSteven Rostedt 45403e03fb7fSSteven Rostedt local_irq_save(flags); 45410199c4e6SThomas Gleixner arch_spin_lock(&cpu_buffer->lock); 4542d769041fSSteven Rostedt 4543d769041fSSteven Rostedt again: 4544818e3dd3SSteven Rostedt /* 4545818e3dd3SSteven Rostedt * This should normally only loop twice. But because the 4546818e3dd3SSteven Rostedt * start of the reader inserts an empty page, it causes 4547818e3dd3SSteven Rostedt * a case where we will loop three times. There should be no 4548818e3dd3SSteven Rostedt * reason to loop four times (that I know of). 4549818e3dd3SSteven Rostedt */ 45503e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { 4551818e3dd3SSteven Rostedt reader = NULL; 4552818e3dd3SSteven Rostedt goto out; 4553818e3dd3SSteven Rostedt } 4554818e3dd3SSteven Rostedt 4555d769041fSSteven Rostedt reader = cpu_buffer->reader_page; 4556d769041fSSteven Rostedt 4557d769041fSSteven Rostedt /* If there's more to read, return this page */ 4558bf41a158SSteven Rostedt if (cpu_buffer->reader_page->read < rb_page_size(reader)) 4559d769041fSSteven Rostedt goto out; 4560d769041fSSteven Rostedt 4561d769041fSSteven Rostedt /* Never should we have an index greater than the size */ 45623e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, 45633e89c7bbSSteven Rostedt cpu_buffer->reader_page->read > rb_page_size(reader))) 45643e89c7bbSSteven Rostedt goto out; 4565d769041fSSteven Rostedt 4566d769041fSSteven Rostedt /* check if we caught up to the tail */ 4567d769041fSSteven Rostedt reader = NULL; 4568bf41a158SSteven Rostedt if (cpu_buffer->commit_page == cpu_buffer->reader_page) 4569d769041fSSteven Rostedt goto out; 45707a8e76a3SSteven Rostedt 4571a5fb8331SSteven Rostedt /* Don't bother swapping if the ring buffer is empty */ 4572a5fb8331SSteven Rostedt if (rb_num_of_entries(cpu_buffer) == 0) 4573a5fb8331SSteven Rostedt goto out; 4574a5fb8331SSteven Rostedt 45757a8e76a3SSteven Rostedt /* 4576d769041fSSteven Rostedt * Reset the reader page to size zero. 
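 * That way, once the page is spliced into the ring in place of the
 * head page, it appears empty to both the writer and later readers.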
45777a8e76a3SSteven Rostedt */ 457877ae365eSSteven Rostedt local_set(&cpu_buffer->reader_page->write, 0); 457977ae365eSSteven Rostedt local_set(&cpu_buffer->reader_page->entries, 0); 458077ae365eSSteven Rostedt local_set(&cpu_buffer->reader_page->page->commit, 0); 4581ff0ff84aSSteven Rostedt cpu_buffer->reader_page->real_end = 0; 4582d769041fSSteven Rostedt 458377ae365eSSteven Rostedt spin: 458477ae365eSSteven Rostedt /* 458577ae365eSSteven Rostedt * Splice the empty reader page into the list around the head. 458677ae365eSSteven Rostedt */ 458777ae365eSSteven Rostedt reader = rb_set_head_page(cpu_buffer); 458854f7be5bSSteven Rostedt if (!reader) 458954f7be5bSSteven Rostedt goto out; 45900e1ff5d7SSteven Rostedt cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); 4591d769041fSSteven Rostedt cpu_buffer->reader_page->list.prev = reader->list.prev; 4592bf41a158SSteven Rostedt 45933adc54faSSteven Rostedt /* 45943adc54faSSteven Rostedt * cpu_buffer->pages just needs to point to the buffer, it 45953adc54faSSteven Rostedt * has no specific buffer page to point to. Let's move it out 459625985edcSLucas De Marchi * of our way so we don't accidentally swap it. 45973adc54faSSteven Rostedt */ 45983adc54faSSteven Rostedt cpu_buffer->pages = reader->list.prev; 45993adc54faSSteven Rostedt 460077ae365eSSteven Rostedt /* The reader page will be pointing to the new head */ 46016689bed3SQiujun Huang rb_set_list_to_head(&cpu_buffer->reader_page->list); 4602d769041fSSteven Rostedt 4603d769041fSSteven Rostedt /* 460466a8cb95SSteven Rostedt * We want to make sure we read the overruns after we set up our 460566a8cb95SSteven Rostedt * pointers to the next object. The writer side does a 460666a8cb95SSteven Rostedt * cmpxchg to cross pages which acts as the mb on the writer 460766a8cb95SSteven Rostedt * side. Note, the reader will constantly fail the swap 460866a8cb95SSteven Rostedt * while the writer is updating the pointers, so this 460966a8cb95SSteven Rostedt * guarantees that the overwrite recorded here is the one we 461066a8cb95SSteven Rostedt * want to compare with the last_overrun. 461166a8cb95SSteven Rostedt */ 461266a8cb95SSteven Rostedt smp_mb(); 461366a8cb95SSteven Rostedt overwrite = local_read(&(cpu_buffer->overrun)); 461466a8cb95SSteven Rostedt 461566a8cb95SSteven Rostedt /* 461677ae365eSSteven Rostedt * Here's the tricky part. 461777ae365eSSteven Rostedt * 461877ae365eSSteven Rostedt * We need to move the pointer past the header page. 461977ae365eSSteven Rostedt * But we can only do that if a writer is not currently 462077ae365eSSteven Rostedt * moving it. The page before the header page has the 462177ae365eSSteven Rostedt * flag bit '1' set if it is pointing to the page we want, 462277ae365eSSteven Rostedt * but if the writer is in the process of moving it 462377ae365eSSteven Rostedt * then it will be '2', or '0' if it has already moved. 4624d769041fSSteven Rostedt */ 4625d769041fSSteven Rostedt 462677ae365eSSteven Rostedt ret = rb_head_page_replace(reader, cpu_buffer->reader_page); 462777ae365eSSteven Rostedt 462877ae365eSSteven Rostedt /* 462977ae365eSSteven Rostedt * If we did not convert it, then we must try again. 463077ae365eSSteven Rostedt */ 463177ae365eSSteven Rostedt if (!ret) 463277ae365eSSteven Rostedt goto spin; 463377ae365eSSteven Rostedt 463477ae365eSSteven Rostedt /* 46352c2b0a78SSteven Rostedt (VMware) * Yay! We succeeded in replacing the page. 463677ae365eSSteven Rostedt * 463777ae365eSSteven Rostedt * Now make the new head point back to the reader page.
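 * (The swapped-in page is now linked where the old head was; the old
 * head becomes the new reader page just below, and head_page advances
 * to the page that followed it.)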
463877ae365eSSteven Rostedt */ 46395ded3dc6SDavid Sharp rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; 46406689bed3SQiujun Huang rb_inc_page(&cpu_buffer->head_page); 4641d769041fSSteven Rostedt 46422c2b0a78SSteven Rostedt (VMware) local_inc(&cpu_buffer->pages_read); 46432c2b0a78SSteven Rostedt (VMware) 4644d769041fSSteven Rostedt /* Finally update the reader page to the new head */ 4645d769041fSSteven Rostedt cpu_buffer->reader_page = reader; 4646b81f472aSSteven Rostedt (Red Hat) cpu_buffer->reader_page->read = 0; 4647d769041fSSteven Rostedt 464866a8cb95SSteven Rostedt if (overwrite != cpu_buffer->last_overrun) { 464966a8cb95SSteven Rostedt cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; 465066a8cb95SSteven Rostedt cpu_buffer->last_overrun = overwrite; 465166a8cb95SSteven Rostedt } 465266a8cb95SSteven Rostedt 4653d769041fSSteven Rostedt goto again; 4654d769041fSSteven Rostedt 4655d769041fSSteven Rostedt out: 4656b81f472aSSteven Rostedt (Red Hat) /* Update the read_stamp on the first event */ 4657b81f472aSSteven Rostedt (Red Hat) if (reader && reader->read == 0) 4658b81f472aSSteven Rostedt (Red Hat) cpu_buffer->read_stamp = reader->page->time_stamp; 4659b81f472aSSteven Rostedt (Red Hat) 46600199c4e6SThomas Gleixner arch_spin_unlock(&cpu_buffer->lock); 46613e03fb7fSSteven Rostedt local_irq_restore(flags); 4662d769041fSSteven Rostedt 4663a0fcaaedSSteven Rostedt (Google) /* 4664a0fcaaedSSteven Rostedt (Google) * The writer has preemption disabled; wait for it, but not forever. 4665a0fcaaedSSteven Rostedt (Google) * (1 second is pretty much "forever" for this purpose.) 4666a0fcaaedSSteven Rostedt (Google) */ 4667a0fcaaedSSteven Rostedt (Google) #define USECS_WAIT 1000000 4668a0fcaaedSSteven Rostedt (Google) for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) { 4669a0fcaaedSSteven Rostedt (Google) /* If the write is past the end of the page, a writer is still updating it */ 4670a0fcaaedSSteven Rostedt (Google) if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE)) 4671a0fcaaedSSteven Rostedt (Google) break; 4672a0fcaaedSSteven Rostedt (Google) 4673a0fcaaedSSteven Rostedt (Google) udelay(1); 4674a0fcaaedSSteven Rostedt (Google) 4675a0fcaaedSSteven Rostedt (Google) /* Get the latest version of the reader write value */ 4676a0fcaaedSSteven Rostedt (Google) smp_rmb(); 4677a0fcaaedSSteven Rostedt (Google) } 4678a0fcaaedSSteven Rostedt (Google) 4679a0fcaaedSSteven Rostedt (Google) /* The writer is not moving forward? Something is wrong */ 4680a0fcaaedSSteven Rostedt (Google) if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT)) 4681a0fcaaedSSteven Rostedt (Google) reader = NULL; 4682a0fcaaedSSteven Rostedt (Google) 4683a0fcaaedSSteven Rostedt (Google) /* 4684a0fcaaedSSteven Rostedt (Google) * Make sure we see any padding after the write update 46856455b616SZheng Yejian * (see rb_reset_tail()). 46866455b616SZheng Yejian * 46876455b616SZheng Yejian * In addition, a writer may be writing on the reader page 46886455b616SZheng Yejian * if the page has not been fully filled, so the read barrier 46896455b616SZheng Yejian * is also needed to make sure we see the content of what is 46906455b616SZheng Yejian * committed by the writer (see rb_set_commit_to_write()).
4691a0fcaaedSSteven Rostedt (Google) */ 4692a0fcaaedSSteven Rostedt (Google) smp_rmb(); 4693a0fcaaedSSteven Rostedt (Google) 4694a0fcaaedSSteven Rostedt (Google) 4695d769041fSSteven Rostedt return reader; 46967a8e76a3SSteven Rostedt } 46977a8e76a3SSteven Rostedt 4698d769041fSSteven Rostedt static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) 4699d769041fSSteven Rostedt { 4700d769041fSSteven Rostedt struct ring_buffer_event *event; 4701d769041fSSteven Rostedt struct buffer_page *reader; 4702d769041fSSteven Rostedt unsigned length; 4703d769041fSSteven Rostedt 4704d769041fSSteven Rostedt reader = rb_get_reader_page(cpu_buffer); 4705d769041fSSteven Rostedt 4706d769041fSSteven Rostedt /* This function should not be called when buffer is empty */ 47073e89c7bbSSteven Rostedt if (RB_WARN_ON(cpu_buffer, !reader)) 47083e89c7bbSSteven Rostedt return; 4709d769041fSSteven Rostedt 4710d769041fSSteven Rostedt event = rb_reader_event(cpu_buffer); 47117a8e76a3SSteven Rostedt 4712a1863c21SSteven Rostedt if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 4713e4906effSSteven Rostedt cpu_buffer->read++; 47147a8e76a3SSteven Rostedt 47157a8e76a3SSteven Rostedt rb_update_read_stamp(cpu_buffer, event); 47167a8e76a3SSteven Rostedt 4717d769041fSSteven Rostedt length = rb_event_length(event); 47186f807acdSSteven Rostedt cpu_buffer->reader_page->read += length; 47197a8e76a3SSteven Rostedt } 47207a8e76a3SSteven Rostedt 47217a8e76a3SSteven Rostedt static void rb_advance_iter(struct ring_buffer_iter *iter) 47227a8e76a3SSteven Rostedt { 47237a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 47247a8e76a3SSteven Rostedt 47257a8e76a3SSteven Rostedt cpu_buffer = iter->cpu_buffer; 47267a8e76a3SSteven Rostedt 4727785888c5SSteven Rostedt (VMware) /* If head == next_event then we need to jump to the next event */ 4728785888c5SSteven Rostedt (VMware) if (iter->head == iter->next_event) { 4729785888c5SSteven Rostedt (VMware) /* If the event gets overwritten again, there's nothing to do */ 4730785888c5SSteven Rostedt (VMware) if (rb_iter_head_event(iter) == NULL) 4731785888c5SSteven Rostedt (VMware) return; 4732785888c5SSteven Rostedt (VMware) } 4733785888c5SSteven Rostedt (VMware) 4734785888c5SSteven Rostedt (VMware) iter->head = iter->next_event; 4735785888c5SSteven Rostedt (VMware) 47367a8e76a3SSteven Rostedt /* 47377a8e76a3SSteven Rostedt * Check if we are at the end of the buffer. 
47387a8e76a3SSteven Rostedt */ 4739785888c5SSteven Rostedt (VMware) if (iter->next_event >= rb_page_size(iter->head_page)) { 4740ea05b57cSSteven Rostedt /* discarded commits can make the page empty */ 4741ea05b57cSSteven Rostedt if (iter->head_page == cpu_buffer->commit_page) 47423e89c7bbSSteven Rostedt return; 4743d769041fSSteven Rostedt rb_inc_iter(iter); 47447a8e76a3SSteven Rostedt return; 47457a8e76a3SSteven Rostedt } 47467a8e76a3SSteven Rostedt 4747785888c5SSteven Rostedt (VMware) rb_update_iter_read_stamp(iter, iter->event); 47487a8e76a3SSteven Rostedt } 47497a8e76a3SSteven Rostedt 475066a8cb95SSteven Rostedt static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) 475166a8cb95SSteven Rostedt { 475266a8cb95SSteven Rostedt return cpu_buffer->lost_events; 475366a8cb95SSteven Rostedt } 475466a8cb95SSteven Rostedt 4755f83c9d0fSSteven Rostedt static struct ring_buffer_event * 475666a8cb95SSteven Rostedt rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, 475766a8cb95SSteven Rostedt unsigned long *lost_events) 47587a8e76a3SSteven Rostedt { 47597a8e76a3SSteven Rostedt struct ring_buffer_event *event; 4760d769041fSSteven Rostedt struct buffer_page *reader; 4761818e3dd3SSteven Rostedt int nr_loops = 0; 47627a8e76a3SSteven Rostedt 4763dc4e2801STom Zanussi if (ts) 4764dc4e2801STom Zanussi *ts = 0; 47657a8e76a3SSteven Rostedt again: 4766818e3dd3SSteven Rostedt /* 476769d1b839SSteven Rostedt * We repeat when a time extend is encountered. 476869d1b839SSteven Rostedt * Since the time extend is always attached to a data event, 476969d1b839SSteven Rostedt * we should never loop more than once. 477069d1b839SSteven Rostedt * (We never hit the following condition more than twice). 4771818e3dd3SSteven Rostedt */ 477269d1b839SSteven Rostedt if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) 4773818e3dd3SSteven Rostedt return NULL; 4774818e3dd3SSteven Rostedt 4775d769041fSSteven Rostedt reader = rb_get_reader_page(cpu_buffer); 4776d769041fSSteven Rostedt if (!reader) 47777a8e76a3SSteven Rostedt return NULL; 47787a8e76a3SSteven Rostedt 4779d769041fSSteven Rostedt event = rb_reader_event(cpu_buffer); 47807a8e76a3SSteven Rostedt 4781334d4169SLai Jiangshan switch (event->type_len) { 47827a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 47832d622719STom Zanussi if (rb_null_event(event)) 4784bf41a158SSteven Rostedt RB_WARN_ON(cpu_buffer, 1); 47852d622719STom Zanussi /* 47862d622719STom Zanussi * Because the writer could be discarding every 47872d622719STom Zanussi * event it creates (which would probably be bad) 47882d622719STom Zanussi * if we were to go back to "again" then we may never 47892d622719STom Zanussi * catch up, and will trigger the warn on, or lock 47902d622719STom Zanussi * the box. Return the padding, and we will release 47912d622719STom Zanussi * the current locks, and try again. 
47922d622719STom Zanussi */ 47932d622719STom Zanussi return event; 47947a8e76a3SSteven Rostedt 47957a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 47967a8e76a3SSteven Rostedt /* Internal data, OK to advance */ 4797d769041fSSteven Rostedt rb_advance_reader(cpu_buffer); 47987a8e76a3SSteven Rostedt goto again; 47997a8e76a3SSteven Rostedt 48007a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 4801dc4e2801STom Zanussi if (ts) { 4802e20044f7SSteven Rostedt (VMware) *ts = rb_event_time_stamp(event); 48036695da58SSteven Rostedt (Google) *ts = rb_fix_abs_ts(*ts, reader->page->time_stamp); 4804dc4e2801STom Zanussi ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4805dc4e2801STom Zanussi cpu_buffer->cpu, ts); 4806dc4e2801STom Zanussi } 4807dc4e2801STom Zanussi /* Internal data, OK to advance */ 4808d769041fSSteven Rostedt rb_advance_reader(cpu_buffer); 48097a8e76a3SSteven Rostedt goto again; 48107a8e76a3SSteven Rostedt 48117a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 4812dc4e2801STom Zanussi if (ts && !(*ts)) { 48137a8e76a3SSteven Rostedt *ts = cpu_buffer->read_stamp + event->time_delta; 4814d8eeb2d3SRobert Richter ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 481537886f6aSSteven Rostedt cpu_buffer->cpu, ts); 48167a8e76a3SSteven Rostedt } 481766a8cb95SSteven Rostedt if (lost_events) 481866a8cb95SSteven Rostedt *lost_events = rb_lost_events(cpu_buffer); 48197a8e76a3SSteven Rostedt return event; 48207a8e76a3SSteven Rostedt 48217a8e76a3SSteven Rostedt default: 4822da4d401aSSteven Rostedt (VMware) RB_WARN_ON(cpu_buffer, 1); 48237a8e76a3SSteven Rostedt } 48247a8e76a3SSteven Rostedt 48257a8e76a3SSteven Rostedt return NULL; 48267a8e76a3SSteven Rostedt } 4827c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_peek); 48287a8e76a3SSteven Rostedt 4829f83c9d0fSSteven Rostedt static struct ring_buffer_event * 4830f83c9d0fSSteven Rostedt rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 48317a8e76a3SSteven Rostedt { 483213292494SSteven Rostedt (VMware) struct trace_buffer *buffer; 48337a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 48347a8e76a3SSteven Rostedt struct ring_buffer_event *event; 4835818e3dd3SSteven Rostedt int nr_loops = 0; 48367a8e76a3SSteven Rostedt 4837dc4e2801STom Zanussi if (ts) 4838dc4e2801STom Zanussi *ts = 0; 4839dc4e2801STom Zanussi 48407a8e76a3SSteven Rostedt cpu_buffer = iter->cpu_buffer; 48417a8e76a3SSteven Rostedt buffer = cpu_buffer->buffer; 48427a8e76a3SSteven Rostedt 4843492a74f4SSteven Rostedt /* 48442d093282SZheng Yejian * Check if someone performed a consuming read to the buffer 48452d093282SZheng Yejian * or removed some pages from the buffer. In these cases, 48462d093282SZheng Yejian * iterator was invalidated and we need to reset it. 4847492a74f4SSteven Rostedt */ 4848492a74f4SSteven Rostedt if (unlikely(iter->cache_read != cpu_buffer->read || 48492d093282SZheng Yejian iter->cache_reader_page != cpu_buffer->reader_page || 48502d093282SZheng Yejian iter->cache_pages_removed != cpu_buffer->pages_removed)) 4851492a74f4SSteven Rostedt rb_iter_reset(iter); 4852492a74f4SSteven Rostedt 48537a8e76a3SSteven Rostedt again: 48543c05d748SSteven Rostedt if (ring_buffer_iter_empty(iter)) 48553c05d748SSteven Rostedt return NULL; 48563c05d748SSteven Rostedt 4857818e3dd3SSteven Rostedt /* 48583d2353deSSteven Rostedt (VMware) * As the writer can mess with what the iterator is trying 48593d2353deSSteven Rostedt (VMware) * to read, just give up if we fail to get an event after 48603d2353deSSteven Rostedt (VMware) * three tries. 
The iterator is not as reliable when reading 48613d2353deSSteven Rostedt (VMware) * the ring buffer with an active write as the consumer is. 48623d2353deSSteven Rostedt (VMware) * Do not warn when three failures are reached. 4863818e3dd3SSteven Rostedt */ 48643d2353deSSteven Rostedt (VMware) if (++nr_loops > 3) 4865818e3dd3SSteven Rostedt return NULL; 4866818e3dd3SSteven Rostedt 48677a8e76a3SSteven Rostedt if (rb_per_cpu_empty(cpu_buffer)) 48687a8e76a3SSteven Rostedt return NULL; 48697a8e76a3SSteven Rostedt 487010e83fd0SSteven Rostedt (Red Hat) if (iter->head >= rb_page_size(iter->head_page)) { 48713c05d748SSteven Rostedt rb_inc_iter(iter); 48723c05d748SSteven Rostedt goto again; 48733c05d748SSteven Rostedt } 48743c05d748SSteven Rostedt 48757a8e76a3SSteven Rostedt event = rb_iter_head_event(iter); 48763d2353deSSteven Rostedt (VMware) if (!event) 4877785888c5SSteven Rostedt (VMware) goto again; 48787a8e76a3SSteven Rostedt 4879334d4169SLai Jiangshan switch (event->type_len) { 48807a8e76a3SSteven Rostedt case RINGBUF_TYPE_PADDING: 48812d622719STom Zanussi if (rb_null_event(event)) { 4882d769041fSSteven Rostedt rb_inc_iter(iter); 48837a8e76a3SSteven Rostedt goto again; 48842d622719STom Zanussi } 48852d622719STom Zanussi rb_advance_iter(iter); 48862d622719STom Zanussi return event; 48877a8e76a3SSteven Rostedt 48887a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_EXTEND: 48897a8e76a3SSteven Rostedt /* Internal data, OK to advance */ 48907a8e76a3SSteven Rostedt rb_advance_iter(iter); 48917a8e76a3SSteven Rostedt goto again; 48927a8e76a3SSteven Rostedt 48937a8e76a3SSteven Rostedt case RINGBUF_TYPE_TIME_STAMP: 4894dc4e2801STom Zanussi if (ts) { 4895e20044f7SSteven Rostedt (VMware) *ts = rb_event_time_stamp(event); 48966695da58SSteven Rostedt (Google) *ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp); 4897dc4e2801STom Zanussi ring_buffer_normalize_time_stamp(cpu_buffer->buffer, 4898dc4e2801STom Zanussi cpu_buffer->cpu, ts); 4899dc4e2801STom Zanussi } 4900dc4e2801STom Zanussi /* Internal data, OK to advance */ 49017a8e76a3SSteven Rostedt rb_advance_iter(iter); 49027a8e76a3SSteven Rostedt goto again; 49037a8e76a3SSteven Rostedt 49047a8e76a3SSteven Rostedt case RINGBUF_TYPE_DATA: 4905dc4e2801STom Zanussi if (ts && !(*ts)) { 49067a8e76a3SSteven Rostedt *ts = iter->read_stamp + event->time_delta; 490737886f6aSSteven Rostedt ring_buffer_normalize_time_stamp(buffer, 490837886f6aSSteven Rostedt cpu_buffer->cpu, ts); 49097a8e76a3SSteven Rostedt } 49107a8e76a3SSteven Rostedt return event; 49117a8e76a3SSteven Rostedt 49127a8e76a3SSteven Rostedt default: 4913da4d401aSSteven Rostedt (VMware) RB_WARN_ON(cpu_buffer, 1); 49147a8e76a3SSteven Rostedt } 49157a8e76a3SSteven Rostedt 49167a8e76a3SSteven Rostedt return NULL; 49177a8e76a3SSteven Rostedt } 4918c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); 49197a8e76a3SSteven Rostedt 4920289a5a25SSteven Rostedt (Red Hat) static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) 49218d707e8eSSteven Rostedt { 4922289a5a25SSteven Rostedt (Red Hat) if (likely(!in_nmi())) { 4923289a5a25SSteven Rostedt (Red Hat) raw_spin_lock(&cpu_buffer->reader_lock); 4924289a5a25SSteven Rostedt (Red Hat) return true; 4925289a5a25SSteven Rostedt (Red Hat) } 4926289a5a25SSteven Rostedt (Red Hat) 49278d707e8eSSteven Rostedt /* 49288d707e8eSSteven Rostedt * If an NMI die dumps out the content of the ring buffer, 4929289a5a25SSteven Rostedt (Red Hat) * trylock must be used to prevent a deadlock if the NMI 4930289a5a25SSteven Rostedt (Red Hat) * preempted a
task that holds the ring buffer locks. If 4931289a5a25SSteven Rostedt (Red Hat) * we get the lock then all is fine; if not, then continue 4932289a5a25SSteven Rostedt (Red Hat) * to do the read, but this can corrupt the ring buffer, 4933289a5a25SSteven Rostedt (Red Hat) * so it must be permanently disabled from future writes. 4934289a5a25SSteven Rostedt (Red Hat) * Reading from NMI is a one-shot deal. 49358d707e8eSSteven Rostedt */ 4936289a5a25SSteven Rostedt (Red Hat) if (raw_spin_trylock(&cpu_buffer->reader_lock)) 4937289a5a25SSteven Rostedt (Red Hat) return true; 49388d707e8eSSteven Rostedt 4939289a5a25SSteven Rostedt (Red Hat) /* Continue without locking, but disable the ring buffer */ 4940289a5a25SSteven Rostedt (Red Hat) atomic_inc(&cpu_buffer->record_disabled); 4941289a5a25SSteven Rostedt (Red Hat) return false; 4942289a5a25SSteven Rostedt (Red Hat) } 4943289a5a25SSteven Rostedt (Red Hat) 4944289a5a25SSteven Rostedt (Red Hat) static inline void 4945289a5a25SSteven Rostedt (Red Hat) rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) 4946289a5a25SSteven Rostedt (Red Hat) { 4947289a5a25SSteven Rostedt (Red Hat) if (likely(locked)) 4948289a5a25SSteven Rostedt (Red Hat) raw_spin_unlock(&cpu_buffer->reader_lock); 49498d707e8eSSteven Rostedt } 49508d707e8eSSteven Rostedt 49517a8e76a3SSteven Rostedt /** 4952f83c9d0fSSteven Rostedt * ring_buffer_peek - peek at the next event to be read 4953f83c9d0fSSteven Rostedt * @buffer: The ring buffer to read 4954f83c9d0fSSteven Rostedt * @cpu: The cpu to peek at 4955f83c9d0fSSteven Rostedt * @ts: The timestamp counter of this event. 495666a8cb95SSteven Rostedt * @lost_events: a variable to store if events were lost (may be NULL) 4957f83c9d0fSSteven Rostedt * 4958f83c9d0fSSteven Rostedt * This will return the event that will be read next, but does 4959f83c9d0fSSteven Rostedt * not consume the data.
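 *
 * An illustrative sketch (handle() is a hypothetical consumer):
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, NULL);
 *	if (event)
 *		handle(ring_buffer_event_data(event), ts);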
4960f83c9d0fSSteven Rostedt */ 4961f83c9d0fSSteven Rostedt struct ring_buffer_event * 496213292494SSteven Rostedt (VMware) ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, 496366a8cb95SSteven Rostedt unsigned long *lost_events) 4964f83c9d0fSSteven Rostedt { 4965f83c9d0fSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 49668aabee57SSteven Rostedt struct ring_buffer_event *event; 4967f83c9d0fSSteven Rostedt unsigned long flags; 4968289a5a25SSteven Rostedt (Red Hat) bool dolock; 4969f83c9d0fSSteven Rostedt 4970554f786eSSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask)) 49718aabee57SSteven Rostedt return NULL; 4972554f786eSSteven Rostedt 49732d622719STom Zanussi again: 49748d707e8eSSteven Rostedt local_irq_save(flags); 4975289a5a25SSteven Rostedt (Red Hat) dolock = rb_reader_lock(cpu_buffer); 497666a8cb95SSteven Rostedt event = rb_buffer_peek(cpu_buffer, ts, lost_events); 4977469535a5SRobert Richter if (event && event->type_len == RINGBUF_TYPE_PADDING) 4978469535a5SRobert Richter rb_advance_reader(cpu_buffer); 4979289a5a25SSteven Rostedt (Red Hat) rb_reader_unlock(cpu_buffer, dolock); 49808d707e8eSSteven Rostedt local_irq_restore(flags); 4981f83c9d0fSSteven Rostedt 49821b959e18SSteven Rostedt if (event && event->type_len == RINGBUF_TYPE_PADDING) 49832d622719STom Zanussi goto again; 49842d622719STom Zanussi 4985f83c9d0fSSteven Rostedt return event; 4986f83c9d0fSSteven Rostedt } 4987f83c9d0fSSteven Rostedt 4988c9b7a4a7SSteven Rostedt (VMware) /** ring_buffer_iter_dropped - report if there are dropped events 4989c9b7a4a7SSteven Rostedt (VMware) * @iter: The ring buffer iterator 4990c9b7a4a7SSteven Rostedt (VMware) * 4991c9b7a4a7SSteven Rostedt (VMware) * Returns true if there were dropped events since the last peek. 4992c9b7a4a7SSteven Rostedt (VMware) */ 4993c9b7a4a7SSteven Rostedt (VMware) bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter) 4994c9b7a4a7SSteven Rostedt (VMware) { 4995c9b7a4a7SSteven Rostedt (VMware) bool ret = iter->missed_events != 0; 4996c9b7a4a7SSteven Rostedt (VMware) 4997c9b7a4a7SSteven Rostedt (VMware) iter->missed_events = 0; 4998c9b7a4a7SSteven Rostedt (VMware) return ret; 4999c9b7a4a7SSteven Rostedt (VMware) } 5000c9b7a4a7SSteven Rostedt (VMware) EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped); 5001c9b7a4a7SSteven Rostedt (VMware) 5002f83c9d0fSSteven Rostedt /** 5003f83c9d0fSSteven Rostedt * ring_buffer_iter_peek - peek at the next event to be read 5004f83c9d0fSSteven Rostedt * @iter: The ring buffer iterator 5005f83c9d0fSSteven Rostedt * @ts: The timestamp counter of this event. 5006f83c9d0fSSteven Rostedt * 5007f83c9d0fSSteven Rostedt * This will return the event that will be read next, but does 5008f83c9d0fSSteven Rostedt * not increment the iterator.
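 *
 * An illustrative non-consuming read loop, using the prepare/sync/
 * start/finish API documented further below (declarations and error
 * handling omitted):
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts)))
 *		ring_buffer_iter_advance(iter);
 *	ring_buffer_read_finish(iter);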
5009f83c9d0fSSteven Rostedt */ 5010f83c9d0fSSteven Rostedt struct ring_buffer_event * 5011f83c9d0fSSteven Rostedt ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 5012f83c9d0fSSteven Rostedt { 5013f83c9d0fSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 5014f83c9d0fSSteven Rostedt struct ring_buffer_event *event; 5015f83c9d0fSSteven Rostedt unsigned long flags; 5016f83c9d0fSSteven Rostedt 50172d622719STom Zanussi again: 50185389f6faSThomas Gleixner raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5019f83c9d0fSSteven Rostedt event = rb_iter_peek(iter, ts); 50205389f6faSThomas Gleixner raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5021f83c9d0fSSteven Rostedt 50221b959e18SSteven Rostedt if (event && event->type_len == RINGBUF_TYPE_PADDING) 50232d622719STom Zanussi goto again; 50242d622719STom Zanussi 5025f83c9d0fSSteven Rostedt return event; 5026f83c9d0fSSteven Rostedt } 5027f83c9d0fSSteven Rostedt 5028f83c9d0fSSteven Rostedt /** 50297a8e76a3SSteven Rostedt * ring_buffer_consume - return an event and consume it 50307a8e76a3SSteven Rostedt * @buffer: The ring buffer to get the next event from 503166a8cb95SSteven Rostedt * @cpu: the cpu to read the buffer from 503266a8cb95SSteven Rostedt * @ts: a variable to store the timestamp (may be NULL) 503366a8cb95SSteven Rostedt * @lost_events: a variable to store if events were lost (may be NULL) 50347a8e76a3SSteven Rostedt * 50357a8e76a3SSteven Rostedt * Returns the next event in the ring buffer, and that event is consumed. 50367a8e76a3SSteven Rostedt * Meaning, that sequential reads will keep returning a different event, 50377a8e76a3SSteven Rostedt * and eventually empty the ring buffer if the producer is slower. 50387a8e76a3SSteven Rostedt */ 50397a8e76a3SSteven Rostedt struct ring_buffer_event * 504013292494SSteven Rostedt (VMware) ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, 504166a8cb95SSteven Rostedt unsigned long *lost_events) 50427a8e76a3SSteven Rostedt { 5043554f786eSSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 5044554f786eSSteven Rostedt struct ring_buffer_event *event = NULL; 5045f83c9d0fSSteven Rostedt unsigned long flags; 5046289a5a25SSteven Rostedt (Red Hat) bool dolock; 50477a8e76a3SSteven Rostedt 50482d622719STom Zanussi again: 5049554f786eSSteven Rostedt /* might be called in atomic */ 5050554f786eSSteven Rostedt preempt_disable(); 50517a8e76a3SSteven Rostedt 5052554f786eSSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5053554f786eSSteven Rostedt goto out; 5054554f786eSSteven Rostedt 5055554f786eSSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 50568d707e8eSSteven Rostedt local_irq_save(flags); 5057289a5a25SSteven Rostedt (Red Hat) dolock = rb_reader_lock(cpu_buffer); 50587a8e76a3SSteven Rostedt 505966a8cb95SSteven Rostedt event = rb_buffer_peek(cpu_buffer, ts, lost_events); 506066a8cb95SSteven Rostedt if (event) { 506166a8cb95SSteven Rostedt cpu_buffer->lost_events = 0; 5062d769041fSSteven Rostedt rb_advance_reader(cpu_buffer); 506366a8cb95SSteven Rostedt } 50647a8e76a3SSteven Rostedt 5065289a5a25SSteven Rostedt (Red Hat) rb_reader_unlock(cpu_buffer, dolock); 50668d707e8eSSteven Rostedt local_irq_restore(flags); 5067f83c9d0fSSteven Rostedt 5068554f786eSSteven Rostedt out: 5069554f786eSSteven Rostedt preempt_enable(); 5070554f786eSSteven Rostedt 50711b959e18SSteven Rostedt if (event && event->type_len == RINGBUF_TYPE_PADDING) 50722d622719STom Zanussi goto again; 50732d622719STom Zanussi 50747a8e76a3SSteven Rostedt return event; 
50757a8e76a3SSteven Rostedt } 5076c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_consume); 50777a8e76a3SSteven Rostedt 50787a8e76a3SSteven Rostedt /** 507972c9ddfdSDavid Miller * ring_buffer_read_prepare - Prepare for a non-consuming read of the buffer 50807a8e76a3SSteven Rostedt * @buffer: The ring buffer to read from 50817a8e76a3SSteven Rostedt * @cpu: The cpu buffer to iterate over 508231b265b3SDouglas Anderson * @flags: gfp flags to use for memory allocation 50837a8e76a3SSteven Rostedt * 508472c9ddfdSDavid Miller * This performs the initial preparations necessary to iterate 508572c9ddfdSDavid Miller * through the buffer. Memory is allocated, buffer recording 508672c9ddfdSDavid Miller * is disabled, and the iterator pointer is returned to the caller. 50877a8e76a3SSteven Rostedt * 50886167c205SSteven Rostedt (VMware) * Disabling buffer recording prevents the reading from being 508972c9ddfdSDavid Miller * corrupted. This is not a consuming read, so a producer is not 509072c9ddfdSDavid Miller * expected. 509172c9ddfdSDavid Miller * 509272c9ddfdSDavid Miller * After a sequence of ring_buffer_read_prepare calls, the user is 5093d611851bSzhangwei(Jovi) * expected to make at least one call to ring_buffer_read_prepare_sync. 509472c9ddfdSDavid Miller * Afterwards, ring_buffer_read_start is invoked to get things going 509572c9ddfdSDavid Miller * for real. 509672c9ddfdSDavid Miller * 5097d611851bSzhangwei(Jovi) * Overall, this must be paired with ring_buffer_read_finish. 50987a8e76a3SSteven Rostedt */ 50997a8e76a3SSteven Rostedt struct ring_buffer_iter * 510013292494SSteven Rostedt (VMware) ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags) 51017a8e76a3SSteven Rostedt { 51027a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer; 51038aabee57SSteven Rostedt struct ring_buffer_iter *iter; 51047a8e76a3SSteven Rostedt 51059e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer->cpumask)) 51068aabee57SSteven Rostedt return NULL; 51077a8e76a3SSteven Rostedt 5108785888c5SSteven Rostedt (VMware) iter = kzalloc(sizeof(*iter), flags); 51097a8e76a3SSteven Rostedt if (!iter) 51108aabee57SSteven Rostedt return NULL; 51117a8e76a3SSteven Rostedt 5112785888c5SSteven Rostedt (VMware) iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags); 5113785888c5SSteven Rostedt (VMware) if (!iter->event) { 5114785888c5SSteven Rostedt (VMware) kfree(iter); 5115785888c5SSteven Rostedt (VMware) return NULL; 5116785888c5SSteven Rostedt (VMware) } 5117785888c5SSteven Rostedt (VMware) 51187a8e76a3SSteven Rostedt cpu_buffer = buffer->buffers[cpu]; 51197a8e76a3SSteven Rostedt 51207a8e76a3SSteven Rostedt iter->cpu_buffer = cpu_buffer; 51217a8e76a3SSteven Rostedt 512207b8b10eSSteven Rostedt (VMware) atomic_inc(&cpu_buffer->resize_disabled); 512372c9ddfdSDavid Miller 512472c9ddfdSDavid Miller return iter; 512572c9ddfdSDavid Miller } 512672c9ddfdSDavid Miller EXPORT_SYMBOL_GPL(ring_buffer_read_prepare); 512772c9ddfdSDavid Miller 512872c9ddfdSDavid Miller /** 512972c9ddfdSDavid Miller * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls 513072c9ddfdSDavid Miller * 513172c9ddfdSDavid Miller * All previously invoked ring_buffer_read_prepare calls to prepare 513272c9ddfdSDavid Miller * iterators will be synchronized. Afterwards, ring_buffer_read_start 513372c9ddfdSDavid Miller * calls on those iterators are allowed. 513472c9ddfdSDavid Miller */ 513572c9ddfdSDavid Miller void 513672c9ddfdSDavid Miller ring_buffer_read_prepare_sync(void) 513772c9ddfdSDavid Miller { 513874401729SPaul E.
513874401729SPaul E. McKenney 	synchronize_rcu();
513972c9ddfdSDavid Miller }
514072c9ddfdSDavid Miller EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
514172c9ddfdSDavid Miller 
514272c9ddfdSDavid Miller /**
514372c9ddfdSDavid Miller  * ring_buffer_read_start - start a non consuming read of the buffer
514472c9ddfdSDavid Miller  * @iter: The iterator returned by ring_buffer_read_prepare
514572c9ddfdSDavid Miller  *
514672c9ddfdSDavid Miller  * This finalizes the startup of an iteration through the buffer.
514772c9ddfdSDavid Miller  * The iterator comes from a call to ring_buffer_read_prepare and
514872c9ddfdSDavid Miller  * an intervening ring_buffer_read_prepare_sync must have been
514972c9ddfdSDavid Miller  * performed.
515072c9ddfdSDavid Miller  *
5151d611851bSzhangwei(Jovi)  * Must be paired with ring_buffer_read_finish.
515272c9ddfdSDavid Miller  */
515372c9ddfdSDavid Miller void
515472c9ddfdSDavid Miller ring_buffer_read_start(struct ring_buffer_iter *iter)
515572c9ddfdSDavid Miller {
515672c9ddfdSDavid Miller 	struct ring_buffer_per_cpu *cpu_buffer;
515772c9ddfdSDavid Miller 	unsigned long flags;
515872c9ddfdSDavid Miller 
515972c9ddfdSDavid Miller 	if (!iter)
516072c9ddfdSDavid Miller 		return;
516172c9ddfdSDavid Miller 
516272c9ddfdSDavid Miller 	cpu_buffer = iter->cpu_buffer;
51637a8e76a3SSteven Rostedt 
51645389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
51650199c4e6SThomas Gleixner 	arch_spin_lock(&cpu_buffer->lock);
5166642edba5SSteven Rostedt 	rb_iter_reset(iter);
51670199c4e6SThomas Gleixner 	arch_spin_unlock(&cpu_buffer->lock);
51685389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
51697a8e76a3SSteven Rostedt }
5170c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_start);
51717a8e76a3SSteven Rostedt 
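/*
 * A minimal sketch of the whole non-consuming read sequence (one CPU
 * shown, error handling elided; "buffer" and "cpu" are assumed to be
 * valid, and ring_buffer_read_finish() is documented just below):
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts)))
 *		ring_buffer_iter_advance(iter);
 *	ring_buffer_read_finish(iter);
 */
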
51727a8e76a3SSteven Rostedt /**
5173d611851bSzhangwei(Jovi)  * ring_buffer_read_finish - finish reading the iterator of the buffer
51747a8e76a3SSteven Rostedt  * @iter: The iterator retrieved by ring_buffer_read_prepare
51757a8e76a3SSteven Rostedt  *
51767a8e76a3SSteven Rostedt  * This re-enables the recording to the buffer, and frees the
51777a8e76a3SSteven Rostedt  * iterator.
51787a8e76a3SSteven Rostedt  */
51797a8e76a3SSteven Rostedt void
51807a8e76a3SSteven Rostedt ring_buffer_read_finish(struct ring_buffer_iter *iter)
51817a8e76a3SSteven Rostedt {
51827a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
51839366c1baSSteven Rostedt 	unsigned long flags;
51847a8e76a3SSteven Rostedt 
5185659f451fSSteven Rostedt 	/*
5186659f451fSSteven Rostedt 	 * Ring buffer is disabled from recording, here's a good place
5187659f451fSSteven Rostedt 	 * to check the integrity of the ring buffer.
51889366c1baSSteven Rostedt 	 * Must prevent readers from trying to read, as the check
51899366c1baSSteven Rostedt 	 * clears the HEAD page and readers require it.
5190659f451fSSteven Rostedt 	 */
51919366c1baSSteven Rostedt 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5192659f451fSSteven Rostedt 	rb_check_pages(cpu_buffer);
51939366c1baSSteven Rostedt 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5194659f451fSSteven Rostedt 
519507b8b10eSSteven Rostedt (VMware) 	atomic_dec(&cpu_buffer->resize_disabled);
5196785888c5SSteven Rostedt (VMware) 	kfree(iter->event);
51977a8e76a3SSteven Rostedt 	kfree(iter);
51987a8e76a3SSteven Rostedt }
5199c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
52007a8e76a3SSteven Rostedt 
52017a8e76a3SSteven Rostedt /**
5202bc1a72afSSteven Rostedt (VMware)  * ring_buffer_iter_advance - advance the iterator to the next location
52037a8e76a3SSteven Rostedt  * @iter: The ring buffer iterator
52047a8e76a3SSteven Rostedt  *
5205bc1a72afSSteven Rostedt (VMware)  * Move the location of the iterator such that the next read will
5206bc1a72afSSteven Rostedt (VMware)  * be the next location of the iterator.
52077a8e76a3SSteven Rostedt  */
5208bc1a72afSSteven Rostedt (VMware) void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
52097a8e76a3SSteven Rostedt {
5210f83c9d0fSSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5211f83c9d0fSSteven Rostedt 	unsigned long flags;
52127a8e76a3SSteven Rostedt 
52135389f6faSThomas Gleixner 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
52147e9391cfSSteven Rostedt 
52157a8e76a3SSteven Rostedt 	rb_advance_iter(iter);
52167a8e76a3SSteven Rostedt 
5217bc1a72afSSteven Rostedt (VMware) 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
52187a8e76a3SSteven Rostedt }
5219bc1a72afSSteven Rostedt (VMware) EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
52207a8e76a3SSteven Rostedt 
52217a8e76a3SSteven Rostedt /**
52227a8e76a3SSteven Rostedt  * ring_buffer_size - return the size of the ring buffer (in bytes)
52237a8e76a3SSteven Rostedt  * @buffer: The ring buffer.
522459e7cffeSFabian Frederick  * @cpu: The CPU to get ring buffer size from.
52257a8e76a3SSteven Rostedt  */
522613292494SSteven Rostedt (VMware) unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
52277a8e76a3SSteven Rostedt {
5228438ced17SVaibhav Nagarnaik 	/*
5229438ced17SVaibhav Nagarnaik 	 * Earlier, this method returned
5230438ced17SVaibhav Nagarnaik 	 *	BUF_PAGE_SIZE * buffer->nr_pages
5231438ced17SVaibhav Nagarnaik 	 * Since the nr_pages field is now removed, we have converted this to
5232438ced17SVaibhav Nagarnaik 	 * return the per cpu buffer value.
5233438ced17SVaibhav Nagarnaik */ 5234438ced17SVaibhav Nagarnaik if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5235438ced17SVaibhav Nagarnaik return 0; 5236438ced17SVaibhav Nagarnaik 5237438ced17SVaibhav Nagarnaik return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; 52387a8e76a3SSteven Rostedt } 5239c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_size); 52407a8e76a3SSteven Rostedt 52417e42907fSZheng Yejian static void rb_clear_buffer_page(struct buffer_page *page) 52427e42907fSZheng Yejian { 52437e42907fSZheng Yejian local_set(&page->write, 0); 52447e42907fSZheng Yejian local_set(&page->entries, 0); 52457e42907fSZheng Yejian rb_init_page(page->page); 52467e42907fSZheng Yejian page->read = 0; 52477e42907fSZheng Yejian } 52487e42907fSZheng Yejian 52497a8e76a3SSteven Rostedt static void 52507a8e76a3SSteven Rostedt rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 52517a8e76a3SSteven Rostedt { 52527e42907fSZheng Yejian struct buffer_page *page; 52537e42907fSZheng Yejian 525477ae365eSSteven Rostedt rb_head_page_deactivate(cpu_buffer); 525577ae365eSSteven Rostedt 52567a8e76a3SSteven Rostedt cpu_buffer->head_page 52573adc54faSSteven Rostedt = list_entry(cpu_buffer->pages, struct buffer_page, list); 52587e42907fSZheng Yejian rb_clear_buffer_page(cpu_buffer->head_page); 52597e42907fSZheng Yejian list_for_each_entry(page, cpu_buffer->pages, list) { 52607e42907fSZheng Yejian rb_clear_buffer_page(page); 52617e42907fSZheng Yejian } 5262bf41a158SSteven Rostedt 5263bf41a158SSteven Rostedt cpu_buffer->tail_page = cpu_buffer->head_page; 5264bf41a158SSteven Rostedt cpu_buffer->commit_page = cpu_buffer->head_page; 5265bf41a158SSteven Rostedt 5266bf41a158SSteven Rostedt INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 52675040b4b7SVaibhav Nagarnaik INIT_LIST_HEAD(&cpu_buffer->new_pages); 52687e42907fSZheng Yejian rb_clear_buffer_page(cpu_buffer->reader_page); 5269d769041fSSteven Rostedt 5270c64e148aSVaibhav Nagarnaik local_set(&cpu_buffer->entries_bytes, 0); 527177ae365eSSteven Rostedt local_set(&cpu_buffer->overrun, 0); 5272884bfe89SSlava Pestov local_set(&cpu_buffer->commit_overrun, 0); 5273884bfe89SSlava Pestov local_set(&cpu_buffer->dropped_events, 0); 5274e4906effSSteven Rostedt local_set(&cpu_buffer->entries, 0); 5275fa743953SSteven Rostedt local_set(&cpu_buffer->committing, 0); 5276fa743953SSteven Rostedt local_set(&cpu_buffer->commits, 0); 52772c2b0a78SSteven Rostedt (VMware) local_set(&cpu_buffer->pages_touched, 0); 527831029a8bSSteven Rostedt (Google) local_set(&cpu_buffer->pages_lost, 0); 52792c2b0a78SSteven Rostedt (VMware) local_set(&cpu_buffer->pages_read, 0); 528003329f99SSteven Rostedt (VMware) cpu_buffer->last_pages_touch = 0; 52812c2b0a78SSteven Rostedt (VMware) cpu_buffer->shortest_full = 0; 528277ae365eSSteven Rostedt cpu_buffer->read = 0; 5283c64e148aSVaibhav Nagarnaik cpu_buffer->read_bytes = 0; 528469507c06SSteven Rostedt 528510464b4aSSteven Rostedt (VMware) rb_time_set(&cpu_buffer->write_stamp, 0); 528610464b4aSSteven Rostedt (VMware) rb_time_set(&cpu_buffer->before_stamp, 0); 528777ae365eSSteven Rostedt 52888672e494SSteven Rostedt (VMware) memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); 52898672e494SSteven Rostedt (VMware) 529066a8cb95SSteven Rostedt cpu_buffer->lost_events = 0; 529166a8cb95SSteven Rostedt cpu_buffer->last_overrun = 0; 529266a8cb95SSteven Rostedt 529377ae365eSSteven Rostedt rb_head_page_activate(cpu_buffer); 52942d093282SZheng Yejian cpu_buffer->pages_removed = 0; 52957a8e76a3SSteven Rostedt } 52967a8e76a3SSteven Rostedt 
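/*
 * A descriptive sketch of the reset protocol implemented by the functions
 * below (the ordering is what matters; this is not additional API):
 *
 *	atomic_inc(&cpu_buffer->resize_disabled);
 *	atomic_inc(&cpu_buffer->record_disabled);
 *	synchronize_rcu();		wait for all in-flight commits
 *	reset_disabled_cpu_buffer();	rb_reset_cpu() under the locks
 *	atomic_dec(&cpu_buffer->record_disabled);
 *	atomic_dec(&cpu_buffer->resize_disabled);
 */
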
5297b23d7a5fSNicholas Piggin /* Must have disabled the cpu buffer then done a synchronize_rcu */ 5298b23d7a5fSNicholas Piggin static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 52997a8e76a3SSteven Rostedt { 53007a8e76a3SSteven Rostedt unsigned long flags; 53017a8e76a3SSteven Rostedt 53025389f6faSThomas Gleixner raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 5303f83c9d0fSSteven Rostedt 530441b6a95dSSteven Rostedt if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 530541b6a95dSSteven Rostedt goto out; 530641b6a95dSSteven Rostedt 53070199c4e6SThomas Gleixner arch_spin_lock(&cpu_buffer->lock); 53087a8e76a3SSteven Rostedt 53097a8e76a3SSteven Rostedt rb_reset_cpu(cpu_buffer); 53107a8e76a3SSteven Rostedt 53110199c4e6SThomas Gleixner arch_spin_unlock(&cpu_buffer->lock); 5312f83c9d0fSSteven Rostedt 531341b6a95dSSteven Rostedt out: 53145389f6faSThomas Gleixner raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 5315b23d7a5fSNicholas Piggin } 5316b23d7a5fSNicholas Piggin 5317b23d7a5fSNicholas Piggin /** 5318b23d7a5fSNicholas Piggin * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer 5319b23d7a5fSNicholas Piggin * @buffer: The ring buffer to reset a per cpu buffer of 5320b23d7a5fSNicholas Piggin * @cpu: The CPU buffer to be reset 5321b23d7a5fSNicholas Piggin */ 5322b23d7a5fSNicholas Piggin void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) 5323b23d7a5fSNicholas Piggin { 5324b23d7a5fSNicholas Piggin struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 5325b23d7a5fSNicholas Piggin 5326b23d7a5fSNicholas Piggin if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5327b23d7a5fSNicholas Piggin return; 5328b23d7a5fSNicholas Piggin 5329bbeb9746SGaurav Kohli /* prevent another thread from changing buffer sizes */ 5330bbeb9746SGaurav Kohli mutex_lock(&buffer->mutex); 5331bbeb9746SGaurav Kohli 5332b23d7a5fSNicholas Piggin atomic_inc(&cpu_buffer->resize_disabled); 5333b23d7a5fSNicholas Piggin atomic_inc(&cpu_buffer->record_disabled); 5334b23d7a5fSNicholas Piggin 5335b23d7a5fSNicholas Piggin /* Make sure all commits have finished */ 5336b23d7a5fSNicholas Piggin synchronize_rcu(); 5337b23d7a5fSNicholas Piggin 5338b23d7a5fSNicholas Piggin reset_disabled_cpu_buffer(cpu_buffer); 533941ede23eSSteven Rostedt 534041ede23eSSteven Rostedt atomic_dec(&cpu_buffer->record_disabled); 534107b8b10eSSteven Rostedt (VMware) atomic_dec(&cpu_buffer->resize_disabled); 5342bbeb9746SGaurav Kohli 5343bbeb9746SGaurav Kohli mutex_unlock(&buffer->mutex); 53447a8e76a3SSteven Rostedt } 5345c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); 53467a8e76a3SSteven Rostedt 53477c339fb4STze-nan Wu /* Flag to ensure proper resetting of atomic variables */ 53487c339fb4STze-nan Wu #define RESET_BIT (1 << 30) 53497c339fb4STze-nan Wu 53507a8e76a3SSteven Rostedt /** 5351b7085b6fSJiapeng Chong * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer 5352b23d7a5fSNicholas Piggin * @buffer: The ring buffer to reset a per cpu buffer of 5353b23d7a5fSNicholas Piggin */ 5354b23d7a5fSNicholas Piggin void ring_buffer_reset_online_cpus(struct trace_buffer *buffer) 5355b23d7a5fSNicholas Piggin { 5356b23d7a5fSNicholas Piggin struct ring_buffer_per_cpu *cpu_buffer; 5357b23d7a5fSNicholas Piggin int cpu; 5358b23d7a5fSNicholas Piggin 5359bbeb9746SGaurav Kohli /* prevent another thread from changing buffer sizes */ 5360bbeb9746SGaurav Kohli mutex_lock(&buffer->mutex); 5361bbeb9746SGaurav Kohli 5362b23d7a5fSNicholas Piggin for_each_online_buffer_cpu(buffer, cpu) 
{ 5363b23d7a5fSNicholas Piggin cpu_buffer = buffer->buffers[cpu]; 5364b23d7a5fSNicholas Piggin 53657c339fb4STze-nan Wu atomic_add(RESET_BIT, &cpu_buffer->resize_disabled); 5366b23d7a5fSNicholas Piggin atomic_inc(&cpu_buffer->record_disabled); 5367b23d7a5fSNicholas Piggin } 5368b23d7a5fSNicholas Piggin 5369b23d7a5fSNicholas Piggin /* Make sure all commits have finished */ 5370b23d7a5fSNicholas Piggin synchronize_rcu(); 5371b23d7a5fSNicholas Piggin 53727c339fb4STze-nan Wu for_each_buffer_cpu(buffer, cpu) { 5373b23d7a5fSNicholas Piggin cpu_buffer = buffer->buffers[cpu]; 5374b23d7a5fSNicholas Piggin 53757c339fb4STze-nan Wu /* 53767c339fb4STze-nan Wu * If a CPU came online during the synchronize_rcu(), then 53777c339fb4STze-nan Wu * ignore it. 53787c339fb4STze-nan Wu */ 53797c339fb4STze-nan Wu if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT)) 53807c339fb4STze-nan Wu continue; 53817c339fb4STze-nan Wu 5382b23d7a5fSNicholas Piggin reset_disabled_cpu_buffer(cpu_buffer); 5383b23d7a5fSNicholas Piggin 5384b23d7a5fSNicholas Piggin atomic_dec(&cpu_buffer->record_disabled); 53857c339fb4STze-nan Wu atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled); 5386b23d7a5fSNicholas Piggin } 5387bbeb9746SGaurav Kohli 5388bbeb9746SGaurav Kohli mutex_unlock(&buffer->mutex); 5389b23d7a5fSNicholas Piggin } 5390b23d7a5fSNicholas Piggin 5391b23d7a5fSNicholas Piggin /** 53927a8e76a3SSteven Rostedt * ring_buffer_reset - reset a ring buffer 53937a8e76a3SSteven Rostedt * @buffer: The ring buffer to reset all cpu buffers 53947a8e76a3SSteven Rostedt */ 539513292494SSteven Rostedt (VMware) void ring_buffer_reset(struct trace_buffer *buffer) 53967a8e76a3SSteven Rostedt { 5397b23d7a5fSNicholas Piggin struct ring_buffer_per_cpu *cpu_buffer; 53987a8e76a3SSteven Rostedt int cpu; 53997a8e76a3SSteven Rostedt 540051d15794SSteven Rostedt (VMware) /* prevent another thread from changing buffer sizes */ 540151d15794SSteven Rostedt (VMware) mutex_lock(&buffer->mutex); 540251d15794SSteven Rostedt (VMware) 5403b23d7a5fSNicholas Piggin for_each_buffer_cpu(buffer, cpu) { 5404b23d7a5fSNicholas Piggin cpu_buffer = buffer->buffers[cpu]; 5405b23d7a5fSNicholas Piggin 5406b23d7a5fSNicholas Piggin atomic_inc(&cpu_buffer->resize_disabled); 5407b23d7a5fSNicholas Piggin atomic_inc(&cpu_buffer->record_disabled); 5408b23d7a5fSNicholas Piggin } 5409b23d7a5fSNicholas Piggin 5410b23d7a5fSNicholas Piggin /* Make sure all commits have finished */ 5411b23d7a5fSNicholas Piggin synchronize_rcu(); 5412b23d7a5fSNicholas Piggin 5413b23d7a5fSNicholas Piggin for_each_buffer_cpu(buffer, cpu) { 5414b23d7a5fSNicholas Piggin cpu_buffer = buffer->buffers[cpu]; 5415b23d7a5fSNicholas Piggin 5416b23d7a5fSNicholas Piggin reset_disabled_cpu_buffer(cpu_buffer); 5417b23d7a5fSNicholas Piggin 5418b23d7a5fSNicholas Piggin atomic_dec(&cpu_buffer->record_disabled); 5419b23d7a5fSNicholas Piggin atomic_dec(&cpu_buffer->resize_disabled); 5420b23d7a5fSNicholas Piggin } 542151d15794SSteven Rostedt (VMware) 542251d15794SSteven Rostedt (VMware) mutex_unlock(&buffer->mutex); 54237a8e76a3SSteven Rostedt } 5424c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_reset); 54257a8e76a3SSteven Rostedt 54267a8e76a3SSteven Rostedt /** 5427b7085b6fSJiapeng Chong * ring_buffer_empty - is the ring buffer empty? 
54287a8e76a3SSteven Rostedt  * @buffer: The ring buffer to test
54297a8e76a3SSteven Rostedt  */
543013292494SSteven Rostedt (VMware) bool ring_buffer_empty(struct trace_buffer *buffer)
54317a8e76a3SSteven Rostedt {
54327a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
5433d4788207SSteven Rostedt 	unsigned long flags;
5434289a5a25SSteven Rostedt (Red Hat) 	bool dolock;
5435bc92b956SUros Bizjak 	bool ret;
54367a8e76a3SSteven Rostedt 	int cpu;
54377a8e76a3SSteven Rostedt 
54387a8e76a3SSteven Rostedt 	/* yes this is racy, but if you don't like the race, lock the buffer */
54397a8e76a3SSteven Rostedt 	for_each_buffer_cpu(buffer, cpu) {
54407a8e76a3SSteven Rostedt 		cpu_buffer = buffer->buffers[cpu];
54418d707e8eSSteven Rostedt 		local_irq_save(flags);
5442289a5a25SSteven Rostedt (Red Hat) 		dolock = rb_reader_lock(cpu_buffer);
5443d4788207SSteven Rostedt 		ret = rb_per_cpu_empty(cpu_buffer);
5444289a5a25SSteven Rostedt (Red Hat) 		rb_reader_unlock(cpu_buffer, dolock);
54458d707e8eSSteven Rostedt 		local_irq_restore(flags);
54468d707e8eSSteven Rostedt 
5447d4788207SSteven Rostedt 		if (!ret)
54483d4e204dSYaowei Bai 			return false;
54497a8e76a3SSteven Rostedt 	}
5450554f786eSSteven Rostedt 
54513d4e204dSYaowei Bai 	return true;
54527a8e76a3SSteven Rostedt }
5453c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty);
54547a8e76a3SSteven Rostedt 
54557a8e76a3SSteven Rostedt /**
54567a8e76a3SSteven Rostedt  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
54577a8e76a3SSteven Rostedt  * @buffer: The ring buffer
54587a8e76a3SSteven Rostedt  * @cpu: The CPU buffer to test
54597a8e76a3SSteven Rostedt  */
546013292494SSteven Rostedt (VMware) bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
54617a8e76a3SSteven Rostedt {
54627a8e76a3SSteven Rostedt 	struct ring_buffer_per_cpu *cpu_buffer;
5463d4788207SSteven Rostedt 	unsigned long flags;
5464289a5a25SSteven Rostedt (Red Hat) 	bool dolock;
5465bc92b956SUros Bizjak 	bool ret;
54667a8e76a3SSteven Rostedt 
54679e01c1b7SRusty Russell 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
54683d4e204dSYaowei Bai 		return true;
54697a8e76a3SSteven Rostedt 
54707a8e76a3SSteven Rostedt 	cpu_buffer = buffer->buffers[cpu];
54718d707e8eSSteven Rostedt 	local_irq_save(flags);
5472289a5a25SSteven Rostedt (Red Hat) 	dolock = rb_reader_lock(cpu_buffer);
5473554f786eSSteven Rostedt 	ret = rb_per_cpu_empty(cpu_buffer);
5474289a5a25SSteven Rostedt (Red Hat) 	rb_reader_unlock(cpu_buffer, dolock);
54758d707e8eSSteven Rostedt 	local_irq_restore(flags);
5476554f786eSSteven Rostedt 
5477554f786eSSteven Rostedt 	return ret;
54787a8e76a3SSteven Rostedt }
5479c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
54807a8e76a3SSteven Rostedt 
548185bac32cSSteven Rostedt #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
54827a8e76a3SSteven Rostedt /**
54837a8e76a3SSteven Rostedt  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
54847a8e76a3SSteven Rostedt  * @buffer_a: One buffer to swap with
54857a8e76a3SSteven Rostedt  * @buffer_b: The other buffer to swap with
548659e7cffeSFabian Frederick  * @cpu: the CPU of the buffers to swap
54877a8e76a3SSteven Rostedt  *
54887a8e76a3SSteven Rostedt  * This function is useful for tracers that want to take a "snapshot"
54897a8e76a3SSteven Rostedt  * of a CPU buffer and have another backup buffer lying around.
54907a8e76a3SSteven Rostedt  * It is expected that the tracer handles the cpu buffer not being
54917a8e76a3SSteven Rostedt  * used at the moment.
54927a8e76a3SSteven Rostedt */ 549313292494SSteven Rostedt (VMware) int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, 549413292494SSteven Rostedt (VMware) struct trace_buffer *buffer_b, int cpu) 54957a8e76a3SSteven Rostedt { 54967a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer_a; 54977a8e76a3SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer_b; 5498554f786eSSteven Rostedt int ret = -EINVAL; 5499554f786eSSteven Rostedt 55009e01c1b7SRusty Russell if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || 55019e01c1b7SRusty Russell !cpumask_test_cpu(cpu, buffer_b->cpumask)) 5502554f786eSSteven Rostedt goto out; 55037a8e76a3SSteven Rostedt 5504438ced17SVaibhav Nagarnaik cpu_buffer_a = buffer_a->buffers[cpu]; 5505438ced17SVaibhav Nagarnaik cpu_buffer_b = buffer_b->buffers[cpu]; 5506438ced17SVaibhav Nagarnaik 55077a8e76a3SSteven Rostedt /* At least make sure the two buffers are somewhat the same */ 5508438ced17SVaibhav Nagarnaik if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) 5509554f786eSSteven Rostedt goto out; 5510554f786eSSteven Rostedt 5511554f786eSSteven Rostedt ret = -EAGAIN; 55127a8e76a3SSteven Rostedt 551397b17efeSSteven Rostedt if (atomic_read(&buffer_a->record_disabled)) 5514554f786eSSteven Rostedt goto out; 551597b17efeSSteven Rostedt 551697b17efeSSteven Rostedt if (atomic_read(&buffer_b->record_disabled)) 5517554f786eSSteven Rostedt goto out; 551897b17efeSSteven Rostedt 551997b17efeSSteven Rostedt if (atomic_read(&cpu_buffer_a->record_disabled)) 5520554f786eSSteven Rostedt goto out; 552197b17efeSSteven Rostedt 552297b17efeSSteven Rostedt if (atomic_read(&cpu_buffer_b->record_disabled)) 5523554f786eSSteven Rostedt goto out; 552497b17efeSSteven Rostedt 55257a8e76a3SSteven Rostedt /* 552674401729SPaul E. McKenney * We can't do a synchronize_rcu here because this 55277a8e76a3SSteven Rostedt * function can be called in atomic context. 55287a8e76a3SSteven Rostedt * Normally this will be called from the same CPU as cpu. 55297a8e76a3SSteven Rostedt * If not it's up to the caller to protect this. 55307a8e76a3SSteven Rostedt */ 55317a8e76a3SSteven Rostedt atomic_inc(&cpu_buffer_a->record_disabled); 55327a8e76a3SSteven Rostedt atomic_inc(&cpu_buffer_b->record_disabled); 55337a8e76a3SSteven Rostedt 553498277991SSteven Rostedt ret = -EBUSY; 553598277991SSteven Rostedt if (local_read(&cpu_buffer_a->committing)) 553698277991SSteven Rostedt goto out_dec; 553798277991SSteven Rostedt if (local_read(&cpu_buffer_b->committing)) 553898277991SSteven Rostedt goto out_dec; 553998277991SSteven Rostedt 55408a96c028SChen Lin /* 55418a96c028SChen Lin * When resize is in progress, we cannot swap it because 55428a96c028SChen Lin * it will mess the state of the cpu buffer. 
55438a96c028SChen Lin */ 55448a96c028SChen Lin if (atomic_read(&buffer_a->resizing)) 55458a96c028SChen Lin goto out_dec; 55468a96c028SChen Lin if (atomic_read(&buffer_b->resizing)) 55478a96c028SChen Lin goto out_dec; 55488a96c028SChen Lin 55497a8e76a3SSteven Rostedt buffer_a->buffers[cpu] = cpu_buffer_b; 55507a8e76a3SSteven Rostedt buffer_b->buffers[cpu] = cpu_buffer_a; 55517a8e76a3SSteven Rostedt 55527a8e76a3SSteven Rostedt cpu_buffer_b->buffer = buffer_a; 55537a8e76a3SSteven Rostedt cpu_buffer_a->buffer = buffer_b; 55547a8e76a3SSteven Rostedt 555598277991SSteven Rostedt ret = 0; 555698277991SSteven Rostedt 555798277991SSteven Rostedt out_dec: 55587a8e76a3SSteven Rostedt atomic_dec(&cpu_buffer_a->record_disabled); 55597a8e76a3SSteven Rostedt atomic_dec(&cpu_buffer_b->record_disabled); 5560554f786eSSteven Rostedt out: 5561554f786eSSteven Rostedt return ret; 55627a8e76a3SSteven Rostedt } 5563c4f50183SRobert Richter EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); 556485bac32cSSteven Rostedt #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */ 55657a8e76a3SSteven Rostedt 55668789a9e7SSteven Rostedt /** 55678789a9e7SSteven Rostedt * ring_buffer_alloc_read_page - allocate a page to read from buffer 55688789a9e7SSteven Rostedt * @buffer: the buffer to allocate for. 5569d611851bSzhangwei(Jovi) * @cpu: the cpu buffer to allocate. 55708789a9e7SSteven Rostedt * 55718789a9e7SSteven Rostedt * This function is used in conjunction with ring_buffer_read_page. 55728789a9e7SSteven Rostedt * When reading a full page from the ring buffer, these functions 55738789a9e7SSteven Rostedt * can be used to speed up the process. The calling function should 55748789a9e7SSteven Rostedt * allocate a few pages first with this function. Then when it 55758789a9e7SSteven Rostedt * needs to get pages from the ring buffer, it passes the result 55768789a9e7SSteven Rostedt * of this function into ring_buffer_read_page, which will swap 55778789a9e7SSteven Rostedt * the page that was allocated, with the read page of the buffer. 
55788789a9e7SSteven Rostedt  *
55798789a9e7SSteven Rostedt  * Returns:
5580a7e52ad7SSteven Rostedt (VMware)  *  The page allocated, or ERR_PTR
55818789a9e7SSteven Rostedt  */
558213292494SSteven Rostedt (VMware) void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
55838789a9e7SSteven Rostedt {
5584a7e52ad7SSteven Rostedt (VMware) 	struct ring_buffer_per_cpu *cpu_buffer;
558573a757e6SSteven Rostedt (VMware) 	struct buffer_data_page *bpage = NULL;
558673a757e6SSteven Rostedt (VMware) 	unsigned long flags;
55877ea59064SVaibhav Nagarnaik 	struct page *page;
55888789a9e7SSteven Rostedt 
5589a7e52ad7SSteven Rostedt (VMware) 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5590a7e52ad7SSteven Rostedt (VMware) 		return ERR_PTR(-ENODEV);
5591a7e52ad7SSteven Rostedt (VMware) 
5592a7e52ad7SSteven Rostedt (VMware) 	cpu_buffer = buffer->buffers[cpu];
559373a757e6SSteven Rostedt (VMware) 	local_irq_save(flags);
559473a757e6SSteven Rostedt (VMware) 	arch_spin_lock(&cpu_buffer->lock);
559573a757e6SSteven Rostedt (VMware) 
559673a757e6SSteven Rostedt (VMware) 	if (cpu_buffer->free_page) {
559773a757e6SSteven Rostedt (VMware) 		bpage = cpu_buffer->free_page;
559873a757e6SSteven Rostedt (VMware) 		cpu_buffer->free_page = NULL;
559973a757e6SSteven Rostedt (VMware) 	}
560073a757e6SSteven Rostedt (VMware) 
560173a757e6SSteven Rostedt (VMware) 	arch_spin_unlock(&cpu_buffer->lock);
560273a757e6SSteven Rostedt (VMware) 	local_irq_restore(flags);
560373a757e6SSteven Rostedt (VMware) 
560473a757e6SSteven Rostedt (VMware) 	if (bpage)
560573a757e6SSteven Rostedt (VMware) 		goto out;
560673a757e6SSteven Rostedt (VMware) 
5607d7ec4bfeSVaibhav Nagarnaik 	page = alloc_pages_node(cpu_to_node(cpu),
5608d7ec4bfeSVaibhav Nagarnaik 				GFP_KERNEL | __GFP_NORETRY, 0);
56097ea59064SVaibhav Nagarnaik 	if (!page)
5610a7e52ad7SSteven Rostedt (VMware) 		return ERR_PTR(-ENOMEM);
56118789a9e7SSteven Rostedt 
56127ea59064SVaibhav Nagarnaik 	bpage = page_address(page);
56138789a9e7SSteven Rostedt 
561473a757e6SSteven Rostedt (VMware)  out:
5615ef7a4a16SSteven Rostedt 	rb_init_page(bpage);
5616ef7a4a16SSteven Rostedt 
5617044fa782SSteven Rostedt 	return bpage;
56188789a9e7SSteven Rostedt }
5619d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
56208789a9e7SSteven Rostedt 
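/*
 * A minimal allocate/use/release sketch (assumes a valid "buffer" and
 * "cpu"; the "use" step is elided):
 *
 *	void *page = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...
 *	ring_buffer_free_read_page(buffer, cpu, page);
 *
 * Note that a freed page may be recycled through cpu_buffer->free_page
 * rather than handed back to the page allocator.
 */
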
56218789a9e7SSteven Rostedt /**
56228789a9e7SSteven Rostedt  * ring_buffer_free_read_page - free an allocated read page
56238789a9e7SSteven Rostedt  * @buffer: the buffer the page was allocated for
562473a757e6SSteven Rostedt (VMware)  * @cpu: the cpu buffer the page came from
56258789a9e7SSteven Rostedt  * @data: the page to free
56268789a9e7SSteven Rostedt  *
56278789a9e7SSteven Rostedt  * Free a page allocated from ring_buffer_alloc_read_page.
56288789a9e7SSteven Rostedt  */
562913292494SSteven Rostedt (VMware) void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
56308789a9e7SSteven Rostedt {
56313e4272b9SJia-Ju Bai 	struct ring_buffer_per_cpu *cpu_buffer;
563273a757e6SSteven Rostedt (VMware) 	struct buffer_data_page *bpage = data;
5633ae415fa4SSteven Rostedt (VMware) 	struct page *page = virt_to_page(bpage);
563473a757e6SSteven Rostedt (VMware) 	unsigned long flags;
563573a757e6SSteven Rostedt (VMware) 
56363e4272b9SJia-Ju Bai 	if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
56373e4272b9SJia-Ju Bai 		return;
56383e4272b9SJia-Ju Bai 
56393e4272b9SJia-Ju Bai 	cpu_buffer = buffer->buffers[cpu];
56403e4272b9SJia-Ju Bai 
5641ae415fa4SSteven Rostedt (VMware) 	/* If the page is still in use someplace else, we can't reuse it */
5642ae415fa4SSteven Rostedt (VMware) 	if (page_ref_count(page) > 1)
5643ae415fa4SSteven Rostedt (VMware) 		goto out;
5644ae415fa4SSteven Rostedt (VMware) 
564573a757e6SSteven Rostedt (VMware) 	local_irq_save(flags);
564673a757e6SSteven Rostedt (VMware) 	arch_spin_lock(&cpu_buffer->lock);
564773a757e6SSteven Rostedt (VMware) 
564873a757e6SSteven Rostedt (VMware) 	if (!cpu_buffer->free_page) {
564973a757e6SSteven Rostedt (VMware) 		cpu_buffer->free_page = bpage;
565073a757e6SSteven Rostedt (VMware) 		bpage = NULL;
565173a757e6SSteven Rostedt (VMware) 	}
565273a757e6SSteven Rostedt (VMware) 
565373a757e6SSteven Rostedt (VMware) 	arch_spin_unlock(&cpu_buffer->lock);
565473a757e6SSteven Rostedt (VMware) 	local_irq_restore(flags);
565573a757e6SSteven Rostedt (VMware) 
5656ae415fa4SSteven Rostedt (VMware)  out:
565773a757e6SSteven Rostedt (VMware) 	free_page((unsigned long)bpage);
56588789a9e7SSteven Rostedt }
5659d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
56608789a9e7SSteven Rostedt 
56618789a9e7SSteven Rostedt /**
56628789a9e7SSteven Rostedt  * ring_buffer_read_page - extract a page from the ring buffer
56638789a9e7SSteven Rostedt  * @buffer: buffer to extract from
56648789a9e7SSteven Rostedt  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
5665ef7a4a16SSteven Rostedt  * @len: amount to extract
56668789a9e7SSteven Rostedt  * @cpu: the cpu of the buffer to extract
56678789a9e7SSteven Rostedt  * @full: should the extraction only happen when the page is full.
56688789a9e7SSteven Rostedt  *
56698789a9e7SSteven Rostedt  * This function will pull out a page from the ring buffer and consume it.
56708789a9e7SSteven Rostedt  * @data_page must be the address of the variable that was returned
56718789a9e7SSteven Rostedt  * from ring_buffer_alloc_read_page. This is because the page might be used
56728789a9e7SSteven Rostedt  * to swap with a page in the ring buffer.
56738789a9e7SSteven Rostedt  *
56748789a9e7SSteven Rostedt  * for example:
5675d611851bSzhangwei(Jovi)  *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
5676a7e52ad7SSteven Rostedt (VMware)  *	if (IS_ERR(rpage))
5677a7e52ad7SSteven Rostedt (VMware)  *		return PTR_ERR(rpage);
5678ef7a4a16SSteven Rostedt  *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
5679667d2412SLai Jiangshan  *	if (ret >= 0)
5680667d2412SLai Jiangshan  *		process_page(rpage, ret);
56818789a9e7SSteven Rostedt  *
56828789a9e7SSteven Rostedt  * When @full is set, the function will not succeed unless
56838789a9e7SSteven Rostedt  * the writer is off the reader page.
56848789a9e7SSteven Rostedt  *
56858789a9e7SSteven Rostedt  * Note: it is up to the calling functions to handle sleeps and wakeups.
56868789a9e7SSteven Rostedt * The ring buffer can be used anywhere in the kernel and can not 56878789a9e7SSteven Rostedt * blindly call wake_up. The layer that uses the ring buffer must be 56888789a9e7SSteven Rostedt * responsible for that. 56898789a9e7SSteven Rostedt * 56908789a9e7SSteven Rostedt * Returns: 5691667d2412SLai Jiangshan * >=0 if data has been transferred, returns the offset of consumed data. 5692667d2412SLai Jiangshan * <0 if no data has been transferred. 56938789a9e7SSteven Rostedt */ 569413292494SSteven Rostedt (VMware) int ring_buffer_read_page(struct trace_buffer *buffer, 5695ef7a4a16SSteven Rostedt void **data_page, size_t len, int cpu, int full) 56968789a9e7SSteven Rostedt { 56978789a9e7SSteven Rostedt struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 56988789a9e7SSteven Rostedt struct ring_buffer_event *event; 5699044fa782SSteven Rostedt struct buffer_data_page *bpage; 5700ef7a4a16SSteven Rostedt struct buffer_page *reader; 5701ff0ff84aSSteven Rostedt unsigned long missed_events; 57028789a9e7SSteven Rostedt unsigned long flags; 5703ef7a4a16SSteven Rostedt unsigned int commit; 5704667d2412SLai Jiangshan unsigned int read; 57054f3640f8SSteven Rostedt u64 save_timestamp; 5706667d2412SLai Jiangshan int ret = -1; 57078789a9e7SSteven Rostedt 5708554f786eSSteven Rostedt if (!cpumask_test_cpu(cpu, buffer->cpumask)) 5709554f786eSSteven Rostedt goto out; 5710554f786eSSteven Rostedt 5711474d32b6SSteven Rostedt /* 5712474d32b6SSteven Rostedt * If len is not big enough to hold the page header, then 5713474d32b6SSteven Rostedt * we can not copy anything. 5714474d32b6SSteven Rostedt */ 5715474d32b6SSteven Rostedt if (len <= BUF_PAGE_HDR_SIZE) 5716554f786eSSteven Rostedt goto out; 5717474d32b6SSteven Rostedt 5718474d32b6SSteven Rostedt len -= BUF_PAGE_HDR_SIZE; 5719474d32b6SSteven Rostedt 57208789a9e7SSteven Rostedt if (!data_page) 5721554f786eSSteven Rostedt goto out; 57228789a9e7SSteven Rostedt 5723044fa782SSteven Rostedt bpage = *data_page; 5724044fa782SSteven Rostedt if (!bpage) 5725554f786eSSteven Rostedt goto out; 57268789a9e7SSteven Rostedt 57275389f6faSThomas Gleixner raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 57288789a9e7SSteven Rostedt 5729ef7a4a16SSteven Rostedt reader = rb_get_reader_page(cpu_buffer); 5730ef7a4a16SSteven Rostedt if (!reader) 5731554f786eSSteven Rostedt goto out_unlock; 57328789a9e7SSteven Rostedt 5733ef7a4a16SSteven Rostedt event = rb_reader_event(cpu_buffer); 5734667d2412SLai Jiangshan 5735ef7a4a16SSteven Rostedt read = reader->read; 5736ef7a4a16SSteven Rostedt commit = rb_page_commit(reader); 5737ef7a4a16SSteven Rostedt 573866a8cb95SSteven Rostedt /* Check if any events were dropped */ 5739ff0ff84aSSteven Rostedt missed_events = cpu_buffer->lost_events; 574066a8cb95SSteven Rostedt 57418789a9e7SSteven Rostedt /* 5742474d32b6SSteven Rostedt * If this page has been partially read or 5743474d32b6SSteven Rostedt * if len is not big enough to read the rest of the page or 5744474d32b6SSteven Rostedt * a writer is still on the page, then 5745474d32b6SSteven Rostedt * we must copy the data from the page to the buffer. 5746474d32b6SSteven Rostedt * Otherwise, we can simply swap the page with the one passed in. 
57478789a9e7SSteven Rostedt */ 5748474d32b6SSteven Rostedt if (read || (len < (commit - read)) || 5749ef7a4a16SSteven Rostedt cpu_buffer->reader_page == cpu_buffer->commit_page) { 5750667d2412SLai Jiangshan struct buffer_data_page *rpage = cpu_buffer->reader_page->page; 5751474d32b6SSteven Rostedt unsigned int rpos = read; 5752474d32b6SSteven Rostedt unsigned int pos = 0; 5753ef7a4a16SSteven Rostedt unsigned int size; 57548789a9e7SSteven Rostedt 5755fa8f4a89SSteven Rostedt (Google) /* 5756fa8f4a89SSteven Rostedt (Google) * If a full page is expected, this can still be returned 5757fa8f4a89SSteven Rostedt (Google) * if there's been a previous partial read and the 5758fa8f4a89SSteven Rostedt (Google) * rest of the page can be read and the commit page is off 5759fa8f4a89SSteven Rostedt (Google) * the reader page. 5760fa8f4a89SSteven Rostedt (Google) */ 5761fa8f4a89SSteven Rostedt (Google) if (full && 5762fa8f4a89SSteven Rostedt (Google) (!read || (len < (commit - read)) || 5763fa8f4a89SSteven Rostedt (Google) cpu_buffer->reader_page == cpu_buffer->commit_page)) 5764554f786eSSteven Rostedt goto out_unlock; 57658789a9e7SSteven Rostedt 5766ef7a4a16SSteven Rostedt if (len > (commit - read)) 5767ef7a4a16SSteven Rostedt len = (commit - read); 5768ef7a4a16SSteven Rostedt 576969d1b839SSteven Rostedt /* Always keep the time extend and data together */ 577069d1b839SSteven Rostedt size = rb_event_ts_length(event); 5771ef7a4a16SSteven Rostedt 5772ef7a4a16SSteven Rostedt if (len < size) 5773554f786eSSteven Rostedt goto out_unlock; 5774ef7a4a16SSteven Rostedt 57754f3640f8SSteven Rostedt /* save the current timestamp, since the user will need it */ 57764f3640f8SSteven Rostedt save_timestamp = cpu_buffer->read_stamp; 57774f3640f8SSteven Rostedt 5778ef7a4a16SSteven Rostedt /* Need to copy one event at a time */ 5779ef7a4a16SSteven Rostedt do { 5780e1e35927SDavid Sharp /* We need the size of one event, because 5781e1e35927SDavid Sharp * rb_advance_reader only advances by one event, 5782e1e35927SDavid Sharp * whereas rb_event_ts_length may include the size of 5783e1e35927SDavid Sharp * one or two events. 5784e1e35927SDavid Sharp * We have already ensured there's enough space if this 5785e1e35927SDavid Sharp * is a time extend. 
			 */
5786e1e35927SDavid Sharp 			size = rb_event_length(event);
5787474d32b6SSteven Rostedt 			memcpy(bpage->data + pos, rpage->data + rpos, size);
5788ef7a4a16SSteven Rostedt 
5789ef7a4a16SSteven Rostedt 			len -= size;
5790ef7a4a16SSteven Rostedt 
5791ef7a4a16SSteven Rostedt 			rb_advance_reader(cpu_buffer);
5792474d32b6SSteven Rostedt 			rpos = reader->read;
5793474d32b6SSteven Rostedt 			pos += size;
5794ef7a4a16SSteven Rostedt 
579518fab912SHuang Ying 			if (rpos >= commit)
579618fab912SHuang Ying 				break;
579718fab912SHuang Ying 
5798ef7a4a16SSteven Rostedt 			event = rb_reader_event(cpu_buffer);
579969d1b839SSteven Rostedt 			/* Always keep the time extend and data together */
580069d1b839SSteven Rostedt 			size = rb_event_ts_length(event);
5801e1e35927SDavid Sharp 		} while (len >= size);
5802667d2412SLai Jiangshan 
5803667d2412SLai Jiangshan 		/* update bpage */
5804ef7a4a16SSteven Rostedt 		local_set(&bpage->commit, pos);
58054f3640f8SSteven Rostedt 		bpage->time_stamp = save_timestamp;
5806ef7a4a16SSteven Rostedt 
5807474d32b6SSteven Rostedt 		/* we copied everything to the beginning */
5808474d32b6SSteven Rostedt 		read = 0;
58098789a9e7SSteven Rostedt 	} else {
5810afbab76aSSteven Rostedt 		/* update the entry counter */
581177ae365eSSteven Rostedt 		cpu_buffer->read += rb_page_entries(reader);
5812c64e148aSVaibhav Nagarnaik 		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
5813afbab76aSSteven Rostedt 
58148789a9e7SSteven Rostedt 		/* swap the pages */
5815044fa782SSteven Rostedt 		rb_init_page(bpage);
5816ef7a4a16SSteven Rostedt 		bpage = reader->page;
5817ef7a4a16SSteven Rostedt 		reader->page = *data_page;
5818ef7a4a16SSteven Rostedt 		local_set(&reader->write, 0);
5819778c55d4SSteven Rostedt 		local_set(&reader->entries, 0);
5820ef7a4a16SSteven Rostedt 		reader->read = 0;
5821044fa782SSteven Rostedt 		*data_page = bpage;
5822ff0ff84aSSteven Rostedt 
5823ff0ff84aSSteven Rostedt 		/*
5824ff0ff84aSSteven Rostedt 		 * Use the real_end for the data size.
5825ff0ff84aSSteven Rostedt 		 * This gives us a chance to store the lost events
5826ff0ff84aSSteven Rostedt 		 * on the page.
5827ff0ff84aSSteven Rostedt 		 */
5828ff0ff84aSSteven Rostedt 		if (reader->real_end)
5829ff0ff84aSSteven Rostedt 			local_set(&bpage->commit, reader->real_end);
5830ef7a4a16SSteven Rostedt 	}
5831ef7a4a16SSteven Rostedt 	ret = read;
5832ef7a4a16SSteven Rostedt 
583366a8cb95SSteven Rostedt 	cpu_buffer->lost_events = 0;
58342711ca23SSteven Rostedt 
58352711ca23SSteven Rostedt 	commit = local_read(&bpage->commit);
583666a8cb95SSteven Rostedt 	/*
583766a8cb95SSteven Rostedt 	 * Set a flag in the commit field if we lost events
583866a8cb95SSteven Rostedt 	 */
5839ff0ff84aSSteven Rostedt 	if (missed_events) {
5840ff0ff84aSSteven Rostedt 		/* If there is room at the end of the page to save the
5841ff0ff84aSSteven Rostedt 		 * missed events, then record it there.
5842ff0ff84aSSteven Rostedt 		 */
5843ff0ff84aSSteven Rostedt 		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
5844ff0ff84aSSteven Rostedt 			memcpy(&bpage->data[commit], &missed_events,
5845ff0ff84aSSteven Rostedt 			       sizeof(missed_events));
5846ff0ff84aSSteven Rostedt 			local_add(RB_MISSED_STORED, &bpage->commit);
58472711ca23SSteven Rostedt 			commit += sizeof(missed_events);
5848ff0ff84aSSteven Rostedt 		}
584966a8cb95SSteven Rostedt 		local_add(RB_MISSED_EVENTS, &bpage->commit);
5850ff0ff84aSSteven Rostedt 	}
585166a8cb95SSteven Rostedt 
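/*
 * A consumer of the returned page can recover the lost-event count from
 * the flag bits set above. A minimal sketch in terms of this file's
 * types (assumes "bpage" is the buffer_data_page handed back by
 * ring_buffer_read_page() and "missed" is an unsigned long):
 *
 *	unsigned long commit = local_read(&bpage->commit);
 *	unsigned long size = commit & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);
 *	unsigned long missed = 0;
 *
 *	if ((commit & RB_MISSED_EVENTS) && (commit & RB_MISSED_STORED))
 *		memcpy(&missed, &bpage->data[size], sizeof(missed));
 *
 * If RB_MISSED_EVENTS is set without RB_MISSED_STORED, events were lost
 * but there was no room left on the page to store how many.
 */
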
58522711ca23SSteven Rostedt 	/*
58532711ca23SSteven Rostedt 	 * This page may be off to user land. Zero it out here.
58542711ca23SSteven Rostedt 	 */
58552711ca23SSteven Rostedt 	if (commit < BUF_PAGE_SIZE)
58562711ca23SSteven Rostedt 		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
58572711ca23SSteven Rostedt 
5858554f786eSSteven Rostedt  out_unlock:
58595389f6faSThomas Gleixner 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
58608789a9e7SSteven Rostedt 
5861554f786eSSteven Rostedt  out:
58628789a9e7SSteven Rostedt 	return ret;
58638789a9e7SSteven Rostedt }
5864d6ce96daSSteven Rostedt EXPORT_SYMBOL_GPL(ring_buffer_read_page);
58658789a9e7SSteven Rostedt 
5866b32614c0SSebastian Andrzej Siewior /*
5867b32614c0SSebastian Andrzej Siewior  * We only allocate new buffers, never free them if the CPU goes down.
5868b32614c0SSebastian Andrzej Siewior  * If we were to free the buffer, then the user would lose any trace that was in
5869b32614c0SSebastian Andrzej Siewior  * the buffer.
5870b32614c0SSebastian Andrzej Siewior  */
5871b32614c0SSebastian Andrzej Siewior int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
5872554f786eSSteven Rostedt {
587313292494SSteven Rostedt (VMware) 	struct trace_buffer *buffer;
58749b94a8fbSSteven Rostedt (Red Hat) 	long nr_pages_same;
58759b94a8fbSSteven Rostedt (Red Hat) 	int cpu_i;
58769b94a8fbSSteven Rostedt (Red Hat) 	unsigned long nr_pages;
5877554f786eSSteven Rostedt 
587813292494SSteven Rostedt (VMware) 	buffer = container_of(node, struct trace_buffer, node);
58793f237a79SRusty Russell 	if (cpumask_test_cpu(cpu, buffer->cpumask))
5880b32614c0SSebastian Andrzej Siewior 		return 0;
5881554f786eSSteven Rostedt 
5882438ced17SVaibhav Nagarnaik 	nr_pages = 0;
5883438ced17SVaibhav Nagarnaik 	nr_pages_same = 1;
5884438ced17SVaibhav Nagarnaik 	/* check if all cpu sizes are same */
5885438ced17SVaibhav Nagarnaik 	for_each_buffer_cpu(buffer, cpu_i) {
5886438ced17SVaibhav Nagarnaik 		/* fill in the size from first enabled cpu */
5887438ced17SVaibhav Nagarnaik 		if (nr_pages == 0)
5888438ced17SVaibhav Nagarnaik 			nr_pages = buffer->buffers[cpu_i]->nr_pages;
5889438ced17SVaibhav Nagarnaik 		if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
5890438ced17SVaibhav Nagarnaik 			nr_pages_same = 0;
5891438ced17SVaibhav Nagarnaik 			break;
5892438ced17SVaibhav Nagarnaik 		}
5893438ced17SVaibhav Nagarnaik 	}
5894438ced17SVaibhav Nagarnaik 	/* allocate minimum pages, user can later expand it */
5895438ced17SVaibhav Nagarnaik 	if (!nr_pages_same)
5896438ced17SVaibhav Nagarnaik 		nr_pages = 2;
5897554f786eSSteven Rostedt 	buffer->buffers[cpu] =
5898438ced17SVaibhav Nagarnaik 		rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
5899554f786eSSteven Rostedt 	if (!buffer->buffers[cpu]) {
5900b32614c0SSebastian Andrzej Siewior 		WARN(1, "failed to allocate ring buffer on CPU %u\n",
5901554f786eSSteven Rostedt 		     cpu);
5902b32614c0SSebastian Andrzej Siewior 		return -ENOMEM;
5903554f786eSSteven Rostedt 	}
5904554f786eSSteven Rostedt 	smp_wmb();
59053f237a79SRusty Russell 	cpumask_set_cpu(cpu, buffer->cpumask);
5906b32614c0SSebastian Andrzej Siewior 	return 0;
5907554f786eSSteven Rostedt }
59086c43e554SSteven Rostedt (Red Hat) 
59096c43e554SSteven Rostedt (Red Hat) #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
59106c43e554SSteven Rostedt (Red Hat) /*
59116c43e554SSteven Rostedt (Red Hat)  * This is a basic integrity check of the ring buffer.
59126c43e554SSteven Rostedt (Red Hat)  * Late in the boot cycle this test will run when configured in.
59136c43e554SSteven Rostedt (Red Hat)  * It will kick off a thread per CPU that will go into a loop
59146c43e554SSteven Rostedt (Red Hat)  * writing to the per cpu ring buffer various sizes of data.
59156c43e554SSteven Rostedt (Red Hat) * Some of the data will be large items, some small. 59166c43e554SSteven Rostedt (Red Hat) * 59176c43e554SSteven Rostedt (Red Hat) * Another thread is created that goes into a spin, sending out 59186c43e554SSteven Rostedt (Red Hat) * IPIs to the other CPUs to also write into the ring buffer. 59196c43e554SSteven Rostedt (Red Hat) * this is to test the nesting ability of the buffer. 59206c43e554SSteven Rostedt (Red Hat) * 59216c43e554SSteven Rostedt (Red Hat) * Basic stats are recorded and reported. If something in the 59226c43e554SSteven Rostedt (Red Hat) * ring buffer should happen that's not expected, a big warning 59236c43e554SSteven Rostedt (Red Hat) * is displayed and all ring buffers are disabled. 59246c43e554SSteven Rostedt (Red Hat) */ 59256c43e554SSteven Rostedt (Red Hat) static struct task_struct *rb_threads[NR_CPUS] __initdata; 59266c43e554SSteven Rostedt (Red Hat) 59276c43e554SSteven Rostedt (Red Hat) struct rb_test_data { 592813292494SSteven Rostedt (VMware) struct trace_buffer *buffer; 59296c43e554SSteven Rostedt (Red Hat) unsigned long events; 59306c43e554SSteven Rostedt (Red Hat) unsigned long bytes_written; 59316c43e554SSteven Rostedt (Red Hat) unsigned long bytes_alloc; 59326c43e554SSteven Rostedt (Red Hat) unsigned long bytes_dropped; 59336c43e554SSteven Rostedt (Red Hat) unsigned long events_nested; 59346c43e554SSteven Rostedt (Red Hat) unsigned long bytes_written_nested; 59356c43e554SSteven Rostedt (Red Hat) unsigned long bytes_alloc_nested; 59366c43e554SSteven Rostedt (Red Hat) unsigned long bytes_dropped_nested; 59376c43e554SSteven Rostedt (Red Hat) int min_size_nested; 59386c43e554SSteven Rostedt (Red Hat) int max_size_nested; 59396c43e554SSteven Rostedt (Red Hat) int max_size; 59406c43e554SSteven Rostedt (Red Hat) int min_size; 59416c43e554SSteven Rostedt (Red Hat) int cpu; 59426c43e554SSteven Rostedt (Red Hat) int cnt; 59436c43e554SSteven Rostedt (Red Hat) }; 59446c43e554SSteven Rostedt (Red Hat) 59456c43e554SSteven Rostedt (Red Hat) static struct rb_test_data rb_data[NR_CPUS] __initdata; 59466c43e554SSteven Rostedt (Red Hat) 59476c43e554SSteven Rostedt (Red Hat) /* 1 meg per cpu */ 59486c43e554SSteven Rostedt (Red Hat) #define RB_TEST_BUFFER_SIZE 1048576 59496c43e554SSteven Rostedt (Red Hat) 59506c43e554SSteven Rostedt (Red Hat) static char rb_string[] __initdata = 59516c43e554SSteven Rostedt (Red Hat) "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\" 59526c43e554SSteven Rostedt (Red Hat) "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890" 59536c43e554SSteven Rostedt (Red Hat) "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv"; 59546c43e554SSteven Rostedt (Red Hat) 59556c43e554SSteven Rostedt (Red Hat) static bool rb_test_started __initdata; 59566c43e554SSteven Rostedt (Red Hat) 59576c43e554SSteven Rostedt (Red Hat) struct rb_item { 59586c43e554SSteven Rostedt (Red Hat) int size; 59596c43e554SSteven Rostedt (Red Hat) char str[]; 59606c43e554SSteven Rostedt (Red Hat) }; 59616c43e554SSteven Rostedt (Red Hat) 59626c43e554SSteven Rostedt (Red Hat) static __init int rb_write_something(struct rb_test_data *data, bool nested) 59636c43e554SSteven Rostedt (Red Hat) { 59646c43e554SSteven Rostedt (Red Hat) struct ring_buffer_event *event; 59656c43e554SSteven Rostedt (Red Hat) struct rb_item *item; 59666c43e554SSteven Rostedt (Red Hat) bool started; 59676c43e554SSteven Rostedt (Red Hat) int event_len; 59686c43e554SSteven Rostedt (Red Hat) int size; 59696c43e554SSteven Rostedt (Red Hat) int len; 59706c43e554SSteven Rostedt (Red Hat) 
int cnt;
59716c43e554SSteven Rostedt (Red Hat) 
59726c43e554SSteven Rostedt (Red Hat) 	/* Have nested writes different than what is written */
59736c43e554SSteven Rostedt (Red Hat) 	cnt = data->cnt + (nested ? 27 : 0);
59746c43e554SSteven Rostedt (Red Hat) 
59756c43e554SSteven Rostedt (Red Hat) 	/* Multiply cnt by ~e, to make some unique increment */
597640ed29b3SYueHaibing 	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
59776c43e554SSteven Rostedt (Red Hat) 
59786c43e554SSteven Rostedt (Red Hat) 	len = size + sizeof(struct rb_item);
59796c43e554SSteven Rostedt (Red Hat) 
59806c43e554SSteven Rostedt (Red Hat) 	started = rb_test_started;
59816c43e554SSteven Rostedt (Red Hat) 	/* read rb_test_started before checking buffer enabled */
59826c43e554SSteven Rostedt (Red Hat) 	smp_rmb();
59836c43e554SSteven Rostedt (Red Hat) 
59846c43e554SSteven Rostedt (Red Hat) 	event = ring_buffer_lock_reserve(data->buffer, len);
59856c43e554SSteven Rostedt (Red Hat) 	if (!event) {
59866c43e554SSteven Rostedt (Red Hat) 		/* Ignore dropped events before test starts. */
59876c43e554SSteven Rostedt (Red Hat) 		if (started) {
59886c43e554SSteven Rostedt (Red Hat) 			if (nested)
59896c43e554SSteven Rostedt (Red Hat) 				data->bytes_dropped_nested += len;
59906c43e554SSteven Rostedt (Red Hat) 			else
59916c43e554SSteven Rostedt (Red Hat) 				data->bytes_dropped += len;
59926c43e554SSteven Rostedt (Red Hat) 		}
59936c43e554SSteven Rostedt (Red Hat) 		return len;
59946c43e554SSteven Rostedt (Red Hat) 	}
59956c43e554SSteven Rostedt (Red Hat) 
59966c43e554SSteven Rostedt (Red Hat) 	event_len = ring_buffer_event_length(event);
59976c43e554SSteven Rostedt (Red Hat) 
59986c43e554SSteven Rostedt (Red Hat) 	if (RB_WARN_ON(data->buffer, event_len < len))
59996c43e554SSteven Rostedt (Red Hat) 		goto out;
60006c43e554SSteven Rostedt (Red Hat) 
60016c43e554SSteven Rostedt (Red Hat) 	item = ring_buffer_event_data(event);
60026c43e554SSteven Rostedt (Red Hat) 	item->size = size;
60036c43e554SSteven Rostedt (Red Hat) 	memcpy(item->str, rb_string, size);
60046c43e554SSteven Rostedt (Red Hat) 
60056c43e554SSteven Rostedt (Red Hat) 	if (nested) {
60066c43e554SSteven Rostedt (Red Hat) 		data->bytes_alloc_nested += event_len;
60076c43e554SSteven Rostedt (Red Hat) 		data->bytes_written_nested += len;
60086c43e554SSteven Rostedt (Red Hat) 		data->events_nested++;
60096c43e554SSteven Rostedt (Red Hat) 		if (!data->min_size_nested || len < data->min_size_nested)
60106c43e554SSteven Rostedt (Red Hat) 			data->min_size_nested = len;
60116c43e554SSteven Rostedt (Red Hat) 		if (len > data->max_size_nested)
60126c43e554SSteven Rostedt (Red Hat) 			data->max_size_nested = len;
60136c43e554SSteven Rostedt (Red Hat) 	} else {
60146c43e554SSteven Rostedt (Red Hat) 		data->bytes_alloc += event_len;
60156c43e554SSteven Rostedt (Red Hat) 		data->bytes_written += len;
60166c43e554SSteven Rostedt (Red Hat) 		data->events++;
60176c43e554SSteven Rostedt (Red Hat) 		if (!data->min_size || len < data->min_size)
60186c43e554SSteven Rostedt (Red Hat) 			data->min_size = len;
60196c43e554SSteven Rostedt (Red Hat) 		if (len > data->max_size)
60206c43e554SSteven Rostedt (Red Hat) 			data->max_size = len;
60216c43e554SSteven Rostedt (Red Hat) 	}
60226c43e554SSteven Rostedt (Red Hat) 
60236c43e554SSteven Rostedt (Red Hat)  out:
602404aabc32SSong Chen 	ring_buffer_unlock_commit(data->buffer);
60256c43e554SSteven Rostedt (Red Hat) 
60266c43e554SSteven Rostedt (Red Hat) 	return 0;
60276c43e554SSteven Rostedt (Red Hat) }
60286c43e554SSteven Rostedt (Red Hat) 
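/*
 * rb_write_something() above uses the standard reserve/commit write
 * pattern. A minimal sketch of that pattern ("struct my_item" is a
 * hypothetical payload type, and error handling is elided):
 *
 *	struct ring_buffer_event *event;
 *	struct my_item *item;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*item));
 *	if (!event)
 *		return -EBUSY;	(buffer disabled, or no space left)
 *	item = ring_buffer_event_data(event);
 *	item->field = 1;
 *	ring_buffer_unlock_commit(buffer);
 */
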
60296c43e554SSteven Rostedt (Red Hat) static __init int rb_test(void *arg)
60306c43e554SSteven Rostedt (Red Hat) {
60316c43e554SSteven Rostedt (Red Hat) 	struct rb_test_data *data = arg;
60326c43e554SSteven Rostedt (Red Hat) 
60336c43e554SSteven Rostedt (Red Hat) 	while (!kthread_should_stop()) {
60346c43e554SSteven Rostedt (Red Hat) 		rb_write_something(data, false);
60356c43e554SSteven Rostedt (Red Hat) 		data->cnt++;
60366c43e554SSteven Rostedt (Red Hat) 
60376c43e554SSteven Rostedt (Red Hat) 		set_current_state(TASK_INTERRUPTIBLE);
60386c43e554SSteven Rostedt (Red Hat) 		/* Now sleep between a min of 100-300us and a max of 1ms */
60396c43e554SSteven Rostedt (Red Hat) 		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
60406c43e554SSteven Rostedt (Red Hat) 	}
60416c43e554SSteven Rostedt (Red Hat) 
60426c43e554SSteven Rostedt (Red Hat) 	return 0;
60436c43e554SSteven Rostedt (Red Hat) }
60446c43e554SSteven Rostedt (Red Hat) 
60456c43e554SSteven Rostedt (Red Hat) static __init void rb_ipi(void *ignore)
60466c43e554SSteven Rostedt (Red Hat) {
60476c43e554SSteven Rostedt (Red Hat) 	struct rb_test_data *data;
60486c43e554SSteven Rostedt (Red Hat) 	int cpu = smp_processor_id();
60496c43e554SSteven Rostedt (Red Hat) 
60506c43e554SSteven Rostedt (Red Hat) 	data = &rb_data[cpu];
60516c43e554SSteven Rostedt (Red Hat) 	rb_write_something(data, true);
60526c43e554SSteven Rostedt (Red Hat) }
60536c43e554SSteven Rostedt (Red Hat) 
60546c43e554SSteven Rostedt (Red Hat) static __init int rb_hammer_test(void *arg)
60556c43e554SSteven Rostedt (Red Hat) {
60566c43e554SSteven Rostedt (Red Hat) 	while (!kthread_should_stop()) {
60576c43e554SSteven Rostedt (Red Hat) 
60586c43e554SSteven Rostedt (Red Hat) 		/* Send an IPI to all cpus to write data! */
60596c43e554SSteven Rostedt (Red Hat) 		smp_call_function(rb_ipi, NULL, 1);
60606c43e554SSteven Rostedt (Red Hat) 		/* No sleep, but for non preempt, let others run */
60616c43e554SSteven Rostedt (Red Hat) 		schedule();
60626c43e554SSteven Rostedt (Red Hat) 	}
60636c43e554SSteven Rostedt (Red Hat) 
60646c43e554SSteven Rostedt (Red Hat) 	return 0;
60656c43e554SSteven Rostedt (Red Hat) }
60666c43e554SSteven Rostedt (Red Hat) 
60676c43e554SSteven Rostedt (Red Hat) static __init int test_ringbuffer(void)
60686c43e554SSteven Rostedt (Red Hat) {
60696c43e554SSteven Rostedt (Red Hat) 	struct task_struct *rb_hammer;
607013292494SSteven Rostedt (VMware) 	struct trace_buffer *buffer;
60716c43e554SSteven Rostedt (Red Hat) 	int cpu;
60726c43e554SSteven Rostedt (Red Hat) 	int ret = 0;
60736c43e554SSteven Rostedt (Red Hat) 
6074a356646aSSteven Rostedt (VMware) 	if (security_locked_down(LOCKDOWN_TRACEFS)) {
6075ee195452SStephen Rothwell 		pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
6076a356646aSSteven Rostedt (VMware) 		return 0;
6077a356646aSSteven Rostedt (VMware) 	}
6078a356646aSSteven Rostedt (VMware) 
60796c43e554SSteven Rostedt (Red Hat) 	pr_info("Running ring buffer tests...\n");
60806c43e554SSteven Rostedt (Red Hat) 
60816c43e554SSteven Rostedt (Red Hat) 	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
60826c43e554SSteven Rostedt (Red Hat) 	if (WARN_ON(!buffer))
60836c43e554SSteven Rostedt (Red Hat) 		return 0;
60846c43e554SSteven Rostedt (Red Hat) 
60856c43e554SSteven Rostedt (Red Hat) 	/* Disable buffer so that threads can't write to it yet */
60866c43e554SSteven Rostedt (Red Hat) 	ring_buffer_record_off(buffer);
60876c43e554SSteven Rostedt (Red Hat) 
60886c43e554SSteven Rostedt (Red Hat) 	for_each_online_cpu(cpu) {
60896c43e554SSteven Rostedt (Red Hat) 		rb_data[cpu].buffer = buffer;
60906c43e554SSteven Rostedt (Red Hat) 		rb_data[cpu].cpu = cpu;
60916c43e554SSteven Rostedt (Red Hat) 		rb_data[cpu].cnt = cpu;
609264ed3a04SCai Huoqing 		rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
609364ed3a04SCai Huoqing 						     cpu, "rbtester/%u");
609462277de7SWei Yongjun 		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
60956c43e554SSteven Rostedt (Red Hat) 			pr_cont("FAILED\n");
609662277de7SWei Yongjun 			ret = PTR_ERR(rb_threads[cpu]);
60976c43e554SSteven Rostedt (Red Hat) 			goto out_free;
60986c43e554SSteven Rostedt (Red Hat) 		}
60996c43e554SSteven Rostedt (Red Hat) 
61006c43e554SSteven Rostedt (Red Hat) 	}
61016c43e554SSteven Rostedt (Red Hat) 
61026c43e554SSteven Rostedt (Red Hat) 	/* Now create the rb hammer! */
61036c43e554SSteven Rostedt (Red Hat) 	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
610462277de7SWei Yongjun 	if (WARN_ON(IS_ERR(rb_hammer))) {
61056c43e554SSteven Rostedt (Red Hat) 		pr_cont("FAILED\n");
610662277de7SWei Yongjun 		ret = PTR_ERR(rb_hammer);
61076c43e554SSteven Rostedt (Red Hat) 		goto out_free;
61086c43e554SSteven Rostedt (Red Hat) 	}
61096c43e554SSteven Rostedt (Red Hat) 
61106c43e554SSteven Rostedt (Red Hat) 	ring_buffer_record_on(buffer);
61116c43e554SSteven Rostedt (Red Hat) 	/*
61126c43e554SSteven Rostedt (Red Hat) 	 * Show buffer is enabled before setting rb_test_started.
61136c43e554SSteven Rostedt (Red Hat) 	 * Yes there's a small race window where events could be
61146c43e554SSteven Rostedt (Red Hat) 	 * dropped and the thread won't catch it. But when a ring
61156c43e554SSteven Rostedt (Red Hat) 	 * buffer gets enabled, there will always be some kind of
61166c43e554SSteven Rostedt (Red Hat) 	 * delay before other CPUs see it. Thus, we don't care about
61176c43e554SSteven Rostedt (Red Hat) 	 * those dropped events. We care about events dropped after
61186c43e554SSteven Rostedt (Red Hat) 	 * the threads see that the buffer is active.
61196c43e554SSteven Rostedt (Red Hat) 	 */
61206c43e554SSteven Rostedt (Red Hat) 	smp_wmb();
61216c43e554SSteven Rostedt (Red Hat) 	rb_test_started = true;
61226c43e554SSteven Rostedt (Red Hat) 
61236c43e554SSteven Rostedt (Red Hat) 	set_current_state(TASK_INTERRUPTIBLE);
61246c43e554SSteven Rostedt (Red Hat) 	/* Just run for 10 seconds */
61256c43e554SSteven Rostedt (Red Hat) 	schedule_timeout(10 * HZ);
61266c43e554SSteven Rostedt (Red Hat) 
61276c43e554SSteven Rostedt (Red Hat) 	kthread_stop(rb_hammer);
61286c43e554SSteven Rostedt (Red Hat) 
61296c43e554SSteven Rostedt (Red Hat)  out_free:
61306c43e554SSteven Rostedt (Red Hat) 	for_each_online_cpu(cpu) {
61316c43e554SSteven Rostedt (Red Hat) 		if (!rb_threads[cpu])
61326c43e554SSteven Rostedt (Red Hat) 			break;
61336c43e554SSteven Rostedt (Red Hat) 		kthread_stop(rb_threads[cpu]);
61346c43e554SSteven Rostedt (Red Hat) 	}
61356c43e554SSteven Rostedt (Red Hat) 	if (ret) {
61366c43e554SSteven Rostedt (Red Hat) 		ring_buffer_free(buffer);
61376c43e554SSteven Rostedt (Red Hat) 		return ret;
61386c43e554SSteven Rostedt (Red Hat) 	}
61396c43e554SSteven Rostedt (Red Hat) 	/* Report!
*/ 61406c43e554SSteven Rostedt (Red Hat) pr_info("finished\n"); 61416c43e554SSteven Rostedt (Red Hat) for_each_online_cpu(cpu) { 61426c43e554SSteven Rostedt (Red Hat) struct ring_buffer_event *event; 61436c43e554SSteven Rostedt (Red Hat) struct rb_test_data *data = &rb_data[cpu]; 61446c43e554SSteven Rostedt (Red Hat) struct rb_item *item; 61456c43e554SSteven Rostedt (Red Hat) unsigned long total_events; 61466c43e554SSteven Rostedt (Red Hat) unsigned long total_dropped; 61476c43e554SSteven Rostedt (Red Hat) unsigned long total_written; 61486c43e554SSteven Rostedt (Red Hat) unsigned long total_alloc; 61496c43e554SSteven Rostedt (Red Hat) unsigned long total_read = 0; 61506c43e554SSteven Rostedt (Red Hat) unsigned long total_size = 0; 61516c43e554SSteven Rostedt (Red Hat) unsigned long total_len = 0; 61526c43e554SSteven Rostedt (Red Hat) unsigned long total_lost = 0; 61536c43e554SSteven Rostedt (Red Hat) unsigned long lost; 61546c43e554SSteven Rostedt (Red Hat) int big_event_size; 61556c43e554SSteven Rostedt (Red Hat) int small_event_size; 61566c43e554SSteven Rostedt (Red Hat) 61576c43e554SSteven Rostedt (Red Hat) ret = -1; 61586c43e554SSteven Rostedt (Red Hat) 61596c43e554SSteven Rostedt (Red Hat) total_events = data->events + data->events_nested; 61606c43e554SSteven Rostedt (Red Hat) total_written = data->bytes_written + data->bytes_written_nested; 61616c43e554SSteven Rostedt (Red Hat) total_alloc = data->bytes_alloc + data->bytes_alloc_nested; 61626c43e554SSteven Rostedt (Red Hat) total_dropped = data->bytes_dropped + data->bytes_dropped_nested; 61636c43e554SSteven Rostedt (Red Hat) 61646c43e554SSteven Rostedt (Red Hat) big_event_size = data->max_size + data->max_size_nested; 61656c43e554SSteven Rostedt (Red Hat) small_event_size = data->min_size + data->min_size_nested; 61666c43e554SSteven Rostedt (Red Hat) 61676c43e554SSteven Rostedt (Red Hat) pr_info("CPU %d:\n", cpu); 61686c43e554SSteven Rostedt (Red Hat) pr_info(" events: %ld\n", total_events); 61696c43e554SSteven Rostedt (Red Hat) pr_info(" dropped bytes: %ld\n", total_dropped); 61706c43e554SSteven Rostedt (Red Hat) pr_info(" alloced bytes: %ld\n", total_alloc); 61716c43e554SSteven Rostedt (Red Hat) pr_info(" written bytes: %ld\n", total_written); 61726c43e554SSteven Rostedt (Red Hat) pr_info(" biggest event: %d\n", big_event_size); 61736c43e554SSteven Rostedt (Red Hat) pr_info(" smallest event: %d\n", small_event_size); 61746c43e554SSteven Rostedt (Red Hat) 61756c43e554SSteven Rostedt (Red Hat) if (RB_WARN_ON(buffer, total_dropped)) 61766c43e554SSteven Rostedt (Red Hat) break; 61776c43e554SSteven Rostedt (Red Hat) 61786c43e554SSteven Rostedt (Red Hat) ret = 0; 61796c43e554SSteven Rostedt (Red Hat) 61806c43e554SSteven Rostedt (Red Hat) while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { 61816c43e554SSteven Rostedt (Red Hat) total_lost += lost; 61826c43e554SSteven Rostedt (Red Hat) item = ring_buffer_event_data(event); 61836c43e554SSteven Rostedt (Red Hat) total_len += ring_buffer_event_length(event); 61846c43e554SSteven Rostedt (Red Hat) total_size += item->size + sizeof(struct rb_item); 61856c43e554SSteven Rostedt (Red Hat) if (memcmp(&item->str[0], rb_string, item->size) != 0) { 61866c43e554SSteven Rostedt (Red Hat) pr_info("FAILED!\n"); 61876c43e554SSteven Rostedt (Red Hat) pr_info("buffer had: %.*s\n", item->size, item->str); 61886c43e554SSteven Rostedt (Red Hat) pr_info("expected: %.*s\n", item->size, rb_string); 61896c43e554SSteven Rostedt (Red Hat) RB_WARN_ON(buffer, 1); 61906c43e554SSteven Rostedt (Red Hat) ret 
= -1; 61916c43e554SSteven Rostedt (Red Hat) break; 61926c43e554SSteven Rostedt (Red Hat) } 61936c43e554SSteven Rostedt (Red Hat) total_read++; 61946c43e554SSteven Rostedt (Red Hat) } 61956c43e554SSteven Rostedt (Red Hat) if (ret) 61966c43e554SSteven Rostedt (Red Hat) break; 61976c43e554SSteven Rostedt (Red Hat) 61986c43e554SSteven Rostedt (Red Hat) ret = -1; 61996c43e554SSteven Rostedt (Red Hat) 62006c43e554SSteven Rostedt (Red Hat) pr_info(" read events: %ld\n", total_read); 62016c43e554SSteven Rostedt (Red Hat) pr_info(" lost events: %ld\n", total_lost); 62026c43e554SSteven Rostedt (Red Hat) pr_info(" total events: %ld\n", total_lost + total_read); 62036c43e554SSteven Rostedt (Red Hat) pr_info(" recorded len bytes: %ld\n", total_len); 62046c43e554SSteven Rostedt (Red Hat) pr_info(" recorded size bytes: %ld\n", total_size); 6205ed888241SWan Jiabing if (total_lost) { 62066c43e554SSteven Rostedt (Red Hat) pr_info(" With dropped events, record len and size may not match\n" 62076c43e554SSteven Rostedt (Red Hat) " alloced and written from above\n"); 6208ed888241SWan Jiabing } else { 62096c43e554SSteven Rostedt (Red Hat) if (RB_WARN_ON(buffer, total_len != total_alloc || 62106c43e554SSteven Rostedt (Red Hat) total_size != total_written)) 62116c43e554SSteven Rostedt (Red Hat) break; 62126c43e554SSteven Rostedt (Red Hat) } 62136c43e554SSteven Rostedt (Red Hat) if (RB_WARN_ON(buffer, total_lost + total_read != total_events)) 62146c43e554SSteven Rostedt (Red Hat) break; 62156c43e554SSteven Rostedt (Red Hat) 62166c43e554SSteven Rostedt (Red Hat) ret = 0; 62176c43e554SSteven Rostedt (Red Hat) } 62186c43e554SSteven Rostedt (Red Hat) if (!ret) 62196c43e554SSteven Rostedt (Red Hat) pr_info("Ring buffer PASSED!\n"); 62206c43e554SSteven Rostedt (Red Hat) 62216c43e554SSteven Rostedt (Red Hat) ring_buffer_free(buffer); 62226c43e554SSteven Rostedt (Red Hat) return 0; 62236c43e554SSteven Rostedt (Red Hat) } 62246c43e554SSteven Rostedt (Red Hat) 62256c43e554SSteven Rostedt (Red Hat) late_initcall(test_ringbuffer); 62266c43e554SSteven Rostedt (Red Hat) #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */ 6227
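/*
 * To exercise the self test above, build with
 * CONFIG_RING_BUFFER_STARTUP_TEST=y. The per-CPU statistics and the
 * final "Ring buffer PASSED!" verdict are printed to the kernel log
 * late in boot (the test runs from a late_initcall).
 */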