/* linux-6.15: include/linux/ring_buffer.h (xref revision b6533482) */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H

#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/poll.h>

#include <uapi/linux/trace_mmap.h>

struct trace_buffer;
struct ring_buffer_iter;

/*
 * Don't refer to this struct directly, use the functions below.
 */
struct ring_buffer_event {
	u32		type_len:5, time_delta:27;

	u32		array[];
};

/**
 * enum ring_buffer_type - internal ring buffer types
 *
 * @RINGBUF_TYPE_PADDING:	Left over page padding or discarded event
 *				 If time_delta is 0:
 *				  array is ignored
 *				  size is variable depending on how much
 *				  padding is needed
 *				 If time_delta is non-zero:
 *				  array[0] holds the actual length
 *				  size = 4 + length (bytes)
 *
 * @RINGBUF_TYPE_TIME_EXTEND:	Extend the time delta
 *				 array[0] = time delta (bits 28 .. 59)
 *				 size = 8 bytes
 *
 * @RINGBUF_TYPE_TIME_STAMP:	Absolute timestamp
 *				 Same format as TIME_EXTEND except that the
 *				 value is an absolute timestamp, not a delta
 *				 event.time_delta contains bottom 27 bits
 *				 array[0] = top (28 .. 59) bits
 *				 size = 8 bytes
 *
 * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
 *				Data record
 *				 If type_len is zero:
 *				  array[0] holds the actual length
 *				  array[1..(length+3)/4] holds data
 *				  size = 4 + length (bytes)
 *				 else
 *				  length = type_len << 2
 *				  array[0..(length+3)/4-1] holds data
 *				  size = 4 + length (bytes)
 */
enum ring_buffer_type {
	RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
	RINGBUF_TYPE_PADDING,
	RINGBUF_TYPE_TIME_EXTEND,
	RINGBUF_TYPE_TIME_STAMP,
};
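
/*
 * Illustrative only (not part of the kernel API): a rough sketch of how a
 * data event's payload length falls out of the encoding documented above.
 * Real callers should use ring_buffer_event_length() and
 * ring_buffer_event_data() rather than decoding events by hand.
 *
 *	unsigned int length;
 *	void *data;
 *
 *	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) {
 *		// PADDING / TIME_EXTEND / TIME_STAMP: not a data record
 *	} else if (event->type_len) {
 *		length = event->type_len << 2;	// payload starts at array[0]
 *		data   = event->array;
 *	} else {
 *		length = event->array[0];	// length stored explicitly
 *		data   = &event->array[1];
 *	}
 */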

unsigned ring_buffer_event_length(struct ring_buffer_event *event);
void *ring_buffer_event_data(struct ring_buffer_event *event);
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event);

/*
 * ring_buffer_discard_commit will remove an event that has not
 *   been committed yet. If this is used, then ring_buffer_unlock_commit
 *   must not be called on the discarded event. This function
 *   will try to remove the event from the ring buffer completely
 *   if another event has not been written after it.
 *
 * Example use:
 *
 *  if (some_condition)
 *    ring_buffer_discard_commit(buffer, event);
 *  else
 *    ring_buffer_unlock_commit(buffer);
 */
void ring_buffer_discard_commit(struct trace_buffer *buffer,
				struct ring_buffer_event *event);
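
/*
 * A fuller sketch of the reserve/commit/discard pattern described above.
 * This is illustrative, not kernel code: "my_payload", "len" and
 * "should_drop()" are hypothetical stand-ins for the caller's own data
 * and policy.
 *
 *	struct ring_buffer_event *event;
 *	void *body;
 *
 *	event = ring_buffer_lock_reserve(buffer, len);
 *	if (!event)
 *		return;			// buffer full or recording disabled
 *	body = ring_buffer_event_data(event);
 *	memcpy(body, my_payload, len);
 *	if (should_drop(body))
 *		ring_buffer_discard_commit(buffer, event);
 *	else
 *		ring_buffer_unlock_commit(buffer);
 */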

/*
 * size is in bytes for each per CPU buffer.
 */
struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);

struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
					       int order, unsigned long start,
					       unsigned long range_size,
					       unsigned long scratch_size,
					       struct lock_class_key *key);

void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size);

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc(size, flags)			\
({							\
	static struct lock_class_key __key;		\
	__ring_buffer_alloc((size), (flags), &__key);	\
})
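
/*
 * Typical usage sketch (illustrative, not taken from kernel code): allocate
 * a buffer of roughly one megabyte per CPU in overwrite mode, and free it
 * when done. Each ring_buffer_alloc() call site gets its own static lockdep
 * key via the macro above.
 *
 *	struct trace_buffer *buf;
 *
 *	buf = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buf);
 */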

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 */
#define ring_buffer_alloc_range(size, flags, order, start, range_size, s_size)	\
({									\
	static struct lock_class_key __key;				\
	__ring_buffer_alloc_range((size), (flags), (order), (start),	\
				  (range_size), (s_size), &__key);	\
})

typedef bool (*ring_buffer_cond_fn)(void *data);
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
		     ring_buffer_cond_fn cond, void *data);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			  struct file *filp, poll_table *poll_table, int full);
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);

#define RING_BUFFER_ALL_CPUS -1
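
/*
 * Rough sketch of waiting for data (illustrative, not kernel code):
 * "my_stop_flag" is a hypothetical caller-owned flag, and the exact
 * semantics of the "full" watermark and the cond callback are defined in
 * kernel/trace/ring_buffer.c. The cond callback gives the waiter a way to
 * end the wait early.
 *
 *	static bool my_wait_cond(void *data)
 *	{
 *		return READ_ONCE(*(bool *)data);	// e.g. a stop flag
 *	}
 *
 *	bool my_stop_flag = false;
 *	...
 *	ring_buffer_wait(buffer, RING_BUFFER_ALL_CPUS, 0,
 *			 my_wait_cond, &my_stop_flag);
 */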

void ring_buffer_free(struct trace_buffer *buffer);

int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);

void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);

struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
						   unsigned long length);
int ring_buffer_unlock_commit(struct trace_buffer *buffer);
int ring_buffer_write(struct trace_buffer *buffer,
		      unsigned long length, void *data);

void ring_buffer_nest_start(struct trace_buffer *buffer);
void ring_buffer_nest_end(struct trace_buffer *buffer);

struct ring_buffer_event *
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events);
struct ring_buffer_event *
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events);
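
/*
 * Sketch of a consuming read on one CPU (illustrative, not kernel code).
 * Each ring_buffer_consume() call removes the returned event from the
 * buffer; "process()" is a hypothetical consumer.
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *		void *data = ring_buffer_event_data(event);
 *		unsigned len = ring_buffer_event_length(event);
 *
 *		process(data, len, ts, lost);
 *	}
 */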

struct ring_buffer_iter *
ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);

struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
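
/*
 * Sketch of a non-consuming read with an iterator (illustrative, not kernel
 * code): prepare and start an iterator on one CPU, walk the events without
 * removing them, then finish. "process()" is a hypothetical consumer.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	if (!iter)
 *		return -ENOMEM;
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *		process(ring_buffer_event_data(event), ts);
 *		ring_buffer_iter_advance(iter);
 *	}
 *	ring_buffer_read_finish(iter);
 */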

unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer);

void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
void ring_buffer_reset(struct trace_buffer *buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
			 struct trace_buffer *buffer_b, int cpu);
#else
static inline int
ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
		     struct trace_buffer *buffer_b, int cpu)
{
	return -ENODEV;
}
#endif

bool ring_buffer_empty(struct trace_buffer *buffer);
bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);

void ring_buffer_record_disable(struct trace_buffer *buffer);
void ring_buffer_record_enable(struct trace_buffer *buffer);
void ring_buffer_record_off(struct trace_buffer *buffer);
void ring_buffer_record_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct trace_buffer *buffer);
unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);

u64 ring_buffer_time_stamp(struct trace_buffer *buffer);
void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts);
void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void));
void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);

size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);

struct buffer_data_read_page;
struct buffer_data_read_page *
ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
				struct buffer_data_read_page *page);
int ring_buffer_read_page(struct trace_buffer *buffer,
			  struct buffer_data_read_page *data_page,
			  size_t len, int cpu, int full);
void *ring_buffer_read_page_data(struct buffer_data_read_page *page);
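
/*
 * Sketch of the "read page" interface (illustrative, not kernel code):
 * allocate a spare data page, swap a sub-buffer's contents into it with
 * ring_buffer_read_page(), then access the data via
 * ring_buffer_read_page_data(). "consume_page()" and "len" are hypothetical.
 *
 *	struct buffer_data_read_page *page;
 *	int ret;
 *
 *	page = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	ret = ring_buffer_read_page(buffer, page, len, cpu, 0);
 *	if (ret >= 0)
 *		consume_page(ring_buffer_read_page_data(page));
 *	ring_buffer_free_read_page(buffer, cpu, page);
 */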

struct trace_seq;

int ring_buffer_print_entry_header(struct trace_seq *s);
int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s);

int ring_buffer_subbuf_order_get(struct trace_buffer *buffer);
int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order);
int ring_buffer_subbuf_size_get(struct trace_buffer *buffer);

enum ring_buffer_flags {
	RB_FL_OVERWRITE		= 1 << 0,
};

#ifdef CONFIG_RING_BUFFER
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#else
#define trace_rb_cpu_prepare	NULL
#endif

int ring_buffer_map(struct trace_buffer *buffer, int cpu,
		    struct vm_area_struct *vma);
int ring_buffer_unmap(struct trace_buffer *buffer, int cpu);
int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu);
#endif /* _LINUX_RING_BUFFER_H */