Lines matching refs: ring_buffer_per_cpu

477 struct ring_buffer_per_cpu { struct
546 struct ring_buffer_per_cpu **buffers; argument
565 struct ring_buffer_per_cpu *cpu_buffer;
626 static void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event()
653 static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event()
700 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp()
761 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
795 struct ring_buffer_per_cpu *cpu_buffer = in rb_wake_up_waiters()
796 container_of(rbwork, struct ring_buffer_per_cpu, irq_work); in rb_wake_up_waiters()
821 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wake_waiters()
853 struct ring_buffer_per_cpu *cpu_buffer; in rb_watermark_hit()
952 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wait()
1011 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_wait()
1076 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
1077 struct ring_buffer_per_cpu *__b = \
1263 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_activate()
1293 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_deactivate()
1304 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set()
1327 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_update()
1336 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_head()
1345 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_normal()
1362 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_head_page()
1414 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_tail_page_update()
1475 static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_bpage()
1483 static bool rb_check_links(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_links()
1504 static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_check_pages()
1647 static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx) in rb_range_buffer()
1866 static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_meta_validate_events()
2016 struct ring_buffer_per_cpu *cpu_buffer = m->private; in rbm_start()
2041 struct ring_buffer_per_cpu *cpu_buffer = m->private; in rbm_show()
2088 static void rb_meta_buffer_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_meta_buffer_update()
2102 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in __rb_allocate_pages()
2201 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in rb_allocate_pages()
2226 static struct ring_buffer_per_cpu *
2229 struct ring_buffer_per_cpu *cpu_buffer; in rb_allocate_cpu_buffer()
2331 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_cpu_buffer()
2605 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages()
2715 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_insert_pages()
2796 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_pages()
2812 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, in update_pages_handler()
2813 struct ring_buffer_per_cpu, update_pages_work); in update_pages_handler()
2831 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_resize()
3055 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_event()
3127 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit_index()
3133 rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) in rb_event_index()
3144 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter()
3172 static void rb_update_meta_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_meta_head()
3189 static void rb_update_meta_reader(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_meta_reader()
3216 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_head_page()
3373 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_reset_tail()
3450 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
3456 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_move_tail()
3560 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_time_stamp()
3589 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_timestamp()
3607 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_timestamp()
3660 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_event()
3720 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, in rb_try_to_discard()
3785 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_start_commit()
3792 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_commit_to_write()
3852 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_end_commit()
3897 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit()
3904 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups()
4008 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_lock()
4035 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_unlock()
4059 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_start()
4079 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_end()
4100 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unlock_commit()
4266 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer()
4319 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer()
4327 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, in __rb_reserve_next()
4471 struct ring_buffer_per_cpu *cpu_buffer, in rb_reserve_next_event()
4568 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_lock_reserve()
4615 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, in rb_decrement_entry()
4670 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_discard_commit()
4717 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_write()
4775 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) in rb_num_of_entries()
4781 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) in rb_per_cpu_empty()
4899 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_disable_cpu()
4919 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_enable_cpu()
4937 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_oldest_event_ts()
4969 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_bytes_cpu()
4989 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries_cpu()
5008 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overrun_cpu()
5031 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_commit_overrun_cpu()
5053 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_dropped_events_cpu()
5074 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_events_cpu()
5093 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries()
5116 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overruns()
5132 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset()
5161 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_reset()
5181 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_empty()
5224 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_read_stamp()
5284 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_get_reader_page()
5455 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) in rb_advance_reader()
5481 struct ring_buffer_per_cpu *cpu_buffer; in rb_advance_iter()
5508 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_lost_events()
5514 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, in rb_buffer_peek()
5591 struct ring_buffer_per_cpu *cpu_buffer; in rb_iter_peek()
5678 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_lock()
5703 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) in rb_reader_unlock()
5723 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
5771 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek()
5801 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_consume()
5856 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_prepare()
5912 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_start()
5937 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish()
5957 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance()
6005 static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_meta_page()
6025 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_cpu()
6081 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in reset_disabled_cpu_buffer()
6107 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
6139 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset_online_cpus()
6180 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset()
6215 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty()
6245 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty_cpu()
6279 struct ring_buffer_per_cpu *cpu_buffer_a; in ring_buffer_swap_cpu()
6280 struct ring_buffer_per_cpu *cpu_buffer_b; in ring_buffer_swap_cpu()
6377 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_alloc_read_page()
6433 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_free_read_page()
6506 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
6741 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_subbuf_order_set()
6899 static int rb_alloc_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_alloc_meta_page()
6915 static void rb_free_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_meta_page()
6923 static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_setup_ids_meta_page()
6957 static struct ring_buffer_per_cpu *
6960 struct ring_buffer_per_cpu *cpu_buffer; in rb_get_mapped_buffer()
6977 static void rb_put_mapped_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_put_mapped_buffer()
6986 static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer, in __rb_inc_dec_mapped()
7032 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer, in __rb_map_vma()
7129 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer, in __rb_map_vma()
7139 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_map()
7207 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unmap()
7251 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_map_get_reader()
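
All of these references come from the kernel's ring buffer implementation (kernel/trace/ring_buffer.c). Two access idioms recur throughout the listing: looking a per-CPU buffer up through the pointer array embedded in the top-level buffer, as in buffer->buffers[smp_processor_id()] at line 700, and recovering the owning per-CPU buffer from one of its embedded members via container_of(), as rb_wake_up_waiters() does at lines 795-796 and update_pages_handler() does at lines 2812-2813. The following is a minimal userspace sketch of those two idioms only; the struct layouts are simplified stand-ins, not the kernel's definitions, and container_of() is defined locally because it is normally a kernel macro.

/*
 * Simplified userspace sketch, not kernel code. Illustrates:
 *   1. per-CPU lookup through an array of pointers
 *      (cf. buffer->buffers[smp_processor_id()], line 700), and
 *   2. recovering the enclosing per-CPU buffer from an embedded
 *      member via container_of() (cf. lines 795-796).
 */
#include <stdio.h>
#include <stddef.h>

/* Local stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct irq_work {			/* stand-in for the kernel's irq_work */
	void (*func)(struct irq_work *work);
};

struct ring_buffer_per_cpu {		/* simplified stand-in */
	int cpu;
	struct irq_work irq_work;	/* embedded member, as in the real struct */
};

struct trace_buffer {			/* simplified stand-in */
	int nr_cpus;
	struct ring_buffer_per_cpu **buffers;	/* cf. line 546 */
};

/* Idiom 2: the callback receives only the embedded member ... */
static void rb_wake_up_waiters(struct irq_work *work)
{
	/* ... and container_of() walks back to the enclosing struct. */
	struct ring_buffer_per_cpu *cpu_buffer =
		container_of(work, struct ring_buffer_per_cpu, irq_work);

	printf("woke waiters on cpu %d\n", cpu_buffer->cpu);
}

int main(void)
{
	struct ring_buffer_per_cpu bufs[2] = {
		{ .cpu = 0, .irq_work = { rb_wake_up_waiters } },
		{ .cpu = 1, .irq_work = { rb_wake_up_waiters } },
	};
	struct ring_buffer_per_cpu *ptrs[2] = { &bufs[0], &bufs[1] };
	struct trace_buffer buffer = { .nr_cpus = 2, .buffers = ptrs };

	/* Idiom 1: index the per-CPU array by CPU id. */
	struct ring_buffer_per_cpu *cpu_buffer = buffer.buffers[1];

	/* Invoke the callback the way a queued irq_work would. */
	cpu_buffer->irq_work.func(&cpu_buffer->irq_work);
	return 0;
}

The container_of() step matters because work-queue and irq_work callbacks are handed a pointer to the embedded member, not to the per-CPU buffer itself; embedding the member and walking back to the container avoids any side lookup table.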