Lines Matching refs:cpu_buffer
565 struct ring_buffer_per_cpu *cpu_buffer; member
626 static void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event() argument
629 struct buffer_page *page = cpu_buffer->commit_page; in verify_event()
630 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page); in verify_event()
653 static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event() argument
700 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp() local
707 return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp); in ring_buffer_event_time_stamp()
710 nest = local_read(&cpu_buffer->committing); in ring_buffer_event_time_stamp()
711 verify_event(cpu_buffer, event); in ring_buffer_event_time_stamp()
717 return cpu_buffer->event_stamp[nest]; in ring_buffer_event_time_stamp()
723 rb_time_read(&cpu_buffer->write_stamp, &ts); in ring_buffer_event_time_stamp()
761 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit() local
765 nr_pages = cpu_buffer->nr_pages; in full_hit()
795 struct ring_buffer_per_cpu *cpu_buffer = in rb_wake_up_waiters() local
799 raw_spin_lock(&cpu_buffer->reader_lock); in rb_wake_up_waiters()
804 cpu_buffer->shortest_full = 0; in rb_wake_up_waiters()
805 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_wake_up_waiters()
821 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wake_waiters() local
840 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
842 if (!cpu_buffer) in ring_buffer_wake_waiters()
844 rbwork = &cpu_buffer->irq_work; in ring_buffer_wake_waiters()
853 struct ring_buffer_per_cpu *cpu_buffer; in rb_watermark_hit() local
860 cpu_buffer = buffer->buffers[cpu]; in rb_watermark_hit()
869 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_watermark_hit()
870 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in rb_watermark_hit()
873 if (!ret && (!cpu_buffer->shortest_full || in rb_watermark_hit()
874 cpu_buffer->shortest_full > full)) { in rb_watermark_hit()
875 cpu_buffer->shortest_full = full; in rb_watermark_hit()
877 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_watermark_hit()
952 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wait() local
970 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
971 rbwork = &cpu_buffer->irq_work; in ring_buffer_wait()
1011 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_wait() local
1021 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1022 rbwork = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
1263 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_activate() argument
1267 head = cpu_buffer->head_page; in rb_head_page_activate()
1276 if (cpu_buffer->ring_meta) { in rb_head_page_activate()
1277 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_head_page_activate()
1293 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_deactivate() argument
1298 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
1300 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
1304 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set() argument
1327 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_update() argument
1332 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_update()
1336 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_head() argument
1341 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_head()
1345 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_normal() argument
1350 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_normal()
1362 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_head_page() argument
1369 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
1373 list = cpu_buffer->pages; in rb_set_head_page()
1374 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
1377 page = head = cpu_buffer->head_page; in rb_set_head_page()
1387 cpu_buffer->head_page = page; in rb_set_head_page()
1394 RB_WARN_ON(cpu_buffer, 1); in rb_set_head_page()
1414 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_tail_page_update() argument
1444 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { in rb_tail_page_update()
1470 if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page)) in rb_tail_page_update()
1471 local_inc(&cpu_buffer->pages_touched); in rb_tail_page_update()
1475 static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_bpage() argument
1480 RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK); in rb_check_bpage()
1483 static bool rb_check_links(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_links() argument
1486 if (RB_WARN_ON(cpu_buffer, in rb_check_links()
1490 if (RB_WARN_ON(cpu_buffer, in rb_check_links()
1504 static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_check_pages() argument
1534 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1535 head = rb_list_head(cpu_buffer->pages); in rb_check_pages()
1536 if (!rb_check_links(cpu_buffer, head)) in rb_check_pages()
1538 buffer_cnt = cpu_buffer->cnt; in rb_check_pages()
1540 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1543 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1545 if (buffer_cnt != cpu_buffer->cnt) { in rb_check_pages()
1547 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1556 if (!rb_check_links(cpu_buffer, tmp)) in rb_check_pages()
1559 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1563 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_check_pages()
1647 static void *rb_range_buffer(struct ring_buffer_per_cpu *cpu_buffer, int idx) in rb_range_buffer() argument
1653 meta = rb_range_meta(cpu_buffer->buffer, 0, cpu_buffer->cpu); in rb_range_buffer()
1668 if (ptr + subbuf_size > cpu_buffer->buffer->range_addr_end) in rb_range_buffer()
1866 static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_meta_validate_events() argument
1868 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_meta_validate_events()
1879 ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu); in rb_meta_validate_events()
1885 entry_bytes += local_read(&cpu_buffer->reader_page->page->commit); in rb_meta_validate_events()
1886 local_set(&cpu_buffer->reader_page->entries, ret); in rb_meta_validate_events()
1888 head_page = cpu_buffer->head_page; in rb_meta_validate_events()
1891 if (meta->commit_buffer == (unsigned long)cpu_buffer->reader_page->page) { in rb_meta_validate_events()
1892 cpu_buffer->commit_page = cpu_buffer->reader_page; in rb_meta_validate_events()
1901 if (head_page == cpu_buffer->reader_page) in rb_meta_validate_events()
1904 ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu); in rb_meta_validate_events()
1907 cpu_buffer->cpu); in rb_meta_validate_events()
1913 local_inc(&cpu_buffer->pages_touched); in rb_meta_validate_events()
1917 local_set(&cpu_buffer->head_page->entries, ret); in rb_meta_validate_events()
1919 if (head_page == cpu_buffer->commit_page) in rb_meta_validate_events()
1923 if (head_page != cpu_buffer->commit_page) { in rb_meta_validate_events()
1925 cpu_buffer->cpu); in rb_meta_validate_events()
1929 local_set(&cpu_buffer->entries, entries); in rb_meta_validate_events()
1930 local_set(&cpu_buffer->entries_bytes, entry_bytes); in rb_meta_validate_events()
1932 pr_info("Ring buffer meta [%d] is from previous boot!\n", cpu_buffer->cpu); in rb_meta_validate_events()
1941 local_set(&cpu_buffer->reader_page->entries, 0); in rb_meta_validate_events()
1942 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_meta_validate_events()
2016 struct ring_buffer_per_cpu *cpu_buffer = m->private; in rbm_start() local
2017 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rbm_start()
2041 struct ring_buffer_per_cpu *cpu_buffer = m->private; in rbm_show() local
2042 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rbm_show()
2088 static void rb_meta_buffer_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_meta_buffer_update() argument
2091 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_meta_buffer_update()
2094 cpu_buffer->head_page = bpage; in rb_meta_buffer_update()
2097 cpu_buffer->commit_page = bpage; in rb_meta_buffer_update()
2098 cpu_buffer->tail_page = bpage; in rb_meta_buffer_update()
2102 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in __rb_allocate_pages() argument
2105 struct trace_buffer *buffer = cpu_buffer->buffer; in __rb_allocate_pages()
2143 meta = rb_range_meta(buffer, nr_pages, cpu_buffer->cpu); in __rb_allocate_pages()
2149 mflags, cpu_to_node(cpu_buffer->cpu)); in __rb_allocate_pages()
2153 rb_check_bpage(cpu_buffer, bpage); in __rb_allocate_pages()
2163 bpage->page = rb_range_buffer(cpu_buffer, i + 1); in __rb_allocate_pages()
2168 rb_meta_buffer_update(cpu_buffer, bpage); in __rb_allocate_pages()
2172 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), in __rb_allocate_pages()
2174 cpu_buffer->buffer->subbuf_order); in __rb_allocate_pages()
2180 bpage->order = cpu_buffer->buffer->subbuf_order; in __rb_allocate_pages()
2201 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in rb_allocate_pages() argument
2208 if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages)) in rb_allocate_pages()
2216 cpu_buffer->pages = pages.next; in rb_allocate_pages()
2219 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
2221 rb_check_pages(cpu_buffer); in rb_allocate_pages()
2229 struct ring_buffer_per_cpu *cpu_buffer; in rb_allocate_cpu_buffer() local
2235 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), in rb_allocate_cpu_buffer()
2237 if (!cpu_buffer) in rb_allocate_cpu_buffer()
2240 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
2241 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
2242 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
2243 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
2244 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
2245 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
2246 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
2247 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
2248 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
2249 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
2250 mutex_init(&cpu_buffer->mapping_lock); in rb_allocate_cpu_buffer()
2257 rb_check_bpage(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
2259 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
2266 cpu_buffer->mapped = 1; in rb_allocate_cpu_buffer()
2267 cpu_buffer->ring_meta = rb_range_meta(buffer, nr_pages, cpu); in rb_allocate_cpu_buffer()
2268 bpage->page = rb_range_buffer(cpu_buffer, 0); in rb_allocate_cpu_buffer()
2271 if (cpu_buffer->ring_meta->head_buffer) in rb_allocate_cpu_buffer()
2272 rb_meta_buffer_update(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
2277 cpu_buffer->buffer->subbuf_order); in rb_allocate_cpu_buffer()
2284 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
2285 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
2287 ret = rb_allocate_pages(cpu_buffer, nr_pages); in rb_allocate_cpu_buffer()
2291 rb_meta_validate_events(cpu_buffer); in rb_allocate_cpu_buffer()
2294 meta = cpu_buffer->ring_meta; in rb_allocate_cpu_buffer()
2296 !cpu_buffer->head_page || !cpu_buffer->commit_page || !cpu_buffer->tail_page) { in rb_allocate_cpu_buffer()
2298 (cpu_buffer->head_page || cpu_buffer->commit_page || cpu_buffer->tail_page)) { in rb_allocate_cpu_buffer()
2300 if (!cpu_buffer->head_page) in rb_allocate_cpu_buffer()
2302 if (!cpu_buffer->commit_page) in rb_allocate_cpu_buffer()
2304 if (!cpu_buffer->tail_page) in rb_allocate_cpu_buffer()
2308 cpu_buffer->head_page in rb_allocate_cpu_buffer()
2309 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
2310 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
2312 rb_head_page_activate(cpu_buffer); in rb_allocate_cpu_buffer()
2314 if (cpu_buffer->ring_meta) in rb_allocate_cpu_buffer()
2318 rb_head_page_activate(cpu_buffer); in rb_allocate_cpu_buffer()
2321 return cpu_buffer; in rb_allocate_cpu_buffer()
2324 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
2327 kfree(cpu_buffer); in rb_allocate_cpu_buffer()
2331 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_cpu_buffer() argument
2333 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
2336 irq_work_sync(&cpu_buffer->irq_work.work); in rb_free_cpu_buffer()
2338 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
2341 rb_head_page_deactivate(cpu_buffer); in rb_free_cpu_buffer()
2351 free_page((unsigned long)cpu_buffer->free_page); in rb_free_cpu_buffer()
2353 kfree(cpu_buffer); in rb_free_cpu_buffer()
2605 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages() argument
2616 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
2617 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
2627 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
2633 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
2646 cpu_buffer->pages_removed += nr_removed; in rb_remove_pages()
2661 cpu_buffer->pages = next_page; in rb_remove_pages()
2662 cpu_buffer->cnt++; in rb_remove_pages()
2666 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
2670 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
2671 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
2673 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
2695 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
2696 local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes); in rb_remove_pages()
2697 local_inc(&cpu_buffer->pages_lost); in rb_remove_pages()
2709 RB_WARN_ON(cpu_buffer, nr_removed); in rb_remove_pages()
2715 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_insert_pages() argument
2717 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
2723 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
2744 struct buffer_page *hpage = rb_set_head_page(cpu_buffer); in rb_insert_pages()
2769 cpu_buffer->cnt++; in rb_insert_pages()
2781 RB_WARN_ON(cpu_buffer, !success); in rb_insert_pages()
2782 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in rb_insert_pages()
2787 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
2796 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_pages() argument
2800 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
2801 success = rb_insert_pages(cpu_buffer); in rb_update_pages()
2803 success = rb_remove_pages(cpu_buffer, in rb_update_pages()
2804 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
2807 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
2812 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, in update_pages_handler() local
2814 rb_update_pages(cpu_buffer); in update_pages_handler()
2815 complete(&cpu_buffer->update_done); in update_pages_handler()
2831 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_resize() local
2863 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2864 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2872 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2874 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2875 cpu_buffer->nr_pages; in ring_buffer_resize()
2879 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
2885 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2886 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2887 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2903 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2904 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2909 rb_update_pages(cpu_buffer); in ring_buffer_resize()
2910 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2917 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2919 update_pages_handler(&cpu_buffer->update_pages_work); in ring_buffer_resize()
2927 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2928 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2932 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2933 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2938 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
2940 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
2948 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2953 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2954 cpu_buffer->nr_pages; in ring_buffer_resize()
2956 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2957 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
2958 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2959 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2968 rb_update_pages(cpu_buffer); in ring_buffer_resize()
2973 rb_update_pages(cpu_buffer); in ring_buffer_resize()
2978 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2979 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2983 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
3005 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3006 rb_check_pages(cpu_buffer); in ring_buffer_resize()
3019 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3020 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
3022 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
3025 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
3055 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_event() argument
3057 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
3058 cpu_buffer->reader_page->read); in rb_reader_event()
3127 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit_index() argument
3129 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
3133 rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event) in rb_event_index() argument
3137 addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1; in rb_event_index()
3144 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter() local
3152 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
3153 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
3172 static void rb_update_meta_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_meta_head() argument
3175 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_update_meta_head()
3189 static void rb_update_meta_reader(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_meta_reader() argument
3192 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_update_meta_reader()
3193 void *old_reader = cpu_buffer->reader_page->page; in rb_update_meta_reader()
3198 cpu_buffer->reader_page->id = id; in rb_update_meta_reader()
3205 rb_update_meta_head(cpu_buffer, reader); in rb_update_meta_reader()
3216 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_head_page() argument
3232 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, in rb_handle_head_page()
3253 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
3254 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes); in rb_handle_head_page()
3255 local_inc(&cpu_buffer->pages_lost); in rb_handle_head_page()
3257 if (cpu_buffer->ring_meta) in rb_handle_head_page()
3258 rb_update_meta_head(cpu_buffer, next_page); in rb_handle_head_page()
3288 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ in rb_handle_head_page()
3309 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, in rb_handle_head_page()
3326 RB_WARN_ON(cpu_buffer, 1); in rb_handle_head_page()
3343 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); in rb_handle_head_page()
3350 rb_head_page_set_normal(cpu_buffer, new_head, in rb_handle_head_page()
3361 ret = rb_head_page_set_normal(cpu_buffer, next_page, in rb_handle_head_page()
3364 if (RB_WARN_ON(cpu_buffer, in rb_handle_head_page()
3373 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_reset_tail() argument
3376 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); in rb_reset_tail()
3440 local_add(bsize - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
3450 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
3456 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_move_tail() argument
3460 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
3461 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
3475 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
3499 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
3505 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
3509 ret = rb_handle_head_page(cpu_buffer, in rb_move_tail()
3527 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
3528 cpu_buffer->tail_page) && in rb_move_tail()
3529 (cpu_buffer->commit_page == in rb_move_tail()
3530 cpu_buffer->reader_page))) { in rb_move_tail()
3531 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
3537 rb_tail_page_update(cpu_buffer, tail_page, next_page); in rb_move_tail()
3541 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
3544 rb_end_commit(cpu_buffer); in rb_move_tail()
3546 local_inc(&cpu_buffer->committing); in rb_move_tail()
3553 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
3560 rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_time_stamp() argument
3569 if (abs || rb_event_index(cpu_buffer, event)) { in rb_add_time_stamp()
3589 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_timestamp() argument
3599 (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}), in rb_check_timestamp()
3607 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_timestamp() argument
3639 rb_check_timestamp(cpu_buffer, info); in rb_add_timestamp()
3643 *event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs); in rb_add_timestamp()
3660 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_event() argument
3666 unsigned int nest = local_read(&cpu_buffer->committing) - 1; in rb_update_event()
3669 cpu_buffer->event_stamp[nest] = info->ts; in rb_update_event()
3676 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length); in rb_update_event()
3720 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, in rb_try_to_discard() argument
3727 new_index = rb_event_index(cpu_buffer, event); in rb_try_to_discard()
3730 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); in rb_try_to_discard()
3732 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
3754 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_try_to_discard()
3776 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
3785 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_start_commit() argument
3787 local_inc(&cpu_buffer->committing); in rb_start_commit()
3788 local_inc(&cpu_buffer->commits); in rb_start_commit()
3792 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_commit_to_write() argument
3805 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
3807 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { in rb_set_commit_to_write()
3808 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
3810 if (RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
3811 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3817 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3818 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3819 rb_inc_page(&cpu_buffer->commit_page); in rb_set_commit_to_write()
3820 if (cpu_buffer->ring_meta) { in rb_set_commit_to_write()
3821 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_set_commit_to_write()
3822 meta->commit_buffer = (unsigned long)cpu_buffer->commit_page->page; in rb_set_commit_to_write()
3827 while (rb_commit_index(cpu_buffer) != in rb_set_commit_to_write()
3828 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
3832 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3833 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3834 RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
3835 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
3848 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3852 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_end_commit() argument
3856 if (RB_WARN_ON(cpu_buffer, in rb_end_commit()
3857 !local_read(&cpu_buffer->committing))) in rb_end_commit()
3861 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
3864 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
3865 rb_set_commit_to_write(cpu_buffer); in rb_end_commit()
3867 local_dec(&cpu_buffer->committing); in rb_end_commit()
3877 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
3878 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
3879 local_inc(&cpu_buffer->committing); in rb_end_commit()
3897 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit() argument
3899 local_inc(&cpu_buffer->entries); in rb_commit()
3900 rb_end_commit(cpu_buffer); in rb_commit()
3904 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
3912 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
3913 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
3915 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3918 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) in rb_wakeups()
3921 if (cpu_buffer->reader_page == cpu_buffer->commit_page) in rb_wakeups()
3924 if (!cpu_buffer->irq_work.full_waiters_pending) in rb_wakeups()
3927 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); in rb_wakeups()
3929 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
3932 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
3933 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
3935 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
4008 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_lock() argument
4010 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
4015 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { in trace_recursive_lock()
4022 if (val & (1 << (bit + cpu_buffer->nest))) { in trace_recursive_lock()
4028 val |= (1 << (bit + cpu_buffer->nest)); in trace_recursive_lock()
4029 cpu_buffer->current_context = val; in trace_recursive_lock()
4035 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_unlock() argument
4037 cpu_buffer->current_context &= in trace_recursive_unlock()
4038 cpu_buffer->current_context - (1 << cpu_buffer->nest); in trace_recursive_unlock()
4059 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_start() local
4065 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
4067 cpu_buffer->nest += NESTED_BITS; in ring_buffer_nest_start()
4079 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_end() local
4084 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
4086 cpu_buffer->nest -= NESTED_BITS; in ring_buffer_nest_end()
4100 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unlock_commit() local
4103 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
4105 rb_commit(cpu_buffer); in ring_buffer_unlock_commit()
4107 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
4109 trace_recursive_unlock(cpu_buffer); in ring_buffer_unlock_commit()
4252 atomic_inc(&cpu_buffer->record_disabled); \
4266 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer() argument
4299 ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta); in check_buffer()
4303 cpu_buffer->cpu, ts, delta); in check_buffer()
4310 cpu_buffer->cpu, in check_buffer()
4319 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer() argument
4327 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, in __rb_reserve_next() argument
4335 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); in __rb_reserve_next()
4339 rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
4340 rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
4342 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
4367 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); in __rb_reserve_next()
4377 if (unlikely(write > cpu_buffer->buffer->subbuf_size)) { in __rb_reserve_next()
4378 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE); in __rb_reserve_next()
4379 return rb_move_tail(cpu_buffer, tail, info); in __rb_reserve_next()
4384 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); in __rb_reserve_next()
4398 check_buffer(cpu_buffer, info, tail); in __rb_reserve_next()
4404 rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
4412 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
4413 rb_time_set(&cpu_buffer->before_stamp, ts); in __rb_reserve_next()
4416 /*E*/ rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
4452 rb_update_event(cpu_buffer, event, info); in __rb_reserve_next()
4464 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
4471 struct ring_buffer_per_cpu *cpu_buffer, in rb_reserve_next_event() argument
4490 rb_start_commit(cpu_buffer); in rb_reserve_next_event()
4501 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
4502 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
4503 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
4510 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
4513 if (info.length > cpu_buffer->buffer->max_data_size) in rb_reserve_next_event()
4532 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) in rb_reserve_next_event()
4535 event = __rb_reserve_next(cpu_buffer, &info); in rb_reserve_next_event()
4546 rb_end_commit(cpu_buffer); in rb_reserve_next_event()
4568 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_lock_reserve() local
4583 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
4585 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
4591 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_lock_reserve()
4594 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
4601 trace_recursive_unlock(cpu_buffer); in ring_buffer_lock_reserve()
4615 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, in rb_decrement_entry() argument
4619 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
4622 addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1); in rb_decrement_entry()
4645 RB_WARN_ON(cpu_buffer, 1); in rb_decrement_entry()
4670 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_discard_commit() local
4677 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
4684 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
4686 rb_decrement_entry(cpu_buffer, event); in ring_buffer_discard_commit()
4687 if (rb_try_to_discard(cpu_buffer, event)) in ring_buffer_discard_commit()
4691 rb_end_commit(cpu_buffer); in ring_buffer_discard_commit()
4693 trace_recursive_unlock(cpu_buffer); in ring_buffer_discard_commit()
4717 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_write() local
4733 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
4735 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
4741 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_write()
4744 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
4752 rb_commit(cpu_buffer); in ring_buffer_write()
4754 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
4759 trace_recursive_unlock(cpu_buffer); in ring_buffer_write()
4775 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) in rb_num_of_entries() argument
4777 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
4778 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
4781 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) in rb_per_cpu_empty() argument
4783 return !rb_num_of_entries(cpu_buffer); in rb_per_cpu_empty()
4899 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_disable_cpu() local
4904 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
4905 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
4919 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_enable_cpu() local
4924 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
4925 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
4937 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_oldest_event_ts() local
4944 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
4945 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4950 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
4951 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
4953 bpage = rb_set_head_page(cpu_buffer); in ring_buffer_oldest_event_ts()
4956 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4969 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_bytes_cpu() local
4975 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
4976 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
4989 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries_cpu() local
4994 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4996 return rb_num_of_entries(cpu_buffer); in ring_buffer_entries_cpu()
5008 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overrun_cpu() local
5014 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
5015 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
5031 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_commit_overrun_cpu() local
5037 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
5038 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
5053 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_dropped_events_cpu() local
5059 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
5060 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
5074 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_events_cpu() local
5079 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
5080 return cpu_buffer->read; in ring_buffer_read_events_cpu()
5093 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries() local
5099 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
5100 entries += rb_num_of_entries(cpu_buffer); in ring_buffer_entries()
5116 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overruns() local
5122 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
5123 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
5132 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset() local
5135 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
5136 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
5140 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
5141 iter->cache_pages_removed = cpu_buffer->pages_removed; in rb_iter_reset()
5144 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
5145 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; in rb_iter_reset()
5161 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_reset() local
5167 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
5169 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
5171 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
5181 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_empty() local
5190 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
5191 reader = cpu_buffer->reader_page; in ring_buffer_iter_empty()
5192 head_page = cpu_buffer->head_page; in ring_buffer_iter_empty()
5193 commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
5207 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
5219 iter->head == rb_page_size(cpu_buffer->reader_page))); in ring_buffer_iter_empty()
5224 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_read_stamp() argument
5235 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
5240 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); in rb_update_read_stamp()
5241 cpu_buffer->read_stamp = delta; in rb_update_read_stamp()
5245 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
5249 RB_WARN_ON(cpu_buffer, 1); in rb_update_read_stamp()
5279 RB_WARN_ON(iter->cpu_buffer, 1); in rb_update_iter_read_stamp()
5284 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_get_reader_page() argument
5287 unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size); in rb_get_reader_page()
5294 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
5303 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { in rb_get_reader_page()
5308 reader = cpu_buffer->reader_page; in rb_get_reader_page()
5311 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
5315 if (RB_WARN_ON(cpu_buffer, in rb_get_reader_page()
5316 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
5321 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
5325 if (rb_num_of_entries(cpu_buffer) == 0) in rb_get_reader_page()
5331 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
5332 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
5333 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_get_reader_page()
5334 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
5340 reader = rb_set_head_page(cpu_buffer); in rb_get_reader_page()
5343 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
5344 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
5351 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
5354 rb_set_list_to_head(&cpu_buffer->reader_page->list); in rb_get_reader_page()
5366 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
5379 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
5387 if (cpu_buffer->ring_meta) in rb_get_reader_page()
5388 rb_update_meta_reader(cpu_buffer, reader); in rb_get_reader_page()
5395 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
5396 rb_inc_page(&cpu_buffer->head_page); in rb_get_reader_page()
5398 cpu_buffer->cnt++; in rb_get_reader_page()
5399 local_inc(&cpu_buffer->pages_read); in rb_get_reader_page()
5402 cpu_buffer->reader_page = reader; in rb_get_reader_page()
5403 cpu_buffer->reader_page->read = 0; in rb_get_reader_page()
5405 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
5406 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
5407 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
5415 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
5417 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
5437 if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT)) in rb_get_reader_page()
5455 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) in rb_advance_reader() argument
5461 reader = rb_get_reader_page(cpu_buffer); in rb_advance_reader()
5464 if (RB_WARN_ON(cpu_buffer, !reader)) in rb_advance_reader()
5467 event = rb_reader_event(cpu_buffer); in rb_advance_reader()
5470 cpu_buffer->read++; in rb_advance_reader()
5472 rb_update_read_stamp(cpu_buffer, event); in rb_advance_reader()
5475 cpu_buffer->reader_page->read += length; in rb_advance_reader()
5476 cpu_buffer->read_bytes += length; in rb_advance_reader()
5481 struct ring_buffer_per_cpu *cpu_buffer; in rb_advance_iter() local
5483 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
5499 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
5508 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_lost_events() argument
5510 return cpu_buffer->lost_events; in rb_lost_events()
5514 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, in rb_buffer_peek() argument
5530 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) in rb_buffer_peek()
5533 reader = rb_get_reader_page(cpu_buffer); in rb_buffer_peek()
5537 event = rb_reader_event(cpu_buffer); in rb_buffer_peek()
5542 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
5555 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
5562 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
5563 cpu_buffer->cpu, ts); in rb_buffer_peek()
5566 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
5571 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
5572 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
5573 cpu_buffer->cpu, ts); in rb_buffer_peek()
5576 *lost_events = rb_lost_events(cpu_buffer); in rb_buffer_peek()
5580 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
5591 struct ring_buffer_per_cpu *cpu_buffer; in rb_iter_peek() local
5598 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
5599 buffer = cpu_buffer->buffer; in rb_iter_peek()
5606 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
5607 iter->cache_reader_page != cpu_buffer->reader_page || in rb_iter_peek()
5608 iter->cache_pages_removed != cpu_buffer->pages_removed)) in rb_iter_peek()
5625 if (rb_per_cpu_empty(cpu_buffer)) in rb_iter_peek()
5655 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
5656 cpu_buffer->cpu, ts); in rb_iter_peek()
5666 cpu_buffer->cpu, ts); in rb_iter_peek()
5671 RB_WARN_ON(cpu_buffer, 1); in rb_iter_peek()
5678 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_lock() argument
5681 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
5694 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
5698 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
5703 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) in rb_reader_unlock() argument
5706 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
5723 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek() local
5733 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_peek()
5734 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_peek()
5736 rb_advance_reader(cpu_buffer); in ring_buffer_peek()
5737 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_peek()
5771 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek() local
5776 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5778 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5801 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_consume() local
5813 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
5815 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_consume()
5817 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_consume()
5819 cpu_buffer->lost_events = 0; in ring_buffer_consume()
5820 rb_advance_reader(cpu_buffer); in ring_buffer_consume()
5823 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_consume()
5856 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_prepare() local
5874 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
5876 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_prepare()
5878 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_read_prepare()
5912 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_start() local
5918 cpu_buffer = iter->cpu_buffer; in ring_buffer_read_start()
5920 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5921 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
5923 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
5924 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5937 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish() local
5940 rb_check_pages(cpu_buffer); in ring_buffer_read_finish()
5942 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_read_finish()
5957 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance() local
5960 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
5964 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
6005 static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_meta_page() argument
6007 struct trace_buffer_meta *meta = cpu_buffer->meta_page; in rb_update_meta_page()
6012 meta->reader.read = cpu_buffer->reader_page->read; in rb_update_meta_page()
6013 meta->reader.id = cpu_buffer->reader_page->id; in rb_update_meta_page()
6014 meta->reader.lost_events = cpu_buffer->lost_events; in rb_update_meta_page()
6016 meta->entries = local_read(&cpu_buffer->entries); in rb_update_meta_page()
6017 meta->overrun = local_read(&cpu_buffer->overrun); in rb_update_meta_page()
6018 meta->read = cpu_buffer->read; in rb_update_meta_page()
6021 flush_kernel_vmap_range(cpu_buffer->meta_page, PAGE_SIZE); in rb_update_meta_page()
6025 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_cpu() argument
6029 rb_head_page_deactivate(cpu_buffer); in rb_reset_cpu()
6031 cpu_buffer->head_page in rb_reset_cpu()
6032 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
6033 rb_clear_buffer_page(cpu_buffer->head_page); in rb_reset_cpu()
6034 list_for_each_entry(page, cpu_buffer->pages, list) { in rb_reset_cpu()
6038 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
6039 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
6041 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
6042 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
6043 rb_clear_buffer_page(cpu_buffer->reader_page); in rb_reset_cpu()
6045 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
6046 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
6047 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
6048 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
6049 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
6050 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
6051 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
6052 local_set(&cpu_buffer->pages_touched, 0); in rb_reset_cpu()
6053 local_set(&cpu_buffer->pages_lost, 0); in rb_reset_cpu()
6054 local_set(&cpu_buffer->pages_read, 0); in rb_reset_cpu()
6055 cpu_buffer->last_pages_touch = 0; in rb_reset_cpu()
6056 cpu_buffer->shortest_full = 0; in rb_reset_cpu()
6057 cpu_buffer->read = 0; in rb_reset_cpu()
6058 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
6060 rb_time_set(&cpu_buffer->write_stamp, 0); in rb_reset_cpu()
6061 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_reset_cpu()
6063 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); in rb_reset_cpu()
6065 cpu_buffer->lost_events = 0; in rb_reset_cpu()
6066 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
6068 rb_head_page_activate(cpu_buffer); in rb_reset_cpu()
6069 cpu_buffer->pages_removed = 0; in rb_reset_cpu()
6071 if (cpu_buffer->mapped) { in rb_reset_cpu()
6072 rb_update_meta_page(cpu_buffer); in rb_reset_cpu()
6073 if (cpu_buffer->ring_meta) { in rb_reset_cpu()
6074 struct ring_buffer_cpu_meta *meta = cpu_buffer->ring_meta; in rb_reset_cpu()
6081 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in reset_disabled_cpu_buffer() argument
6085 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
6087 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in reset_disabled_cpu_buffer()
6090 arch_spin_lock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
6092 rb_reset_cpu(cpu_buffer); in reset_disabled_cpu_buffer()
6094 arch_spin_unlock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
6097 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
6107 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu() local
6115 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
6116 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
6121 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset_cpu()
6123 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
6124 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
6139 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset_online_cpus() local
6146 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
6148 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
6149 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
6156 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
6162 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT)) in ring_buffer_reset_online_cpus()
6165 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset_online_cpus()
6167 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
6168 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
6180 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset() local
6187 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
6189 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset()
6190 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset()
6197 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
6199 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset()
6201 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset()
6202 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset()
6215 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty() local
6223 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
6225 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty()
6226 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty()
6227 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty()
6245 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty_cpu() local
6253 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
6255 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty_cpu()
6256 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty_cpu()
6257 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty_cpu()
6377 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_alloc_read_page() local
6390 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
6392 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
6394 if (cpu_buffer->free_page) { in ring_buffer_alloc_read_page()
6395 bpage->data = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
6396 cpu_buffer->free_page = NULL; in ring_buffer_alloc_read_page()
6399 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
6407 cpu_buffer->buffer->subbuf_order); in ring_buffer_alloc_read_page()
6433 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_free_read_page() local
6441 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
6452 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_free_read_page()
6454 if (!cpu_buffer->free_page) { in ring_buffer_free_read_page()
6455 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
6459 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_free_read_page()
6506 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page() local
6538 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
6540 reader = rb_get_reader_page(cpu_buffer); in ring_buffer_read_page()
6544 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
6550 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
6560 cpu_buffer->reader_page == cpu_buffer->commit_page || in ring_buffer_read_page()
6561 cpu_buffer->mapped) { in ring_buffer_read_page()
6562 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
6575 cpu_buffer->reader_page == cpu_buffer->commit_page)) in ring_buffer_read_page()
6588 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
6603 rb_advance_reader(cpu_buffer); in ring_buffer_read_page()
6610 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
6623 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
6624 cpu_buffer->read_bytes += rb_page_size(reader); in ring_buffer_read_page()
6645 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
6671 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
6741 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_subbuf_order_set() local
6782 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6784 if (cpu_buffer->mapped) { in ring_buffer_subbuf_order_set()
6797 cpu_buffer->nr_pages_to_update = nr_pages; in ring_buffer_subbuf_order_set()
6803 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_subbuf_order_set()
6804 if (__rb_allocate_pages(cpu_buffer, nr_pages, in ring_buffer_subbuf_order_set()
6805 &cpu_buffer->new_pages)) { in ring_buffer_subbuf_order_set()
6820 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6822 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_subbuf_order_set()
6825 rb_head_page_deactivate(cpu_buffer); in ring_buffer_subbuf_order_set()
6834 list_add(&old_pages, cpu_buffer->pages); in ring_buffer_subbuf_order_set()
6835 list_add(&cpu_buffer->reader_page->list, &old_pages); in ring_buffer_subbuf_order_set()
6838 cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next, in ring_buffer_subbuf_order_set()
6840 list_del_init(&cpu_buffer->reader_page->list); in ring_buffer_subbuf_order_set()
6843 cpu_buffer->pages = cpu_buffer->new_pages.next; in ring_buffer_subbuf_order_set()
6844 list_del_init(&cpu_buffer->new_pages); in ring_buffer_subbuf_order_set()
6845 cpu_buffer->cnt++; in ring_buffer_subbuf_order_set()
6847 cpu_buffer->head_page in ring_buffer_subbuf_order_set()
6848 = list_entry(cpu_buffer->pages, struct buffer_page, list); in ring_buffer_subbuf_order_set()
6849 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in ring_buffer_subbuf_order_set()
6851 cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update; in ring_buffer_subbuf_order_set()
6852 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_subbuf_order_set()
6854 old_free_data_page = cpu_buffer->free_page; in ring_buffer_subbuf_order_set()
6855 cpu_buffer->free_page = NULL; in ring_buffer_subbuf_order_set()
6857 rb_head_page_activate(cpu_buffer); in ring_buffer_subbuf_order_set()
6859 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_subbuf_order_set()
6868 rb_check_pages(cpu_buffer); in ring_buffer_subbuf_order_set()
6884 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_subbuf_order_set()
6886 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_subbuf_order_set()
6889 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) { in ring_buffer_subbuf_order_set()
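
ring_buffer_subbuf_order_set() follows a prepare-then-commit shape: a complete replacement page list is allocated for every CPU first (the __rb_allocate_pages() calls above), and only when all of that succeeds does it take the reader lock, splice the new list in, reset head/tail/commit and bump cpu_buffer->cnt; on failure the partially built new_pages lists are walked and freed. The same error-handling shape reduced to arrays instead of the kernel's list_head ring (illustrative only, and without the locking that the real commit step runs under):

#include <stdlib.h>

/* Resize an array of per-slot buffers to a new element size.  Build
 * the whole replacement first; only commit (swap pointers) if every
 * allocation succeeded, otherwise unwind and leave the old buffers
 * untouched. */
int resize_all(char **bufs, int nr, size_t new_size)
{
        char **fresh = calloc(nr, sizeof(*fresh));
        int i;

        if (!fresh)
                return -1;

        for (i = 0; i < nr; i++) {
                fresh[i] = malloc(new_size);
                if (!fresh[i])
                        goto unwind;    /* prepare failed: old state intact */
        }

        for (i = 0; i < nr; i++) {      /* commit: publish new, drop old */
                char *old = bufs[i];
                bufs[i] = fresh[i];
                free(old);
        }
        free(fresh);
        return 0;

unwind:
        while (i--)
                free(fresh[i]);
        free(fresh);
        return -1;
}
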
6899 static int rb_alloc_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_alloc_meta_page() argument
6903 if (cpu_buffer->meta_page) in rb_alloc_meta_page()
6910 cpu_buffer->meta_page = page_to_virt(page); in rb_alloc_meta_page()
6915 static void rb_free_meta_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_meta_page() argument
6917 unsigned long addr = (unsigned long)cpu_buffer->meta_page; in rb_free_meta_page()
6920 cpu_buffer->meta_page = NULL; in rb_free_meta_page()
6923 static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_setup_ids_meta_page() argument
6926 struct trace_buffer_meta *meta = cpu_buffer->meta_page; in rb_setup_ids_meta_page()
6927 unsigned int nr_subbufs = cpu_buffer->nr_pages + 1; in rb_setup_ids_meta_page()
6931 subbuf_ids[id] = (unsigned long)cpu_buffer->reader_page->page; in rb_setup_ids_meta_page()
6932 cpu_buffer->reader_page->id = id++; in rb_setup_ids_meta_page()
6934 first_subbuf = subbuf = rb_set_head_page(cpu_buffer); in rb_setup_ids_meta_page()
6947 cpu_buffer->subbuf_ids = subbuf_ids; in rb_setup_ids_meta_page()
6951 meta->subbuf_size = cpu_buffer->buffer->subbuf_size + BUF_PAGE_HDR_SIZE; in rb_setup_ids_meta_page()
6954 rb_update_meta_page(cpu_buffer); in rb_setup_ids_meta_page()
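
rb_setup_ids_meta_page() gives every sub-buffer a stable id, records each sub-buffer's address in the subbuf_ids[] table indexed by that id (the reader page first, then the ring pages in order), and publishes nr_subbufs and the per-sub-buffer size through the shared meta page. A sketch of the same id-table construction over a plain array of pages; struct meta and struct ring are invented for the sketch and are not the kernel's layouts:

#include <stdlib.h>

struct meta {                   /* stand-in for the shared meta page */
        unsigned int reader_id;
        unsigned int nr_subbufs;
        unsigned int subbuf_size;
};

struct ring {
        void          *reader;     /* page currently owned by the reader */
        void         **pages;      /* the nr_pages pages of the ring     */
        unsigned int   nr_pages;
        unsigned long *subbuf_ids; /* id -> page address                 */
        struct meta   *meta;
};

int setup_ids(struct ring *r, unsigned int subbuf_size)
{
        unsigned int id = 0, i;

        r->subbuf_ids = calloc(r->nr_pages + 1, sizeof(*r->subbuf_ids));
        if (!r->subbuf_ids)
                return -1;

        /* the reader page gets the first id when the map is created */
        r->subbuf_ids[id] = (unsigned long)r->reader;
        r->meta->reader_id = id++;

        for (i = 0; i < r->nr_pages; i++, id++)
                r->subbuf_ids[id] = (unsigned long)r->pages[i];

        r->meta->nr_subbufs  = id;
        r->meta->subbuf_size = subbuf_size;
        return 0;
}
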
6960 struct ring_buffer_per_cpu *cpu_buffer; in rb_get_mapped_buffer() local
6965 cpu_buffer = buffer->buffers[cpu]; in rb_get_mapped_buffer()
6967 mutex_lock(&cpu_buffer->mapping_lock); in rb_get_mapped_buffer()
6969 if (!cpu_buffer->user_mapped) { in rb_get_mapped_buffer()
6970 mutex_unlock(&cpu_buffer->mapping_lock); in rb_get_mapped_buffer()
6974 return cpu_buffer; in rb_get_mapped_buffer()
6977 static void rb_put_mapped_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_put_mapped_buffer() argument
6979 mutex_unlock(&cpu_buffer->mapping_lock); in rb_put_mapped_buffer()
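
rb_get_mapped_buffer()/rb_put_mapped_buffer() form a small acquire/release pair: take the per-CPU mapping_lock, bail out if the buffer is not currently user-mapped, and otherwise return with the lock held so the caller runs with the mapping pinned; the put side is just the unlock. (The real get returns an ERR_PTR on failure.) A minimal analogue with a pthread mutex and an errno-style return, names invented for the sketch:

#include <errno.h>
#include <pthread.h>

struct mapped_buf {
        pthread_mutex_t mapping_lock;
        unsigned int    user_mapped;    /* 0 when nobody has it mmapped */
};

/* Returns 0 with mapping_lock held, or -ENODEV if not mapped. */
int get_mapped(struct mapped_buf *b)
{
        pthread_mutex_lock(&b->mapping_lock);
        if (!b->user_mapped) {
                pthread_mutex_unlock(&b->mapping_lock);
                return -ENODEV;
        }
        return 0;                       /* caller must call put_mapped() */
}

void put_mapped(struct mapped_buf *b)
{
        pthread_mutex_unlock(&b->mapping_lock);
}
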
6986 static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer, in __rb_inc_dec_mapped() argument
6991 lockdep_assert_held(&cpu_buffer->mapping_lock); in __rb_inc_dec_mapped()
6994 if (WARN_ON(cpu_buffer->mapped < cpu_buffer->user_mapped)) in __rb_inc_dec_mapped()
6997 if (inc && cpu_buffer->mapped == UINT_MAX) in __rb_inc_dec_mapped()
7000 if (WARN_ON(!inc && cpu_buffer->user_mapped == 0)) in __rb_inc_dec_mapped()
7003 mutex_lock(&cpu_buffer->buffer->mutex); in __rb_inc_dec_mapped()
7004 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in __rb_inc_dec_mapped()
7007 cpu_buffer->user_mapped++; in __rb_inc_dec_mapped()
7008 cpu_buffer->mapped++; in __rb_inc_dec_mapped()
7010 cpu_buffer->user_mapped--; in __rb_inc_dec_mapped()
7011 cpu_buffer->mapped--; in __rb_inc_dec_mapped()
7014 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in __rb_inc_dec_mapped()
7015 mutex_unlock(&cpu_buffer->buffer->mutex); in __rb_inc_dec_mapped()
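
__rb_inc_dec_mapped() is a bounds-checked reference counter: it warns if the internal mapped count ever drops below the user-visible user_mapped count, refuses to increment past UINT_MAX, refuses to decrement a zero user_mapped, and adjusts both counters together under the buffer mutex and reader lock. The same checks as a standalone helper (locking omitted, since in the real code the caller already holds mapping_lock):

#include <errno.h>
#include <limits.h>

struct map_counts {
        unsigned int mapped;            /* all users, kernel ones included */
        unsigned int user_mapped;       /* userspace mmap()ers only        */
};

int inc_dec_mapped(struct map_counts *c, int inc)
{
        if (c->mapped < c->user_mapped)         /* invariant violated  */
                return -EINVAL;
        if (inc && c->mapped == UINT_MAX)       /* would overflow      */
                return -EBUSY;
        if (!inc && c->user_mapped == 0)        /* nothing to drop     */
                return -EINVAL;

        if (inc) {
                c->user_mapped++;
                c->mapped++;
        } else {
                c->user_mapped--;
                c->mapped--;
        }
        return 0;
}
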
7032 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer, in __rb_map_vma() argument
7046 subbuf_order = cpu_buffer->buffer->subbuf_order; in __rb_map_vma()
7059 lockdep_assert_held(&cpu_buffer->mapping_lock); in __rb_map_vma()
7061 nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */ in __rb_map_vma()
7081 pages[p++] = virt_to_page(cpu_buffer->meta_page); in __rb_map_vma()
7110 page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]); in __rb_map_vma()
7129 static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer, in __rb_map_vma() argument
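
The second __rb_map_vma() definition at line 7129 exists because the VMA-population path only makes sense when the kernel has an MMU; the alternative definition is the usual compiled-out stub that keeps callers building. A generic sketch of that convention (the config symbol and error value here follow the common kernel idiom; check the real file for the exact ones it uses):

#include <errno.h>

#ifdef HAVE_MMU                         /* stand-in for CONFIG_MMU */
int feature_map(void *vma)
{
        (void)vma;
        /* real implementation would populate the mapping here */
        return 0;
}
#else
/* Stub so callers still compile and get a clean error at runtime. */
int feature_map(void *vma)
{
        (void)vma;
        return -EOPNOTSUPP;
}
#endif
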
7139 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_map() local
7146 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_map()
7148 mutex_lock(&cpu_buffer->mapping_lock); in ring_buffer_map()
7150 if (cpu_buffer->user_mapped) { in ring_buffer_map()
7151 err = __rb_map_vma(cpu_buffer, vma); in ring_buffer_map()
7153 err = __rb_inc_dec_mapped(cpu_buffer, true); in ring_buffer_map()
7154 mutex_unlock(&cpu_buffer->mapping_lock); in ring_buffer_map()
7161 err = rb_alloc_meta_page(cpu_buffer); in ring_buffer_map()
7166 subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1, sizeof(*subbuf_ids), GFP_KERNEL); in ring_buffer_map()
7168 rb_free_meta_page(cpu_buffer); in ring_buffer_map()
7173 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_map()
7179 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7180 rb_setup_ids_meta_page(cpu_buffer, subbuf_ids); in ring_buffer_map()
7182 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7184 err = __rb_map_vma(cpu_buffer, vma); in ring_buffer_map()
7186 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7188 cpu_buffer->mapped++; in ring_buffer_map()
7189 cpu_buffer->user_mapped = 1; in ring_buffer_map()
7190 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_map()
7192 kfree(cpu_buffer->subbuf_ids); in ring_buffer_map()
7193 cpu_buffer->subbuf_ids = NULL; in ring_buffer_map()
7194 rb_free_meta_page(cpu_buffer); in ring_buffer_map()
7195 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_map()
7200 mutex_unlock(&cpu_buffer->mapping_lock); in ring_buffer_map()
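
ring_buffer_map() is what backs mmap() on trace_pipe_raw: the first mapping allocates the meta page and the subbuf_ids table, disables resizing and populates the caller's VMA; later mappings only bump the count. From userspace the result looks roughly like the snippet below, based on the memory-mapped ring buffer documentation (Documentation/trace/ring-buffer-map.rst); the header, struct and field names come from include/uapi/linux/trace_mmap.h as I recall them and should be checked against your kernel's headers:

/* Map cpu0's trace ring buffer read-only into this process (sketch;
 * error handling trimmed). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/trace_mmap.h>

int main(void)
{
        const char *path =
                "/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw";
        struct trace_buffer_meta *meta;
        unsigned char *data;
        size_t data_len;
        int fd = open(path, O_RDONLY | O_NONBLOCK);

        if (fd < 0)
                return 1;

        /* the first page of the mapping is the meta page */
        meta = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
        if (meta == MAP_FAILED)
                return 1;

        /* the sub-buffers follow, starting at meta->meta_page_size */
        data_len = (size_t)meta->subbuf_size * meta->nr_subbufs;
        data = mmap(NULL, data_len, PROT_READ, MAP_SHARED, fd,
                    meta->meta_page_size);
        if (data == MAP_FAILED)
                return 1;

        printf("%u sub-buffers of %u bytes mapped\n",
               meta->nr_subbufs, meta->subbuf_size);

        munmap(data, data_len);
        munmap(meta, getpagesize());
        close(fd);
        return 0;
}
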
7207 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unmap() local
7214 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unmap()
7216 mutex_lock(&cpu_buffer->mapping_lock); in ring_buffer_unmap()
7218 if (!cpu_buffer->user_mapped) { in ring_buffer_unmap()
7221 } else if (cpu_buffer->user_mapped > 1) { in ring_buffer_unmap()
7222 __rb_inc_dec_mapped(cpu_buffer, false); in ring_buffer_unmap()
7227 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_unmap()
7230 if (!WARN_ON_ONCE(cpu_buffer->mapped < cpu_buffer->user_mapped)) in ring_buffer_unmap()
7231 cpu_buffer->mapped--; in ring_buffer_unmap()
7232 cpu_buffer->user_mapped = 0; in ring_buffer_unmap()
7234 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_unmap()
7236 kfree(cpu_buffer->subbuf_ids); in ring_buffer_unmap()
7237 cpu_buffer->subbuf_ids = NULL; in ring_buffer_unmap()
7238 rb_free_meta_page(cpu_buffer); in ring_buffer_unmap()
7239 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_unmap()
7244 mutex_unlock(&cpu_buffer->mapping_lock); in ring_buffer_unmap()
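
ring_buffer_unmap() is the mirror image: while other userspace mappings remain it only decrements the count, and when the last one goes away it clears mapped/user_mapped under the reader lock, frees the subbuf_ids table and the meta page, and re-enables resizing. The "only the last unmap tears down" shape, reduced to a standalone helper with invented names:

#include <stdlib.h>

struct shared_map {
        unsigned int  users;            /* how many mmap()ers remain    */
        void         *id_table;         /* state that exists only while
                                         * at least one mapping does    */
        void         *meta;
        int           resize_disabled;
};

void unmap_one(struct shared_map *m)
{
        if (m->users == 0)
                return;                 /* nothing mapped: nothing to do */

        if (--m->users > 0)
                return;                 /* other mappings still alive    */

        /* last user gone: release per-mapping state */
        free(m->id_table);
        m->id_table = NULL;
        free(m->meta);
        m->meta = NULL;
        m->resize_disabled = 0;         /* resizing allowed again        */
}
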
7251 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_map_get_reader() local
7257 cpu_buffer = rb_get_mapped_buffer(buffer, cpu); in ring_buffer_map_get_reader()
7258 if (IS_ERR(cpu_buffer)) in ring_buffer_map_get_reader()
7259 return (int)PTR_ERR(cpu_buffer); in ring_buffer_map_get_reader()
7261 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_map_get_reader()
7264 if (rb_per_cpu_empty(cpu_buffer)) in ring_buffer_map_get_reader()
7267 reader_size = rb_page_size(cpu_buffer->reader_page); in ring_buffer_map_get_reader()
7274 if (cpu_buffer->reader_page->read < reader_size) { in ring_buffer_map_get_reader()
7275 while (cpu_buffer->reader_page->read < reader_size) in ring_buffer_map_get_reader()
7276 rb_advance_reader(cpu_buffer); in ring_buffer_map_get_reader()
7280 reader = rb_get_reader_page(cpu_buffer); in ring_buffer_map_get_reader()
7285 missed_events = cpu_buffer->lost_events; in ring_buffer_map_get_reader()
7287 if (cpu_buffer->reader_page != cpu_buffer->commit_page) { in ring_buffer_map_get_reader()
7318 cpu_buffer->lost_events = 0; in ring_buffer_map_get_reader()
7324 flush_kernel_vmap_range(cpu_buffer->reader_page->page, in ring_buffer_map_get_reader()
7327 rb_update_meta_page(cpu_buffer); in ring_buffer_map_get_reader()
7329 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_map_get_reader()
7330 rb_put_mapped_buffer(cpu_buffer); in ring_buffer_map_get_reader()
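
ring_buffer_map_get_reader() is driven from userspace by an ioctl: it finishes off whatever remains on the current reader page, swaps a fresh reader page in, folds any lost-event count into the meta page and flushes the reader page for the user mapping. With the mapping from the earlier sketch in place, a consumer step looks roughly like this (again following the ring-buffer-map documentation; TRACE_MMAP_IOCTL_GET_READER and the reader.id/reader.read fields are from the UAPI header as I recall them, so verify against include/uapi/linux/trace_mmap.h):

/* Continuing the mapped-consumer sketch: "fd", "meta" and "data" are
 * the descriptor and mappings set up in the previous example. */
#include <sys/ioctl.h>
#include <linux/trace_mmap.h>

int consume_reader_page(int fd, struct trace_buffer_meta *meta,
                        unsigned char *data)
{
        unsigned char *subbuf;

        /* kernel side of this call: ring_buffer_map_get_reader() */
        if (ioctl(fd, TRACE_MMAP_IOCTL_GET_READER) < 0)
                return -1;

        /* meta->reader.id says which mapped sub-buffer is now the
         * reader page; meta->reader.read is how far it has been read. */
        subbuf = data + (size_t)meta->reader.id * meta->subbuf_size;

        /* ... parse events in subbuf[] from offset meta->reader.read ... */
        (void)subbuf;

        return 0;
}
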