Lines matching refs: rb — references to the identifier rb in the BPF ring buffer implementation (kernel/bpf/ringbuf.c). Each entry gives the source line number, the matching line, and its enclosing function or declaration context.
81 struct bpf_ringbuf *rb; member
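
Line 81 is the rb member of struct bpf_ringbuf_map; everything below dereferences either that member or a local of the same name. A minimal sketch of the two structures these references assume, limited to the fields the listing actually touches (ordering, alignment and padding are illustrative, not the exact kernel layout):

    /* Hedged sketch: only the fields used by the lines in this listing. */
    struct bpf_ringbuf {
            wait_queue_head_t waitq;        /* woken from bpf_ringbuf_notify() */
            struct irq_work work;           /* deferred consumer/producer wakeup */
            atomic_t busy;                  /* serializes user-ringbuf drain */
            unsigned long mask;             /* data_sz - 1, data_sz is a power of two */
            struct page **pages;            /* backing pages; data pages mapped twice */
            int nr_pages;
            rqspinlock_t spinlock;          /* taken by raw_res_spin_lock_irqsave() */
            unsigned long consumer_pos;     /* own page in the real layout (mmap'd) */
            unsigned long producer_pos;     /* own page in the real layout (mmap'd) */
            unsigned long pending_pos;      /* oldest not-yet-committed record */
            char data[];                    /* start of the double-mapped data area */
    };

    struct bpf_ringbuf_map {
            struct bpf_map map;
            struct bpf_ringbuf *rb;         /* the "member" hit at line 81 */
    };
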
98 struct bpf_ringbuf *rb; in bpf_ringbuf_area_alloc() local
135 rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages, in bpf_ringbuf_area_alloc()
137 if (rb) { in bpf_ringbuf_area_alloc()
139 rb->pages = pages; in bpf_ringbuf_area_alloc()
140 rb->nr_pages = nr_pages; in bpf_ringbuf_area_alloc()
141 return rb; in bpf_ringbuf_area_alloc()
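
Lines 135-141 vmap() nr_meta_pages plus 2 * nr_data_pages page pointers into one contiguous virtual range: each data page appears twice, back to back, so a record that runs past the end of the buffer stays virtually contiguous and neither producer nor consumer ever has to split a sample. Not the kernel path, but the same trick can be sketched in userspace with one memfd mapped twice (names and sizes are illustrative; the meta pages are skipped and error handling is trimmed):

    /* Illustrative userspace analogue of the double mapping set up by vmap()
     * at line 135: the same pages appear twice, back to back, so a write
     * across the end of the buffer wraps around transparently. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            size_t data_sz = (size_t)page;          /* one data page for the demo */
            int fd = memfd_create("rb-demo", 0);

            if (fd < 0 || ftruncate(fd, data_sz) != 0)
                    return 1;

            /* Reserve room for two copies, then map the same pages twice. */
            char *area = mmap(NULL, 2 * data_sz, PROT_NONE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (area == MAP_FAILED)
                    return 1;
            mmap(area, data_sz, PROT_READ | PROT_WRITE,
                 MAP_SHARED | MAP_FIXED, fd, 0);
            mmap(area + data_sz, data_sz, PROT_READ | PROT_WRITE,
                 MAP_SHARED | MAP_FIXED, fd, 0);

            /* A write across the seam reads back contiguously. */
            memcpy(area + data_sz - 4, "wrapped!", 8);
            printf("%.8s / %.4s\n", area + data_sz - 4, area);  /* wrapped! / ped! */
            return 0;
    }
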
153 struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work); in bpf_ringbuf_notify() local
155 wake_up_all(&rb->waitq); in bpf_ringbuf_notify()
171 struct bpf_ringbuf *rb; in bpf_ringbuf_alloc() local
173 rb = bpf_ringbuf_area_alloc(data_sz, numa_node); in bpf_ringbuf_alloc()
174 if (!rb) in bpf_ringbuf_alloc()
177 raw_res_spin_lock_init(&rb->spinlock); in bpf_ringbuf_alloc()
178 atomic_set(&rb->busy, 0); in bpf_ringbuf_alloc()
179 init_waitqueue_head(&rb->waitq); in bpf_ringbuf_alloc()
180 init_irq_work(&rb->work, bpf_ringbuf_notify); in bpf_ringbuf_alloc()
182 rb->mask = data_sz - 1; in bpf_ringbuf_alloc()
183 rb->consumer_pos = 0; in bpf_ringbuf_alloc()
184 rb->producer_pos = 0; in bpf_ringbuf_alloc()
185 rb->pending_pos = 0; in bpf_ringbuf_alloc()
187 return rb; in bpf_ringbuf_alloc()
208 rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node); in ringbuf_map_alloc()
209 if (!rb_map->rb) { in ringbuf_map_alloc()
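
Line 182 sets rb->mask = data_sz - 1, and the masking done later in __bpf_ringbuf_reserve() only works if data_sz is a power of two; ringbuf_map_alloc() (line 208) passes attr->max_entries straight through as that size, and the map creation path rejects sizes that are not page-aligned powers of two. A hedged libbpf sketch of creating such a map (size is illustrative, error handling minimal):

    /* Sketch: creating a BPF_MAP_TYPE_RINGBUF map whose size satisfies the
     * power-of-two / page-aligned expectation behind rb->mask = data_sz - 1.
     * Requires libbpf; link with -lbpf. */
    #include <bpf/bpf.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            __u32 data_sz = 64 * 4096;   /* power of two, multiple of the page size */
            int map_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, "demo_rb",
                                        0 /* key_size */, 0 /* value_size */,
                                        data_sz /* max_entries = size in bytes */,
                                        NULL);
            if (map_fd < 0) {
                    perror("bpf_map_create");
                    return 1;
            }
            printf("ring buffer map fd: %d\n", map_fd);
            close(map_fd);
            return 0;
    }
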
217 static void bpf_ringbuf_free(struct bpf_ringbuf *rb) in bpf_ringbuf_free() argument
222 struct page **pages = rb->pages; in bpf_ringbuf_free()
223 int i, nr_pages = rb->nr_pages; in bpf_ringbuf_free()
225 vunmap(rb); in bpf_ringbuf_free()
236 bpf_ringbuf_free(rb_map->rb); in ringbuf_map_free()
274 return remap_vmalloc_range(vma, rb_map->rb, in ringbuf_map_mmap_kern()
293 return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF); in ringbuf_map_mmap_user()
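
Lines 274 and 293 expose the same vmalloc area to userspace with different write permissions: for the kernel-producer ring buffer the consumer_pos page is writable by userspace while the producer_pos page and the data pages are read-only, and for the user-producer variant the split is reversed. A consumer therefore typically mmaps the map fd in two pieces; the offsets below are inferred from the RINGBUF_PGOFF-based remap and from how libbpf consumes these maps, so treat them as assumptions:

    /* Sketch of the consumer-side mmap layout for a kernel-producer ringbuf:
     *   page 0            consumer_pos  (read-write from userspace)
     *   page 1            producer_pos  (read-only)
     *   pages 2 onwards   data, double-mapped, 2 * data_sz bytes (read-only)
     * map_fd is a BPF_MAP_TYPE_RINGBUF fd, data_sz its max_entries. */
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct rb_view {
            volatile uint64_t *consumer_pos;   /* advanced by this consumer */
            volatile uint64_t *producer_pos;   /* advanced by the kernel */
            const char *data;                  /* wrap-safe thanks to the double map */
    };

    static int rb_view_map(struct rb_view *v, int map_fd, size_t data_sz)
    {
            long psz = sysconf(_SC_PAGESIZE);
            void *cons, *prod;

            cons = mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
            if (cons == MAP_FAILED)
                    return -1;

            /* producer page plus the double-mapped data area, from page 1 on */
            prod = mmap(NULL, psz + 2 * data_sz, PROT_READ, MAP_SHARED, map_fd, psz);
            if (prod == MAP_FAILED) {
                    munmap(cons, psz);
                    return -1;
            }

            v->consumer_pos = cons;
            v->producer_pos = prod;
            v->data = (const char *)prod + psz;
            return 0;
    }
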
296 static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb) in ringbuf_avail_data_sz() argument
300 cons_pos = smp_load_acquire(&rb->consumer_pos); in ringbuf_avail_data_sz()
301 prod_pos = smp_load_acquire(&rb->producer_pos); in ringbuf_avail_data_sz()
305 static u32 ringbuf_total_data_sz(const struct bpf_ringbuf *rb) in ringbuf_total_data_sz() argument
307 return rb->mask + 1; in ringbuf_total_data_sz()
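
Lines 296-307 define the two size helpers: available data is producer_pos - consumer_pos (both are monotonically increasing logical positions, so the unsigned subtraction is wrap-safe), and the total size is mask + 1, i.e. data_sz. The same arithmetic on the consumer side, given pointers to the mmap'd position pages from the previous sketch (function name is mine):

    #include <stdint.h>

    /* Mirrors ringbuf_avail_data_sz(): acquire-load both positions, as the
     * kernel does at lines 300-301, so data written before the producer_pos
     * update is visible, then subtract. */
    static uint64_t rb_avail_data(const volatile uint64_t *consumer_pos,
                                  const volatile uint64_t *producer_pos)
    {
            uint64_t cons = __atomic_load_n(consumer_pos, __ATOMIC_ACQUIRE);
            uint64_t prod = __atomic_load_n(producer_pos, __ATOMIC_ACQUIRE);

            return prod - cons;   /* wrap-safe: positions only ever grow */
    }
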
316 poll_wait(filp, &rb_map->rb->waitq, pts); in ringbuf_map_poll_kern()
318 if (ringbuf_avail_data_sz(rb_map->rb)) in ringbuf_map_poll_kern()
329 poll_wait(filp, &rb_map->rb->waitq, pts); in ringbuf_map_poll_user()
331 if (ringbuf_avail_data_sz(rb_map->rb) < ringbuf_total_data_sz(rb_map->rb)) in ringbuf_map_poll_user()
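
Lines 316-331 give the two poll flavours their meaning: the kernel-producer ring buffer signals readability (EPOLLIN) as soon as any data is available, while the user-producer ring buffer signals writability (EPOLLOUT) as long as it is not completely full. In practice a consumer rarely polls the fd by hand; the usual path is libbpf's ring_buffer API, which wraps the epoll loop (sketch; the callback and timeout are illustrative):

    /* Sketch: consuming a kernel-producer ring buffer via libbpf, which
     * epoll-waits on the map fd and relies on the EPOLLIN semantics of
     * ringbuf_map_poll_kern().  Link with -lbpf. */
    #include <bpf/libbpf.h>
    #include <errno.h>
    #include <stdio.h>

    static int handle_sample(void *ctx, void *data, size_t len)
    {
            printf("got %zu byte sample\n", len);
            return 0;   /* non-zero would stop ring_buffer__poll() early */
    }

    static int consume(int ringbuf_map_fd)
    {
            struct ring_buffer *rb =
                    ring_buffer__new(ringbuf_map_fd, handle_sample, NULL, NULL);
            if (!rb)
                    return -1;

            for (;;) {
                    int n = ring_buffer__poll(rb, 100 /* timeout, ms */);
                    if (n < 0 && n != -EINTR)
                            break;
            }
            ring_buffer__free(rb);
            return 0;
    }
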
338 struct bpf_ringbuf *rb; in ringbuf_map_mem_usage() local
343 rb = container_of(map, struct bpf_ringbuf_map, map)->rb; in ringbuf_map_mem_usage()
344 usage += (u64)rb->nr_pages << PAGE_SHIFT; in ringbuf_map_mem_usage()
387 static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb, in bpf_ringbuf_rec_pg_off() argument
390 return ((void *)hdr - (void *)rb) >> PAGE_SHIFT; in bpf_ringbuf_rec_pg_off()
405 static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size) in __bpf_ringbuf_reserve() argument
415 if (len > ringbuf_total_data_sz(rb)) in __bpf_ringbuf_reserve()
418 cons_pos = smp_load_acquire(&rb->consumer_pos); in __bpf_ringbuf_reserve()
420 if (raw_res_spin_lock_irqsave(&rb->spinlock, flags)) in __bpf_ringbuf_reserve()
423 pend_pos = rb->pending_pos; in __bpf_ringbuf_reserve()
424 prod_pos = rb->producer_pos; in __bpf_ringbuf_reserve()
428 hdr = (void *)rb->data + (pend_pos & rb->mask); in __bpf_ringbuf_reserve()
436 rb->pending_pos = pend_pos; in __bpf_ringbuf_reserve()
444 if (new_prod_pos - cons_pos > rb->mask || in __bpf_ringbuf_reserve()
445 new_prod_pos - pend_pos > rb->mask) { in __bpf_ringbuf_reserve()
446 raw_res_spin_unlock_irqrestore(&rb->spinlock, flags); in __bpf_ringbuf_reserve()
450 hdr = (void *)rb->data + (prod_pos & rb->mask); in __bpf_ringbuf_reserve()
451 pg_off = bpf_ringbuf_rec_pg_off(rb, hdr); in __bpf_ringbuf_reserve()
456 smp_store_release(&rb->producer_pos, new_prod_pos); in __bpf_ringbuf_reserve()
458 raw_res_spin_unlock_irqrestore(&rb->spinlock, flags); in __bpf_ringbuf_reserve()
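
The reservation path at lines 405-458 tracks three monotonically growing positions: consumer_pos, pending_pos (the oldest not-yet-committed record) and producer_pos. The check at lines 444-445 rejects a reservation when either unconsumed data or uncommitted records would exceed the buffer size. Restated as a standalone predicate (names are mine, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    /* Restates the overflow test at lines 444-445.  All positions are logical
     * byte offsets that only ever increase, so the unsigned differences are
     * wrap-safe.  mask == data_sz - 1. */
    static bool ringbuf_would_overflow(uint64_t cons_pos, uint64_t pend_pos,
                                       uint64_t new_prod_pos, uint64_t mask)
    {
            return (new_prod_pos - cons_pos) > mask ||   /* consumer too far behind */
                   (new_prod_pos - pend_pos) > mask;     /* too much uncommitted data */
    }
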
471 return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size); in BPF_CALL_3()
486 struct bpf_ringbuf *rb; in bpf_ringbuf_commit() local
490 rb = bpf_ringbuf_restore_from_rec(hdr); in bpf_ringbuf_commit()
501 rec_pos = (void *)hdr - (void *)rb->data; in bpf_ringbuf_commit()
502 cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask; in bpf_ringbuf_commit()
505 irq_work_queue(&rb->work); in bpf_ringbuf_commit()
507 irq_work_queue(&rb->work); in bpf_ringbuf_commit()
546 rec = __bpf_ringbuf_reserve(rb_map->rb, size); in BPF_CALL_4()
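
Line 471 is the bpf_ringbuf_reserve() helper sitting on top of __bpf_ringbuf_reserve(), lines 486-507 are the commit path behind bpf_ringbuf_submit()/bpf_ringbuf_discard(), and line 546 is the copy-based bpf_ringbuf_output() fallback. A minimal BPF-side user of the zero-copy pair (tracepoint and event layout are illustrative):

    /* Sketch of a BPF program producing into a ring buffer map.
     * Compile with clang -target bpf; the attach point is just an example. */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct event {
            __u32 pid;
            __u64 ts;
    };

    struct {
            __uint(type, BPF_MAP_TYPE_RINGBUF);
            __uint(max_entries, 64 * 4096);   /* power of two, page aligned */
    } rb SEC(".maps");

    SEC("tracepoint/syscalls/sys_enter_execve")
    int on_execve(void *ctx)
    {
            /* Zero-copy path: reserve space in place, fill it, then commit. */
            struct event *e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
            if (!e)
                    return 0;              /* buffer full: reservation failed */

            e->pid = bpf_get_current_pid_tgid() >> 32;
            e->ts = bpf_ktime_get_ns();
            bpf_ringbuf_submit(e, 0);      /* or bpf_ringbuf_discard(e, 0) */

            /* Copy-based alternative (line 546), one call but an extra copy:
             *     struct event ev = { ... };
             *     bpf_ringbuf_output(&rb, &ev, sizeof(ev), 0);
             */
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
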
566 struct bpf_ringbuf *rb; in BPF_CALL_2() local
568 rb = container_of(map, struct bpf_ringbuf_map, map)->rb; in BPF_CALL_2()
572 return ringbuf_avail_data_sz(rb); in BPF_CALL_2()
574 return ringbuf_total_data_sz(rb); in BPF_CALL_2()
576 return smp_load_acquire(&rb->consumer_pos); in BPF_CALL_2()
578 return smp_load_acquire(&rb->producer_pos); in BPF_CALL_2()
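
Lines 566-578 are bpf_ringbuf_query(): depending on the flag it reports available data, total ring size, or the current consumer/producer positions, all read without taking the spinlock, so the values are instantaneous snapshots. A hedged BPF-side sketch of backpressure built on top of it (attach point and threshold are illustrative):

    /* Sketch: drop events when the ring is more than half full, using
     * bpf_ringbuf_query().  The values are unsynchronized snapshots, which
     * is fine for a heuristic like this. */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_RINGBUF);
            __uint(max_entries, 64 * 4096);
    } rb SEC(".maps");

    SEC("tracepoint/sched/sched_switch")
    int on_switch(void *ctx)
    {
            __u64 avail = bpf_ringbuf_query(&rb, BPF_RB_AVAIL_DATA);
            __u64 total = bpf_ringbuf_query(&rb, BPF_RB_RING_SIZE);

            if (avail > total / 2)
                    return 0;   /* consumer is lagging: skip this event */

            __u64 *slot = bpf_ringbuf_reserve(&rb, sizeof(*slot), 0);
            if (!slot)
                    return 0;
            *slot = bpf_ktime_get_ns();
            bpf_ringbuf_submit(slot, 0);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
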
611 sample = __bpf_ringbuf_reserve(rb_map->rb, size); in BPF_CALL_4()
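
Line 611 feeds __bpf_ringbuf_reserve() from what appears to be the dynptr-based reserve path: the record is handed to the program as a bpf_dynptr rather than a raw pointer, so verifier-checked variable-length writes can land directly in the reservation. Hedged sketch, assuming the documented dynptr helper signatures (attach point and payload are illustrative):

    /* Sketch: reserving ring buffer space through the dynptr interface.
     * The reserve/submit pair must be balanced; on failure the dynptr still
     * has to be discarded. */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_RINGBUF);
            __uint(max_entries, 64 * 4096);
    } rb SEC(".maps");

    SEC("tracepoint/syscalls/sys_enter_openat")
    int on_openat(void *ctx)
    {
            struct bpf_dynptr ptr;
            __u64 ts = bpf_ktime_get_ns();

            if (bpf_ringbuf_reserve_dynptr(&rb, sizeof(ts), 0, &ptr)) {
                    bpf_ringbuf_discard_dynptr(&ptr, 0);
                    return 0;
            }
            bpf_dynptr_write(&ptr, 0, &ts, sizeof(ts), 0);
            bpf_ringbuf_submit_dynptr(&ptr, 0);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
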
669 static int __bpf_user_ringbuf_peek(struct bpf_ringbuf *rb, void **sample, u32 *size) in __bpf_user_ringbuf_peek() argument
676 prod_pos = smp_load_acquire(&rb->producer_pos); in __bpf_user_ringbuf_peek()
681 cons_pos = smp_load_acquire(&rb->consumer_pos); in __bpf_user_ringbuf_peek()
685 hdr = (u32 *)((uintptr_t)rb->data + (uintptr_t)(cons_pos & rb->mask)); in __bpf_user_ringbuf_peek()
697 if (total_len > ringbuf_total_data_sz(rb)) in __bpf_user_ringbuf_peek()
711 smp_store_release(&rb->consumer_pos, cons_pos + total_len); in __bpf_user_ringbuf_peek()
718 *sample = (void *)((uintptr_t)rb->data + in __bpf_user_ringbuf_peek()
719 (uintptr_t)((cons_pos + BPF_RINGBUF_HDR_SZ) & rb->mask)); in __bpf_user_ringbuf_peek()
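
Lines 669-719 are the kernel-side peek for the user-producer ring buffer (BPF_MAP_TYPE_USER_RINGBUF): it acquire-loads both positions, skips records still marked busy, and only advances consumer_pos past discarded ones before returning a pointer into the double-mapped data area. The userspace producer it peeks at is normally driven through libbpf (sketch; the map fd and the 64-byte sample size are assumptions):

    /* Sketch: userspace producer side of a BPF_MAP_TYPE_USER_RINGBUF map,
     * i.e. the writer whose records __bpf_user_ringbuf_peek() later reads.
     * Link with -lbpf. */
    #include <bpf/libbpf.h>
    #include <string.h>

    static int produce(int user_ringbuf_map_fd)
    {
            struct user_ring_buffer *urb =
                    user_ring_buffer__new(user_ringbuf_map_fd, NULL);
            if (!urb)
                    return -1;

            /* Reserve marks the record busy; submit clears the busy bit so the
             * kernel-side peek will accept it. */
            char *rec = user_ring_buffer__reserve(urb, 64);
            if (!rec) {
                    user_ring_buffer__free(urb);
                    return -1;
            }
            memcpy(rec, "hello from userspace", 21);
            user_ring_buffer__submit(urb, rec);

            user_ring_buffer__free(urb);
            return 0;
    }
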
724 static void __bpf_user_ringbuf_sample_release(struct bpf_ringbuf *rb, size_t size, u64 flags) in __bpf_user_ringbuf_sample_release() argument
733 consumer_pos = rb->consumer_pos; in __bpf_user_ringbuf_sample_release()
735 smp_store_release(&rb->consumer_pos, consumer_pos + rounded_size); in __bpf_user_ringbuf_sample_release()
741 struct bpf_ringbuf *rb; in BPF_CALL_4() local
750 rb = container_of(map, struct bpf_ringbuf_map, map)->rb; in BPF_CALL_4()
753 if (!atomic_try_cmpxchg(&rb->busy, &busy, 1)) in BPF_CALL_4()
762 err = __bpf_user_ringbuf_peek(rb, &sample, &size); in BPF_CALL_4()
777 __bpf_user_ringbuf_sample_release(rb, size, flags); in BPF_CALL_4()
785 atomic_set_release(&rb->busy, 0); in BPF_CALL_4()
788 irq_work_queue(&rb->work); in BPF_CALL_4()
790 irq_work_queue(&rb->work); in BPF_CALL_4()
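
Lines 741-790 implement bpf_user_ringbuf_drain(): the busy flag taken at line 753 keeps draining single-threaded per map (a concurrent drain gets -EBUSY), each drained sample is passed to a callback as a read-only dynptr, and the irq_work wakeups at lines 788/790 let a blocked userspace producer make progress again. BPF-side sketch of the drain loop (callback name, attach point and 8-byte read are illustrative):

    /* Sketch: draining a BPF_MAP_TYPE_USER_RINGBUF from a BPF program with
     * bpf_user_ringbuf_drain().  The callback receives each sample as a
     * read-only dynptr. */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_USER_RINGBUF);
            __uint(max_entries, 64 * 4096);
    } user_rb SEC(".maps");

    static long handle_sample(struct bpf_dynptr *dynptr, void *ctx)
    {
            char buf[8] = {};
            long *seen = ctx;

            if (bpf_dynptr_read(buf, sizeof(buf), dynptr, 0, 0))
                    return 0;          /* sample shorter than we expected */
            (*seen)++;
            return 0;                  /* non-zero would stop the drain early */
    }

    SEC("fentry/do_nanosleep")
    int drain_user_rb(void *ctx)
    {
            long seen = 0;

            /* Returns the number of drained samples or a negative error,
             * e.g. -EBUSY if another drain holds rb->busy. */
            bpf_user_ringbuf_drain(&user_rb, handle_sample, &seen, 0);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
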