| /linux-6.15/kernel/events/ |
| ring_buffer.c |
     42  struct perf_buffer *rb = handle->rb;  in perf_output_get_handle() local
     56  struct perf_buffer *rb = handle->rb;  in perf_output_put_handle() local
    174  rb = rcu_dereference(event->rb);  in __perf_output_begin()
    186  handle->rb = rb;  in __perf_output_begin()
    239  local_add(rb->watermark, &rb->wakeup);  in __perf_output_begin()
    421  handle->rb = rb;  in perf_aux_output_begin()
    470  if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {  in rb_need_aux_wakeup()
    471  rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);  in rb_need_aux_wakeup()
    491  struct perf_buffer *rb = handle->rb;  in perf_aux_output_end() local
    548  struct perf_buffer *rb = handle->rb;  in perf_aux_output_skip() local
    [all …]
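Note: these handle get/put routines implement the kernel side of the documented perf mmap protocol: the kernel publishes data_head with release semantics and the consumer acknowledges consumed bytes through data_tail. A minimal userspace consumer sketch of that protocol follows; drain_perf_buffer and process_record are hypothetical names, and records straddling the wrap point are not handled here.

    #include <linux/perf_event.h>
    #include <stdint.h>

    /* 'base' is the mmap'ed perf buffer: one control page
     * (struct perf_event_mmap_page) followed by a power-of-two data
     * area of 'size' bytes. */
    static void drain_perf_buffer(void *base, uint64_t size,
                                  void (*process_record)(struct perf_event_header *))
    {
            struct perf_event_mmap_page *meta = base;
            uint8_t *data = (uint8_t *)base + meta->data_offset;
            uint64_t head, tail;

            /* Acquire pairs with the kernel's release store of data_head. */
            head = __atomic_load_n(&meta->data_head, __ATOMIC_ACQUIRE);
            tail = meta->data_tail;

            while (tail < head) {
                    struct perf_event_header *hdr =
                            (void *)(data + (tail & (size - 1)));

                    process_record(hdr);
                    tail += hdr->size;
            }

            /* Release store tells the kernel this space may be reused. */
            __atomic_store_n(&meta->data_tail, tail, __ATOMIC_RELEASE);
    }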
|
| internal.h |
     67  struct perf_buffer *rb;  in rb_free_rcu() local
     70  rb_free(rb);  in rb_free_rcu()
     75  if (!pause && rb->nr_pages)  in rb_toggle_paused()
     76  rb->paused = 0;  in rb_toggle_paused()
     78  rb->paused = 1;  in rb_toggle_paused()
     92  return !!rb->aux_nr_pages;  in rb_has_aux()
    110  return rb->page_order;  in page_order()
    123  return rb->nr_pages << page_order(rb);  in data_page_nr()
    128  return rb->nr_pages << (PAGE_SHIFT + page_order(rb));  in perf_data_size()
    151  struct perf_buffer *rb = handle->rb;  \
    [all …]
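Note: the shift arithmetic in data_page_nr() and perf_data_size() above is easy to sanity-check standalone; a tiny runnable illustration, assuming 4 KiB pages (PAGE_SHIFT is arch-dependent):

    #include <assert.h>

    int main(void)
    {
            const unsigned long page_shift = 12;   /* hypothetical: 4 KiB pages */
            unsigned long nr_pages = 8, page_order = 0;

            /* data_page_nr(): number of data pages */
            assert((nr_pages << page_order) == 8);
            /* perf_data_size(): total data area in bytes */
            assert((nr_pages << (page_shift + page_order)) == 32768);
            return 0;
    }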
|
| /linux-6.15/tools/lib/bpf/ |
| ringbuf.c |
    103  tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));  in ring_buffer__add()
    108  tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events));  in ring_buffer__add()
    116  rb->rings[rb->ring_cnt] = r;  in ring_buffer__add()
    153  e = &rb->events[rb->ring_cnt];  in ring_buffer__add()
    181  ringbuf_free_ring(rb, rb->rings[i]);  in ring_buffer__free()
    200  rb = calloc(1, sizeof(*rb));  in ring_buffer__new()
    342  cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);  in ring_buffer__poll()
    430  munmap(rb->producer_pos, rb->page_size + 2 * (rb->mask + 1));  in user_ringbuf_unmap_ring()
    505  rb->data = tmp + rb->page_size;  in user_ringbuf_map()
    527  rb = calloc(1, sizeof(*rb));  in user_ring_buffer__new()
    [all …]
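Note: these are the internals of libbpf's ring buffer manager; the public consumer API is small. A usage sketch, assuming a BPF_MAP_TYPE_RINGBUF map fd is already available (handle_event and consume_samples are illustrative names):

    #include <bpf/libbpf.h>
    #include <stdio.h>

    /* Callback invoked once per submitted sample. */
    static int handle_event(void *ctx, void *data, size_t size)
    {
            printf("sample of %zu bytes\n", size);
            return 0;  /* a negative return stops ring_buffer__poll() early */
    }

    int consume_samples(int map_fd)
    {
            struct ring_buffer *rb;
            int err;

            rb = ring_buffer__new(map_fd, handle_event, NULL /* ctx */,
                                  NULL /* opts */);
            if (!rb)
                    return -1;

            /* Poll with a 100 ms timeout; returns the number of consumed
             * samples or a negative error. Loops until an error occurs. */
            while ((err = ring_buffer__poll(rb, 100)) >= 0)
                    ;

            ring_buffer__free(rb);
            return err;
    }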
|
| /linux-6.15/drivers/scsi/bfa/ |
| bfa_ioc_ct.c |
    185  void __iomem *rb;  in bfa_ioc_ct_reg_init() local
    188  rb = bfa_ioc_bar0(ioc);  in bfa_ioc_ct_reg_init()
    244  void __iomem *rb;  in bfa_ioc_ct2_reg_init() local
    247  rb = bfa_ioc_bar0(ioc);  in bfa_ioc_ct2_reg_init()
    597  writel(0, (rb + OP_MODE));  in bfa_ioc_ct_pll_init()
    821  bfa_ioc_ct2_sclk_init(rb);  in bfa_ioc_ct2_clk_reset()
    822  bfa_ioc_ct2_lclk_init(rb);  in bfa_ioc_ct2_clk_reset()
    898  bfa_ioc_ct2_clk_reset(rb);  in bfa_ioc_ct2_pll_init()
    901  bfa_ioc_ct2_mac_reset(rb);  in bfa_ioc_ct2_pll_init()
    903  bfa_ioc_ct2_clk_reset(rb);  in bfa_ioc_ct2_pll_init()
    [all …]
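Note: in these Brocade IOC files (and the bfa_ioc_cb.c and bna copies below) `rb` is not a ring buffer but a register base: an __iomem pointer returned by bfa_ioc_bar0(), with registers accessed at fixed offsets via readl()/writel(). The same idiom in miniature, with a placeholder offset:

    #include <linux/io.h>
    #include <linux/types.h>

    #define HYPOTHETICAL_CTL_REG    0x0100  /* placeholder register offset */

    static void example_reg_access(void __iomem *rb)
    {
            u32 val;

            writel(0, rb + HYPOTHETICAL_CTL_REG);   /* MMIO write */
            val = readl(rb + HYPOTHETICAL_CTL_REG); /* MMIO read-back */
            (void)val;
    }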
|
| bfa_ioc_cb.c |
    138  void __iomem *rb;  in bfa_ioc_cb_reg_init() local
    141  rb = bfa_ioc_bar0(ioc);  in bfa_ioc_cb_reg_init()
    186  ioc->ioc_regs.err_set = (rb + ERR_SET_REG);  in bfa_ioc_cb_reg_init()
    369  join_bits = readl(rb + BFA_IOC0_STATE_REG) &  in bfa_ioc_cb_pll_init()
    372  join_bits = readl(rb + BFA_IOC1_STATE_REG) &  in bfa_ioc_cb_pll_init()
    375  writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));  in bfa_ioc_cb_pll_init()
    376  writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));  in bfa_ioc_cb_pll_init()
    383  rb + APP_PLL_SCLK_CTL_REG);  in bfa_ioc_cb_pll_init()
    386  rb + APP_PLL_LCLK_CTL_REG);  in bfa_ioc_cb_pll_init()
    391  rb + APP_PLL_SCLK_CTL_REG);  in bfa_ioc_cb_pll_init()
    [all …]
|
| /linux-6.15/drivers/net/ethernet/brocade/bna/ |
| bfa_ioc_ct.c |
    251  void __iomem *rb;  in bfa_ioc_ct_reg_init() local
    254  rb = bfa_ioc_bar0(ioc);  in bfa_ioc_ct_reg_init()
    310  void __iomem *rb;  in bfa_ioc_ct2_reg_init() local
    313  rb = bfa_ioc_bar0(ioc);  in bfa_ioc_ct2_reg_init()
    616  writel(0, (rb + OP_MODE));  in bfa_ioc_ct_pll_init()
    620  (rb + ETH_MAC_SER_REG));  in bfa_ioc_ct_pll_init()
    624  (rb + ETH_MAC_SER_REG));  in bfa_ioc_ct_pll_init()
    789  bfa_ioc_ct2_sclk_init(rb);  in bfa_ioc_ct2_mac_reset()
    790  bfa_ioc_ct2_lclk_init(rb);  in bfa_ioc_ct2_mac_reset()
    888  bfa_ioc_ct2_mac_reset(rb);  in bfa_ioc_ct2_pll_init()
    [all …]
|
| /linux-6.15/fs/xfs/scrub/ |
| bmap_repair.c |
     99  struct xrep_bmap *rb,  in xrep_bmap_discover_shared() argument
    131  struct xrep_bmap *rb,  in xrep_bmap_from_rmap() argument
    576  error = xfarray_load(rb->bmap_records, rb->array_cur++,  in xrep_bmap_get_records()
    679  return xrep_ino_ensure_extent_count(rb->sc, rb->whichfork,  in xrep_bmap_extents_load()
    697  &rb->new_bmapbt.bload, rb->real_mappings);  in xrep_bmap_btree_load()
    724  error = xfs_btree_bload(bmap_cur, &rb->new_bmapbt.bload, rb);  in xrep_bmap_btree_load()
    764  error = xrep_newbt_init_inode(&rb->new_bmapbt, sc, rb->whichfork,  in xrep_bmap_build_new_fork()
    785  if (rb->real_mappings <= XFS_IFORK_MAXEXT(sc->ip, rb->whichfork)) {  in xrep_bmap_build_new_fork()
    938  if (!rb)  in xrep_bmap()
    940  rb->sc = sc;  in xrep_bmap()
    [all …]
|
| /linux-6.15/kernel/bpf/ |
| ringbuf.c |
    137  if (rb) {  in bpf_ringbuf_area_alloc()
    141  return rb;  in bpf_ringbuf_area_alloc()
    174  if (!rb)  in bpf_ringbuf_alloc()
    187  return rb;  in bpf_ringbuf_alloc()
    225  vunmap(rb);  in bpf_ringbuf_free()
    343  rb = container_of(map, struct bpf_ringbuf_map, map)->rb;  in ringbuf_map_mem_usage()
    428  hdr = (void *)rb->data + (pend_pos & rb->mask);  in __bpf_ringbuf_reserve()
    450  hdr = (void *)rb->data + (prod_pos & rb->mask);  in __bpf_ringbuf_reserve()
    502  cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;  in bpf_ringbuf_commit()
    568  rb = container_of(map, struct bpf_ringbuf_map, map)->rb;  in BPF_CALL_2()
    [all …]
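Note: __bpf_ringbuf_reserve() and bpf_ringbuf_commit() above back the bpf_ringbuf_reserve()/bpf_ringbuf_submit() helpers that BPF programs call. A minimal producer sketch on the BPF side; the event layout and tracepoint are illustrative:

    /* BPF program side; builds with clang -target bpf. */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct event {
            int pid;
    };

    struct {
            __uint(type, BPF_MAP_TYPE_RINGBUF);
            __uint(max_entries, 256 * 1024); /* page-aligned power of two */
    } rb SEC(".maps");

    SEC("tracepoint/syscalls/sys_enter_execve")
    int on_execve(void *ctx)
    {
            struct event *e;

            e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
            if (!e)
                    return 0;       /* ring full: reservation failed */

            e->pid = bpf_get_current_pid_tgid() >> 32;
            bpf_ringbuf_submit(e, 0);       /* or bpf_ringbuf_discard(e, 0) */
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";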
|
| range_tree.c |
     47  return rb_entry(rb, struct range_node, rb_range_size);  in rb_to_range_node()
     58  struct rb_node *rb = rt->range_size_root.rb_root.rb_node;  in __find_range() local
     61  while (rb) {  in __find_range()
     62  struct range_node *rn = rb_to_range_node(rb);  in __find_range()
     66  rb = rb->rb_right;  in __find_range()
     68  rb = rb->rb_left;  in __find_range()
     94  rb = *link;  in __range_size_insert()
     95  if (size > rn_size(rb_to_range_node(rb))) {  in __range_size_insert()
     96  link = &rb->rb_left;  in __range_size_insert()
     98  link = &rb->rb_right;  in __range_size_insert()
    [all …]
|
| /linux-6.15/drivers/hid/intel-ish-hid/ishtp/ |
| client-buffers.c |
     29  if (!rb) {  in ishtp_cl_alloc_rx_ring()
    109  kfree(rb);  in ishtp_cl_free_rx_ring()
    119  kfree(rb);  in ishtp_cl_free_rx_ring()
    171  kfree(rb);  in ishtp_io_rb_free()
    187  if (!rb)  in ishtp_io_rb_init()
    191  rb->cl = cl;  in ishtp_io_rb_init()
    193  return rb;  in ishtp_io_rb_init()
    207  if (!rb)  in ishtp_io_rb_alloc_buf()
    235  if (!rb || !rb->cl)  in ishtp_cl_io_rb_recycle()
    270  if (rb)  in ishtp_cl_rx_get_rb()
    [all …]
|
| client.c |
     31  if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {  in ishtp_read_list_flush()
    623  rb = NULL;  in ishtp_cl_read_start()
    631  rb->cl = cl;  in ishtp_cl_read_start()
    632  rb->buf_idx = 0;  in ishtp_cl_read_start()
   1004  cl = rb->cl;  in recv_ishtp_cl_msg()
   1011  if (rb->buffer.size == 0 || rb->buffer.data == NULL) {  in recv_ishtp_cl_msg()
   1027  if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {  in recv_ishtp_cl_msg()
   1039  buffer = rb->buffer.data + rb->buf_idx;  in recv_ishtp_cl_msg()
   1124  cl = rb->cl;  in recv_ishtp_cl_msg_dma()
   1133  if (rb->buffer.size == 0 || rb->buffer.data == NULL) {  in recv_ishtp_cl_msg_dma()
    [all …]
|
| /linux-6.15/lib/ |
| rbtree_test.c |
     21  struct rb_node rb;  member
    167  for (count = 0; rb; rb = rb_parent(rb))  in black_path_count()
    184  struct rb_node *rb;  in check_postorder() local
    186  for (rb = rb_first_postorder(&root.rb_root); rb; rb = rb_next_postorder(rb))  in check_postorder()
    194  struct rb_node *rb;  in check() local
    198  for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {  in check()
    199  struct test_node *node = rb_entry(rb, struct test_node, rb);  in check()
    202  (!rb_parent(rb) || is_red(rb_parent(rb))));  in check()
    206  WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) &&  in check()
    224  for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {  in check_augmented()
    [all …]
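Note: rbtree_test.c exercises the pattern documented in Documentation/core-api/rbtree.rst: a struct embeds a struct rb_node, insertion walks child pointers manually, and rb_insert_color() rebalances. A condensed sketch of that canonical usage; my_node, my_insert, and my_walk are illustrative names:

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct my_node {
            struct rb_node rb;
            u32 key;
    };

    static void my_insert(struct rb_root *root, struct my_node *new)
    {
            struct rb_node **link = &root->rb_node, *parent = NULL;

            /* Standard ordered descent to find the insertion slot. */
            while (*link) {
                    struct my_node *n = rb_entry(*link, struct my_node, rb);

                    parent = *link;
                    link = new->key < n->key ? &(*link)->rb_left
                                             : &(*link)->rb_right;
            }

            rb_link_node(&new->rb, parent, link);
            rb_insert_color(&new->rb, root);        /* rebalance */
    }

    static void my_walk(struct rb_root *root)
    {
            struct rb_node *rb;

            /* In-order traversal, exactly as check() above does. */
            for (rb = rb_first(root); rb; rb = rb_next(rb)) {
                    struct my_node *n = rb_entry(rb, struct my_node, rb);
                    (void)n->key;
            }
    }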
|
| /linux-6.15/kernel/printk/ |
| printk_ringbuffer.c |
   1392  e->rb = rb;  in prb_reserve_in_last()
   1570  desc_update_last_finalized(rb);  in desc_make_final()
   1619  if (!desc_reserve(rb, &id)) {  in prb_reserve()
   1621  atomic_long_inc(&rb->fail);  in prb_reserve()
   1641  e->rb = rb;  in prb_reserve()
   1760  desc_make_final(e->rb, e->id);  in prb_commit()
   1782  desc_update_last_finalized(e->rb);  in prb_final_commit()
   2114  tail_seq = prb_first_seq(rb);  in _prb_read_valid()
   2316  rb->desc_ring.descs = descs;  in prb_init()
   2317  rb->desc_ring.infos = infos;  in prb_init()
    [all …]
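Note: prb_reserve()/prb_commit() form the writer side of the lockless printk ringbuffer. A hedged sketch of how a writer uses them, based on the API in printk_ringbuffer.h; the record contents and error handling are illustrative:

    #include <linux/string.h>
    #include "printk_ringbuffer.h"

    /* Reserve space for a record, fill it, then commit (which also
     * finalizes it for readers, as prb_commit() above shows). */
    static void write_record(struct printk_ringbuffer *rb,
                             const char *text, unsigned int len)
    {
            struct prb_reserved_entry e;
            struct printk_record r;

            prb_rec_init_wr(&r, len);

            if (!prb_reserve(&e, rb, &r))
                    return;         /* reservation failed */

            memcpy(r.text_buf, text, len);
            r.info->text_len = len;

            prb_commit(&e);         /* record becomes readable */
    }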
|
| printk_ringbuffer.h |
    109  struct printk_ringbuffer *rb;  member
    329  void prb_init(struct printk_ringbuffer *rb,
    372  #define prb_for_each_record(from, rb, s, r) \  argument
    390  #define prb_for_each_info(from, rb, s, i, lc) \  argument
    398  u64 prb_first_seq(struct printk_ringbuffer *rb);
    399  u64 prb_first_valid_seq(struct printk_ringbuffer *rb);
    400  u64 prb_next_seq(struct printk_ringbuffer *rb);
    406  #define __ulseq_to_u64seq(rb, ulseq) (ulseq)  argument
    407  #define ULSEQ_MAX(rb) (-1)  argument
    412  #define ULSEQ_MAX(rb) __u64seq_to_ulseq(prb_first_seq(rb) + 0x80000000UL)  argument
    [all …]
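Note: on the reader side, prb_rec_init_rd() points a printk_record at caller-owned buffers and the prb_for_each_record() macro above iterates from a sequence number. A sketch, with illustrative buffer sizes:

    #include "printk_ringbuffer.h"

    static void dump_records(struct printk_ringbuffer *rb)
    {
            struct printk_info info;
            struct printk_record r;
            char text[1024];        /* illustrative size */
            u64 seq;

            prb_rec_init_rd(&r, &info, text, sizeof(text));

            /* Walk every finalized record from the oldest; a stale start
             * seq is clamped forward to the first valid record. */
            prb_for_each_record(0, rb, seq, &r) {
                    /* info.text_len bytes of message text are now in
                     * 'text' (truncated if the record was larger). */
            }
    }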
|
| /linux-6.15/drivers/gpu/drm/ |
| drm_mm.c |
    175  rb = &hole_node->rb;  in drm_mm_interval_tree_add_node()
    177  parent = rb_entry(rb, struct drm_mm_node, rb);  in drm_mm_interval_tree_add_node()
    182  rb = rb_parent(rb);  in drm_mm_interval_tree_add_node()
    185  rb = &hole_node->rb;  in drm_mm_interval_tree_add_node()
    189  rb = NULL;  in drm_mm_interval_tree_add_node()
    195  rb = *link;  in drm_mm_interval_tree_add_node()
    196  parent = rb_entry(rb, struct drm_mm_node, rb);  in drm_mm_interval_tree_add_node()
    207  rb_link_node(&node->rb, rb, link);  in drm_mm_interval_tree_add_node()
    316  rb = rb->rb_right;  in best_hole()
    318  rb = rb->rb_left;  in best_hole()
    [all …]
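Note: drm_mm keeps allocated ranges in an interval rbtree (the insertion path above) so address lookups stay O(log n). The allocator's external API is small; a usage sketch with illustrative sizes:

    #include <drm/drm_mm.h>

    static int example_alloc(void)
    {
            struct drm_mm mm;
            struct drm_mm_node node = {};
            int ret;

            drm_mm_init(&mm, 0, 1 << 20);   /* manage a 1 MiB range */

            /* Find and claim a 4 KiB hole; the node is linked into the
             * interval tree by drm_mm_interval_tree_add_node() above. */
            ret = drm_mm_insert_node(&mm, &node, 4096);
            if (ret == 0)
                    drm_mm_remove_node(&node);

            drm_mm_takedown(&mm);
            return ret;
    }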
|
| drm_prime.c |
    110  rb = NULL;  in drm_prime_add_buf_handle()
    115  rb = *p;  in drm_prime_add_buf_handle()
    125  rb = NULL;  in drm_prime_add_buf_handle()
    130  rb = *p;  in drm_prime_add_buf_handle()
    149  while (rb) {  in drm_prime_lookup_buf_by_handle()
    156  rb = rb->rb_right;  in drm_prime_lookup_buf_by_handle()
    158  rb = rb->rb_left;  in drm_prime_lookup_buf_by_handle()
    179  rb = rb->rb_right;  in drm_prime_lookup_buf_handle()
    181  rb = rb->rb_left;  in drm_prime_lookup_buf_handle()
    208  rb = rb->rb_right;  in drm_prime_remove_buf_handle()
    [all …]
|
| /linux-6.15/mm/ |
| interval_tree.c |
     23  INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
     38  if (!prev->shared.rb.rb_right) {  in vma_interval_tree_insert_after()
     40  link = &prev->shared.rb.rb_right;  in vma_interval_tree_insert_after()
     42  parent = rb_entry(prev->shared.rb.rb_right,  in vma_interval_tree_insert_after()
     43  struct vm_area_struct, shared.rb);  in vma_interval_tree_insert_after()
     46  while (parent->shared.rb.rb_left) {  in vma_interval_tree_insert_after()
     47  parent = rb_entry(parent->shared.rb.rb_left,  in vma_interval_tree_insert_after()
     48  struct vm_area_struct, shared.rb);  in vma_interval_tree_insert_after()
     52  link = &parent->shared.rb.rb_left;  in vma_interval_tree_insert_after()
     56  rb_link_node(&node->shared.rb, &parent->shared.rb, link);  in vma_interval_tree_insert_after()
    [all …]
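Note: INTERVAL_TREE_DEFINE() instantiates the generic augmented-rbtree interval code for VMAs keyed on shared.rb. The simplest pre-instantiated client of the same template lives in include/linux/interval_tree.h; a sketch of a stabbing query with it:

    #include <linux/interval_tree.h>

    static void example_interval_query(void)
    {
            struct rb_root_cached root = RB_ROOT_CACHED;
            struct interval_tree_node a = { .start = 10, .last = 20 };
            struct interval_tree_node *it;

            interval_tree_insert(&a, &root);

            /* Visit every node overlapping [15, 30] (closed interval). */
            for (it = interval_tree_iter_first(&root, 15, 30); it;
                 it = interval_tree_iter_next(it, 15, 30)) {
                    /* 'a' is returned here: [10,20] overlaps [15,30] */
            }

            interval_tree_remove(&a, &root);
    }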
|
| /linux-6.15/drivers/misc/mchp_pci1xxxx/ |
| mchp_pci1xxxx_otpe2p.c |
     99  void __iomem *rb = priv->reg_base;  in is_eeprom_responsive() local
    104  rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));  in is_eeprom_responsive()
    106  rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));  in is_eeprom_responsive()
    124  void __iomem *rb = priv->reg_base;  in pci1xxxx_eeprom_read() local
    148  rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));  in pci1xxxx_eeprom_read()
    165  void __iomem *rb = priv->reg_base;  in pci1xxxx_eeprom_write() local
    193  rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));  in pci1xxxx_eeprom_write()
    219  void __iomem *rb = priv->reg_base;  in pci1xxxx_otp_read() local
    249  rb + MMAP_OTP_OFFSET(OTP_STATUS_OFFSET));  in pci1xxxx_otp_read()
    268  void __iomem *rb = priv->reg_base;  in pci1xxxx_otp_write() local
    [all …]
|
| /linux-6.15/Documentation/translations/zh_CN/core-api/ |
| rbtree.rst |
    274  if (node->rb.rb_left) {
    276  rb_entry(node->rb.rb_left,
    294  if (node->rb.rb_right) {
    296  struct interval_tree_node, rb);
    311  if (node->rb.rb_left) {
    317  if (node->rb.rb_right) {
    328  while (rb != stop) {
    330  rb_entry(rb, struct interval_tree_node, rb);
    335  rb = rb_parent(&node->rb);
    377  link = &parent->rb.rb_left;
    [all …]
|
| /linux-6.15/arch/arm64/crypto/ |
| sm3-neon-core.S |
     42  #define rb w4  macro
    356  ldp ra, rb, [RSTATE, #0]
    401  R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 0, 0, IW, _, 0)
    402  R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 1, 1, IW, _, 0)
    403  R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 2, 2, IW, _, 0)
    404  R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 3, 3, IW, _, 0)
    407  R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 4, 0, IW, _, 0)
    512  eor rb, rb, s1
    518  stp ra, rb, [RSTATE, #0]
    538  eor rb, rb, s1
    [all …]
|
| /linux-6.15/drivers/target/iscsi/ |
| iscsi_target_configfs.c |
     44  ssize_t rb;  in lio_target_np_driver_show() local
     52  return rb;  in lio_target_np_driver_show()
    546  rb += sysfs_emit_at(page, rb,  in lio_target_nacl_info_show()
    595  rb += sysfs_emit_at(page, rb,  in lio_target_nacl_info_show()
    599  rb += sysfs_emit_at(page, rb,  in lio_target_nacl_info_show()
    603  rb += sysfs_emit_at(page, rb,  in lio_target_nacl_info_show()
    607  rb += sysfs_emit_at(page, rb,  in lio_target_nacl_info_show()
    611  rb += sysfs_emit_at(page, rb,  in lio_target_nacl_info_show()
    615  rb += sysfs_emit_at(page, rb,  in lio_target_nacl_info_show()
    619  rb += sysfs_emit_at(page, rb,  in lio_target_nacl_info_show()
    [all …]
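Note: here `rb` is simply a running byte count ("returned bytes"): sysfs_emit_at() appends formatted text at offset rb into the PAGE_SIZE buffer and returns the bytes written, so the show() routine accumulates into rb and returns it. The idiom in isolation, with illustrative attribute contents:

    #include <linux/sysfs.h>

    static ssize_t example_info_show(char *page)
    {
            ssize_t rb = 0;

            rb += sysfs_emit_at(page, rb, "state: %s\n", "active");
            rb += sysfs_emit_at(page, rb, "sessions: %d\n", 1);

            return rb;      /* total bytes emitted into 'page' */
    }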
|
| /linux-6.15/drivers/firmware/arm_scmi/ |
| raw_mode.c |
    271  return rb;  in scmi_raw_buffer_get()
    280  rb->msg.len = rb->max_len;  in scmi_raw_buffer_put()
    309  return rb;  in scmi_raw_buffer_dequeue_unlocked()
    321  return rb;  in scmi_raw_buffer_dequeue()
    330  if (rb)  in scmi_raw_buffer_queue_flush()
    332  } while (rb);  in scmi_raw_buffer_queue_flush()
    717  return rb;  in scmi_raw_message_dequeue()
    755  memcpy(buf, rb->msg.buf, rb->msg.len);  in scmi_raw_message_receive()
   1038  if (!rb)  in scmi_raw_queue_init()
   1341  if (!rb) {  in scmi_raw_message_report()
    [all …]
|
| /linux-6.15/fs/jffs2/ |
| nodelist.h |
    230  struct rb_node rb;  member
    271  struct rb_node rb;  member
    347  #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
    348  #define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
    350  #define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
    351  #define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
    354  #define tn_next(tn) rb_entry(rb_next(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
    355  #define tn_prev(tn) rb_entry(rb_prev(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
    356  #define tn_parent(tn) rb_entry(rb_parent(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
    357  #define tn_left(tn) rb_entry((tn)->rb.rb_left, struct jffs2_tmp_dnode_info, rb)
    [all …]
|
| /linux-6.15/tools/testing/selftests/bpf/benchs/ |
| run_bench_ringbufs.sh |
     10  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
     15  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
     20  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
     43  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
|
| /linux-6.15/drivers/gpu/drm/amd/display/dmub/inc/ |
| dmub_cmd.h |
   5914  if (rb->wrpt >= rb->rptr)  in dmub_rb_num_outstanding()
   5917  data_count = rb->capacity - (rb->rptr - rb->wrpt);  in dmub_rb_num_outstanding()
   5932  if (rb->wrpt >= rb->rptr)  in dmub_rb_num_free()
   5935  data_count = rb->capacity - (rb->rptr - rb->wrpt);  in dmub_rb_num_free()
   5951  if (rb->wrpt >= rb->rptr)  in dmub_rb_full()
   5954  data_count = rb->capacity - (rb->rptr - rb->wrpt);  in dmub_rb_full()
   5983  if (rb->wrpt >= rb->capacity)  in dmub_rb_push_front()
   5984  rb->wrpt %= rb->capacity;  in dmub_rb_push_front()
   6010  if (rb->wrpt >= rb->capacity)  in dmub_rb_out_push_front()
   6011  rb->wrpt %= rb->capacity;  in dmub_rb_out_push_front()
    [all …]
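Note: the DMUB helpers above compute occupancy of a byte ring from a write pointer (wrpt) and read pointer (rptr) that wrap at capacity. The wrap arithmetic, restated as a standalone sketch (the real helpers may additionally reserve slack when deciding fullness):

    #include <stdint.h>

    /* Bytes currently queued, mirroring the two cases in
     * dmub_rb_num_outstanding() above. */
    static uint32_t ring_used(uint32_t wrpt, uint32_t rptr, uint32_t capacity)
    {
            if (wrpt >= rptr)
                    return wrpt - rptr;
            return capacity - (rptr - wrpt);  /* write pointer has wrapped */
    }

    static uint32_t ring_free(uint32_t wrpt, uint32_t rptr, uint32_t capacity)
    {
            return capacity - ring_used(wrpt, rptr, capacity);
    }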
|