References to BPF_REG_SIZE; every hit below is in kernel/bpf/verifier.c. BPF_REG_SIZE is 8, the size of an eBPF register in bytes (defined in include/linux/bpf_verifier.h), and it sets the granularity of the verifier's stack tracking: the BPF stack is modeled as 8-byte slots, each carrying a per-byte slot_type[BPF_REG_SIZE] tag array.
591 return (-off - 1) / BPF_REG_SIZE; in __get_spi()
604 int allocated_slots = state->allocated_stack / BPF_REG_SIZE; in is_spi_bounds_valid()
627 if (off % BPF_REG_SIZE) { in stack_slot_obj_get_spi()
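
These first hits are the slot-index helpers: the verifier addresses the BPF stack in 8-byte slots, and a negative frame-pointer offset is turned into a slot index ("spi") before anything else is checked; stack_slot_obj_get_spi() additionally rejects offsets that are not slot-aligned (off % BPF_REG_SIZE). A minimal standalone sketch of the arithmetic, with hypothetical names get_spi()/spi_in_bounds() standing in for the kernel helpers, which also take verifier state and report errors:

    #include <stdbool.h>

    #define BPF_REG_SIZE 8	/* size of an eBPF register in bytes */

    /* fp-1..fp-8 map to spi 0, fp-9..fp-16 to spi 1, and so on;
     * mirrors the (-off - 1) / BPF_REG_SIZE expression in __get_spi().
     */
    static int get_spi(int off)
    {
        return (-off - 1) / BPF_REG_SIZE;
    }

    /* Slots [spi - nr_slots + 1, spi] must all lie within the stack
     * allocated so far, the check done by is_spi_bounds_valid().
     */
    static bool spi_in_bounds(int spi, int nr_slots, int allocated_stack)
    {
        int allocated_slots = allocated_stack / BPF_REG_SIZE;

        return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
    }
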
750 for (i = 0; i < BPF_REG_SIZE; i++) { in mark_stack_slots_dynptr()
788 for (i = 0; i < BPF_REG_SIZE; i++) { in invalidate_dynptr()
848 for (i = 1; i < state->allocated_stack / BPF_REG_SIZE; i++) { in unmark_stack_slots_dynptr()
906 for (i = 0; i < BPF_REG_SIZE; i++) { in destroy_if_dynptr_stack_slot()
982 for (i = 0; i < BPF_REG_SIZE; i++) { in is_dynptr_reg_valid_init()
1054 for (j = 0; j < BPF_REG_SIZE; j++) in mark_stack_slots_iter()
1085 for (j = 0; j < BPF_REG_SIZE; j++) in unmark_stack_slots_iter()
1113 for (j = 0; j < BPF_REG_SIZE; j++) in is_iter_reg_valid_uninit()
1145 for (j = 0; j < BPF_REG_SIZE; j++) in is_iter_reg_valid_init()
1183 for (i = 0; i < BPF_REG_SIZE; i++) in mark_stack_slot_irq_flag()
1236 for (i = 0; i < BPF_REG_SIZE; i++) in unmark_stack_slot_irq_flag()
1261 for (i = 0; i < BPF_REG_SIZE; i++) in is_irq_flag_reg_valid_uninit()
1284 for (i = 0; i < BPF_REG_SIZE; i++) in is_irq_flag_reg_valid_init()
1298 enum bpf_stack_slot_type type = stack->slot_type[BPF_REG_SIZE - 1]; in is_stack_slot_special()
1321 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; in is_spilled_reg()
1326 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL && in is_spilled_scalar_reg()
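
The dynptr, iterator and IRQ-flag helpers above all share one idiom: marking an object tags every byte of every slot it occupies, and the reverse queries (is_stack_slot_special(), is_spilled_reg(), is_spilled_scalar_reg()) need only inspect the last byte, slot_type[BPF_REG_SIZE - 1], because marking always covers it. A sketch of both directions under assumed, simplified types (the real enum bpf_stack_slot_type lives in include/linux/bpf_verifier.h):

    #include <stdbool.h>

    #define BPF_REG_SIZE 8

    enum slot_tag { SLOT_INVALID, SLOT_SPILL, SLOT_DYNPTR, SLOT_ITER, SLOT_IRQ_FLAG };

    struct stack_slot {
        unsigned char slot_type[BPF_REG_SIZE];	/* one tag per byte */
    };

    /* Tag every byte of the nr_slots slots ending at spi, the shape of
     * the for (i = 0; i < BPF_REG_SIZE; i++) loops matched above.
     */
    static void mark_slots(struct stack_slot *stack, int spi, int nr_slots,
                           enum slot_tag tag)
    {
        for (int s = 0; s < nr_slots; s++)
            for (int i = 0; i < BPF_REG_SIZE; i++)
                stack[spi - s].slot_type[i] = tag;
    }

    /* One byte classifies the whole slot, cf. is_spilled_reg(). */
    static bool slot_is(const struct stack_slot *slot, enum slot_tag tag)
    {
        return slot->slot_type[BPF_REG_SIZE - 1] == tag;
    }
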
1438 size_t n = src->allocated_stack / BPF_REG_SIZE; in copy_stack_state()
1465 size_t old_n = state->allocated_stack / BPF_REG_SIZE, n; in grow_stack_state()
1468 size = round_up(size, BPF_REG_SIZE); in grow_stack_state()
1469 n = size / BPF_REG_SIZE; in grow_stack_state()
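
copy_stack_state() and grow_stack_state() size everything in whole slots: the requested byte size is rounded up to a multiple of BPF_REG_SIZE before the per-slot array is copied or reallocated. The rounding, spelled out (round_up() with a power-of-two alignment reduces to a mask operation):

    #define BPF_REG_SIZE 8

    /* round_up(size, BPF_REG_SIZE) followed by size / BPF_REG_SIZE,
     * as in grow_stack_state(); e.g. a 12-byte request becomes 2 slots.
     */
    static int stack_slots_for(int size)
    {
        int rounded = (size + BPF_REG_SIZE - 1) & ~(BPF_REG_SIZE - 1);

        return rounded / BPF_REG_SIZE;
    }
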
4527 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_precise()
4557 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_imprecise()
4786 if (i >= func->allocated_stack / BPF_REG_SIZE) { in __mark_chain_precision()
4788 i, func->allocated_stack / BPF_REG_SIZE); in __mark_chain_precision()
4946 if (size == BPF_REG_SIZE) in save_register_state()
4949 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) in save_register_state()
5002 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; in check_stack_write_fixed_off()
5013 size != BPF_REG_SIZE) { in check_stack_write_fixed_off()
5043 if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) { in check_stack_write_fixed_off()
5054 } else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) && in check_stack_write_fixed_off()
5064 if (size != BPF_REG_SIZE) { in check_stack_write_fixed_off()
5081 for (i = 0; i < BPF_REG_SIZE; i++) in check_stack_write_fixed_off()
5092 if (size == BPF_REG_SIZE) in check_stack_write_fixed_off()
5112 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type; in check_stack_write_fixed_off()
5183 spi = slot / BPF_REG_SIZE; in check_stack_write_var_off()
5184 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_write_var_off()
5271 spi = slot / BPF_REG_SIZE; in mark_reg_stack_read()
5274 if (stype[slot % BPF_REG_SIZE] != STACK_ZERO) in mark_reg_stack_read()
5306 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; in check_stack_read_fixed_off()
5320 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--) in check_stack_read_fixed_off()
5323 if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) { in check_stack_read_fixed_off()
5353 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
5407 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
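
The spill/fill cluster is the largest. On a store, check_stack_write_fixed_off() keeps full register state only for writes that are aligned and register-sized (the size != BPF_REG_SIZE and off % BPF_REG_SIZE tests above), and save_register_state() tags the written bytes from the top of the slot downward; on a load, check_stack_read_fixed_off() counts contiguous STACK_SPILL bytes to recover the spill size. Both directions, sketched with hypothetical helpers over a bare tag array:

    #define BPF_REG_SIZE 8

    enum { SLOT_INVALID, SLOT_SPILL, SLOT_MISC, SLOT_ZERO };

    /* Tag the size high bytes of a slot as a spilled register, top down,
     * the for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) loop of
     * save_register_state().
     */
    static void mark_spill(unsigned char slot_type[BPF_REG_SIZE], int size)
    {
        for (int i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
            slot_type[i - 1] = SLOT_SPILL;
    }

    /* Recover the spill size on a read by counting contiguous SLOT_SPILL
     * bytes downward from the top, as check_stack_read_fixed_off() does.
     */
    static int spill_size(const unsigned char slot_type[BPF_REG_SIZE])
    {
        int size = 1;

        for (int i = BPF_REG_SIZE - 1; i > 0 && slot_type[i - 1] == SLOT_SPILL; i--)
            size++;

        return size;
    }

The round trip checks out: on a zeroed array, mark_spill(st, 4) sets bytes 4..7 and spill_size(st) then returns 4.
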
7648 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && in check_mem_access()
7971 if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) { in check_stack_range_initialized()
7985 spi = slot / BPF_REG_SIZE; in check_stack_range_initialized()
7991 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_range_initialized()
8008 for (j = 0; j < BPF_REG_SIZE; j++) in check_stack_range_initialized()
8658 nr_slots = t->size / BPF_REG_SIZE; in process_iter_arg()
8668 for (i = 0; i < nr_slots * 8; i += BPF_REG_SIZE) { in process_iter_arg()
8783 for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) { in widen_imprecise_scalars()
9475 spi = slot / BPF_REG_SIZE; in get_constant_map_key()
9476 off = slot % BPF_REG_SIZE; in get_constant_map_key()
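
check_stack_range_initialized() and get_constant_map_key() decompose a byte-granular stack position into a (slot, byte-within-slot) pair with one division and one remainder; process_iter_arg() goes the other way, deriving a slot count from a BTF type size (nr_slots = t->size / BPF_REG_SIZE). The decomposition:

    #define BPF_REG_SIZE 8

    /* Slot-byte index -> (spi, byte within slot), as in
     * get_constant_map_key(): byte 11 of the stack is byte 3 of spi 1.
     */
    static void split_slot(int slot, int *spi, int *off)
    {
        *spi = slot / BPF_REG_SIZE;
        *off = slot % BPF_REG_SIZE;
    }
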
16313 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in collect_linked_regs()
17365 for (i = 1, off = lowest_off; i <= ARRAY_SIZE(caller_saved); ++i, off += BPF_REG_SIZE) { in mark_fastcall_pattern_for_call()
18213 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { in clean_func_state()
18219 for (j = 0; j < BPF_REG_SIZE; j++) in clean_func_state()
18486 spi = i / BPF_REG_SIZE; in stacksafe()
18490 old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
18491 cur->stack[spi].slot_type[i % BPF_REG_SIZE])) in stacksafe()
18496 i += BPF_REG_SIZE - 1; in stacksafe()
18501 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) in stacksafe()
18505 old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC) in stacksafe()
18524 i += BPF_REG_SIZE - 1; in stacksafe()
18532 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && in stacksafe()
18533 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) in stacksafe()
18535 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
18536 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) in stacksafe()
18543 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) in stacksafe()
18546 switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) { in stacksafe()
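
stacksafe() compares two stack states byte by byte, and two of its asymmetries show up in the hits above: a byte that is STACK_INVALID in the old (already verified) state matches anything, since it was never read on the verified path, and STACK_MISC in old accepts STACK_ZERO in cur, zero being a safe special case of arbitrary scalar bytes. Once a spilled slot is handled, i += BPF_REG_SIZE - 1 skips to the next slot so the spilled register state is compared once per slot, not once per byte. The per-byte acceptance rule, as a sketch over simplified tags:

    #include <stdbool.h>

    enum { SLOT_INVALID, SLOT_SPILL, SLOT_MISC, SLOT_ZERO };

    /* Is a byte of the current state acceptable where the old, already
     * verified state had old_tag? Mirrors the stacksafe() hits above.
     */
    static bool stack_byte_safe(unsigned char old_tag, unsigned char cur_tag)
    {
        if (old_tag == SLOT_INVALID)
            return true;		/* never read on the verified path */
        if (old_tag == SLOT_MISC && cur_tag == SLOT_ZERO)
            return true;		/* zero is a safe kind of MISC */
        return old_tag == cur_tag;
    }
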
18812 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && in propagate_liveness()
18813 i < parent->allocated_stack / BPF_REG_SIZE; i++) { in propagate_liveness()
18855 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in propagate_precision()
18866 fr, (-i - 1) * BPF_REG_SIZE); in propagate_precision()
18868 verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE); in propagate_precision()
18973 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in iter_active_depths_differ()
19310 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { in is_state_visited()
22464 s32 r6_offset = stack_base + 0 * BPF_REG_SIZE; in inline_bpf_loop()
22465 s32 r7_offset = stack_base + 1 * BPF_REG_SIZE; in inline_bpf_loop()
22466 s32 r8_offset = stack_base + 2 * BPF_REG_SIZE; in inline_bpf_loop()
22567 stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup; in optimize_bpf_loop()
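
The final hits are the bpf_loop() inlining: inline_bpf_loop() uses three consecutive 8-byte scratch slots above a stack base to preserve R6-R8 around the inlined body, so optimize_bpf_loop() grows the stack depth by 3 * BPF_REG_SIZE plus rounding. The slot layout, as a small sketch (scratch_offsets() is a hypothetical name):

    #define BPF_REG_SIZE 8

    /* Three consecutive register-save slots starting at stack_base,
     * matching the r6/r7/r8 offsets computed in inline_bpf_loop().
     */
    static void scratch_offsets(int stack_base, int off[3])
    {
        for (int i = 0; i < 3; i++)
            off[i] = stack_base + i * BPF_REG_SIZE;	/* R6, R7, R8 */
    }
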