Lines Matching refs:buffer_size (all hits below are in drivers/android/binder_alloc.c)
64 return alloc->vm_start + alloc->buffer_size - buffer->user_data; in binder_alloc_buffer_size()
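The hit at line 64 is the tail case of binder_alloc_buffer_size(): a buffer stores no length of its own, it simply owns the bytes from its user_data up to the next buffer's user_data, and the last buffer in the list runs to the end of the mapping (vm_start + buffer_size). A minimal userspace sketch of that computation; the struct and function names are invented for the example, only the field names mirror the kernel:

#include <stddef.h>
#include <stdio.h>

struct model_buffer {
	unsigned long user_data;        /* start address of this buffer */
	struct model_buffer *next;      /* next buffer by address, NULL if last */
};

struct model_alloc {
	unsigned long vm_start;         /* start of the mmap'ed area */
	size_t buffer_size;             /* total size of the mmap'ed area */
};

/* Mirrors binder_alloc_buffer_size(): distance to the next buffer, or to the
 * end of the mapping for the last buffer in the list. */
static size_t model_buffer_size(const struct model_alloc *alloc,
				const struct model_buffer *buffer)
{
	if (!buffer->next)      /* list_is_last() in the kernel code */
		return alloc->vm_start + alloc->buffer_size - buffer->user_data;
	return buffer->next->user_data - buffer->user_data;
}

int main(void)
{
	struct model_alloc alloc = { .vm_start = 0x1000, .buffer_size = 0x4000 };
	struct model_buffer b2 = { .user_data = 0x3000, .next = NULL };
	struct model_buffer b1 = { .user_data = 0x1000, .next = &b2 };

	printf("b1 owns %zu bytes, b2 owns %zu bytes\n",
	       model_buffer_size(&alloc, &b1),   /* 0x2000 */
	       model_buffer_size(&alloc, &b2));  /* 0x2000 */
	return 0;
}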
74 size_t buffer_size; in binder_insert_free_buffer() local
90 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_insert_free_buffer()
92 if (new_buffer_size < buffer_size) in binder_insert_free_buffer()
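Lines 74-92 are binder_insert_free_buffer(), which keys the free-buffer rb-tree on the size computed above: new_buffer_size < buffer_size decides whether to descend toward smaller buffers. A sketch of just that ordering decision, using a list kept sorted by size instead of the kernel's rb-tree (all names here are invented for the example):

#include <stddef.h>
#include <stdio.h>

struct free_buf {
	size_t size;               /* what binder_alloc_buffer_size() would return */
	struct free_buf *next;
};

static void insert_free_buffer(struct free_buf **head, struct free_buf *new_buffer)
{
	struct free_buf **link = head;

	/* keep walking while the new buffer is not smaller than the current one */
	while (*link && !(new_buffer->size < (*link)->size))
		link = &(*link)->next;

	new_buffer->next = *link;
	*link = new_buffer;
}

int main(void)
{
	struct free_buf a = { .size = 512 }, b = { .size = 128 }, c = { .size = 4096 };
	struct free_buf *head = NULL;

	insert_free_buffer(&head, &a);
	insert_free_buffer(&head, &b);
	insert_free_buffer(&head, &c);
	for (struct free_buf *p = head; p; p = p->next)
		printf("%zu ", p->size);      /* 128 512 4096 */
	printf("\n");
	return 0;
}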
436 size_t buffer_size; in debug_no_space_locked() local
441 buffer_size = binder_alloc_buffer_size(alloc, buffer); in debug_no_space_locked()
443 total_alloc_size += buffer_size; in debug_no_space_locked()
444 if (buffer_size > largest_alloc_size) in debug_no_space_locked()
445 largest_alloc_size = buffer_size; in debug_no_space_locked()
450 buffer_size = binder_alloc_buffer_size(alloc, buffer); in debug_no_space_locked()
452 total_free_size += buffer_size; in debug_no_space_locked()
453 if (buffer_size > largest_free_size) in debug_no_space_locked()
454 largest_free_size = buffer_size; in debug_no_space_locked()
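Lines 436-454 are the bookkeeping behind the "no space" diagnostic in debug_no_space_locked(): it walks the allocated buffers and then the free buffers, summing buffer_size and remembering the largest in each category. The same accounting as a standalone sketch (hypothetical helper, not kernel code):

#include <stddef.h>
#include <stdio.h>

static void summarize(const size_t *sizes, size_t n, size_t *total, size_t *largest)
{
	*total = 0;
	*largest = 0;
	for (size_t i = 0; i < n; i++) {
		*total += sizes[i];
		if (sizes[i] > *largest)
			*largest = sizes[i];
	}
}

int main(void)
{
	size_t allocated[] = { 128, 4096, 512 };
	size_t freed[] = { 256, 1024 };
	size_t total, largest;

	summarize(allocated, 3, &total, &largest);
	printf("allocated: total %zu, largest %zu\n", total, largest);  /* 4736, 4096 */
	summarize(freed, 2, &total, &largest);
	printf("free:      total %zu, largest %zu\n", total, largest);  /* 1280, 1024 */
	return 0;
}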
483 if (alloc->free_async_space >= alloc->buffer_size / 10) { in debug_low_async_space_locked()
504 if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) { in debug_low_async_space_locked()
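Lines 483 and 504 are the two buffer_size-derived thresholds in debug_low_async_space_locked(): the check is skipped while at least 1/10 of the whole buffer is still free for async work, and a sender is treated as a possible oneway spammer once it holds more than 50 async buffers or more than 1/4 of the whole buffer. A sketch of those two tests (function names invented for the example):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool async_space_is_low(size_t free_async_space, size_t buffer_size)
{
	return free_async_space < buffer_size / 10;
}

static bool looks_like_oneway_spam(size_t num_buffers, size_t total_alloc_size,
				   size_t buffer_size)
{
	return num_buffers > 50 || total_alloc_size > buffer_size / 4;
}

int main(void)
{
	size_t buffer_size = 4UL * 1024 * 1024;   /* 4 MiB mapping */

	printf("%d\n", async_space_is_low(300 * 1024, buffer_size));               /* 1 */
	printf("%d\n", looks_like_oneway_spam(8, 2UL * 1024 * 1024, buffer_size)); /* 1 */
	return 0;
}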
528 size_t buffer_size; in binder_alloc_new_buf_locked() local
541 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
543 if (size < buffer_size) { in binder_alloc_new_buf_locked()
546 } else if (size > buffer_size) { in binder_alloc_new_buf_locked()
563 if (buffer_size != size) { in binder_alloc_new_buf_locked()
566 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
568 WARN_ON(n || buffer_size == size); in binder_alloc_new_buf_locked()
578 alloc->pid, size, buffer, buffer_size); in binder_alloc_new_buf_locked()
586 next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK; in binder_alloc_new_buf_locked()
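Lines 528-586 are binder_alloc_new_buf_locked(): the free tree is walked best-fit style, where size < buffer_size records the node and descends toward smaller buffers, size > buffer_size descends toward larger ones, and an exact match stops the walk; if the chosen buffer is larger than requested (buffer_size != size at line 563) it is split and the remainder reinserted as a new free buffer. A sketch of the same best-fit decision over a sorted array standing in for the rb-tree:

#include <stddef.h>
#include <stdio.h>

/* sizes[] is sorted ascending, like an in-order walk of the free rb-tree.
 * Returns the index of the smallest free buffer that still fits, or -1. */
static int best_fit(const size_t *sizes, int n, size_t want)
{
	int best = -1;
	int lo = 0, hi = n - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (want < sizes[mid]) {
			best = mid;       /* big enough: remember it, try smaller */
			hi = mid - 1;
		} else if (want > sizes[mid]) {
			lo = mid + 1;     /* too small: look at bigger buffers */
		} else {
			return mid;       /* exact fit */
		}
	}
	return best;                      /* -1 means no free buffer fits (ENOSPC) */
}

int main(void)
{
	size_t free_sizes[] = { 64, 256, 1024, 4096 };
	int idx = best_fit(free_sizes, 4, 320);

	/* picks 1024; the kernel would then carve off a 320-byte buffer and
	 * reinsert the remaining 704 bytes as a new free buffer. */
	printf("best fit for 320 bytes: %zu\n", idx >= 0 ? free_sizes[idx] : 0);
	return 0;
}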
743 size_t size, buffer_size; in binder_free_buf_locked() local
745 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_free_buf_locked()
753 alloc->pid, buffer, size, buffer_size); in binder_free_buf_locked()
756 BUG_ON(size > buffer_size); in binder_free_buf_locked()
759 BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size); in binder_free_buf_locked()
762 alloc->free_async_space += buffer_size; in binder_free_buf_locked()
769 (buffer->user_data + buffer_size) & PAGE_MASK); in binder_free_buf_locked()
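Lines 743-769 are binder_free_buf_locked(): the payload size the kernel recomputes from the buffer's data/offsets/extra sizes must not exceed the space the buffer owns, the buffer must lie inside the mapped area, and for async buffers the freed buffer_size is credited back to free_async_space. A sketch of those checks; the struct and function names are invented for the example, and assert() stands in for the kernel's BUG_ON():

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct model_alloc {
	unsigned long vm_start;
	size_t buffer_size;        /* total mapped size */
	size_t free_async_space;   /* starts at buffer_size / 2, see line 939 */
};

static void model_free_buf(struct model_alloc *alloc, unsigned long user_data,
			   size_t payload_size, size_t owned_size, int is_async)
{
	/* BUG_ON(size > buffer_size): the payload must fit in the owned space */
	assert(payload_size <= owned_size);
	/* the buffer itself must lie within the mapped area */
	assert(user_data >= alloc->vm_start);
	assert(user_data <= alloc->vm_start + alloc->buffer_size);

	if (is_async)
		alloc->free_async_space += owned_size;   /* line 762 */
}

int main(void)
{
	struct model_alloc alloc = {
		.vm_start = 0x1000,
		.buffer_size = 0x400000,          /* 4 MiB */
		.free_async_space = 0x200000,     /* half of it */
	};

	model_free_buf(&alloc, 0x2000, 300, 512, 1);
	printf("free_async_space is now %zu\n", alloc.free_async_space);
	return 0;
}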
908 if (alloc->buffer_size) { in binder_alloc_mmap_handler()
913 alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, in binder_alloc_mmap_handler()
919 alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE, in binder_alloc_mmap_handler()
939 alloc->free_async_space = alloc->buffer_size / 2; in binder_alloc_mmap_handler()
952 alloc->buffer_size = 0; in binder_alloc_mmap_handler()
1002 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_deferred_release()
1079 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_print_pages()
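Lines 908-939 are the mmap handler: buffer_size is the mapped length capped at 4 MiB, the pages array gets one slot per page of that size, and half of the space is reserved as free_async_space; the loops at lines 1002 and 1079 later iterate over the same buffer_size / PAGE_SIZE slots. The arithmetic as a standalone sketch (macro names invented, 4 KiB pages assumed):

#include <stddef.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096UL
#define MODEL_SZ_4M     (4UL * 1024 * 1024)

int main(void)
{
	unsigned long vma_len = 8UL * 1024 * 1024;   /* userspace mapped 8 MiB */
	size_t buffer_size = vma_len < MODEL_SZ_4M ? vma_len : MODEL_SZ_4M;
	size_t nr_pages = buffer_size / MODEL_PAGE_SIZE;
	size_t free_async_space = buffer_size / 2;

	printf("buffer_size=%zu nr_pages=%zu free_async_space=%zu\n",
	       buffer_size, nr_pages, free_async_space);  /* 4194304, 1024, 2097152 */
	return 0;
}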
1299 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); in check_buffer() local
1301 return buffer_size >= bytes && in check_buffer()
1302 offset <= buffer_size - bytes && in check_buffer()
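Lines 1299-1302 are check_buffer(): the bounds test is written as buffer_size >= bytes && offset <= buffer_size - bytes, the usual overflow-safe form, so neither the subtraction nor a naive offset + bytes sum can wrap. A standalone sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool range_fits(size_t buffer_size, size_t offset, size_t bytes)
{
	return buffer_size >= bytes && offset <= buffer_size - bytes;
}

int main(void)
{
	/* A naive "offset + bytes <= buffer_size" would wrap around and accept
	 * the first request; the overflow-safe form rejects it. */
	printf("%d\n", range_fits(4096, 16, (size_t)-1));   /* 0: rejected */
	printf("%d\n", range_fits(4096, 16, 128));          /* 1: fits */
	return 0;
}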