Lines matching refs:alloc in drivers/android/binder_alloc.c

60 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,  in binder_alloc_buffer_size()  argument
63 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size()
64 return alloc->vm_start + alloc->buffer_size - buffer->user_data; in binder_alloc_buffer_size()
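
The matches at lines 60-64 show how a buffer's size is obtained: it is never stored, but derived from the gap to the next buffer in the address-ordered alloc->buffers list, or to the end of the mapped region for the last entry. A minimal reconstruction of the helper, assuming a binder_buffer_next() accessor for the following list entry (that call does not reference alloc, so it does not appear in the matches):

    static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                           struct binder_buffer *buffer)
    {
            /* Last buffer: runs to the end of the mapped region */
            if (list_is_last(&buffer->entry, &alloc->buffers))
                    return alloc->vm_start + alloc->buffer_size - buffer->user_data;
            /* Otherwise: runs up to the start of the next buffer */
            return binder_buffer_next(buffer)->user_data - buffer->user_data;
    }
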
68 static void binder_insert_free_buffer(struct binder_alloc *alloc, in binder_insert_free_buffer() argument
71 struct rb_node **p = &alloc->free_buffers.rb_node; in binder_insert_free_buffer()
79 new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer); in binder_insert_free_buffer()
83 alloc->pid, new_buffer_size, new_buffer); in binder_insert_free_buffer()
90 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_insert_free_buffer()
98 rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); in binder_insert_free_buffer()
102 struct binder_alloc *alloc, struct binder_buffer *new_buffer) in binder_insert_allocated_buffer_locked() argument
104 struct rb_node **p = &alloc->allocated_buffers.rb_node; in binder_insert_allocated_buffer_locked()
123 rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); in binder_insert_allocated_buffer_locked()
127 struct binder_alloc *alloc, in binder_alloc_prepare_to_free_locked() argument
130 struct rb_node *n = alloc->allocated_buffers.rb_node; in binder_alloc_prepare_to_free_locked()
167 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, in binder_alloc_prepare_to_free() argument
172 mutex_lock(&alloc->mutex); in binder_alloc_prepare_to_free()
173 buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); in binder_alloc_prepare_to_free()
174 mutex_unlock(&alloc->mutex); in binder_alloc_prepare_to_free()
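
Lines 167-174 show the locked/unlocked split used throughout this file: the exported entry point only takes alloc->mutex and delegates to the *_locked variant. A sketch of that wrapper shape; the unsigned long type of user_ptr is an assumption, since the matches truncate the signature:

    struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                                                       unsigned long user_ptr)
    {
            struct binder_buffer *buffer;

            mutex_lock(&alloc->mutex);
            buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
            mutex_unlock(&alloc->mutex);
            return buffer;
    }
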
179 binder_set_installed_page(struct binder_alloc *alloc, in binder_set_installed_page() argument
184 smp_store_release(&alloc->pages[index], page); in binder_set_installed_page()
188 binder_get_installed_page(struct binder_alloc *alloc, unsigned long index) in binder_get_installed_page() argument
191 return smp_load_acquire(&alloc->pages[index]); in binder_get_installed_page()
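
Lines 179-191 show that alloc->pages[] entries are published with smp_store_release() and read with smp_load_acquire(), so a reader that sees a non-NULL page also sees everything written to it before the store. A sketch of the pair, with the struct page * element type inferred from the surrounding matches rather than shown here:

    static void binder_set_installed_page(struct binder_alloc *alloc,
                                          unsigned long index,
                                          struct page *page)
    {
            /* Pairs with the acquire in binder_get_installed_page() */
            smp_store_release(&alloc->pages[index], page);
    }

    static struct page *binder_get_installed_page(struct binder_alloc *alloc,
                                                  unsigned long index)
    {
            /* Pairs with the release in binder_set_installed_page() */
            return smp_load_acquire(&alloc->pages[index]);
    }
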
194 static void binder_lru_freelist_add(struct binder_alloc *alloc, in binder_lru_freelist_add() argument
200 trace_binder_update_page_range(alloc, false, start, end); in binder_lru_freelist_add()
206 index = (page_addr - alloc->vm_start) / PAGE_SIZE; in binder_lru_freelist_add()
207 page = binder_get_installed_page(alloc, index); in binder_lru_freelist_add()
211 trace_binder_free_lru_start(alloc, index); in binder_lru_freelist_add()
219 trace_binder_free_lru_end(alloc, index); in binder_lru_freelist_add()
224 void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state) in binder_alloc_set_mapped() argument
227 smp_store_release(&alloc->mapped, state); in binder_alloc_set_mapped()
230 static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc) in binder_alloc_is_mapped() argument
233 return smp_load_acquire(&alloc->mapped); in binder_alloc_is_mapped()
236 static struct page *binder_page_lookup(struct binder_alloc *alloc, in binder_page_lookup() argument
239 struct mm_struct *mm = alloc->mm; in binder_page_lookup()
248 if (binder_alloc_is_mapped(alloc)) in binder_page_lookup()
256 static int binder_page_insert(struct binder_alloc *alloc, in binder_page_insert() argument
260 struct mm_struct *mm = alloc->mm; in binder_page_insert()
267 if (binder_alloc_is_mapped(alloc)) in binder_page_insert()
276 if (vma && binder_alloc_is_mapped(alloc)) in binder_page_insert()
283 static struct page *binder_page_alloc(struct binder_alloc *alloc, in binder_page_alloc() argument
300 mdata->alloc = alloc; in binder_page_alloc()
314 static int binder_install_single_page(struct binder_alloc *alloc, in binder_install_single_page() argument
321 if (!mmget_not_zero(alloc->mm)) in binder_install_single_page()
324 page = binder_page_alloc(alloc, index); in binder_install_single_page()
330 ret = binder_page_insert(alloc, addr, page); in binder_install_single_page()
340 page = binder_page_lookup(alloc, addr); in binder_install_single_page()
343 alloc->pid, addr - alloc->vm_start); in binder_install_single_page()
350 binder_set_installed_page(alloc, index, page); in binder_install_single_page()
355 alloc->pid, __func__, addr - alloc->vm_start, ret); in binder_install_single_page()
359 mmput_async(alloc->mm); in binder_install_single_page()
363 static int binder_install_buffer_pages(struct binder_alloc *alloc, in binder_install_buffer_pages() argument
377 index = (page_addr - alloc->vm_start) / PAGE_SIZE; in binder_install_buffer_pages()
378 if (binder_get_installed_page(alloc, index)) in binder_install_buffer_pages()
381 trace_binder_alloc_page_start(alloc, index); in binder_install_buffer_pages()
383 ret = binder_install_single_page(alloc, index, page_addr); in binder_install_buffer_pages()
387 trace_binder_alloc_page_end(alloc, index); in binder_install_buffer_pages()
394 static void binder_lru_freelist_del(struct binder_alloc *alloc, in binder_lru_freelist_del() argument
400 trace_binder_update_page_range(alloc, true, start, end); in binder_lru_freelist_del()
406 index = (page_addr - alloc->vm_start) / PAGE_SIZE; in binder_lru_freelist_del()
407 page = binder_get_installed_page(alloc, index); in binder_lru_freelist_del()
410 trace_binder_alloc_lru_start(alloc, index); in binder_lru_freelist_del()
418 trace_binder_alloc_lru_end(alloc, index); in binder_lru_freelist_del()
422 if (index + 1 > alloc->pages_high) in binder_lru_freelist_del()
423 alloc->pages_high = index + 1; in binder_lru_freelist_del()
427 static void debug_no_space_locked(struct binder_alloc *alloc) in debug_no_space_locked() argument
439 for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) { in debug_no_space_locked()
441 buffer_size = binder_alloc_buffer_size(alloc, buffer); in debug_no_space_locked()
448 for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) { in debug_no_space_locked()
450 buffer_size = binder_alloc_buffer_size(alloc, buffer); in debug_no_space_locked()
464 static bool debug_low_async_space_locked(struct binder_alloc *alloc) in debug_low_async_space_locked() argument
483 if (alloc->free_async_space >= alloc->buffer_size / 10) { in debug_low_async_space_locked()
484 alloc->oneway_spam_detected = false; in debug_low_async_space_locked()
488 for (n = rb_first(&alloc->allocated_buffers); n != NULL; in debug_low_async_space_locked()
495 total_alloc_size += binder_alloc_buffer_size(alloc, buffer); in debug_low_async_space_locked()
504 if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) { in debug_low_async_space_locked()
507 alloc->pid, pid, num_buffers, total_alloc_size); in debug_low_async_space_locked()
508 if (!alloc->oneway_spam_detected) { in debug_low_async_space_locked()
509 alloc->oneway_spam_detected = true; in debug_low_async_space_locked()
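
Lines 464-509 hold the oneway-spam heuristic: nothing is counted while free async space stays at or above a tenth of the buffer area; below that, the allocated tree is walked and alloc->oneway_spam_detected is latched once the caller holds more than 50 async buffers or more than a quarter of the area. A hedged sketch of that logic; the buffer->pid and buffer->async_transaction fields and the use of current->tgid are assumptions inferred from the visible fragments, not confirmed by them:

    static bool debug_low_async_space_locked(struct binder_alloc *alloc)
    {
            size_t total_alloc_size = 0;
            int pid = current->tgid;        /* assumed source of 'pid' */
            size_t num_buffers = 0;
            struct rb_node *n;

            /* Plenty of async space left: clear the latch and bail out */
            if (alloc->free_async_space >= alloc->buffer_size / 10) {
                    alloc->oneway_spam_detected = false;
                    return false;
            }

            /* Sum the async buffers still held on behalf of this caller */
            for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) {
                    struct binder_buffer *buffer =
                            rb_entry(n, struct binder_buffer, rb_node);

                    if (buffer->pid != pid || !buffer->async_transaction)
                            continue;
                    total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
                    num_buffers++;
            }

            /* Either threshold latches the spam flag, reported only once */
            if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
                    if (!alloc->oneway_spam_detected) {
                            alloc->oneway_spam_detected = true;
                            return true;
                    }
            }
            return false;
    }
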
518 struct binder_alloc *alloc, in binder_alloc_new_buf_locked() argument
523 struct rb_node *n = alloc->free_buffers.rb_node; in binder_alloc_new_buf_locked()
530 if (is_async && alloc->free_async_space < size) { in binder_alloc_new_buf_locked()
533 alloc->pid, size); in binder_alloc_new_buf_locked()
541 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
557 alloc->pid, size); in binder_alloc_new_buf_locked()
558 debug_no_space_locked(alloc); in binder_alloc_new_buf_locked()
566 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
572 binder_insert_free_buffer(alloc, new_buffer); in binder_alloc_new_buf_locked()
578 alloc->pid, size, buffer, buffer_size); in binder_alloc_new_buf_locked()
588 binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data), in binder_alloc_new_buf_locked()
591 rb_erase(&buffer->rb_node, &alloc->free_buffers); in binder_alloc_new_buf_locked()
594 binder_insert_allocated_buffer_locked(alloc, buffer); in binder_alloc_new_buf_locked()
598 alloc->free_async_space -= size; in binder_alloc_new_buf_locked()
601 alloc->pid, size, alloc->free_async_space); in binder_alloc_new_buf_locked()
602 if (debug_low_async_space_locked(alloc)) in binder_alloc_new_buf_locked()
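
Lines 518-602 are the core allocator, binder_alloc_new_buf_locked(): it walks alloc->free_buffers, an rbtree of free buffers keyed by size, keeping the smallest buffer that still fits. A sketch of the best-fit search loop only (the splitting and bookkeeping that follow appear in the later matches above); the best_fit cursor is a local of the real function, reconstructed here:

    struct rb_node *n = alloc->free_buffers.rb_node;
    struct rb_node *best_fit = NULL;
    struct binder_buffer *buffer;
    size_t buffer_size;

    while (n) {
            buffer = rb_entry(n, struct binder_buffer, rb_node);
            buffer_size = binder_alloc_buffer_size(alloc, buffer);

            if (size < buffer_size) {
                    /* Fits, but keep looking for a tighter fit */
                    best_fit = n;
                    n = n->rb_left;
            } else if (size > buffer_size) {
                    n = n->rb_right;
            } else {
                    /* Exact fit, stop searching */
                    best_fit = n;
                    break;
            }
    }
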
649 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, in binder_alloc_new_buf() argument
660 if (!binder_alloc_is_mapped(alloc)) { in binder_alloc_new_buf()
663 alloc->pid); in binder_alloc_new_buf()
671 alloc->pid, data_size, offsets_size, in binder_alloc_new_buf()
681 mutex_lock(&alloc->mutex); in binder_alloc_new_buf()
682 buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async); in binder_alloc_new_buf()
684 mutex_unlock(&alloc->mutex); in binder_alloc_new_buf()
692 mutex_unlock(&alloc->mutex); in binder_alloc_new_buf()
694 ret = binder_install_buffer_pages(alloc, buffer, size); in binder_alloc_new_buf()
696 binder_alloc_free_buf(alloc, buffer); in binder_alloc_new_buf()
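
Lines 649-696 show the two-phase allocation path: the buffer is carved out under alloc->mutex, the mutex is dropped, and only then are the backing pages installed; if installation fails, the buffer goes straight back through binder_alloc_free_buf(). A condensed sketch of that flow (the error handling between the visible lines is simplified; 'next' is the spare binder_buffer preallocated for splitting, per line 682):

    mutex_lock(&alloc->mutex);
    buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
    mutex_unlock(&alloc->mutex);
    if (IS_ERR(buffer))
            return buffer;

    /* Page installation may allocate and fault, so it runs unlocked */
    ret = binder_install_buffer_pages(alloc, buffer, size);
    if (ret) {
            binder_alloc_free_buf(alloc, buffer);
            buffer = ERR_PTR(ret);
    }
    return buffer;
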
713 static void binder_delete_free_buffer(struct binder_alloc *alloc, in binder_delete_free_buffer() argument
721 BUG_ON(alloc->buffers.next == &buffer->entry); in binder_delete_free_buffer()
727 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_delete_free_buffer()
733 binder_lru_freelist_add(alloc, buffer_start_page(buffer), in binder_delete_free_buffer()
740 static void binder_free_buf_locked(struct binder_alloc *alloc, in binder_free_buf_locked() argument
745 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_free_buf_locked()
753 alloc->pid, buffer, size, buffer_size); in binder_free_buf_locked()
758 BUG_ON(buffer->user_data < alloc->vm_start); in binder_free_buf_locked()
759 BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size); in binder_free_buf_locked()
762 alloc->free_async_space += buffer_size; in binder_free_buf_locked()
765 alloc->pid, size, alloc->free_async_space); in binder_free_buf_locked()
768 binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data), in binder_free_buf_locked()
771 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); in binder_free_buf_locked()
773 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_free_buf_locked()
777 rb_erase(&next->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
778 binder_delete_free_buffer(alloc, next); in binder_free_buf_locked()
781 if (alloc->buffers.next != &buffer->entry) { in binder_free_buf_locked()
785 binder_delete_free_buffer(alloc, buffer); in binder_free_buf_locked()
786 rb_erase(&prev->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
790 binder_insert_free_buffer(alloc, buffer); in binder_free_buf_locked()
812 static struct page *binder_alloc_get_page(struct binder_alloc *alloc, in binder_alloc_get_page() argument
818 (buffer->user_data - alloc->vm_start); in binder_alloc_get_page()
824 return alloc->pages[index]; in binder_alloc_get_page()
834 static void binder_alloc_clear_buf(struct binder_alloc *alloc, in binder_alloc_clear_buf() argument
837 size_t bytes = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_clear_buf()
845 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_clear_buf()
861 void binder_alloc_free_buf(struct binder_alloc *alloc, in binder_alloc_free_buf() argument
873 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_free_buf()
876 mutex_lock(&alloc->mutex); in binder_alloc_free_buf()
877 binder_free_buf_locked(alloc, buffer); in binder_alloc_free_buf()
878 mutex_unlock(&alloc->mutex); in binder_alloc_free_buf()
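
Lines 861-878 show that binder_alloc_clear_buf() runs before alloc->mutex is taken, so zeroing a large clear-on-free payload does not lengthen the critical section; only the bookkeeping in binder_free_buf_locked() happens under the lock. A sketch, assuming the buffer->clear_on_free flag guards the clearing path:

    void binder_alloc_free_buf(struct binder_alloc *alloc,
                               struct binder_buffer *buffer)
    {
            /* Zero sensitive payloads outside the mutex to limit hold time */
            if (buffer->clear_on_free) {
                    binder_alloc_clear_buf(alloc, buffer);
                    buffer->clear_on_free = false;
            }
            mutex_lock(&alloc->mutex);
            binder_free_buf_locked(alloc, buffer);
            mutex_unlock(&alloc->mutex);
    }
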
894 int binder_alloc_mmap_handler(struct binder_alloc *alloc, in binder_alloc_mmap_handler() argument
901 if (unlikely(vma->vm_mm != alloc->mm)) { in binder_alloc_mmap_handler()
908 if (alloc->buffer_size) { in binder_alloc_mmap_handler()
913 alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, in binder_alloc_mmap_handler()
917 alloc->vm_start = vma->vm_start; in binder_alloc_mmap_handler()
919 alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE, in binder_alloc_mmap_handler()
920 sizeof(alloc->pages[0]), in binder_alloc_mmap_handler()
922 if (!alloc->pages) { in binder_alloc_mmap_handler()
935 buffer->user_data = alloc->vm_start; in binder_alloc_mmap_handler()
936 list_add(&buffer->entry, &alloc->buffers); in binder_alloc_mmap_handler()
938 binder_insert_free_buffer(alloc, buffer); in binder_alloc_mmap_handler()
939 alloc->free_async_space = alloc->buffer_size / 2; in binder_alloc_mmap_handler()
942 binder_alloc_set_mapped(alloc, true); in binder_alloc_mmap_handler()
947 kvfree(alloc->pages); in binder_alloc_mmap_handler()
948 alloc->pages = NULL; in binder_alloc_mmap_handler()
950 alloc->vm_start = 0; in binder_alloc_mmap_handler()
952 alloc->buffer_size = 0; in binder_alloc_mmap_handler()
958 alloc->pid, vma->vm_start, vma->vm_end, in binder_alloc_mmap_handler()
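
Lines 894-958 cover binder_alloc_mmap_handler(): the mapping size is clamped by the visible min_t() (the upper bound is cut off in the match), alloc->pages is sized to buffer_size / PAGE_SIZE, and a single free buffer spanning the whole area seeds the free tree, with half the area reserved for async transactions. A sketch of the seeding step only; the buffer->free flag is an assumption, the other lines follow the matches:

    buffer->user_data = alloc->vm_start;
    list_add(&buffer->entry, &alloc->buffers);
    buffer->free = 1;                               /* assumed flag */
    binder_insert_free_buffer(alloc, buffer);
    alloc->free_async_space = alloc->buffer_size / 2;
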
964 void binder_alloc_deferred_release(struct binder_alloc *alloc) in binder_alloc_deferred_release() argument
971 mutex_lock(&alloc->mutex); in binder_alloc_deferred_release()
972 BUG_ON(alloc->mapped); in binder_alloc_deferred_release()
974 while ((n = rb_first(&alloc->allocated_buffers))) { in binder_alloc_deferred_release()
981 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_deferred_release()
984 binder_free_buf_locked(alloc, buffer); in binder_alloc_deferred_release()
988 while (!list_empty(&alloc->buffers)) { in binder_alloc_deferred_release()
989 buffer = list_first_entry(&alloc->buffers, in binder_alloc_deferred_release()
994 WARN_ON_ONCE(!list_empty(&alloc->buffers)); in binder_alloc_deferred_release()
999 if (alloc->pages) { in binder_alloc_deferred_release()
1002 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_deferred_release()
1006 page = binder_get_installed_page(alloc, i); in binder_alloc_deferred_release()
1016 __func__, alloc->pid, i, in binder_alloc_deferred_release()
1022 mutex_unlock(&alloc->mutex); in binder_alloc_deferred_release()
1023 kvfree(alloc->pages); in binder_alloc_deferred_release()
1024 if (alloc->mm) in binder_alloc_deferred_release()
1025 mmdrop(alloc->mm); in binder_alloc_deferred_release()
1029 __func__, alloc->pid, buffers, page_count); in binder_alloc_deferred_release()
1041 struct binder_alloc *alloc) in binder_alloc_print_allocated() argument
1046 mutex_lock(&alloc->mutex); in binder_alloc_print_allocated()
1047 for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) { in binder_alloc_print_allocated()
1051 buffer->user_data - alloc->vm_start, in binder_alloc_print_allocated()
1056 mutex_unlock(&alloc->mutex); in binder_alloc_print_allocated()
1065 struct binder_alloc *alloc) in binder_alloc_print_pages() argument
1073 mutex_lock(&alloc->mutex); in binder_alloc_print_pages()
1078 if (binder_alloc_is_mapped(alloc)) { in binder_alloc_print_pages()
1079 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_print_pages()
1080 page = binder_get_installed_page(alloc, i); in binder_alloc_print_pages()
1089 mutex_unlock(&alloc->mutex); in binder_alloc_print_pages()
1091 seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); in binder_alloc_print_pages()
1100 int binder_alloc_get_allocated_count(struct binder_alloc *alloc) in binder_alloc_get_allocated_count() argument
1105 mutex_lock(&alloc->mutex); in binder_alloc_get_allocated_count()
1106 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_get_allocated_count()
1108 mutex_unlock(&alloc->mutex); in binder_alloc_get_allocated_count()
1121 void binder_alloc_vma_close(struct binder_alloc *alloc) in binder_alloc_vma_close() argument
1123 binder_alloc_set_mapped(alloc, false); in binder_alloc_vma_close()
1141 struct binder_alloc *alloc = mdata->alloc; in binder_alloc_free_page() local
1142 struct mm_struct *mm = alloc->mm; in binder_alloc_free_page()
1153 page_addr = alloc->vm_start + index * PAGE_SIZE; in binder_alloc_free_page()
1165 if (!mutex_trylock(&alloc->mutex)) in binder_alloc_free_page()
1173 if (vma && !binder_alloc_is_mapped(alloc)) in binder_alloc_free_page()
1176 trace_binder_unmap_kernel_start(alloc, index); in binder_alloc_free_page()
1178 page_to_free = alloc->pages[index]; in binder_alloc_free_page()
1179 binder_set_installed_page(alloc, index, NULL); in binder_alloc_free_page()
1181 trace_binder_unmap_kernel_end(alloc, index); in binder_alloc_free_page()
1187 trace_binder_unmap_user_start(alloc, index); in binder_alloc_free_page()
1191 trace_binder_unmap_user_end(alloc, index); in binder_alloc_free_page()
1194 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
1205 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
1239 void binder_alloc_init(struct binder_alloc *alloc) in binder_alloc_init() argument
1241 alloc->pid = current->group_leader->pid; in binder_alloc_init()
1242 alloc->mm = current->mm; in binder_alloc_init()
1243 mmgrab(alloc->mm); in binder_alloc_init()
1244 mutex_init(&alloc->mutex); in binder_alloc_init()
1245 INIT_LIST_HEAD(&alloc->buffers); in binder_alloc_init()
1295 static inline bool check_buffer(struct binder_alloc *alloc, in check_buffer() argument
1299 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); in check_buffer()
1321 binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, in binder_alloc_copy_user_to_buffer() argument
1327 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_copy_user_to_buffer()
1337 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_copy_user_to_buffer()
1352 static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc, in binder_alloc_do_buffer_copy() argument
1360 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_do_buffer_copy()
1368 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_do_buffer_copy()
1383 int binder_alloc_copy_to_buffer(struct binder_alloc *alloc, in binder_alloc_copy_to_buffer() argument
1389 return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset, in binder_alloc_copy_to_buffer()
1393 int binder_alloc_copy_from_buffer(struct binder_alloc *alloc, in binder_alloc_copy_from_buffer() argument
1399 return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, in binder_alloc_copy_from_buffer()