// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_freelist;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
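
/*
 * Worked example (a sketch; the addresses are illustrative only): with
 * alloc->buffer == 0x4000, alloc->buffer_size == 0x2000, and buffers at
 * user_data 0x4000 and 0x4800, the first buffer's size is
 * 0x4800 - 0x4000 == 0x800 and the last one's is
 * 0x4000 + 0x2000 - 0x4800 == 0x1800. A buffer's size is implicit in its
 * position in the alloc->buffers list, so no size field is stored.
 */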

static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %pK\n",
		      alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		unsigned long user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (user_ptr < buffer->user_data) {
			n = n->rb_left;
		} else if (user_ptr > buffer->user_data) {
			n = n->rb_right;
		} else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate the userspace pointer to the buffer data and return the buffer
 * corresponding to that pointer. Search the rb tree for the buffer whose
 * user data pointer matches.
 *
 * Return:	Pointer to the buffer, NULL if no buffer matches, or
 *		ERR_PTR(-EPERM) if the buffer is not currently freeable
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   unsigned long user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

static inline void
binder_set_installed_page(struct binder_lru_page *lru_page,
			  struct page *page)
{
	/* Pairs with acquire in binder_get_installed_page() */
	smp_store_release(&lru_page->page_ptr, page);
}

static inline struct page *
binder_get_installed_page(struct binder_lru_page *lru_page)
{
	/* Pairs with release in binder_set_installed_page() */
	return smp_load_acquire(&lru_page->page_ptr);
}
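
/*
 * A sketch of the ordering argument for the release/acquire pair above:
 * the installer publishes page_ptr with smp_store_release() only after
 * vm_insert_page() has finished, so a task that observes a non-NULL
 * pointer via smp_load_acquire() also observes the fully installed
 * mapping. Without the pairing, a reader could see the pointer before
 * the installer's earlier stores become visible.
 */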

static void binder_lru_freelist_add(struct binder_alloc *alloc,
				    unsigned long start, unsigned long end)
{
	struct binder_lru_page *page;
	unsigned long page_addr;

	trace_binder_update_page_range(alloc, false, start, end);

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		size_t index;
		int ret;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (!binder_get_installed_page(page))
			continue;

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_freelist,
				   &page->lru,
				   page_to_nid(page->page_ptr),
				   NULL);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
	}
}

static int binder_install_single_page(struct binder_alloc *alloc,
				      struct binder_lru_page *lru_page,
				      unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;
	long npages;
	int ret;

	if (!mmget_not_zero(alloc->mm))
		return -ESRCH;

	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
	if (!page) {
		pr_err("%d: failed to allocate page\n", alloc->pid);
		ret = -ENOMEM;
		goto out;
	}

	mmap_read_lock(alloc->mm);
	vma = vma_lookup(alloc->mm, addr);
	if (!vma || vma != alloc->vma) {
		__free_page(page);
		pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
		ret = -ESRCH;
		goto unlock;
	}

	ret = vm_insert_page(vma, addr, page);
	switch (ret) {
	case -EBUSY:
		/*
		 * EBUSY is ok. Someone installed the pte first but the
		 * lru_page->page_ptr has not been updated yet. Discard
		 * our page and look up the one already installed.
		 */
		ret = 0;
		__free_page(page);
		npages = get_user_pages_remote(alloc->mm, addr, 1,
					       FOLL_NOFAULT, &page, NULL);
		if (npages <= 0) {
			pr_err("%d: failed to find page at offset %lx\n",
			       alloc->pid, addr - alloc->buffer);
			ret = -ESRCH;
			break;
		}
		fallthrough;
	case 0:
		/* Mark page installation complete and safe to use */
		binder_set_installed_page(lru_page, page);
		break;
	default:
		__free_page(page);
		pr_err("%d: %s failed to insert page at offset %lx with %d\n",
		       alloc->pid, __func__, addr - alloc->buffer, ret);
		ret = -ENOMEM;
		break;
	}
unlock:
	mmap_read_unlock(alloc->mm);
out:
	mmput_async(alloc->mm);
	return ret;
}
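
/*
 * Example interleaving handled by the -EBUSY branch above (a sketch):
 * tasks A and B both see page_ptr == NULL and each allocate a page; A
 * wins the vm_insert_page() race. B then gets -EBUSY, frees its own
 * page, and looks up A's page with get_user_pages_remote(FOLL_NOFAULT),
 * so both tasks end up publishing the same page through
 * binder_set_installed_page().
 */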

static int binder_install_buffer_pages(struct binder_alloc *alloc,
				       struct binder_buffer *buffer,
				       size_t size)
{
	struct binder_lru_page *page;
	unsigned long start, final;
	unsigned long page_addr;

	start = buffer->user_data & PAGE_MASK;
	final = PAGE_ALIGN(buffer->user_data + size);

	for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
		unsigned long index;
		int ret;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (binder_get_installed_page(page))
			continue;

		trace_binder_alloc_page_start(alloc, index);

		ret = binder_install_single_page(alloc, page, page_addr);
		if (ret)
			return ret;

		trace_binder_alloc_page_end(alloc, index);
	}

	return 0;
}

/* The range of pages should exclude those shared with other buffers */
static void binder_lru_freelist_del(struct binder_alloc *alloc,
				    unsigned long start, unsigned long end)
{
	struct binder_lru_page *page;
	unsigned long page_addr;

	trace_binder_update_page_range(alloc, true, start, end);

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		unsigned long index;
		bool on_lru;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_freelist,
					      &page->lru,
					      page_to_nid(page->page_ptr),
					      NULL);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;
	}
}

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
		struct vm_area_struct *vma)
{
	/* pairs with smp_load_acquire in binder_alloc_get_vma() */
	smp_store_release(&alloc->vma, vma);
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	/* pairs with smp_store_release in binder_alloc_set_vma() */
	return smp_load_acquire(&alloc->vma);
}

static void debug_no_space_locked(struct binder_alloc *alloc)
{
	size_t largest_alloc_size = 0;
	struct binder_buffer *buffer;
	size_t allocated_buffers = 0;
	size_t largest_free_size = 0;
	size_t total_alloc_size = 0;
	size_t total_free_size = 0;
	size_t free_buffers = 0;
	size_t buffer_size;
	struct rb_node *n;

	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		allocated_buffers++;
		total_alloc_size += buffer_size;
		if (buffer_size > largest_alloc_size)
			largest_alloc_size = buffer_size;
	}

	for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		free_buffers++;
		total_free_size += buffer_size;
		if (buffer_size > largest_free_size)
			largest_free_size = buffer_size;
	}

	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
			   total_alloc_size, allocated_buffers,
			   largest_alloc_size, total_free_size,
			   free_buffers, largest_free_size);
}

static bool debug_low_async_space_locked(struct binder_alloc *alloc)
{
	/*
	 * Find the number and total size of buffers allocated by the current
	 * caller. The idea is that once we cross the threshold, whoever is
	 * responsible for the low async space is likely to try to send another
	 * async txn, and at some point we'll catch them in the act. This is
	 * more efficient than keeping a map per pid.
	 */
	struct binder_buffer *buffer;
	size_t total_alloc_size = 0;
	int pid = current->tgid;
	size_t num_buffers = 0;
	struct rb_node *n;

	/*
	 * Only start detecting spammers once we have less than 20% of async
	 * space left (which is less than 10% of total buffer size).
	 */
	if (alloc->free_async_space >= alloc->buffer_size / 10) {
		alloc->oneway_spam_detected = false;
		return false;
	}

	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		 n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		if (buffer->pid != pid)
			continue;
		if (!buffer->async_transaction)
			continue;
		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
		num_buffers++;
	}

	/*
	 * Warn if this pid has more than 50 transactions, or more than 50% of
	 * async space (which is 25% of total buffer size). Oneway spam is only
	 * detected when the threshold is exceeded.
	 */
	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			     "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
			      alloc->pid, pid, num_buffers, total_alloc_size);
		if (!alloc->oneway_spam_detected) {
			alloc->oneway_spam_detected = true;
			return true;
		}
	}
	return false;
}
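
/*
 * Worked numbers (a sketch, assuming the default mapping capped at SZ_4M
 * in binder_alloc_mmap_handler()): buffer_size == 4 MiB, so the detection
 * above arms once free_async_space drops below 4 MiB / 10 (about 410 KiB),
 * and a pid is flagged once it holds more than 50 async buffers or more
 * than 4 MiB / 4 == 1 MiB of them.
 */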

/* Callers preallocate @new_buffer; it is freed by this function if unused */
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				struct binder_buffer *new_buffer,
				size_t size,
				int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct rb_node *best_fit = NULL;
	struct binder_buffer *buffer;
	unsigned long next_used_page;
	unsigned long curr_last_page;
	size_t buffer_size;

	if (is_async && alloc->free_async_space < size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      alloc->pid, size);
		buffer = ERR_PTR(-ENOSPC);
		goto out;
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size) {
			n = n->rb_right;
		} else {
			best_fit = n;
			break;
		}
	}

	if (unlikely(!best_fit)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		debug_no_space_locked(alloc);
		buffer = ERR_PTR(-ENOSPC);
		goto out;
	}

	if (buffer_size != size) {
		/* Found an oversized buffer that needs to be split */
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		WARN_ON(n || buffer_size == size);
		new_buffer->user_data = buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
		new_buffer = NULL;
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
		      alloc->pid, size, buffer, buffer_size);

	/*
	 * Now we remove the pages from the freelist. A clever calculation
	 * with buffer_size determines if the last page is shared with an
	 * adjacent in-use buffer. In that case, the page has already been
	 * removed from the freelist, so we trim our range short.
	 */
	next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK;
	curr_last_page = PAGE_ALIGN(buffer->user_data + size);
	binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data),
				min(next_used_page, curr_last_page));

	rb_erase(&buffer->rb_node, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	buffer->async_transaction = is_async;
	buffer->oneway_spam_suspect = false;
	if (is_async) {
		alloc->free_async_space -= size;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
		if (debug_low_async_space_locked(alloc))
			buffer->oneway_spam_suspect = true;
	}

out:
	/* Discard possibly unused new_buffer */
	kfree(new_buffer);
	return buffer;
}

/* Calculate the sanitized total size; returns 0 for an invalid request */
static inline size_t sanitized_size(size_t data_size,
				    size_t offsets_size,
				    size_t extra_buffers_size)
{
	size_t total, tmp;

	/* Align to pointer size and check for overflows */
	tmp = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));
	if (tmp < data_size || tmp < offsets_size)
		return 0;
	total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
	if (total < tmp || total < extra_buffers_size)
		return 0;

	/* Pad 0-sized buffers so they get a unique address */
	total = max(total, sizeof(void *));

	return total;
}
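
/*
 * Worked example (a sketch, assuming a 64-bit kernel where
 * sizeof(void *) == 8): sanitized_size(13, 4, 0) is
 * ALIGN(13, 8) + ALIGN(4, 8) + 0 == 16 + 8 == 24;
 * sanitized_size(0, 0, 0) returns 8 because of the max() padding; and
 * sanitized_size(SIZE_MAX, 8, 0) wraps inside ALIGN(), trips the
 * overflow check, and returns 0.
 */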

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       size of the array of user-specified buffer offsets
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return:	The allocated buffer or %ERR_PTR(-errno) if error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer, *next;
	size_t size;
	int ret;

	/* Check binder_alloc is fully initialized */
	if (!binder_alloc_get_vma(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	size = sanitized_size(data_size, offsets_size, extra_buffers_size);
	if (unlikely(!size)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd-%zd\n",
				   alloc->pid, data_size, offsets_size,
				   extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}

	/* Preallocate the next buffer */
	next = kzalloc(sizeof(*next), GFP_KERNEL);
	if (!next)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
	if (IS_ERR(buffer)) {
		mutex_unlock(&alloc->mutex);
		goto out;
	}

	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->extra_buffers_size = extra_buffers_size;
	buffer->pid = current->tgid;
	mutex_unlock(&alloc->mutex);

	ret = binder_install_buffer_pages(alloc, buffer, size);
	if (ret) {
		binder_alloc_free_buf(alloc, buffer);
		buffer = ERR_PTR(ret);
	}
out:
	return buffer;
}
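
/*
 * Hypothetical usage sketch, not called anywhere in this file: roughly
 * how a caller such as binder_transaction() drives the allocator. The
 * function name and the sizes here are illustrative assumptions only.
 */
static inline struct binder_buffer *
binder_alloc_example_alloc_and_fill(struct binder_alloc *alloc,
				    const void __user *from, size_t len)
{
	struct binder_buffer *buffer;

	/* Synchronous buffer: len data bytes, no offsets or extra space */
	buffer = binder_alloc_new_buf(alloc, len, 0, 0, 0);
	if (IS_ERR(buffer))
		return buffer;

	/* A non-zero return means some bytes could not be copied */
	if (binder_alloc_copy_user_to_buffer(alloc, buffer, 0, from, len)) {
		binder_alloc_free_buf(alloc, buffer);
		return ERR_PTR(-EFAULT);
	}
	return buffer;
}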

static unsigned long buffer_start_page(struct binder_buffer *buffer)
{
	return buffer->user_data & PAGE_MASK;
}

static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (buffer->user_data - 1) & PAGE_MASK;
}
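
/*
 * Worked example (a sketch with 4 KiB pages): for a buffer whose
 * user_data is 0x3010, buffer_start_page() is 0x3000 and
 * prev_buffer_end_page() is (0x3010 - 1) & PAGE_MASK == 0x3000 as well,
 * i.e. the page holding the last byte of whatever immediately precedes
 * the buffer. Matching pages mean the page is shared with an adjacent
 * buffer and must not be handed to the freelist below.
 */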

static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next;

	if (PAGE_ALIGNED(buffer->user_data))
		goto skip_freelist;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer))
		goto skip_freelist;

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer))
			goto skip_freelist;
	}

	binder_lru_freelist_add(alloc, buffer_start_page(buffer),
				buffer_start_page(buffer) + PAGE_SIZE);
skip_freelist:
	list_del(&buffer->entry);
	kfree(buffer);
}

static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
		      alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->user_data < alloc->buffer);
	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += buffer_size;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}

	binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data),
				(buffer->user_data + buffer_size) & PAGE_MASK);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible for ensuring that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
					  struct binder_buffer *buffer,
					  binder_size_t buffer_offset,
					  pgoff_t *pgoffp)
{
	binder_size_t buffer_space_offset = buffer_offset +
		(buffer->user_data - alloc->buffer);
	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
	size_t index = buffer_space_offset >> PAGE_SHIFT;
	struct binder_lru_page *lru_page;

	lru_page = &alloc->pages[index];
	*pgoffp = pgoff;
	return lru_page->page_ptr;
}
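
/*
 * Worked example (a sketch with 4 KiB pages): if the buffer starts
 * 0x1800 bytes into the mapping and @buffer_offset is 0x900, then
 * buffer_space_offset == 0x2100, so index == 2, *pgoffp == 0x100, and
 * the page returned is alloc->pages[2].page_ptr.
 */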

/**
 * binder_alloc_clear_buf() - zero out buffer
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be cleared
 *
 * memset the given buffer to 0
 */
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
	binder_size_t buffer_offset = 0;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		memset_page(page, pgoff, 0, size);
		bytes -= size;
		buffer_offset += size;
	}
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			    struct binder_buffer *buffer)
{
	/*
	 * We could eliminate the call to binder_alloc_clear_buf()
	 * from binder_alloc_deferred_release() by moving this to
	 * binder_free_buf_locked(). However, that could
	 * increase contention for the alloc mutex if clear_on_free
	 * is used frequently for large buffers. The mutex is not
	 * needed for correctness here.
	 */
	if (buffer->clear_on_free) {
		binder_alloc_clear_buf(alloc, buffer);
		buffer->clear_on_free = false;
	}
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	struct binder_buffer *buffer;
	const char *failure_string;
	int ret, i;

	if (unlikely(vma->vm_mm != alloc->mm)) {
		ret = -EINVAL;
		failure_string = "invalid vma->vm_mm";
		goto err_invalid_mm;
	}

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer_size) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}
	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
				   SZ_4M);
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->buffer = vma->vm_start;

	alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
				sizeof(alloc->pages[0]),
				GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}

	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
		alloc->pages[i].alloc = alloc;
		INIT_LIST_HEAD(&alloc->pages[i].lru);
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->user_data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;

	/* Signal binder_alloc is fully initialized */
	binder_alloc_set_vma(alloc, vma);

	return 0;

err_alloc_buf_struct_failed:
	kvfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	alloc->buffer = 0;
	mutex_lock(&binder_alloc_mmap_lock);
	alloc->buffer_size = 0;
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
err_invalid_mm:
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}
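
/*
 * Sizing sketch: the mapping is capped at SZ_4M above, so even an 8 MiB
 * vma yields buffer_size == 4 MiB. With 4 KiB pages that means 1024
 * entries in alloc->pages[], one initial free buffer spanning the whole
 * mapping, and free_async_space starting at 2 MiB (half of the mapping
 * is reserved for async transactions).
 */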

void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);
	BUG_ON(alloc->vma);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		if (buffer->clear_on_free) {
			binder_alloc_clear_buf(alloc, buffer);
			buffer->clear_on_free = false;
		}
		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_freelist,
					      &alloc->pages[i].lru,
					      page_to_nid(alloc->pages[i].page_ptr),
					      NULL);
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d %s\n",
				     __func__, alloc->pid, i,
				     on_lru ? "on lru" : "active");
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
	}
	mutex_unlock(&alloc->mutex);
	kvfree(alloc->pages);
	if (alloc->mm)
		mmdrop(alloc->mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d buffers %d, pages %d\n",
		     __func__, alloc->pid, buffers, page_count);
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct binder_buffer *buffer;
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
			   buffer->debug_id,
			   buffer->user_data - alloc->buffer,
			   buffer->data_size, buffer->offsets_size,
			   buffer->extra_buffers_size,
			   buffer->transaction ? "active" : "delivered");
	}
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	/*
	 * Make sure the binder_alloc is fully initialized; otherwise we might
	 * read inconsistent state.
	 */
	if (binder_alloc_get_vma(alloc) != NULL) {
		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			page = &alloc->pages[i];
			if (!page->page_ptr)
				free++;
			else if (list_empty(&page->lru))
				active++;
			else
				lru++;
		}
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru instance of the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       void *cb_arg)
	__must_hold(&lru->lock)
{
	struct binder_lru_page *page = container_of(item, typeof(*page), lru);
	struct binder_alloc *alloc = page->alloc;
	struct mm_struct *mm = alloc->mm;
	struct vm_area_struct *vma;
	struct page *page_to_free;
	unsigned long page_addr;
	size_t index;

	if (!mmget_not_zero(mm))
		goto err_mmget;
	if (!mmap_read_trylock(mm))
		goto err_mmap_read_lock_failed;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;
	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = alloc->buffer + index * PAGE_SIZE;

	vma = vma_lookup(mm, page_addr);
	if (vma && vma != binder_alloc_get_vma(alloc))
		goto err_invalid_vma;

	trace_binder_unmap_kernel_start(alloc, index);

	page_to_free = page->page_ptr;
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	list_lru_isolate(lru, item);
	spin_unlock(&lru->lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);

		trace_binder_unmap_user_end(alloc, index);
	}

	mutex_unlock(&alloc->mutex);
	mmap_read_unlock(mm);
	mmput_async(mm);
	__free_page(page_to_free);

	return LRU_REMOVED_RETRY;

err_invalid_vma:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	mmap_read_unlock(mm);
err_mmap_read_lock_failed:
	mmput_async(mm);
err_mmget:
	return LRU_SKIP;
}
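
/*
 * A sketch of why the shrinker path above uses trylocks: it runs in
 * reclaim context, so sleeping on mmap_lock or alloc->mutex (whose
 * holders may themselves be allocating memory) risks deadlock; any
 * contention simply returns LRU_SKIP. The lru lock is dropped before
 * zap_page_range_single(), which is why LRU_REMOVED_RETRY is returned
 * instead of LRU_REMOVED.
 */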

static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_count(&binder_freelist);
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_walk(&binder_freelist, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
}

static struct shrinker *binder_shrinker;

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	alloc->mm = current->mm;
	mmgrab(alloc->mm);
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
	int ret;

	ret = list_lru_init(&binder_freelist);
	if (ret)
		return ret;

	binder_shrinker = shrinker_alloc(0, "android-binder");
	if (!binder_shrinker) {
		list_lru_destroy(&binder_freelist);
		return -ENOMEM;
	}

	binder_shrinker->count_objects = binder_shrink_count;
	binder_shrinker->scan_objects = binder_shrink_scan;

	shrinker_register(binder_shrinker);

	return 0;
}

void binder_alloc_shrinker_exit(void)
{
	shrinker_free(binder_shrinker);
	list_lru_destroy(&binder_freelist);
}

/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *     (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *     (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}
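
/*
 * Example (a sketch): for a buffer whose total size is 128 bytes,
 * check_buffer(alloc, buffer, 124, 4) passes; offset 126 fails the
 * IS_ALIGNED(offset, sizeof(u32)) test; and offset 124 with 8 bytes
 * fails offset <= buffer_size - bytes. Because the comparisons are
 * unsigned, a huge @offset or @bytes cannot wrap past the checks.
 */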

/**
 * binder_alloc_copy_user_to_buffer() - copy from user space to a binder buffer
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap_local_page(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap_local(kptr);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}
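
/*
 * Worked example of the return value (a sketch with 4 KiB pages):
 * copying 6000 bytes starting at a page boundary takes two chunks of
 * 4096 and 1904 bytes. If the first chunk succeeds and copy_from_user()
 * then fails to copy any of the second, the function returns
 * bytes - size + ret == 1904 - 1904 + 1904 == 1904 bytes remaining;
 * only a return of 0 means the copy completed.
 */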

static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
				       bool to_buffer,
				       struct binder_buffer *buffer,
				       binder_size_t buffer_offset,
				       void *ptr,
				       size_t bytes)
{
	/* All copies must start at a 32-bit aligned offset within the buffer */
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return -EINVAL;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		if (to_buffer)
			memcpy_to_page(page, pgoff, ptr, size);
		else
			memcpy_from_page(ptr, page, pgoff, size);
		bytes -= size;
		pgoff = 0;
		ptr = ptr + size;
		buffer_offset += size;
	}
	return 0;
}

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t buffer_offset,
				void *src,
				size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
					   src, bytes);
}

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  void *dest,
				  struct binder_buffer *buffer,
				  binder_size_t buffer_offset,
				  size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
					   dest, bytes);
}