xref: /linux-6.15/drivers/android/binder_alloc.c (revision 258ce20e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder_alloc.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2017 Google, Inc.
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 
11 #include <linux/list.h>
12 #include <linux/sched/mm.h>
13 #include <linux/module.h>
14 #include <linux/rtmutex.h>
15 #include <linux/rbtree.h>
16 #include <linux/seq_file.h>
17 #include <linux/vmalloc.h>
18 #include <linux/slab.h>
19 #include <linux/sched.h>
20 #include <linux/list_lru.h>
21 #include <linux/ratelimit.h>
22 #include <asm/cacheflush.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/sizes.h>
26 #include "binder_alloc.h"
27 #include "binder_trace.h"
28 
29 struct list_lru binder_alloc_lru;
30 
31 static DEFINE_MUTEX(binder_alloc_mmap_lock);
32 
33 enum {
34 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
35 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
36 	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
37 	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
38 };
39 static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
40 
41 module_param_named(debug_mask, binder_alloc_debug_mask,
42 		   uint, 0644);
43 
44 #define binder_alloc_debug(mask, x...) \
45 	do { \
46 		if (binder_alloc_debug_mask & mask) \
47 			pr_info_ratelimited(x); \
48 	} while (0)
49 
50 static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
51 {
52 	return list_entry(buffer->entry.next, struct binder_buffer, entry);
53 }
54 
55 static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
56 {
57 	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
58 }
59 
60 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
61 				       struct binder_buffer *buffer)
62 {
63 	if (list_is_last(&buffer->entry, &alloc->buffers))
64 		return alloc->buffer + alloc->buffer_size - buffer->user_data;
65 	return binder_buffer_next(buffer)->user_data - buffer->user_data;
66 }
67 
68 static void binder_insert_free_buffer(struct binder_alloc *alloc,
69 				      struct binder_buffer *new_buffer)
70 {
71 	struct rb_node **p = &alloc->free_buffers.rb_node;
72 	struct rb_node *parent = NULL;
73 	struct binder_buffer *buffer;
74 	size_t buffer_size;
75 	size_t new_buffer_size;
76 
77 	BUG_ON(!new_buffer->free);
78 
79 	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
80 
81 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
82 		     "%d: add free buffer, size %zd, at %pK\n",
83 		      alloc->pid, new_buffer_size, new_buffer);
84 
85 	while (*p) {
86 		parent = *p;
87 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
88 		BUG_ON(!buffer->free);
89 
90 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
91 
92 		if (new_buffer_size < buffer_size)
93 			p = &parent->rb_left;
94 		else
95 			p = &parent->rb_right;
96 	}
97 	rb_link_node(&new_buffer->rb_node, parent, p);
98 	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
99 }
100 
101 static void binder_insert_allocated_buffer_locked(
102 		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
103 {
104 	struct rb_node **p = &alloc->allocated_buffers.rb_node;
105 	struct rb_node *parent = NULL;
106 	struct binder_buffer *buffer;
107 
108 	BUG_ON(new_buffer->free);
109 
110 	while (*p) {
111 		parent = *p;
112 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
113 		BUG_ON(buffer->free);
114 
115 		if (new_buffer->user_data < buffer->user_data)
116 			p = &parent->rb_left;
117 		else if (new_buffer->user_data > buffer->user_data)
118 			p = &parent->rb_right;
119 		else
120 			BUG();
121 	}
122 	rb_link_node(&new_buffer->rb_node, parent, p);
123 	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
124 }
125 
126 static struct binder_buffer *binder_alloc_prepare_to_free_locked(
127 		struct binder_alloc *alloc,
128 		unsigned long user_ptr)
129 {
130 	struct rb_node *n = alloc->allocated_buffers.rb_node;
131 	struct binder_buffer *buffer;
132 
133 	while (n) {
134 		buffer = rb_entry(n, struct binder_buffer, rb_node);
135 		BUG_ON(buffer->free);
136 
137 		if (user_ptr < buffer->user_data) {
138 			n = n->rb_left;
139 		} else if (user_ptr > buffer->user_data) {
140 			n = n->rb_right;
141 		} else {
142 			/*
143 			 * Guard against user threads attempting to
144 			 * free the buffer when in use by kernel or
145 			 * after it's already been freed.
146 			 */
147 			if (!buffer->allow_user_free)
148 				return ERR_PTR(-EPERM);
149 			buffer->allow_user_free = 0;
150 			return buffer;
151 		}
152 	}
153 	return NULL;
154 }
155 
156 /**
157  * binder_alloc_prepare_to_free() - get buffer given user ptr
158  * @alloc:	binder_alloc for this proc
159  * @user_ptr:	User pointer to buffer data
160  *
161  * Validate the userspace pointer to buffer data and return the buffer
162  * corresponding to that user pointer. The allocated-buffers rb tree is
163  * searched for a buffer whose user data pointer matches.
164  *
165  * Return:	Pointer to buffer, ERR_PTR(-EPERM) if not freeable, or NULL
166  */
167 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
168 						   unsigned long user_ptr)
169 {
170 	struct binder_buffer *buffer;
171 
172 	mutex_lock(&alloc->mutex);
173 	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
174 	mutex_unlock(&alloc->mutex);
175 	return buffer;
176 }
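
/*
 * Editorial sketch (assumed caller, not the actual binder.c code): handling
 * the three possible outcomes above when user space asks to free a buffer.
 * proc and data_ptr are hypothetical names for the caller's binder proc and
 * the user-supplied buffer address.
 *
 *	buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr);
 *	if (IS_ERR_OR_NULL(buffer)) {
 *		if (PTR_ERR(buffer) == -EPERM)
 *			pr_warn("%lx is in use and not freeable\n", data_ptr);
 *		else
 *			pr_warn("no allocated buffer at %lx\n", data_ptr);
 *	} else {
 *		binder_alloc_free_buf(&proc->alloc, buffer);
 *	}
 */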
177 
178 static inline void
179 binder_set_installed_page(struct binder_lru_page *lru_page,
180 			  struct page *page)
181 {
182 	/* Pairs with acquire in binder_get_installed_page() */
183 	smp_store_release(&lru_page->page_ptr, page);
184 }
185 
186 static inline struct page *
187 binder_get_installed_page(struct binder_lru_page *lru_page)
188 {
189 	/* Pairs with release in binder_set_installed_page() */
190 	return smp_load_acquire(&lru_page->page_ptr);
191 }
192 
193 static void binder_free_page_range(struct binder_alloc *alloc,
194 				   unsigned long start, unsigned long end)
195 {
196 	struct binder_lru_page *page;
197 	unsigned long page_addr;
198 
199 	trace_binder_update_page_range(alloc, false, start, end);
200 
201 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
202 		size_t index;
203 		int ret;
204 
205 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
206 		page = &alloc->pages[index];
207 
208 		if (!binder_get_installed_page(page))
209 			continue;
210 
211 		trace_binder_free_lru_start(alloc, index);
212 
213 		ret = list_lru_add(&binder_alloc_lru, &page->lru);
214 		WARN_ON(!ret);
215 
216 		trace_binder_free_lru_end(alloc, index);
217 	}
218 }
219 
220 static int binder_install_single_page(struct binder_alloc *alloc,
221 				      struct binder_lru_page *lru_page,
222 				      unsigned long addr)
223 {
224 	struct page *page;
225 	int ret = 0;
226 
227 	if (!mmget_not_zero(alloc->mm))
228 		return -ESRCH;
229 
230 	/*
231 	 * Protected with the mmap_lock in write mode as multiple tasks
232 	 * might race to install the same page.
233 	 */
234 	mmap_write_lock(alloc->mm);
235 	if (binder_get_installed_page(lru_page))
236 		goto out;
237 
238 	if (!alloc->vma) {
239 		pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
240 		ret = -ESRCH;
241 		goto out;
242 	}
243 
244 	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
245 	if (!page) {
246 		pr_err("%d: failed to allocate page\n", alloc->pid);
247 		ret = -ENOMEM;
248 		goto out;
249 	}
250 
251 	ret = vm_insert_page(alloc->vma, addr, page);
252 	if (ret) {
253 		pr_err("%d: %s failed to insert page at %lx with %d\n",
254 		       alloc->pid, __func__, addr, ret);
255 		__free_page(page);
256 		ret = -ENOMEM;
257 		goto out;
258 	}
259 
260 	/* Mark page installation complete and safe to use */
261 	binder_set_installed_page(lru_page, page);
262 out:
263 	mmap_write_unlock(alloc->mm);
264 	mmput_async(alloc->mm);
265 	return ret;
266 }
267 
268 static int binder_install_buffer_pages(struct binder_alloc *alloc,
269 				       struct binder_buffer *buffer,
270 				       size_t size)
271 {
272 	struct binder_lru_page *page;
273 	unsigned long start, final;
274 	unsigned long page_addr;
275 
276 	start = buffer->user_data & PAGE_MASK;
277 	final = PAGE_ALIGN(buffer->user_data + size);
278 
279 	for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
280 		unsigned long index;
281 		int ret;
282 
283 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
284 		page = &alloc->pages[index];
285 
286 		if (binder_get_installed_page(page))
287 			continue;
288 
289 		trace_binder_alloc_page_start(alloc, index);
290 
291 		ret = binder_install_single_page(alloc, page, page_addr);
292 		if (ret)
293 			return ret;
294 
295 		trace_binder_alloc_page_end(alloc, index);
296 	}
297 
298 	return 0;
299 }
300 
301 /* The range of pages should exclude those shared with other buffers */
302 static void binder_allocate_page_range(struct binder_alloc *alloc,
303 				       unsigned long start, unsigned long end)
304 {
305 	struct binder_lru_page *page;
306 	unsigned long page_addr;
307 
308 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
309 			   "%d: allocate pages %lx-%lx\n",
310 			   alloc->pid, start, end);
311 
312 	trace_binder_update_page_range(alloc, true, start, end);
313 
314 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
315 		unsigned long index;
316 		bool on_lru;
317 
318 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
319 		page = &alloc->pages[index];
320 
321 		if (page->page_ptr) {
322 			trace_binder_alloc_lru_start(alloc, index);
323 
324 			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
325 			WARN_ON(!on_lru);
326 
327 			trace_binder_alloc_lru_end(alloc, index);
328 			continue;
329 		}
330 
331 		if (index + 1 > alloc->pages_high)
332 			alloc->pages_high = index + 1;
333 	}
334 }
335 
336 static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
337 		struct vm_area_struct *vma)
338 {
339 	/* pairs with smp_load_acquire in binder_alloc_get_vma() */
340 	smp_store_release(&alloc->vma, vma);
341 }
342 
343 static inline struct vm_area_struct *binder_alloc_get_vma(
344 		struct binder_alloc *alloc)
345 {
346 	/* pairs with smp_store_release in binder_alloc_set_vma() */
347 	return smp_load_acquire(&alloc->vma);
348 }
349 
350 static void debug_no_space_locked(struct binder_alloc *alloc)
351 {
352 	size_t largest_alloc_size = 0;
353 	struct binder_buffer *buffer;
354 	size_t allocated_buffers = 0;
355 	size_t largest_free_size = 0;
356 	size_t total_alloc_size = 0;
357 	size_t total_free_size = 0;
358 	size_t free_buffers = 0;
359 	size_t buffer_size;
360 	struct rb_node *n;
361 
362 	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
363 		buffer = rb_entry(n, struct binder_buffer, rb_node);
364 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
365 		allocated_buffers++;
366 		total_alloc_size += buffer_size;
367 		if (buffer_size > largest_alloc_size)
368 			largest_alloc_size = buffer_size;
369 	}
370 
371 	for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {
372 		buffer = rb_entry(n, struct binder_buffer, rb_node);
373 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
374 		free_buffers++;
375 		total_free_size += buffer_size;
376 		if (buffer_size > largest_free_size)
377 			largest_free_size = buffer_size;
378 	}
379 
380 	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
381 			   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
382 			   total_alloc_size, allocated_buffers,
383 			   largest_alloc_size, total_free_size,
384 			   free_buffers, largest_free_size);
385 }
386 
387 static bool debug_low_async_space_locked(struct binder_alloc *alloc)
388 {
389 	 * Find the number and total size of buffers allocated by the current caller.
390 	 * Find the amount and size of buffers allocated by the current caller;
391 	 * The idea is that once we cross the threshold, whoever is responsible
392 	 * for the low async space is likely to try to send another async txn,
393 	 * and at some point we'll catch them in the act. This is more efficient
394 	 * than keeping a map per pid.
395 	 */
396 	struct binder_buffer *buffer;
397 	size_t total_alloc_size = 0;
398 	int pid = current->tgid;
399 	size_t num_buffers = 0;
400 	struct rb_node *n;
401 
402 	/*
403 	 * Only start detecting spammers once we have less than 20% of async
404 	 * space left (which is less than 10% of total buffer size).
405 	 */
406 	if (alloc->free_async_space >= alloc->buffer_size / 10) {
407 		alloc->oneway_spam_detected = false;
408 		return false;
409 	}
410 
411 	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
412 		 n = rb_next(n)) {
413 		buffer = rb_entry(n, struct binder_buffer, rb_node);
414 		if (buffer->pid != pid)
415 			continue;
416 		if (!buffer->async_transaction)
417 			continue;
418 		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
419 		num_buffers++;
420 	}
421 
422 	/*
423 	 * Warn if this pid has more than 50 transactions, or more than 50% of
424 	 * async space (which is 25% of total buffer size). Oneway spam is only
425 	 * detected when the threshold is exceeded.
426 	 */
427 	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
428 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
429 			     "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
430 			      alloc->pid, pid, num_buffers, total_alloc_size);
431 		if (!alloc->oneway_spam_detected) {
432 			alloc->oneway_spam_detected = true;
433 			return true;
434 		}
435 	}
436 	return false;
437 }
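
/*
 * Editorial note: a worked example of the thresholds above for the default
 * 4MB mapping (see SZ_4M in binder_alloc_mmap_handler()), assuming the
 * mapping is exactly 4194304 bytes:
 *
 *	initial free_async_space  = 4194304 / 2  = 2097152 bytes (2MB)
 *	detection starts below      4194304 / 10 =  419430 bytes free
 *	per-pid warning threshold = 50 buffers, or
 *	                            4194304 / 4  = 1048576 bytes (1MB)
 */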
438 
439 /* Callers preallocate @new_buffer; it is freed by this function if unused */
440 static struct binder_buffer *binder_alloc_new_buf_locked(
441 				struct binder_alloc *alloc,
442 				struct binder_buffer *new_buffer,
443 				size_t size,
444 				int is_async)
445 {
446 	struct rb_node *n = alloc->free_buffers.rb_node;
447 	struct rb_node *best_fit = NULL;
448 	struct binder_buffer *buffer;
449 	unsigned long has_page_addr;
450 	unsigned long end_page_addr;
451 	size_t buffer_size;
452 
453 	if (is_async && alloc->free_async_space < size) {
454 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
455 			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
456 			      alloc->pid, size);
457 		buffer = ERR_PTR(-ENOSPC);
458 		goto out;
459 	}
460 
461 	while (n) {
462 		buffer = rb_entry(n, struct binder_buffer, rb_node);
463 		BUG_ON(!buffer->free);
464 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
465 
466 		if (size < buffer_size) {
467 			best_fit = n;
468 			n = n->rb_left;
469 		} else if (size > buffer_size) {
470 			n = n->rb_right;
471 		} else {
472 			best_fit = n;
473 			break;
474 		}
475 	}
476 
477 	if (unlikely(!best_fit)) {
478 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
479 				   "%d: binder_alloc_buf size %zd failed, no address space\n",
480 				   alloc->pid, size);
481 		debug_no_space_locked(alloc);
482 		buffer = ERR_PTR(-ENOSPC);
483 		goto out;
484 	}
485 
486 	if (n == NULL) {
487 		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
488 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
489 	}
490 
491 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
492 		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
493 		      alloc->pid, size, buffer, buffer_size);
494 
495 	WARN_ON(n && buffer_size != size);
496 
497 	has_page_addr = (buffer->user_data + buffer_size) & PAGE_MASK;
498 	end_page_addr = PAGE_ALIGN(buffer->user_data + size);
499 	if (end_page_addr > has_page_addr)
500 		end_page_addr = has_page_addr;
501 	binder_allocate_page_range(alloc, PAGE_ALIGN(buffer->user_data),
502 				   end_page_addr);
503 	if (buffer_size != size) {
504 		new_buffer->user_data = buffer->user_data + size;
505 		list_add(&new_buffer->entry, &buffer->entry);
506 		new_buffer->free = 1;
507 		binder_insert_free_buffer(alloc, new_buffer);
508 		new_buffer = NULL;
509 	}
510 
511 	rb_erase(best_fit, &alloc->free_buffers);
512 	buffer->free = 0;
513 	buffer->allow_user_free = 0;
514 	binder_insert_allocated_buffer_locked(alloc, buffer);
515 	buffer->async_transaction = is_async;
516 	buffer->oneway_spam_suspect = false;
517 	if (is_async) {
518 		alloc->free_async_space -= size;
519 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
520 			     "%d: binder_alloc_buf size %zd async free %zd\n",
521 			      alloc->pid, size, alloc->free_async_space);
522 		if (debug_low_async_space_locked(alloc))
523 			buffer->oneway_spam_suspect = true;
524 	}
525 
526 out:
527 	/* Discard possibly unused new_buffer */
528 	kfree(new_buffer);
529 	return buffer;
530 }
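
/*
 * Editorial example of the page-range trimming above, with hypothetical
 * offsets relative to alloc->buffer (which is page aligned) and 4KB pages:
 * a best-fit free buffer at user_data = 0x1800 of size 0x1000 is carved up
 * for a request of size = 0x300.
 *
 *	has_page_addr = (0x1800 + 0x1000) & PAGE_MASK  = 0x2000
 *	end_page_addr = PAGE_ALIGN(0x1800 + 0x300)     = 0x2000
 *	start         = PAGE_ALIGN(0x1800)             = 0x2000
 *
 * The range [0x2000, 0x2000) is empty, so no new page is reserved: both the
 * allocated chunk and the leftover free chunk sit on pages already shared
 * with neighbouring buffers.
 */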
531 
532 /* Calculate the sanitized total size; returns 0 for an invalid request */
533 static inline size_t sanitized_size(size_t data_size,
534 				    size_t offsets_size,
535 				    size_t extra_buffers_size)
536 {
537 	size_t total, tmp;
538 
539 	/* Align to pointer size and check for overflows */
540 	tmp = ALIGN(data_size, sizeof(void *)) +
541 		ALIGN(offsets_size, sizeof(void *));
542 	if (tmp < data_size || tmp < offsets_size)
543 		return 0;
544 	total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
545 	if (total < tmp || total < extra_buffers_size)
546 		return 0;
547 
548 	/* Pad 0-sized buffers so they get a unique address */
549 	total = max(total, sizeof(void *));
550 
551 	return total;
552 }
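
/*
 * Editorial example of the arithmetic above on a 64-bit kernel, where
 * sizeof(void *) == 8, with hypothetical request sizes:
 *
 *	sanitized_size(28, 8, 0)       = 32 + 8 + 0 = 40
 *	sanitized_size(0, 0, 0)        = 0, padded up to 8
 *	sanitized_size(SIZE_MAX, 8, 0) = 0 (the ALIGN overflow is detected),
 *	                                 which the caller maps to -EINVAL
 */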
553 
554 /**
555  * binder_alloc_new_buf() - Allocate a new binder buffer
556  * @alloc:              binder_alloc for this proc
557  * @data_size:          size of user data buffer
558  * @offsets_size:       user specified buffer offset
559  * @extra_buffers_size: size of extra space for meta-data (e.g., security context)
560  * @is_async:           buffer for async transaction
561  *
562  * Allocate a new buffer given the requested sizes. Returns
563  * the kernel version of the buffer pointer. The size allocated
564  * is the sum of the three given sizes (each rounded up to
565  * pointer-sized boundary)
566  *
567  * Return:	The allocated buffer or %ERR_PTR(-errno) if error
568  */
569 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
570 					   size_t data_size,
571 					   size_t offsets_size,
572 					   size_t extra_buffers_size,
573 					   int is_async)
574 {
575 	struct binder_buffer *buffer, *next;
576 	size_t size;
577 	int ret;
578 
579 	/* Check binder_alloc is fully initialized */
580 	if (!binder_alloc_get_vma(alloc)) {
581 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
582 				   "%d: binder_alloc_buf, no vma\n",
583 				   alloc->pid);
584 		return ERR_PTR(-ESRCH);
585 	}
586 
587 	size = sanitized_size(data_size, offsets_size, extra_buffers_size);
588 	if (unlikely(!size)) {
589 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
590 				   "%d: got transaction with invalid size %zd-%zd-%zd\n",
591 				   alloc->pid, data_size, offsets_size,
592 				   extra_buffers_size);
593 		return ERR_PTR(-EINVAL);
594 	}
595 
596 	/* Preallocate the next buffer */
597 	next = kzalloc(sizeof(*next), GFP_KERNEL);
598 	if (!next)
599 		return ERR_PTR(-ENOMEM);
600 
601 	mutex_lock(&alloc->mutex);
602 	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
603 	if (IS_ERR(buffer)) {
604 		mutex_unlock(&alloc->mutex);
605 		goto out;
606 	}
607 
608 	buffer->data_size = data_size;
609 	buffer->offsets_size = offsets_size;
610 	buffer->extra_buffers_size = extra_buffers_size;
611 	buffer->pid = current->tgid;
612 	mutex_unlock(&alloc->mutex);
613 
614 	ret = binder_install_buffer_pages(alloc, buffer, size);
615 	if (ret) {
616 		binder_alloc_free_buf(alloc, buffer);
617 		buffer = ERR_PTR(ret);
618 	}
619 out:
620 	return buffer;
621 }
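
/*
 * Editorial sketch (assumed caller, not the actual binder.c code): how a
 * transaction path might request a buffer. target_proc, tr and secctx_sz
 * are hypothetical names for the target process, the incoming
 * binder_transaction_data and the security-context size. On failure the
 * helper returns ERR_PTR(-EINVAL), ERR_PTR(-ENOSPC), ERR_PTR(-ESRCH) or
 * ERR_PTR(-ENOMEM), as seen above.
 *
 *	buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
 *				      tr->offsets_size, secctx_sz,
 *				      !!(tr->flags & TF_ONE_WAY));
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 */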
622 
623 static unsigned long buffer_start_page(struct binder_buffer *buffer)
624 {
625 	return buffer->user_data & PAGE_MASK;
626 }
627 
628 static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
629 {
630 	return (buffer->user_data - 1) & PAGE_MASK;
631 }
632 
633 static void binder_delete_free_buffer(struct binder_alloc *alloc,
634 				      struct binder_buffer *buffer)
635 {
636 	struct binder_buffer *prev, *next = NULL;
637 	bool to_free = true;
638 
639 	BUG_ON(alloc->buffers.next == &buffer->entry);
640 	prev = binder_buffer_prev(buffer);
641 	BUG_ON(!prev->free);
642 	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
643 		to_free = false;
644 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
645 				   "%d: merge free, buffer %lx share page with %lx\n",
646 				   alloc->pid, buffer->user_data,
647 				   prev->user_data);
648 	}
649 
650 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
651 		next = binder_buffer_next(buffer);
652 		if (buffer_start_page(next) == buffer_start_page(buffer)) {
653 			to_free = false;
654 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
655 					   "%d: merge free, buffer %lx share page with %lx\n",
656 					   alloc->pid,
657 					   buffer->user_data,
658 					   next->user_data);
659 		}
660 	}
661 
662 	if (PAGE_ALIGNED(buffer->user_data)) {
663 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
664 				   "%d: merge free, buffer start %lx is page aligned\n",
665 				   alloc->pid, buffer->user_data);
666 		to_free = false;
667 	}
668 
669 	if (to_free) {
670 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
671 				   "%d: merge free, buffer %lx do not share page with %lx or %lx\n",
672 				   alloc->pid, buffer->user_data,
673 				   prev->user_data,
674 				   next ? next->user_data : 0);
675 		binder_free_page_range(alloc, buffer_start_page(buffer),
676 				       buffer_start_page(buffer) + PAGE_SIZE);
677 	}
678 	list_del(&buffer->entry);
679 	kfree(buffer);
680 }
681 
682 static void binder_free_buf_locked(struct binder_alloc *alloc,
683 				   struct binder_buffer *buffer)
684 {
685 	size_t size, buffer_size;
686 
687 	buffer_size = binder_alloc_buffer_size(alloc, buffer);
688 
689 	size = ALIGN(buffer->data_size, sizeof(void *)) +
690 		ALIGN(buffer->offsets_size, sizeof(void *)) +
691 		ALIGN(buffer->extra_buffers_size, sizeof(void *));
692 
693 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
694 		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
695 		      alloc->pid, buffer, size, buffer_size);
696 
697 	BUG_ON(buffer->free);
698 	BUG_ON(size > buffer_size);
699 	BUG_ON(buffer->transaction != NULL);
700 	BUG_ON(buffer->user_data < alloc->buffer);
701 	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
702 
703 	if (buffer->async_transaction) {
704 		alloc->free_async_space += buffer_size;
705 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
706 			     "%d: binder_free_buf size %zd async free %zd\n",
707 			      alloc->pid, size, alloc->free_async_space);
708 	}
709 
710 	binder_free_page_range(alloc, PAGE_ALIGN(buffer->user_data),
711 			       (buffer->user_data + buffer_size) & PAGE_MASK);
712 
713 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
714 	buffer->free = 1;
715 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
716 		struct binder_buffer *next = binder_buffer_next(buffer);
717 
718 		if (next->free) {
719 			rb_erase(&next->rb_node, &alloc->free_buffers);
720 			binder_delete_free_buffer(alloc, next);
721 		}
722 	}
723 	if (alloc->buffers.next != &buffer->entry) {
724 		struct binder_buffer *prev = binder_buffer_prev(buffer);
725 
726 		if (prev->free) {
727 			binder_delete_free_buffer(alloc, buffer);
728 			rb_erase(&prev->rb_node, &alloc->free_buffers);
729 			buffer = prev;
730 		}
731 	}
732 	binder_insert_free_buffer(alloc, buffer);
733 }
734 
735 /**
736  * binder_alloc_get_page() - get kernel pointer for given buffer offset
737  * @alloc: binder_alloc for this proc
738  * @buffer: binder buffer to be accessed
739  * @buffer_offset: offset into @buffer data
740  * @pgoffp: address to copy final page offset to
741  *
742  * Lookup the struct page corresponding to the address
743  * at @buffer_offset into @buffer->user_data. If @pgoffp is not
744  * NULL, the byte-offset into the page is written there.
745  *
746  * The caller is responsible for ensuring that the offset points
747  * to a valid address within the @buffer and that @buffer is
748  * not freeable by the user. Since it can't be freed, we are
749  * guaranteed that the corresponding elements of @alloc->pages[]
750  * cannot change.
751  *
752  * Return: struct page
753  */
754 static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
755 					  struct binder_buffer *buffer,
756 					  binder_size_t buffer_offset,
757 					  pgoff_t *pgoffp)
758 {
759 	binder_size_t buffer_space_offset = buffer_offset +
760 		(buffer->user_data - alloc->buffer);
761 	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
762 	size_t index = buffer_space_offset >> PAGE_SHIFT;
763 	struct binder_lru_page *lru_page;
764 
765 	lru_page = &alloc->pages[index];
766 	*pgoffp = pgoff;
767 	return lru_page->page_ptr;
768 }
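
/*
 * Editorial example of the index/pgoff math above, assuming 4KB pages and
 * hypothetical values: a buffer whose user_data starts 0x2050 bytes into
 * the mapping, accessed at buffer_offset = 0x30.
 *
 *	buffer_space_offset = 0x2050 + 0x30        = 0x2080
 *	index               = 0x2080 >> PAGE_SHIFT = 2 (third page)
 *	pgoff               = 0x2080 & ~PAGE_MASK  = 0x80
 */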
769 
770 /**
771  * binder_alloc_clear_buf() - zero out buffer
772  * @alloc: binder_alloc for this proc
773  * @buffer: binder buffer to be cleared
774  *
775  * memset the given buffer to 0
776  */
777 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
778 				   struct binder_buffer *buffer)
779 {
780 	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
781 	binder_size_t buffer_offset = 0;
782 
783 	while (bytes) {
784 		unsigned long size;
785 		struct page *page;
786 		pgoff_t pgoff;
787 
788 		page = binder_alloc_get_page(alloc, buffer,
789 					     buffer_offset, &pgoff);
790 		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
791 		memset_page(page, pgoff, 0, size);
792 		bytes -= size;
793 		buffer_offset += size;
794 	}
795 }
796 
797 /**
798  * binder_alloc_free_buf() - free a binder buffer
799  * @alloc:	binder_alloc for this proc
800  * @buffer:	kernel pointer to buffer
801  *
802  * Free the buffer allocated via binder_alloc_new_buf()
803  */
804 void binder_alloc_free_buf(struct binder_alloc *alloc,
805 			    struct binder_buffer *buffer)
806 {
807 	/*
808 	 * We could eliminate the call to binder_alloc_clear_buf()
809 	 * from binder_alloc_deferred_release() by moving this to
810 	 * binder_free_buf_locked(). However, that could
811 	 * increase contention for the alloc mutex if clear_on_free
812 	 * is used frequently for large buffers. The mutex is not
813 	 * needed for correctness here.
814 	 */
815 	if (buffer->clear_on_free) {
816 		binder_alloc_clear_buf(alloc, buffer);
817 		buffer->clear_on_free = false;
818 	}
819 	mutex_lock(&alloc->mutex);
820 	binder_free_buf_locked(alloc, buffer);
821 	mutex_unlock(&alloc->mutex);
822 }
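
/*
 * Editorial sketch (assumed caller): requesting zero-on-free before
 * releasing a buffer that carried sensitive data. t and proc are
 * hypothetical names for the caller's transaction and binder proc.
 *
 *	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
 *	...
 *	binder_alloc_free_buf(&proc->alloc, t->buffer);
 */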
823 
824 /**
825  * binder_alloc_mmap_handler() - map virtual address space for proc
826  * @alloc:	alloc structure for this proc
827  * @vma:	vma passed to mmap()
828  *
829  * Called by binder_mmap() to initialize the space specified in
830  * vma for allocating binder buffers
831  *
832  * Return:
833  *      0 = success
834  *      -EBUSY = address space already mapped
835  *      -ENOMEM = failed to map memory to given address space
 *      -EINVAL = vma->vm_mm does not match the binder proc's mm
836  */
837 int binder_alloc_mmap_handler(struct binder_alloc *alloc,
838 			      struct vm_area_struct *vma)
839 {
840 	struct binder_buffer *buffer;
841 	const char *failure_string;
842 	int ret, i;
843 
844 	if (unlikely(vma->vm_mm != alloc->mm)) {
845 		ret = -EINVAL;
846 		failure_string = "invalid vma->vm_mm";
847 		goto err_invalid_mm;
848 	}
849 
850 	mutex_lock(&binder_alloc_mmap_lock);
851 	if (alloc->buffer_size) {
852 		ret = -EBUSY;
853 		failure_string = "already mapped";
854 		goto err_already_mapped;
855 	}
856 	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
857 				   SZ_4M);
858 	mutex_unlock(&binder_alloc_mmap_lock);
859 
860 	alloc->buffer = vma->vm_start;
861 
862 	alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
863 			       sizeof(alloc->pages[0]),
864 			       GFP_KERNEL);
865 	if (alloc->pages == NULL) {
866 		ret = -ENOMEM;
867 		failure_string = "alloc page array";
868 		goto err_alloc_pages_failed;
869 	}
870 
871 	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
872 		alloc->pages[i].alloc = alloc;
873 		INIT_LIST_HEAD(&alloc->pages[i].lru);
874 	}
875 
876 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
877 	if (!buffer) {
878 		ret = -ENOMEM;
879 		failure_string = "alloc buffer struct";
880 		goto err_alloc_buf_struct_failed;
881 	}
882 
883 	buffer->user_data = alloc->buffer;
884 	list_add(&buffer->entry, &alloc->buffers);
885 	buffer->free = 1;
886 	binder_insert_free_buffer(alloc, buffer);
887 	alloc->free_async_space = alloc->buffer_size / 2;
888 
889 	/* Signal binder_alloc is fully initialized */
890 	binder_alloc_set_vma(alloc, vma);
891 
892 	return 0;
893 
894 err_alloc_buf_struct_failed:
895 	kfree(alloc->pages);
896 	alloc->pages = NULL;
897 err_alloc_pages_failed:
898 	alloc->buffer = 0;
899 	mutex_lock(&binder_alloc_mmap_lock);
900 	alloc->buffer_size = 0;
901 err_already_mapped:
902 	mutex_unlock(&binder_alloc_mmap_lock);
903 err_invalid_mm:
904 	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
905 			   "%s: %d %lx-%lx %s failed %d\n", __func__,
906 			   alloc->pid, vma->vm_start, vma->vm_end,
907 			   failure_string, ret);
908 	return ret;
909 }
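
/*
 * Editorial sketch (assumed caller, not the actual binder.c code): the
 * device's mmap() handler would forward the vma here after setting up its
 * private data and vm_ops. binder_vm_ops and proc are assumed names.
 *
 *	static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct binder_proc *proc = filp->private_data;
 *
 *		vma->vm_ops = &binder_vm_ops;
 *		vma->vm_private_data = proc;
 *		return binder_alloc_mmap_handler(&proc->alloc, vma);
 *	}
 */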
910 
911 
912 void binder_alloc_deferred_release(struct binder_alloc *alloc)
913 {
914 	struct rb_node *n;
915 	int buffers, page_count;
916 	struct binder_buffer *buffer;
917 
918 	buffers = 0;
919 	mutex_lock(&alloc->mutex);
920 	BUG_ON(alloc->vma);
921 
922 	while ((n = rb_first(&alloc->allocated_buffers))) {
923 		buffer = rb_entry(n, struct binder_buffer, rb_node);
924 
925 		/* Transaction should already have been freed */
926 		BUG_ON(buffer->transaction);
927 
928 		if (buffer->clear_on_free) {
929 			binder_alloc_clear_buf(alloc, buffer);
930 			buffer->clear_on_free = false;
931 		}
932 		binder_free_buf_locked(alloc, buffer);
933 		buffers++;
934 	}
935 
936 	while (!list_empty(&alloc->buffers)) {
937 		buffer = list_first_entry(&alloc->buffers,
938 					  struct binder_buffer, entry);
939 		WARN_ON(!buffer->free);
940 
941 		list_del(&buffer->entry);
942 		WARN_ON_ONCE(!list_empty(&alloc->buffers));
943 		kfree(buffer);
944 	}
945 
946 	page_count = 0;
947 	if (alloc->pages) {
948 		int i;
949 
950 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
951 			unsigned long page_addr;
952 			bool on_lru;
953 
954 			if (!alloc->pages[i].page_ptr)
955 				continue;
956 
957 			on_lru = list_lru_del(&binder_alloc_lru,
958 					      &alloc->pages[i].lru);
959 			page_addr = alloc->buffer + i * PAGE_SIZE;
960 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
961 				     "%s: %d: page %d at %lx %s\n",
962 				     __func__, alloc->pid, i, page_addr,
963 				     on_lru ? "on lru" : "active");
964 			__free_page(alloc->pages[i].page_ptr);
965 			page_count++;
966 		}
967 		kfree(alloc->pages);
968 	}
969 	mutex_unlock(&alloc->mutex);
970 	if (alloc->mm)
971 		mmdrop(alloc->mm);
972 
973 	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
974 		     "%s: %d buffers %d, pages %d\n",
975 		     __func__, alloc->pid, buffers, page_count);
976 }
977 
978 static void print_binder_buffer(struct seq_file *m, const char *prefix,
979 				struct binder_buffer *buffer)
980 {
981 	seq_printf(m, "%s %d: %lx size %zd:%zd:%zd %s\n",
982 		   prefix, buffer->debug_id, buffer->user_data,
983 		   buffer->data_size, buffer->offsets_size,
984 		   buffer->extra_buffers_size,
985 		   buffer->transaction ? "active" : "delivered");
986 }
987 
988 /**
989  * binder_alloc_print_allocated() - print buffer info
990  * @m:     seq_file for output via seq_printf()
991  * @alloc: binder_alloc for this proc
992  *
993  * Prints information about every buffer associated with
994  * the binder_alloc state to the given seq_file
995  */
996 void binder_alloc_print_allocated(struct seq_file *m,
997 				  struct binder_alloc *alloc)
998 {
999 	struct rb_node *n;
1000 
1001 	mutex_lock(&alloc->mutex);
1002 	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
1003 		print_binder_buffer(m, "  buffer",
1004 				    rb_entry(n, struct binder_buffer, rb_node));
1005 	mutex_unlock(&alloc->mutex);
1006 }
1007 
1008 /**
1009  * binder_alloc_print_pages() - print page usage
1010  * @m:     seq_file for output via seq_printf()
1011  * @alloc: binder_alloc for this proc
1012  */
1013 void binder_alloc_print_pages(struct seq_file *m,
1014 			      struct binder_alloc *alloc)
1015 {
1016 	struct binder_lru_page *page;
1017 	int i;
1018 	int active = 0;
1019 	int lru = 0;
1020 	int free = 0;
1021 
1022 	mutex_lock(&alloc->mutex);
1023 	/*
1024 	 * Make sure the binder_alloc is fully initialized, otherwise we might
1025 	 * read inconsistent state.
1026 	 */
1027 	if (binder_alloc_get_vma(alloc) != NULL) {
1028 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
1029 			page = &alloc->pages[i];
1030 			if (!page->page_ptr)
1031 				free++;
1032 			else if (list_empty(&page->lru))
1033 				active++;
1034 			else
1035 				lru++;
1036 		}
1037 	}
1038 	mutex_unlock(&alloc->mutex);
1039 	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
1040 	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
1041 }
1042 
1043 /**
1044  * binder_alloc_get_allocated_count() - return count of buffers
1045  * @alloc: binder_alloc for this proc
1046  *
1047  * Return: count of allocated buffers
1048  */
1049 int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
1050 {
1051 	struct rb_node *n;
1052 	int count = 0;
1053 
1054 	mutex_lock(&alloc->mutex);
1055 	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
1056 		count++;
1057 	mutex_unlock(&alloc->mutex);
1058 	return count;
1059 }
1060 
1061 
1062 /**
1063  * binder_alloc_vma_close() - invalidate address space
1064  * @alloc: binder_alloc for this proc
1065  *
1066  * Called from binder_vma_close() when releasing address space.
1067  * Clears alloc->vma to prevent new incoming transactions from
1068  * allocating more buffers.
1069  */
1070 void binder_alloc_vma_close(struct binder_alloc *alloc)
1071 {
1072 	binder_alloc_set_vma(alloc, NULL);
1073 }
1074 
1075 /**
1076  * binder_alloc_free_page() - shrinker callback to free pages
1077  * @item:   item to free
 * @lru:    list_lru the item is being isolated from
1078  * @lock:   lock protecting the item
1079  * @cb_arg: callback argument
1080  *
1081  * Called from list_lru_walk() in binder_shrink_scan() to free
1082  * up pages when the system is under memory pressure.
1083  */
1084 enum lru_status binder_alloc_free_page(struct list_head *item,
1085 				       struct list_lru_one *lru,
1086 				       spinlock_t *lock,
1087 				       void *cb_arg)
1088 	__must_hold(lock)
1089 {
1090 	struct mm_struct *mm = NULL;
1091 	struct binder_lru_page *page = container_of(item,
1092 						    struct binder_lru_page,
1093 						    lru);
1094 	struct binder_alloc *alloc;
1095 	unsigned long page_addr;
1096 	size_t index;
1097 	struct vm_area_struct *vma;
1098 
1099 	alloc = page->alloc;
1100 	if (!mutex_trylock(&alloc->mutex))
1101 		goto err_get_alloc_mutex_failed;
1102 
1103 	if (!page->page_ptr)
1104 		goto err_page_already_freed;
1105 
1106 	index = page - alloc->pages;
1107 	page_addr = alloc->buffer + index * PAGE_SIZE;
1108 
1109 	mm = alloc->mm;
1110 	if (!mmget_not_zero(mm))
1111 		goto err_mmget;
1112 	if (!mmap_read_trylock(mm))
1113 		goto err_mmap_read_lock_failed;
1114 	vma = vma_lookup(mm, page_addr);
1115 	if (vma && vma != binder_alloc_get_vma(alloc))
1116 		goto err_invalid_vma;
1117 
1118 	list_lru_isolate(lru, item);
1119 	spin_unlock(lock);
1120 
1121 	if (vma) {
1122 		trace_binder_unmap_user_start(alloc, index);
1123 
1124 		zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);
1125 
1126 		trace_binder_unmap_user_end(alloc, index);
1127 	}
1128 	mmap_read_unlock(mm);
1129 	mmput_async(mm);
1130 
1131 	trace_binder_unmap_kernel_start(alloc, index);
1132 
1133 	__free_page(page->page_ptr);
1134 	page->page_ptr = NULL;
1135 
1136 	trace_binder_unmap_kernel_end(alloc, index);
1137 
1138 	spin_lock(lock);
1139 	mutex_unlock(&alloc->mutex);
1140 	return LRU_REMOVED_RETRY;
1141 
1142 err_invalid_vma:
1143 	mmap_read_unlock(mm);
1144 err_mmap_read_lock_failed:
1145 	mmput_async(mm);
1146 err_mmget:
1147 err_page_already_freed:
1148 	mutex_unlock(&alloc->mutex);
1149 err_get_alloc_mutex_failed:
1150 	return LRU_SKIP;
1151 }
1152 
1153 static unsigned long
1154 binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1155 {
1156 	return list_lru_count(&binder_alloc_lru);
1157 }
1158 
1159 static unsigned long
1160 binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1161 {
1162 	return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
1163 			    NULL, sc->nr_to_scan);
1164 }
1165 
1166 static struct shrinker *binder_shrinker;
1167 
1168 /**
1169  * binder_alloc_init() - called by binder_open() for per-proc initialization
1170  * @alloc: binder_alloc for this proc
1171  *
1172  * Called from binder_open() to initialize binder_alloc fields for
1173  * new binder proc
1174  */
1175 void binder_alloc_init(struct binder_alloc *alloc)
1176 {
1177 	alloc->pid = current->group_leader->pid;
1178 	alloc->mm = current->mm;
1179 	mmgrab(alloc->mm);
1180 	mutex_init(&alloc->mutex);
1181 	INIT_LIST_HEAD(&alloc->buffers);
1182 }
1183 
1184 int binder_alloc_shrinker_init(void)
1185 {
1186 	int ret;
1187 
1188 	ret = list_lru_init(&binder_alloc_lru);
1189 	if (ret)
1190 		return ret;
1191 
1192 	binder_shrinker = shrinker_alloc(0, "android-binder");
1193 	if (!binder_shrinker) {
1194 		list_lru_destroy(&binder_alloc_lru);
1195 		return -ENOMEM;
1196 	}
1197 
1198 	binder_shrinker->count_objects = binder_shrink_count;
1199 	binder_shrinker->scan_objects = binder_shrink_scan;
1200 
1201 	shrinker_register(binder_shrinker);
1202 
1203 	return 0;
1204 }
1205 
1206 void binder_alloc_shrinker_exit(void)
1207 {
1208 	shrinker_free(binder_shrinker);
1209 	list_lru_destroy(&binder_alloc_lru);
1210 }
1211 
1212 /**
1213  * check_buffer() - verify that buffer/offset is safe to access
1214  * @alloc: binder_alloc for this proc
1215  * @buffer: binder buffer to be accessed
1216  * @offset: offset into @buffer data
1217  * @bytes: bytes to access from offset
1218  *
1219  * Check that the @offset/@bytes are within the size of the given
1220  * @buffer and that the buffer is currently active and not freeable.
1221  * Offsets must also be multiples of sizeof(u32). The kernel is
1222  * allowed to touch the buffer in two cases:
1223  *
1224  * 1) when the buffer is being created:
1225  *     (buffer->free == 0 && buffer->allow_user_free == 0)
1226  * 2) when the buffer is being torn down:
1227  *     (buffer->free == 0 && buffer->transaction == NULL).
1228  *
1229  * Return: true if the buffer is safe to access
1230  */
1231 static inline bool check_buffer(struct binder_alloc *alloc,
1232 				struct binder_buffer *buffer,
1233 				binder_size_t offset, size_t bytes)
1234 {
1235 	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
1236 
1237 	return buffer_size >= bytes &&
1238 		offset <= buffer_size - bytes &&
1239 		IS_ALIGNED(offset, sizeof(u32)) &&
1240 		!buffer->free &&
1241 		(!buffer->allow_user_free || !buffer->transaction);
1242 }
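
/*
 * Editorial example of the bounds check above with hypothetical numbers,
 * for a buffer whose total size is 128 bytes:
 *
 *	offset = 112, bytes = 16  -> accepted (aligned and in range)
 *	offset = 120, bytes = 16  -> rejected (120 > 128 - 16)
 *	offset =   6, bytes =  4  -> rejected (6 is not sizeof(u32) aligned)
 */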
1243 
1244 /**
1245  * binder_alloc_copy_user_to_buffer() - copy from user space into a binder buffer
1246  * @alloc: binder_alloc for this proc
1247  * @buffer: binder buffer to be accessed
1248  * @buffer_offset: offset into @buffer data
1249  * @from: userspace pointer to source buffer
1250  * @bytes: bytes to copy
1251  *
1252  * Copy bytes from source userspace to target buffer.
1253  *
1254  * Return: bytes remaining to be copied
1255  */
1256 unsigned long
1257 binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
1258 				 struct binder_buffer *buffer,
1259 				 binder_size_t buffer_offset,
1260 				 const void __user *from,
1261 				 size_t bytes)
1262 {
1263 	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1264 		return bytes;
1265 
1266 	while (bytes) {
1267 		unsigned long size;
1268 		unsigned long ret;
1269 		struct page *page;
1270 		pgoff_t pgoff;
1271 		void *kptr;
1272 
1273 		page = binder_alloc_get_page(alloc, buffer,
1274 					     buffer_offset, &pgoff);
1275 		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1276 		kptr = kmap_local_page(page) + pgoff;
1277 		ret = copy_from_user(kptr, from, size);
1278 		kunmap_local(kptr);
1279 		if (ret)
1280 			return bytes - size + ret;
1281 		bytes -= size;
1282 		from += size;
1283 		buffer_offset += size;
1284 	}
1285 	return 0;
1286 }
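
/*
 * Editorial sketch (assumed caller): copying user-supplied transaction data
 * into a freshly allocated buffer and turning a short copy into -EFAULT.
 * t, tr and proc are hypothetical caller-side names.
 *
 *	if (binder_alloc_copy_user_to_buffer(&proc->alloc, t->buffer, 0,
 *					     (const void __user *)(uintptr_t)
 *						tr->data.ptr.buffer,
 *					     tr->data_size))
 *		return -EFAULT;
 */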
1287 
1288 static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
1289 				       bool to_buffer,
1290 				       struct binder_buffer *buffer,
1291 				       binder_size_t buffer_offset,
1292 				       void *ptr,
1293 				       size_t bytes)
1294 {
1295 	/* All copies must be 32-bit aligned and 32-bit size */
1296 	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1297 		return -EINVAL;
1298 
1299 	while (bytes) {
1300 		unsigned long size;
1301 		struct page *page;
1302 		pgoff_t pgoff;
1303 
1304 		page = binder_alloc_get_page(alloc, buffer,
1305 					     buffer_offset, &pgoff);
1306 		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1307 		if (to_buffer)
1308 			memcpy_to_page(page, pgoff, ptr, size);
1309 		else
1310 			memcpy_from_page(ptr, page, pgoff, size);
1311 		bytes -= size;
1312 		pgoff = 0;
1313 		ptr = ptr + size;
1314 		buffer_offset += size;
1315 	}
1316 	return 0;
1317 }
1318 
1319 int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
1320 				struct binder_buffer *buffer,
1321 				binder_size_t buffer_offset,
1322 				void *src,
1323 				size_t bytes)
1324 {
1325 	return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
1326 					   src, bytes);
1327 }
1328 
1329 int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
1330 				  void *dest,
1331 				  struct binder_buffer *buffer,
1332 				  binder_size_t buffer_offset,
1333 				  size_t bytes)
1334 {
1335 	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
1336 					   dest, bytes);
1337 }
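
/*
 * Editorial sketch: reading a fixed-size value back out of a buffer with
 * the helper above. offset is a hypothetical, u32-aligned offset that the
 * caller has already validated.
 *
 *	binder_size_t off;
 *	int err;
 *
 *	err = binder_alloc_copy_from_buffer(&proc->alloc, &off, buffer,
 *					    offset, sizeof(off));
 *	if (err)
 *		return err;
 */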
1338 
1339