xref: /linux-6.15/drivers/android/binder_alloc.c (revision 072010ab)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder_alloc.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2017 Google, Inc.
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 
11 #include <linux/list.h>
12 #include <linux/sched/mm.h>
13 #include <linux/module.h>
14 #include <linux/rtmutex.h>
15 #include <linux/rbtree.h>
16 #include <linux/seq_file.h>
17 #include <linux/vmalloc.h>
18 #include <linux/slab.h>
19 #include <linux/sched.h>
20 #include <linux/list_lru.h>
21 #include <linux/ratelimit.h>
22 #include <asm/cacheflush.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/sizes.h>
26 #include "binder_alloc.h"
27 #include "binder_trace.h"
28 
29 struct list_lru binder_freelist;
30 
31 static DEFINE_MUTEX(binder_alloc_mmap_lock);
32 
33 enum {
34 	BINDER_DEBUG_USER_ERROR             = 1U << 0,
35 	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
36 	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
37 	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
38 };
39 static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
40 
41 module_param_named(debug_mask, binder_alloc_debug_mask,
42 		   uint, 0644);
43 
44 #define binder_alloc_debug(mask, x...) \
45 	do { \
46 		if (binder_alloc_debug_mask & mask) \
47 			pr_info_ratelimited(x); \
48 	} while (0)
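/*
 * Illustrative use of the macro above (not a call from the original file):
 *
 *	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 *			   "%d: freed buffer at %pK\n", alloc->pid, buffer);
 *
 * The message is emitted only when the corresponding bit is set in
 * binder_alloc_debug_mask, and output is rate limited.
 */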
49 
50 static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
51 {
52 	return list_entry(buffer->entry.next, struct binder_buffer, entry);
53 }
54 
55 static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
56 {
57 	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
58 }
59 
60 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
61 				       struct binder_buffer *buffer)
62 {
63 	if (list_is_last(&buffer->entry, &alloc->buffers))
64 		return alloc->buffer + alloc->buffer_size - buffer->user_data;
65 	return binder_buffer_next(buffer)->user_data - buffer->user_data;
66 }
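/*
 * Worked example for binder_alloc_buffer_size() (illustrative addresses):
 * buffer sizes are derived purely from neighbouring user_data values, as
 * the struct binder_buffer headers live in separately kmalloc'd memory.
 * If a buffer starts at alloc->buffer + 0x100 and the next entry in
 * alloc->buffers starts at alloc->buffer + 0x180, its size is 0x80 bytes;
 * the last buffer runs to alloc->buffer + alloc->buffer_size.
 */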
67 
68 static void binder_insert_free_buffer(struct binder_alloc *alloc,
69 				      struct binder_buffer *new_buffer)
70 {
71 	struct rb_node **p = &alloc->free_buffers.rb_node;
72 	struct rb_node *parent = NULL;
73 	struct binder_buffer *buffer;
74 	size_t buffer_size;
75 	size_t new_buffer_size;
76 
77 	BUG_ON(!new_buffer->free);
78 
79 	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
80 
81 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
82 		     "%d: add free buffer, size %zd, at %pK\n",
83 		      alloc->pid, new_buffer_size, new_buffer);
84 
85 	while (*p) {
86 		parent = *p;
87 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
88 		BUG_ON(!buffer->free);
89 
90 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
91 
92 		if (new_buffer_size < buffer_size)
93 			p = &parent->rb_left;
94 		else
95 			p = &parent->rb_right;
96 	}
97 	rb_link_node(&new_buffer->rb_node, parent, p);
98 	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
99 }
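/*
 * Note: the free_buffers rb-tree above is keyed by buffer size (equal
 * sizes go to the right subtree), which is what allows the best-fit
 * search in binder_alloc_new_buf_locked().
 */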
100 
101 static void binder_insert_allocated_buffer_locked(
102 		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
103 {
104 	struct rb_node **p = &alloc->allocated_buffers.rb_node;
105 	struct rb_node *parent = NULL;
106 	struct binder_buffer *buffer;
107 
108 	BUG_ON(new_buffer->free);
109 
110 	while (*p) {
111 		parent = *p;
112 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
113 		BUG_ON(buffer->free);
114 
115 		if (new_buffer->user_data < buffer->user_data)
116 			p = &parent->rb_left;
117 		else if (new_buffer->user_data > buffer->user_data)
118 			p = &parent->rb_right;
119 		else
120 			BUG();
121 	}
122 	rb_link_node(&new_buffer->rb_node, parent, p);
123 	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
124 }
125 
126 static struct binder_buffer *binder_alloc_prepare_to_free_locked(
127 		struct binder_alloc *alloc,
128 		unsigned long user_ptr)
129 {
130 	struct rb_node *n = alloc->allocated_buffers.rb_node;
131 	struct binder_buffer *buffer;
132 
133 	while (n) {
134 		buffer = rb_entry(n, struct binder_buffer, rb_node);
135 		BUG_ON(buffer->free);
136 
137 		if (user_ptr < buffer->user_data) {
138 			n = n->rb_left;
139 		} else if (user_ptr > buffer->user_data) {
140 			n = n->rb_right;
141 		} else {
142 			/*
143 			 * Guard against user threads attempting to
144 			 * free the buffer while it is in use by the kernel or
145 			 * after it has already been freed.
146 			 */
147 			if (!buffer->allow_user_free)
148 				return ERR_PTR(-EPERM);
149 			buffer->allow_user_free = 0;
150 			return buffer;
151 		}
152 	}
153 	return NULL;
154 }
155 
156 /**
157  * binder_alloc_prepare_to_free() - get buffer given user ptr
158  * @alloc:	binder_alloc for this proc
159  * @user_ptr:	User pointer to buffer data
160  *
161  * Validate the userspace pointer to the buffer data and return the buffer
162  * corresponding to that user pointer. Searches the allocated rb tree for
163  * the buffer that matches the user data pointer.
164  *
165  * Return:	Pointer to buffer or NULL
166  */
167 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
168 						   unsigned long user_ptr)
169 {
170 	struct binder_buffer *buffer;
171 
172 	mutex_lock(&alloc->mutex);
173 	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
174 	mutex_unlock(&alloc->mutex);
175 	return buffer;
176 }
177 
178 static inline void
179 binder_set_installed_page(struct binder_alloc *alloc,
180 			  unsigned long index,
181 			  struct page *page)
182 {
183 	/* Pairs with acquire in binder_get_installed_page() */
184 	smp_store_release(&alloc->pages[index], page);
185 }
186 
187 static inline struct page *
188 binder_get_installed_page(struct binder_alloc *alloc, unsigned long index)
189 {
190 	/* Pairs with release in binder_set_installed_page() */
191 	return smp_load_acquire(&alloc->pages[index]);
192 }
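/*
 * The release/acquire pair above ensures that a reader which observes a
 * non-NULL alloc->pages[index] also observes a fully initialized page,
 * including the shrinker metadata stored in page->private.
 */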
193 
194 static void binder_lru_freelist_add(struct binder_alloc *alloc,
195 				    unsigned long start, unsigned long end)
196 {
197 	unsigned long page_addr;
198 	struct page *page;
199 
200 	trace_binder_update_page_range(alloc, false, start, end);
201 
202 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
203 		size_t index;
204 		int ret;
205 
206 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
207 		page = binder_get_installed_page(alloc, index);
208 		if (!page)
209 			continue;
210 
211 		trace_binder_free_lru_start(alloc, index);
212 
213 		ret = list_lru_add(&binder_freelist,
214 				   page_to_lru(page),
215 				   page_to_nid(page),
216 				   NULL);
217 		WARN_ON(!ret);
218 
219 		trace_binder_free_lru_end(alloc, index);
220 	}
221 }
222 
223 static inline
224 void binder_alloc_set_mapped(struct binder_alloc *alloc, bool state)
225 {
226 	/* pairs with smp_load_acquire in binder_alloc_is_mapped() */
227 	smp_store_release(&alloc->mapped, state);
228 }
229 
230 static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
231 {
232 	/* pairs with smp_store_release in binder_alloc_set_mapped() */
233 	return smp_load_acquire(&alloc->mapped);
234 }
235 
236 static struct page *binder_page_alloc(struct binder_alloc *alloc,
237 				      unsigned long index)
238 {
239 	struct binder_shrinker_mdata *mdata;
240 	struct page *page;
241 
242 	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
243 	if (!page)
244 		return NULL;
245 
246 	/* allocate and install shrinker metadata under page->private */
247 	mdata = kzalloc(sizeof(*mdata), GFP_KERNEL);
248 	if (!mdata) {
249 		__free_page(page);
250 		return NULL;
251 	}
252 
253 	mdata->alloc = alloc;
254 	mdata->page_index = index;
255 	INIT_LIST_HEAD(&mdata->lru);
256 	set_page_private(page, (unsigned long)mdata);
257 
258 	return page;
259 }
260 
261 static void binder_free_page(struct page *page)
262 {
263 	kfree((struct binder_shrinker_mdata *)page_private(page));
264 	__free_page(page);
265 }
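/*
 * binder_page_alloc()/binder_free_page() keep the shrinker metadata in
 * page->private so that binder_alloc_free_page() can map an LRU item back
 * to its owning binder_alloc and page index via container_of().
 */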
266 
267 static int binder_install_single_page(struct binder_alloc *alloc,
268 				      unsigned long index,
269 				      unsigned long addr)
270 {
271 	struct vm_area_struct *vma;
272 	struct page *page;
273 	long npages;
274 	int ret;
275 
276 	if (!mmget_not_zero(alloc->mm))
277 		return -ESRCH;
278 
279 	page = binder_page_alloc(alloc, index);
280 	if (!page) {
281 		ret = -ENOMEM;
282 		goto out;
283 	}
284 
285 	mmap_read_lock(alloc->mm);
286 	vma = vma_lookup(alloc->mm, addr);
287 	if (!vma || !binder_alloc_is_mapped(alloc)) {
288 		binder_free_page(page);
289 		pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
290 		ret = -ESRCH;
291 		goto unlock;
292 	}
293 
294 	ret = vm_insert_page(vma, addr, page);
295 	switch (ret) {
296 	case -EBUSY:
297 		/*
298 		 * EBUSY is ok. Someone installed the pte first but the
299 		 * alloc->pages[index] has not been updated yet. Discard
300 		 * our page and look up the one already installed.
301 		 */
302 		ret = 0;
303 		binder_free_page(page);
304 		npages = get_user_pages_remote(alloc->mm, addr, 1,
305 					       FOLL_NOFAULT, &page, NULL);
306 		if (npages <= 0) {
307 			pr_err("%d: failed to find page at offset %lx\n",
308 			       alloc->pid, addr - alloc->buffer);
309 			ret = -ESRCH;
310 			break;
311 		}
312 		fallthrough;
313 	case 0:
314 		/* Mark page installation complete and safe to use */
315 		binder_set_installed_page(alloc, index, page);
316 		break;
317 	default:
318 		binder_free_page(page);
319 		pr_err("%d: %s failed to insert page at offset %lx with %d\n",
320 		       alloc->pid, __func__, addr - alloc->buffer, ret);
321 		ret = -ENOMEM;
322 		break;
323 	}
324 unlock:
325 	mmap_read_unlock(alloc->mm);
326 out:
327 	mmput_async(alloc->mm);
328 	return ret;
329 }
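/*
 * Note: pages are installed lazily and without alloc->mutex held.  If two
 * tasks race to install the same index, the loser of vm_insert_page()
 * takes the -EBUSY path above and adopts the winner's page; in either
 * case the page is only published via binder_set_installed_page() once
 * the pte is in place.
 */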
330 
331 static int binder_install_buffer_pages(struct binder_alloc *alloc,
332 				       struct binder_buffer *buffer,
333 				       size_t size)
334 {
335 	unsigned long start, final;
336 	unsigned long page_addr;
337 
338 	start = buffer->user_data & PAGE_MASK;
339 	final = PAGE_ALIGN(buffer->user_data + size);
340 
341 	for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
342 		unsigned long index;
343 		int ret;
344 
345 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
346 		if (binder_get_installed_page(alloc, index))
347 			continue;
348 
349 		trace_binder_alloc_page_start(alloc, index);
350 
351 		ret = binder_install_single_page(alloc, index, page_addr);
352 		if (ret)
353 			return ret;
354 
355 		trace_binder_alloc_page_end(alloc, index);
356 	}
357 
358 	return 0;
359 }
360 
361 /* The range of pages should exclude those shared with other buffers */
362 static void binder_lru_freelist_del(struct binder_alloc *alloc,
363 				    unsigned long start, unsigned long end)
364 {
365 	unsigned long page_addr;
366 	struct page *page;
367 
368 	trace_binder_update_page_range(alloc, true, start, end);
369 
370 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
371 		unsigned long index;
372 		bool on_lru;
373 
374 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
375 		page = binder_get_installed_page(alloc, index);
376 
377 		if (page) {
378 			trace_binder_alloc_lru_start(alloc, index);
379 
380 			on_lru = list_lru_del(&binder_freelist,
381 					      page_to_lru(page),
382 					      page_to_nid(page),
383 					      NULL);
384 			WARN_ON(!on_lru);
385 
386 			trace_binder_alloc_lru_end(alloc, index);
387 			continue;
388 		}
389 
390 		if (index + 1 > alloc->pages_high)
391 			alloc->pages_high = index + 1;
392 	}
393 }
394 
395 static void debug_no_space_locked(struct binder_alloc *alloc)
396 {
397 	size_t largest_alloc_size = 0;
398 	struct binder_buffer *buffer;
399 	size_t allocated_buffers = 0;
400 	size_t largest_free_size = 0;
401 	size_t total_alloc_size = 0;
402 	size_t total_free_size = 0;
403 	size_t free_buffers = 0;
404 	size_t buffer_size;
405 	struct rb_node *n;
406 
407 	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
408 		buffer = rb_entry(n, struct binder_buffer, rb_node);
409 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
410 		allocated_buffers++;
411 		total_alloc_size += buffer_size;
412 		if (buffer_size > largest_alloc_size)
413 			largest_alloc_size = buffer_size;
414 	}
415 
416 	for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {
417 		buffer = rb_entry(n, struct binder_buffer, rb_node);
418 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
419 		free_buffers++;
420 		total_free_size += buffer_size;
421 		if (buffer_size > largest_free_size)
422 			largest_free_size = buffer_size;
423 	}
424 
425 	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
426 			   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
427 			   total_alloc_size, allocated_buffers,
428 			   largest_alloc_size, total_free_size,
429 			   free_buffers, largest_free_size);
430 }
431 
432 static bool debug_low_async_space_locked(struct binder_alloc *alloc)
433 {
434 	/*
435 	 * Find the number and total size of buffers allocated by the current
436 	 * caller; the idea is that once we cross the threshold, whoever is
437 	 * responsible for the low async space is likely to try to send another
438 	 * async txn, and at some point we'll catch them in the act. This is
439 	 * more efficient than keeping a map per pid.
440 	 */
441 	struct binder_buffer *buffer;
442 	size_t total_alloc_size = 0;
443 	int pid = current->tgid;
444 	size_t num_buffers = 0;
445 	struct rb_node *n;
446 
447 	/*
448 	 * Only start detecting spammers once we have less than 20% of async
449 	 * space left (which is less than 10% of total buffer size).
450 	 */
451 	if (alloc->free_async_space >= alloc->buffer_size / 10) {
452 		alloc->oneway_spam_detected = false;
453 		return false;
454 	}
455 
456 	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
457 		 n = rb_next(n)) {
458 		buffer = rb_entry(n, struct binder_buffer, rb_node);
459 		if (buffer->pid != pid)
460 			continue;
461 		if (!buffer->async_transaction)
462 			continue;
463 		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
464 		num_buffers++;
465 	}
466 
467 	/*
468 	 * Warn if this pid has more than 50 transactions, or more than 50% of
469 	 * async space (which is 25% of total buffer size). Oneway spam is only
470 	 * detected when the threshold is exceeded.
471 	 */
472 	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
473 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
474 			     "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
475 			      alloc->pid, pid, num_buffers, total_alloc_size);
476 		if (!alloc->oneway_spam_detected) {
477 			alloc->oneway_spam_detected = true;
478 			return true;
479 		}
480 	}
481 	return false;
482 }
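/*
 * Illustrative numbers for the heuristic above, assuming the common 4 MiB
 * mapping: async space starts at 2 MiB, detection arms once free async
 * space drops below ~410 KiB (buffer_size / 10), and a pid is flagged
 * when it holds more than 50 async buffers or over 1 MiB (buffer_size / 4)
 * worth of them.
 */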
483 
484 /* Callers preallocate @new_buffer; it is freed by this function if unused */
485 static struct binder_buffer *binder_alloc_new_buf_locked(
486 				struct binder_alloc *alloc,
487 				struct binder_buffer *new_buffer,
488 				size_t size,
489 				int is_async)
490 {
491 	struct rb_node *n = alloc->free_buffers.rb_node;
492 	struct rb_node *best_fit = NULL;
493 	struct binder_buffer *buffer;
494 	unsigned long next_used_page;
495 	unsigned long curr_last_page;
496 	size_t buffer_size;
497 
498 	if (is_async && alloc->free_async_space < size) {
499 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
500 			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
501 			      alloc->pid, size);
502 		buffer = ERR_PTR(-ENOSPC);
503 		goto out;
504 	}
505 
506 	while (n) {
507 		buffer = rb_entry(n, struct binder_buffer, rb_node);
508 		BUG_ON(!buffer->free);
509 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
510 
511 		if (size < buffer_size) {
512 			best_fit = n;
513 			n = n->rb_left;
514 		} else if (size > buffer_size) {
515 			n = n->rb_right;
516 		} else {
517 			best_fit = n;
518 			break;
519 		}
520 	}
521 
522 	if (unlikely(!best_fit)) {
523 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
524 				   "%d: binder_alloc_buf size %zd failed, no address space\n",
525 				   alloc->pid, size);
526 		debug_no_space_locked(alloc);
527 		buffer = ERR_PTR(-ENOSPC);
528 		goto out;
529 	}
530 
531 	if (buffer_size != size) {
532 		/* Found an oversized buffer which needs to be split */
533 		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
534 		buffer_size = binder_alloc_buffer_size(alloc, buffer);
535 
536 		WARN_ON(n || buffer_size == size);
537 		new_buffer->user_data = buffer->user_data + size;
538 		list_add(&new_buffer->entry, &buffer->entry);
539 		new_buffer->free = 1;
540 		binder_insert_free_buffer(alloc, new_buffer);
541 		new_buffer = NULL;
542 	}
543 
544 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
545 		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
546 		      alloc->pid, size, buffer, buffer_size);
547 
548 	/*
549 	 * Now we remove the pages from the freelist. A clever calculation
550 	 * with buffer_size determines if the last page is shared with an
551 	 * adjacent in-use buffer. In that case, the page has already been
552 	 * removed from the freelist, so we trim our range short.
553 	 */
554 	next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK;
555 	curr_last_page = PAGE_ALIGN(buffer->user_data + size);
556 	binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data),
557 				min(next_used_page, curr_last_page));
558 
559 	rb_erase(&buffer->rb_node, &alloc->free_buffers);
560 	buffer->free = 0;
561 	buffer->allow_user_free = 0;
562 	binder_insert_allocated_buffer_locked(alloc, buffer);
563 	buffer->async_transaction = is_async;
564 	buffer->oneway_spam_suspect = false;
565 	if (is_async) {
566 		alloc->free_async_space -= size;
567 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
568 			     "%d: binder_alloc_buf size %zd async free %zd\n",
569 			      alloc->pid, size, alloc->free_async_space);
570 		if (debug_low_async_space_locked(alloc))
571 			buffer->oneway_spam_suspect = true;
572 	}
573 
574 out:
575 	/* Discard possibly unused new_buffer */
576 	kfree(new_buffer);
577 	return buffer;
578 }
579 
580 /* Calculate the sanitized total size; returns 0 for an invalid request */
581 static inline size_t sanitized_size(size_t data_size,
582 				    size_t offsets_size,
583 				    size_t extra_buffers_size)
584 {
585 	size_t total, tmp;
586 
587 	/* Align to pointer size and check for overflows */
588 	tmp = ALIGN(data_size, sizeof(void *)) +
589 		ALIGN(offsets_size, sizeof(void *));
590 	if (tmp < data_size || tmp < offsets_size)
591 		return 0;
592 	total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
593 	if (total < tmp || total < extra_buffers_size)
594 		return 0;
595 
596 	/* Pad 0-sized buffers so they get a unique address */
597 	total = max(total, sizeof(void *));
598 
599 	return total;
600 }
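/*
 * Worked example for sanitized_size() on a 64-bit kernel: data_size = 100,
 * offsets_size = 16 and extra_buffers_size = 0 yield ALIGN(100, 8) +
 * ALIGN(16, 8) + 0 = 120 bytes.  A 0/0/0 request is padded up to
 * sizeof(void *) so the resulting buffer still gets a unique address.
 */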
601 
602 /**
603  * binder_alloc_new_buf() - Allocate a new binder buffer
604  * @alloc:              binder_alloc for this proc
605  * @data_size:          size of user data buffer
606  * @offsets_size:       size of user-specified offsets area
607  * @extra_buffers_size: size of extra space for meta-data (e.g., security context)
608  * @is_async:           buffer for async transaction
609  *
610  * Allocate a new buffer given the requested sizes. Returns
611  * the kernel version of the buffer pointer. The size allocated
612  * is the sum of the three given sizes (each rounded up to a
613  * pointer-sized boundary).
614  *
615  * Return:	The allocated buffer or %ERR_PTR(-errno) if error
616  */
617 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
618 					   size_t data_size,
619 					   size_t offsets_size,
620 					   size_t extra_buffers_size,
621 					   int is_async)
622 {
623 	struct binder_buffer *buffer, *next;
624 	size_t size;
625 	int ret;
626 
627 	/* Check binder_alloc is fully initialized */
628 	if (!binder_alloc_is_mapped(alloc)) {
629 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
630 				   "%d: binder_alloc_buf, no vma\n",
631 				   alloc->pid);
632 		return ERR_PTR(-ESRCH);
633 	}
634 
635 	size = sanitized_size(data_size, offsets_size, extra_buffers_size);
636 	if (unlikely(!size)) {
637 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
638 				   "%d: got transaction with invalid size %zd-%zd-%zd\n",
639 				   alloc->pid, data_size, offsets_size,
640 				   extra_buffers_size);
641 		return ERR_PTR(-EINVAL);
642 	}
643 
644 	/* Preallocate the next buffer */
645 	next = kzalloc(sizeof(*next), GFP_KERNEL);
646 	if (!next)
647 		return ERR_PTR(-ENOMEM);
648 
649 	mutex_lock(&alloc->mutex);
650 	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
651 	if (IS_ERR(buffer)) {
652 		mutex_unlock(&alloc->mutex);
653 		goto out;
654 	}
655 
656 	buffer->data_size = data_size;
657 	buffer->offsets_size = offsets_size;
658 	buffer->extra_buffers_size = extra_buffers_size;
659 	buffer->pid = current->tgid;
660 	mutex_unlock(&alloc->mutex);
661 
662 	ret = binder_install_buffer_pages(alloc, buffer, size);
663 	if (ret) {
664 		binder_alloc_free_buf(alloc, buffer);
665 		buffer = ERR_PTR(ret);
666 	}
667 out:
668 	return buffer;
669 }
670 
671 static unsigned long buffer_start_page(struct binder_buffer *buffer)
672 {
673 	return buffer->user_data & PAGE_MASK;
674 }
675 
676 static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
677 {
678 	return (buffer->user_data - 1) & PAGE_MASK;
679 }
680 
681 static void binder_delete_free_buffer(struct binder_alloc *alloc,
682 				      struct binder_buffer *buffer)
683 {
684 	struct binder_buffer *prev, *next;
685 
686 	if (PAGE_ALIGNED(buffer->user_data))
687 		goto skip_freelist;
688 
689 	BUG_ON(alloc->buffers.next == &buffer->entry);
690 	prev = binder_buffer_prev(buffer);
691 	BUG_ON(!prev->free);
692 	if (prev_buffer_end_page(prev) == buffer_start_page(buffer))
693 		goto skip_freelist;
694 
695 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
696 		next = binder_buffer_next(buffer);
697 		if (buffer_start_page(next) == buffer_start_page(buffer))
698 			goto skip_freelist;
699 	}
700 
701 	binder_lru_freelist_add(alloc, buffer_start_page(buffer),
702 				buffer_start_page(buffer) + PAGE_SIZE);
703 skip_freelist:
704 	list_del(&buffer->entry);
705 	kfree(buffer);
706 }
707 
708 static void binder_free_buf_locked(struct binder_alloc *alloc,
709 				   struct binder_buffer *buffer)
710 {
711 	size_t size, buffer_size;
712 
713 	buffer_size = binder_alloc_buffer_size(alloc, buffer);
714 
715 	size = ALIGN(buffer->data_size, sizeof(void *)) +
716 		ALIGN(buffer->offsets_size, sizeof(void *)) +
717 		ALIGN(buffer->extra_buffers_size, sizeof(void *));
718 
719 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
720 		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
721 		      alloc->pid, buffer, size, buffer_size);
722 
723 	BUG_ON(buffer->free);
724 	BUG_ON(size > buffer_size);
725 	BUG_ON(buffer->transaction != NULL);
726 	BUG_ON(buffer->user_data < alloc->buffer);
727 	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
728 
729 	if (buffer->async_transaction) {
730 		alloc->free_async_space += buffer_size;
731 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
732 			     "%d: binder_free_buf size %zd async free %zd\n",
733 			      alloc->pid, size, alloc->free_async_space);
734 	}
735 
736 	binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data),
737 				(buffer->user_data + buffer_size) & PAGE_MASK);
738 
739 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
740 	buffer->free = 1;
741 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
742 		struct binder_buffer *next = binder_buffer_next(buffer);
743 
744 		if (next->free) {
745 			rb_erase(&next->rb_node, &alloc->free_buffers);
746 			binder_delete_free_buffer(alloc, next);
747 		}
748 	}
749 	if (alloc->buffers.next != &buffer->entry) {
750 		struct binder_buffer *prev = binder_buffer_prev(buffer);
751 
752 		if (prev->free) {
753 			binder_delete_free_buffer(alloc, buffer);
754 			rb_erase(&prev->rb_node, &alloc->free_buffers);
755 			buffer = prev;
756 		}
757 	}
758 	binder_insert_free_buffer(alloc, buffer);
759 }
760 
761 /**
762  * binder_alloc_get_page() - get kernel pointer for given buffer offset
763  * @alloc: binder_alloc for this proc
764  * @buffer: binder buffer to be accessed
765  * @buffer_offset: offset into @buffer data
766  * @pgoffp: address to copy final page offset to
767  *
768  * Lookup the struct page corresponding to the address
769  * at @buffer_offset into @buffer->user_data. If @pgoffp is not
770  * NULL, the byte-offset into the page is written there.
771  *
772  * The caller is responsible for ensuring that the offset points
773  * to a valid address within @buffer and that @buffer is
774  * not freeable by the user. Since it can't be freed, we are
775  * guaranteed that the corresponding elements of @alloc->pages[]
776  * cannot change.
777  *
778  * Return: struct page
779  */
780 static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
781 					  struct binder_buffer *buffer,
782 					  binder_size_t buffer_offset,
783 					  pgoff_t *pgoffp)
784 {
785 	binder_size_t buffer_space_offset = buffer_offset +
786 		(buffer->user_data - alloc->buffer);
787 	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
788 	size_t index = buffer_space_offset >> PAGE_SHIFT;
789 
790 	*pgoffp = pgoff;
791 
792 	return alloc->pages[index];
793 }
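/*
 * Worked example for the lookup above, assuming PAGE_SIZE == 4096: with
 * buffer->user_data == alloc->buffer + 0x1800 and buffer_offset == 0x900,
 * buffer_space_offset is 0x2100, giving index == 2 and *pgoffp == 0x100.
 */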
794 
795 /**
796  * binder_alloc_clear_buf() - zero out buffer
797  * @alloc: binder_alloc for this proc
798  * @buffer: binder buffer to be cleared
799  *
800  * memset the given buffer to 0
801  */
802 static void binder_alloc_clear_buf(struct binder_alloc *alloc,
803 				   struct binder_buffer *buffer)
804 {
805 	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
806 	binder_size_t buffer_offset = 0;
807 
808 	while (bytes) {
809 		unsigned long size;
810 		struct page *page;
811 		pgoff_t pgoff;
812 
813 		page = binder_alloc_get_page(alloc, buffer,
814 					     buffer_offset, &pgoff);
815 		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
816 		memset_page(page, pgoff, 0, size);
817 		bytes -= size;
818 		buffer_offset += size;
819 	}
820 }
821 
822 /**
823  * binder_alloc_free_buf() - free a binder buffer
824  * @alloc:	binder_alloc for this proc
825  * @buffer:	kernel pointer to buffer
826  *
827  * Free the buffer allocated via binder_alloc_new_buf()
828  */
829 void binder_alloc_free_buf(struct binder_alloc *alloc,
830 			    struct binder_buffer *buffer)
831 {
832 	/*
833 	 * We could eliminate the call to binder_alloc_clear_buf()
834 	 * from binder_alloc_deferred_release() by moving this to
835 	 * binder_free_buf_locked(). However, that could
836 	 * increase contention for the alloc mutex if clear_on_free
837 	 * is used frequently for large buffers. The mutex is not
838 	 * needed for correctness here.
839 	 */
840 	if (buffer->clear_on_free) {
841 		binder_alloc_clear_buf(alloc, buffer);
842 		buffer->clear_on_free = false;
843 	}
844 	mutex_lock(&alloc->mutex);
845 	binder_free_buf_locked(alloc, buffer);
846 	mutex_unlock(&alloc->mutex);
847 }
848 
849 /**
850  * binder_alloc_mmap_handler() - map virtual address space for proc
851  * @alloc:	alloc structure for this proc
852  * @vma:	vma passed to mmap()
853  *
854  * Called by binder_mmap() to initialize the space specified in
855  * @vma for allocating binder buffers
856  *
857  * Return:
858  *      0 = success
859  *      -EBUSY = address space already mapped
860  *      -ENOMEM = failed to map memory to given address space
861  */
862 int binder_alloc_mmap_handler(struct binder_alloc *alloc,
863 			      struct vm_area_struct *vma)
864 {
865 	struct binder_buffer *buffer;
866 	const char *failure_string;
867 	int ret;
868 
869 	if (unlikely(vma->vm_mm != alloc->mm)) {
870 		ret = -EINVAL;
871 		failure_string = "invalid vma->vm_mm";
872 		goto err_invalid_mm;
873 	}
874 
875 	mutex_lock(&binder_alloc_mmap_lock);
876 	if (alloc->buffer_size) {
877 		ret = -EBUSY;
878 		failure_string = "already mapped";
879 		goto err_already_mapped;
880 	}
881 	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
882 				   SZ_4M);
883 	mutex_unlock(&binder_alloc_mmap_lock);
884 
885 	alloc->buffer = vma->vm_start;
886 
887 	alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE,
888 				sizeof(alloc->pages[0]),
889 				GFP_KERNEL);
890 	if (!alloc->pages) {
891 		ret = -ENOMEM;
892 		failure_string = "alloc page array";
893 		goto err_alloc_pages_failed;
894 	}
895 
896 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
897 	if (!buffer) {
898 		ret = -ENOMEM;
899 		failure_string = "alloc buffer struct";
900 		goto err_alloc_buf_struct_failed;
901 	}
902 
903 	buffer->user_data = alloc->buffer;
904 	list_add(&buffer->entry, &alloc->buffers);
905 	buffer->free = 1;
906 	binder_insert_free_buffer(alloc, buffer);
907 	alloc->free_async_space = alloc->buffer_size / 2;
908 
909 	/* Signal binder_alloc is fully initialized */
910 	binder_alloc_set_mapped(alloc, true);
911 
912 	return 0;
913 
914 err_alloc_buf_struct_failed:
915 	kvfree(alloc->pages);
916 	alloc->pages = NULL;
917 err_alloc_pages_failed:
918 	alloc->buffer = 0;
919 	mutex_lock(&binder_alloc_mmap_lock);
920 	alloc->buffer_size = 0;
921 err_already_mapped:
922 	mutex_unlock(&binder_alloc_mmap_lock);
923 err_invalid_mm:
924 	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
925 			   "%s: %d %lx-%lx %s failed %d\n", __func__,
926 			   alloc->pid, vma->vm_start, vma->vm_end,
927 			   failure_string, ret);
928 	return ret;
929 }
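/*
 * Note: the mapping is capped at SZ_4M above regardless of the vma size,
 * and half of it (free_async_space) is reserved for async transactions.
 */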
930 
931 
932 void binder_alloc_deferred_release(struct binder_alloc *alloc)
933 {
934 	struct rb_node *n;
935 	int buffers, page_count;
936 	struct binder_buffer *buffer;
937 
938 	buffers = 0;
939 	mutex_lock(&alloc->mutex);
940 	BUG_ON(alloc->mapped);
941 
942 	while ((n = rb_first(&alloc->allocated_buffers))) {
943 		buffer = rb_entry(n, struct binder_buffer, rb_node);
944 
945 		/* Transaction should already have been freed */
946 		BUG_ON(buffer->transaction);
947 
948 		if (buffer->clear_on_free) {
949 			binder_alloc_clear_buf(alloc, buffer);
950 			buffer->clear_on_free = false;
951 		}
952 		binder_free_buf_locked(alloc, buffer);
953 		buffers++;
954 	}
955 
956 	while (!list_empty(&alloc->buffers)) {
957 		buffer = list_first_entry(&alloc->buffers,
958 					  struct binder_buffer, entry);
959 		WARN_ON(!buffer->free);
960 
961 		list_del(&buffer->entry);
962 		WARN_ON_ONCE(!list_empty(&alloc->buffers));
963 		kfree(buffer);
964 	}
965 
966 	page_count = 0;
967 	if (alloc->pages) {
968 		int i;
969 
970 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
971 			struct page *page;
972 			bool on_lru;
973 
974 			page = binder_get_installed_page(alloc, i);
975 			if (!page)
976 				continue;
977 
978 			on_lru = list_lru_del(&binder_freelist,
979 					      page_to_lru(page),
980 					      page_to_nid(page),
981 					      NULL);
982 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
983 				     "%s: %d: page %d %s\n",
984 				     __func__, alloc->pid, i,
985 				     on_lru ? "on lru" : "active");
986 			binder_free_page(page);
987 			page_count++;
988 		}
989 	}
990 	mutex_unlock(&alloc->mutex);
991 	kvfree(alloc->pages);
992 	if (alloc->mm)
993 		mmdrop(alloc->mm);
994 
995 	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
996 		     "%s: %d buffers %d, pages %d\n",
997 		     __func__, alloc->pid, buffers, page_count);
998 }
999 
1000 /**
1001  * binder_alloc_print_allocated() - print buffer info
1002  * @m:     seq_file for output via seq_printf()
1003  * @alloc: binder_alloc for this proc
1004  *
1005  * Prints information about every buffer associated with
1006  * the binder_alloc state to the given seq_file
1007  */
1008 void binder_alloc_print_allocated(struct seq_file *m,
1009 				  struct binder_alloc *alloc)
1010 {
1011 	struct binder_buffer *buffer;
1012 	struct rb_node *n;
1013 
1014 	mutex_lock(&alloc->mutex);
1015 	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
1016 		buffer = rb_entry(n, struct binder_buffer, rb_node);
1017 		seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
1018 			   buffer->debug_id,
1019 			   buffer->user_data - alloc->buffer,
1020 			   buffer->data_size, buffer->offsets_size,
1021 			   buffer->extra_buffers_size,
1022 			   buffer->transaction ? "active" : "delivered");
1023 	}
1024 	mutex_unlock(&alloc->mutex);
1025 }
1026 
1027 /**
1028  * binder_alloc_print_pages() - print page usage
1029  * @m:     seq_file for output via seq_printf()
1030  * @alloc: binder_alloc for this proc
1031  */
1032 void binder_alloc_print_pages(struct seq_file *m,
1033 			      struct binder_alloc *alloc)
1034 {
1035 	struct page *page;
1036 	int i;
1037 	int active = 0;
1038 	int lru = 0;
1039 	int free = 0;
1040 
1041 	mutex_lock(&alloc->mutex);
1042 	/*
1043 	 * Make sure the binder_alloc is fully initialized, otherwise we might
1044 	 * read inconsistent state.
1045 	 */
1046 	if (binder_alloc_is_mapped(alloc)) {
1047 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
1048 			page = binder_get_installed_page(alloc, i);
1049 			if (!page)
1050 				free++;
1051 			else if (list_empty(page_to_lru(page)))
1052 				active++;
1053 			else
1054 				lru++;
1055 		}
1056 	}
1057 	mutex_unlock(&alloc->mutex);
1058 	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
1059 	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
1060 }
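/*
 * The "pages: a:l:f" line above reports, in order, pages currently backing
 * buffers (active), pages parked on the shrinker freelist (lru), and page
 * slots with no page installed (free).
 */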
1061 
1062 /**
1063  * binder_alloc_get_allocated_count() - return count of buffers
1064  * @alloc: binder_alloc for this proc
1065  *
1066  * Return: count of allocated buffers
1067  */
1068 int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
1069 {
1070 	struct rb_node *n;
1071 	int count = 0;
1072 
1073 	mutex_lock(&alloc->mutex);
1074 	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
1075 		count++;
1076 	mutex_unlock(&alloc->mutex);
1077 	return count;
1078 }
1079 
1080 
1081 /**
1082  * binder_alloc_vma_close() - invalidate address space
1083  * @alloc: binder_alloc for this proc
1084  *
1085  * Called from binder_vma_close() when releasing address space.
1086  * Clears alloc->mapped to prevent new incoming transactions from
1087  * allocating more buffers.
1088  */
1089 void binder_alloc_vma_close(struct binder_alloc *alloc)
1090 {
1091 	binder_alloc_set_mapped(alloc, false);
1092 }
1093 
1094 /**
1095  * binder_alloc_free_page() - shrinker callback to free pages
1096  * @item:   item to free
1097  * @lru:    list_lru instance of the item
1098  * @cb_arg: callback argument
1099  *
1100  * Called from list_lru_walk() in binder_shrink_scan() to free
1101  * up pages when the system is under memory pressure.
1102  */
1103 enum lru_status binder_alloc_free_page(struct list_head *item,
1104 				       struct list_lru_one *lru,
1105 				       void *cb_arg)
1106 	__must_hold(&lru->lock)
1107 {
1108 	struct binder_shrinker_mdata *mdata = container_of(item, typeof(*mdata), lru);
1109 	struct binder_alloc *alloc = mdata->alloc;
1110 	struct mm_struct *mm = alloc->mm;
1111 	struct vm_area_struct *vma;
1112 	struct page *page_to_free;
1113 	unsigned long page_addr;
1114 	size_t index;
1115 
1116 	if (!mmget_not_zero(mm))
1117 		goto err_mmget;
1118 	if (!mmap_read_trylock(mm))
1119 		goto err_mmap_read_lock_failed;
1120 	if (!mutex_trylock(&alloc->mutex))
1121 		goto err_get_alloc_mutex_failed;
1122 
1123 	index = mdata->page_index;
1124 	page_addr = alloc->buffer + index * PAGE_SIZE;
1125 
1126 	vma = vma_lookup(mm, page_addr);
1127 	/*
1128 	 * Since a binder_alloc can only be mapped once, we ensure
1129 	 * the vma corresponds to this mapping by checking whether
1130 	 * the binder_alloc is still mapped.
1131 	 */
1132 	if (vma && !binder_alloc_is_mapped(alloc))
1133 		goto err_invalid_vma;
1134 
1135 	trace_binder_unmap_kernel_start(alloc, index);
1136 
1137 	page_to_free = alloc->pages[index];
1138 	binder_set_installed_page(alloc, index, NULL);
1139 
1140 	trace_binder_unmap_kernel_end(alloc, index);
1141 
1142 	list_lru_isolate(lru, item);
1143 	spin_unlock(&lru->lock);
1144 
1145 	if (vma) {
1146 		trace_binder_unmap_user_start(alloc, index);
1147 
1148 		zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);
1149 
1150 		trace_binder_unmap_user_end(alloc, index);
1151 	}
1152 
1153 	mutex_unlock(&alloc->mutex);
1154 	mmap_read_unlock(mm);
1155 	mmput_async(mm);
1156 	binder_free_page(page_to_free);
1157 
1158 	return LRU_REMOVED_RETRY;
1159 
1160 err_invalid_vma:
1161 	mutex_unlock(&alloc->mutex);
1162 err_get_alloc_mutex_failed:
1163 	mmap_read_unlock(mm);
1164 err_mmap_read_lock_failed:
1165 	mmput_async(mm);
1166 err_mmget:
1167 	return LRU_SKIP;
1168 }
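/*
 * binder_alloc_free_page() runs in shrinker context, hence the trylocks
 * above: failing to get mmap_lock or alloc->mutex returns LRU_SKIP so
 * reclaim can move on, while a reclaimed page returns LRU_REMOVED_RETRY
 * after the lru lock has been dropped.
 */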
1169 
1170 static unsigned long
1171 binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1172 {
1173 	return list_lru_count(&binder_freelist);
1174 }
1175 
1176 static unsigned long
1177 binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1178 {
1179 	return list_lru_walk(&binder_freelist, binder_alloc_free_page,
1180 			    NULL, sc->nr_to_scan);
1181 }
1182 
1183 static struct shrinker *binder_shrinker;
1184 
1185 /**
1186  * binder_alloc_init() - called by binder_open() for per-proc initialization
1187  * @alloc: binder_alloc for this proc
1188  *
1189  * Called from binder_open() to initialize binder_alloc fields for
1190  * new binder proc
1191  */
1192 void binder_alloc_init(struct binder_alloc *alloc)
1193 {
1194 	alloc->pid = current->group_leader->pid;
1195 	alloc->mm = current->mm;
1196 	mmgrab(alloc->mm);
1197 	mutex_init(&alloc->mutex);
1198 	INIT_LIST_HEAD(&alloc->buffers);
1199 }
1200 
1201 int binder_alloc_shrinker_init(void)
1202 {
1203 	int ret;
1204 
1205 	ret = list_lru_init(&binder_freelist);
1206 	if (ret)
1207 		return ret;
1208 
1209 	binder_shrinker = shrinker_alloc(0, "android-binder");
1210 	if (!binder_shrinker) {
1211 		list_lru_destroy(&binder_freelist);
1212 		return -ENOMEM;
1213 	}
1214 
1215 	binder_shrinker->count_objects = binder_shrink_count;
1216 	binder_shrinker->scan_objects = binder_shrink_scan;
1217 
1218 	shrinker_register(binder_shrinker);
1219 
1220 	return 0;
1221 }
1222 
1223 void binder_alloc_shrinker_exit(void)
1224 {
1225 	shrinker_free(binder_shrinker);
1226 	list_lru_destroy(&binder_freelist);
1227 }
1228 
1229 /**
1230  * check_buffer() - verify that buffer/offset is safe to access
1231  * @alloc: binder_alloc for this proc
1232  * @buffer: binder buffer to be accessed
1233  * @offset: offset into @buffer data
1234  * @bytes: bytes to access from offset
1235  *
1236  * Check that the @offset/@bytes are within the size of the given
1237  * @buffer and that the buffer is currently active and not freeable.
1238  * Offsets must also be multiples of sizeof(u32). The kernel is
1239  * allowed to touch the buffer in two cases:
1240  *
1241  * 1) when the buffer is being created:
1242  *     (buffer->free == 0 && buffer->allow_user_free == 0)
1243  * 2) when the buffer is being torn down:
1244  *     (buffer->free == 0 && buffer->transaction == NULL).
1245  *
1246  * Return: true if the buffer is safe to access
1247  */
1248 static inline bool check_buffer(struct binder_alloc *alloc,
1249 				struct binder_buffer *buffer,
1250 				binder_size_t offset, size_t bytes)
1251 {
1252 	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
1253 
1254 	return buffer_size >= bytes &&
1255 		offset <= buffer_size - bytes &&
1256 		IS_ALIGNED(offset, sizeof(u32)) &&
1257 		!buffer->free &&
1258 		(!buffer->allow_user_free || !buffer->transaction);
1259 }
1260 
1261 /**
1262  * binder_alloc_copy_user_to_buffer() - copy from userspace into a binder buffer
1263  * @alloc: binder_alloc for this proc
1264  * @buffer: binder buffer to be accessed
1265  * @buffer_offset: offset into @buffer data
1266  * @from: userspace pointer to source buffer
1267  * @bytes: bytes to copy
1268  *
1269  * Copy bytes from source userspace to target buffer.
1270  *
1271  * Return: bytes remaining to be copied
1272  */
1273 unsigned long
1274 binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
1275 				 struct binder_buffer *buffer,
1276 				 binder_size_t buffer_offset,
1277 				 const void __user *from,
1278 				 size_t bytes)
1279 {
1280 	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1281 		return bytes;
1282 
1283 	while (bytes) {
1284 		unsigned long size;
1285 		unsigned long ret;
1286 		struct page *page;
1287 		pgoff_t pgoff;
1288 		void *kptr;
1289 
1290 		page = binder_alloc_get_page(alloc, buffer,
1291 					     buffer_offset, &pgoff);
1292 		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1293 		kptr = kmap_local_page(page) + pgoff;
1294 		ret = copy_from_user(kptr, from, size);
1295 		kunmap_local(kptr);
1296 		if (ret)
1297 			return bytes - size + ret;
1298 		bytes -= size;
1299 		from += size;
1300 		buffer_offset += size;
1301 	}
1302 	return 0;
1303 }
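/*
 * Like copy_from_user(), the helper above returns the number of bytes left
 * uncopied, so a non-zero return indicates a failed or partial copy rather
 * than an errno; check_buffer() failure returns the full @bytes.
 */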
1304 
1305 static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
1306 				       bool to_buffer,
1307 				       struct binder_buffer *buffer,
1308 				       binder_size_t buffer_offset,
1309 				       void *ptr,
1310 				       size_t bytes)
1311 {
1312 	/* All copies must be 32-bit aligned and a multiple of 32 bits in size */
1313 	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
1314 		return -EINVAL;
1315 
1316 	while (bytes) {
1317 		unsigned long size;
1318 		struct page *page;
1319 		pgoff_t pgoff;
1320 
1321 		page = binder_alloc_get_page(alloc, buffer,
1322 					     buffer_offset, &pgoff);
1323 		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
1324 		if (to_buffer)
1325 			memcpy_to_page(page, pgoff, ptr, size);
1326 		else
1327 			memcpy_from_page(ptr, page, pgoff, size);
1328 		bytes -= size;
1329 		pgoff = 0;
1330 		ptr = ptr + size;
1331 		buffer_offset += size;
1332 	}
1333 	return 0;
1334 }
1335 
1336 int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
1337 				struct binder_buffer *buffer,
1338 				binder_size_t buffer_offset,
1339 				void *src,
1340 				size_t bytes)
1341 {
1342 	return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
1343 					   src, bytes);
1344 }
1345 
1346 int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
1347 				  void *dest,
1348 				  struct binder_buffer *buffer,
1349 				  binder_size_t buffer_offset,
1350 				  size_t bytes)
1351 {
1352 	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
1353 					   dest, bytes);
1354 }
1355