1 // SPDX-License-Identifier: GPL-2.0-only 2 /* binder_alloc.c 3 * 4 * Android IPC Subsystem 5 * 6 * Copyright (C) 2007-2017 Google, Inc. 7 */ 8 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11 #include <linux/list.h> 12 #include <linux/sched/mm.h> 13 #include <linux/module.h> 14 #include <linux/rtmutex.h> 15 #include <linux/rbtree.h> 16 #include <linux/seq_file.h> 17 #include <linux/vmalloc.h> 18 #include <linux/slab.h> 19 #include <linux/sched.h> 20 #include <linux/list_lru.h> 21 #include <linux/ratelimit.h> 22 #include <asm/cacheflush.h> 23 #include <linux/uaccess.h> 24 #include <linux/highmem.h> 25 #include "binder_alloc.h" 26 #include "binder_trace.h" 27 28 struct list_lru binder_alloc_lru; 29 30 static DEFINE_MUTEX(binder_alloc_mmap_lock); 31 32 enum { 33 BINDER_DEBUG_USER_ERROR = 1U << 0, 34 BINDER_DEBUG_OPEN_CLOSE = 1U << 1, 35 BINDER_DEBUG_BUFFER_ALLOC = 1U << 2, 36 BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3, 37 }; 38 static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR; 39 40 module_param_named(debug_mask, binder_alloc_debug_mask, 41 uint, 0644); 42 43 #define binder_alloc_debug(mask, x...) \ 44 do { \ 45 if (binder_alloc_debug_mask & mask) \ 46 pr_info_ratelimited(x); \ 47 } while (0) 48 49 static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer) 50 { 51 return list_entry(buffer->entry.next, struct binder_buffer, entry); 52 } 53 54 static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer) 55 { 56 return list_entry(buffer->entry.prev, struct binder_buffer, entry); 57 } 58 59 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc, 60 struct binder_buffer *buffer) 61 { 62 if (list_is_last(&buffer->entry, &alloc->buffers)) 63 return alloc->buffer + alloc->buffer_size - buffer->user_data; 64 return binder_buffer_next(buffer)->user_data - buffer->user_data; 65 } 66 67 static void binder_insert_free_buffer(struct binder_alloc *alloc, 68 struct binder_buffer *new_buffer) 69 { 70 struct rb_node **p = &alloc->free_buffers.rb_node; 71 struct rb_node *parent = NULL; 72 struct binder_buffer *buffer; 73 size_t buffer_size; 74 size_t new_buffer_size; 75 76 BUG_ON(!new_buffer->free); 77 78 new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer); 79 80 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 81 "%d: add free buffer, size %zd, at %pK\n", 82 alloc->pid, new_buffer_size, new_buffer); 83 84 while (*p) { 85 parent = *p; 86 buffer = rb_entry(parent, struct binder_buffer, rb_node); 87 BUG_ON(!buffer->free); 88 89 buffer_size = binder_alloc_buffer_size(alloc, buffer); 90 91 if (new_buffer_size < buffer_size) 92 p = &parent->rb_left; 93 else 94 p = &parent->rb_right; 95 } 96 rb_link_node(&new_buffer->rb_node, parent, p); 97 rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); 98 } 99 100 static void binder_insert_allocated_buffer_locked( 101 struct binder_alloc *alloc, struct binder_buffer *new_buffer) 102 { 103 struct rb_node **p = &alloc->allocated_buffers.rb_node; 104 struct rb_node *parent = NULL; 105 struct binder_buffer *buffer; 106 107 BUG_ON(new_buffer->free); 108 109 while (*p) { 110 parent = *p; 111 buffer = rb_entry(parent, struct binder_buffer, rb_node); 112 BUG_ON(buffer->free); 113 114 if (new_buffer->user_data < buffer->user_data) 115 p = &parent->rb_left; 116 else if (new_buffer->user_data > buffer->user_data) 117 p = &parent->rb_right; 118 else 119 BUG(); 120 } 121 rb_link_node(&new_buffer->rb_node, parent, p); 122 rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); 123 } 124 125 static struct binder_buffer *binder_alloc_prepare_to_free_locked( 126 struct binder_alloc *alloc, 127 uintptr_t user_ptr) 128 { 129 struct rb_node *n = alloc->allocated_buffers.rb_node; 130 struct binder_buffer *buffer; 131 void __user *uptr; 132 133 uptr = (void __user *)user_ptr; 134 135 while (n) { 136 buffer = rb_entry(n, struct binder_buffer, rb_node); 137 BUG_ON(buffer->free); 138 139 if (uptr < buffer->user_data) 140 n = n->rb_left; 141 else if (uptr > buffer->user_data) 142 n = n->rb_right; 143 else { 144 /* 145 * Guard against user threads attempting to 146 * free the buffer when in use by kernel or 147 * after it's already been freed. 148 */ 149 if (!buffer->allow_user_free) 150 return ERR_PTR(-EPERM); 151 buffer->allow_user_free = 0; 152 return buffer; 153 } 154 } 155 return NULL; 156 } 157 158 /** 159 * binder_alloc_prepare_to_free() - get buffer given user ptr 160 * @alloc: binder_alloc for this proc 161 * @user_ptr: User pointer to buffer data 162 * 163 * Validate userspace pointer to buffer data and return buffer corresponding to 164 * that user pointer. Search the rb tree for buffer that matches user data 165 * pointer. 166 * 167 * Return: Pointer to buffer or NULL 168 */ 169 struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, 170 uintptr_t user_ptr) 171 { 172 struct binder_buffer *buffer; 173 174 mutex_lock(&alloc->mutex); 175 buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); 176 mutex_unlock(&alloc->mutex); 177 return buffer; 178 } 179 180 static int binder_update_page_range(struct binder_alloc *alloc, int allocate, 181 void __user *start, void __user *end) 182 { 183 void __user *page_addr; 184 unsigned long user_page_addr; 185 struct binder_lru_page *page; 186 struct vm_area_struct *vma = NULL; 187 struct mm_struct *mm = NULL; 188 bool need_mm = false; 189 190 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 191 "%d: %s pages %pK-%pK\n", alloc->pid, 192 allocate ? "allocate" : "free", start, end); 193 194 if (end <= start) 195 return 0; 196 197 trace_binder_update_page_range(alloc, allocate, start, end); 198 199 if (allocate == 0) 200 goto free_range; 201 202 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { 203 page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; 204 if (!page->page_ptr) { 205 need_mm = true; 206 break; 207 } 208 } 209 210 if (need_mm && mmget_not_zero(alloc->vma_vm_mm)) 211 mm = alloc->vma_vm_mm; 212 213 if (mm) { 214 down_read(&mm->mmap_sem); 215 vma = alloc->vma; 216 } 217 218 if (!vma && need_mm) { 219 binder_alloc_debug(BINDER_DEBUG_USER_ERROR, 220 "%d: binder_alloc_buf failed to map pages in userspace, no vma\n", 221 alloc->pid); 222 goto err_no_vma; 223 } 224 225 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { 226 int ret; 227 bool on_lru; 228 size_t index; 229 230 index = (page_addr - alloc->buffer) / PAGE_SIZE; 231 page = &alloc->pages[index]; 232 233 if (page->page_ptr) { 234 trace_binder_alloc_lru_start(alloc, index); 235 236 on_lru = list_lru_del(&binder_alloc_lru, &page->lru); 237 WARN_ON(!on_lru); 238 239 trace_binder_alloc_lru_end(alloc, index); 240 continue; 241 } 242 243 if (WARN_ON(!vma)) 244 goto err_page_ptr_cleared; 245 246 trace_binder_alloc_page_start(alloc, index); 247 page->page_ptr = alloc_page(GFP_KERNEL | 248 __GFP_HIGHMEM | 249 __GFP_ZERO); 250 if (!page->page_ptr) { 251 pr_err("%d: binder_alloc_buf failed for page at %pK\n", 252 alloc->pid, page_addr); 253 goto err_alloc_page_failed; 254 } 255 page->alloc = alloc; 256 INIT_LIST_HEAD(&page->lru); 257 258 user_page_addr = (uintptr_t)page_addr; 259 ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr); 260 if (ret) { 261 pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", 262 alloc->pid, user_page_addr); 263 goto err_vm_insert_page_failed; 264 } 265 266 if (index + 1 > alloc->pages_high) 267 alloc->pages_high = index + 1; 268 269 trace_binder_alloc_page_end(alloc, index); 270 } 271 if (mm) { 272 up_read(&mm->mmap_sem); 273 mmput(mm); 274 } 275 return 0; 276 277 free_range: 278 for (page_addr = end - PAGE_SIZE; page_addr >= start; 279 page_addr -= PAGE_SIZE) { 280 bool ret; 281 size_t index; 282 283 index = (page_addr - alloc->buffer) / PAGE_SIZE; 284 page = &alloc->pages[index]; 285 286 trace_binder_free_lru_start(alloc, index); 287 288 ret = list_lru_add(&binder_alloc_lru, &page->lru); 289 WARN_ON(!ret); 290 291 trace_binder_free_lru_end(alloc, index); 292 continue; 293 294 err_vm_insert_page_failed: 295 __free_page(page->page_ptr); 296 page->page_ptr = NULL; 297 err_alloc_page_failed: 298 err_page_ptr_cleared: 299 ; 300 } 301 err_no_vma: 302 if (mm) { 303 up_read(&mm->mmap_sem); 304 mmput(mm); 305 } 306 return vma ? -ENOMEM : -ESRCH; 307 } 308 309 310 static inline void binder_alloc_set_vma(struct binder_alloc *alloc, 311 struct vm_area_struct *vma) 312 { 313 if (vma) 314 alloc->vma_vm_mm = vma->vm_mm; 315 /* 316 * If we see alloc->vma is not NULL, buffer data structures set up 317 * completely. Look at smp_rmb side binder_alloc_get_vma. 318 * We also want to guarantee new alloc->vma_vm_mm is always visible 319 * if alloc->vma is set. 320 */ 321 smp_wmb(); 322 alloc->vma = vma; 323 } 324 325 static inline struct vm_area_struct *binder_alloc_get_vma( 326 struct binder_alloc *alloc) 327 { 328 struct vm_area_struct *vma = NULL; 329 330 if (alloc->vma) { 331 /* Look at description in binder_alloc_set_vma */ 332 smp_rmb(); 333 vma = alloc->vma; 334 } 335 return vma; 336 } 337 338 static struct binder_buffer *binder_alloc_new_buf_locked( 339 struct binder_alloc *alloc, 340 size_t data_size, 341 size_t offsets_size, 342 size_t extra_buffers_size, 343 int is_async) 344 { 345 struct rb_node *n = alloc->free_buffers.rb_node; 346 struct binder_buffer *buffer; 347 size_t buffer_size; 348 struct rb_node *best_fit = NULL; 349 void __user *has_page_addr; 350 void __user *end_page_addr; 351 size_t size, data_offsets_size; 352 int ret; 353 354 if (!binder_alloc_get_vma(alloc)) { 355 binder_alloc_debug(BINDER_DEBUG_USER_ERROR, 356 "%d: binder_alloc_buf, no vma\n", 357 alloc->pid); 358 return ERR_PTR(-ESRCH); 359 } 360 361 data_offsets_size = ALIGN(data_size, sizeof(void *)) + 362 ALIGN(offsets_size, sizeof(void *)); 363 364 if (data_offsets_size < data_size || data_offsets_size < offsets_size) { 365 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 366 "%d: got transaction with invalid size %zd-%zd\n", 367 alloc->pid, data_size, offsets_size); 368 return ERR_PTR(-EINVAL); 369 } 370 size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *)); 371 if (size < data_offsets_size || size < extra_buffers_size) { 372 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 373 "%d: got transaction with invalid extra_buffers_size %zd\n", 374 alloc->pid, extra_buffers_size); 375 return ERR_PTR(-EINVAL); 376 } 377 if (is_async && 378 alloc->free_async_space < size + sizeof(struct binder_buffer)) { 379 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 380 "%d: binder_alloc_buf size %zd failed, no async space left\n", 381 alloc->pid, size); 382 return ERR_PTR(-ENOSPC); 383 } 384 385 /* Pad 0-size buffers so they get assigned unique addresses */ 386 size = max(size, sizeof(void *)); 387 388 while (n) { 389 buffer = rb_entry(n, struct binder_buffer, rb_node); 390 BUG_ON(!buffer->free); 391 buffer_size = binder_alloc_buffer_size(alloc, buffer); 392 393 if (size < buffer_size) { 394 best_fit = n; 395 n = n->rb_left; 396 } else if (size > buffer_size) 397 n = n->rb_right; 398 else { 399 best_fit = n; 400 break; 401 } 402 } 403 if (best_fit == NULL) { 404 size_t allocated_buffers = 0; 405 size_t largest_alloc_size = 0; 406 size_t total_alloc_size = 0; 407 size_t free_buffers = 0; 408 size_t largest_free_size = 0; 409 size_t total_free_size = 0; 410 411 for (n = rb_first(&alloc->allocated_buffers); n != NULL; 412 n = rb_next(n)) { 413 buffer = rb_entry(n, struct binder_buffer, rb_node); 414 buffer_size = binder_alloc_buffer_size(alloc, buffer); 415 allocated_buffers++; 416 total_alloc_size += buffer_size; 417 if (buffer_size > largest_alloc_size) 418 largest_alloc_size = buffer_size; 419 } 420 for (n = rb_first(&alloc->free_buffers); n != NULL; 421 n = rb_next(n)) { 422 buffer = rb_entry(n, struct binder_buffer, rb_node); 423 buffer_size = binder_alloc_buffer_size(alloc, buffer); 424 free_buffers++; 425 total_free_size += buffer_size; 426 if (buffer_size > largest_free_size) 427 largest_free_size = buffer_size; 428 } 429 binder_alloc_debug(BINDER_DEBUG_USER_ERROR, 430 "%d: binder_alloc_buf size %zd failed, no address space\n", 431 alloc->pid, size); 432 binder_alloc_debug(BINDER_DEBUG_USER_ERROR, 433 "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", 434 total_alloc_size, allocated_buffers, 435 largest_alloc_size, total_free_size, 436 free_buffers, largest_free_size); 437 return ERR_PTR(-ENOSPC); 438 } 439 if (n == NULL) { 440 buffer = rb_entry(best_fit, struct binder_buffer, rb_node); 441 buffer_size = binder_alloc_buffer_size(alloc, buffer); 442 } 443 444 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 445 "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", 446 alloc->pid, size, buffer, buffer_size); 447 448 has_page_addr = (void __user *) 449 (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK); 450 WARN_ON(n && buffer_size != size); 451 end_page_addr = 452 (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size); 453 if (end_page_addr > has_page_addr) 454 end_page_addr = has_page_addr; 455 ret = binder_update_page_range(alloc, 1, (void __user *) 456 PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr); 457 if (ret) 458 return ERR_PTR(ret); 459 460 if (buffer_size != size) { 461 struct binder_buffer *new_buffer; 462 463 new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); 464 if (!new_buffer) { 465 pr_err("%s: %d failed to alloc new buffer struct\n", 466 __func__, alloc->pid); 467 goto err_alloc_buf_struct_failed; 468 } 469 new_buffer->user_data = (u8 __user *)buffer->user_data + size; 470 list_add(&new_buffer->entry, &buffer->entry); 471 new_buffer->free = 1; 472 binder_insert_free_buffer(alloc, new_buffer); 473 } 474 475 rb_erase(best_fit, &alloc->free_buffers); 476 buffer->free = 0; 477 buffer->allow_user_free = 0; 478 binder_insert_allocated_buffer_locked(alloc, buffer); 479 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 480 "%d: binder_alloc_buf size %zd got %pK\n", 481 alloc->pid, size, buffer); 482 buffer->data_size = data_size; 483 buffer->offsets_size = offsets_size; 484 buffer->async_transaction = is_async; 485 buffer->extra_buffers_size = extra_buffers_size; 486 if (is_async) { 487 alloc->free_async_space -= size + sizeof(struct binder_buffer); 488 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 489 "%d: binder_alloc_buf size %zd async free %zd\n", 490 alloc->pid, size, alloc->free_async_space); 491 } 492 return buffer; 493 494 err_alloc_buf_struct_failed: 495 binder_update_page_range(alloc, 0, (void __user *) 496 PAGE_ALIGN((uintptr_t)buffer->user_data), 497 end_page_addr); 498 return ERR_PTR(-ENOMEM); 499 } 500 501 /** 502 * binder_alloc_new_buf() - Allocate a new binder buffer 503 * @alloc: binder_alloc for this proc 504 * @data_size: size of user data buffer 505 * @offsets_size: user specified buffer offset 506 * @extra_buffers_size: size of extra space for meta-data (eg, security context) 507 * @is_async: buffer for async transaction 508 * 509 * Allocate a new buffer given the requested sizes. Returns 510 * the kernel version of the buffer pointer. The size allocated 511 * is the sum of the three given sizes (each rounded up to 512 * pointer-sized boundary) 513 * 514 * Return: The allocated buffer or %NULL if error 515 */ 516 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, 517 size_t data_size, 518 size_t offsets_size, 519 size_t extra_buffers_size, 520 int is_async) 521 { 522 struct binder_buffer *buffer; 523 524 mutex_lock(&alloc->mutex); 525 buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, 526 extra_buffers_size, is_async); 527 mutex_unlock(&alloc->mutex); 528 return buffer; 529 } 530 531 static void __user *buffer_start_page(struct binder_buffer *buffer) 532 { 533 return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK); 534 } 535 536 static void __user *prev_buffer_end_page(struct binder_buffer *buffer) 537 { 538 return (void __user *) 539 (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK); 540 } 541 542 static void binder_delete_free_buffer(struct binder_alloc *alloc, 543 struct binder_buffer *buffer) 544 { 545 struct binder_buffer *prev, *next = NULL; 546 bool to_free = true; 547 BUG_ON(alloc->buffers.next == &buffer->entry); 548 prev = binder_buffer_prev(buffer); 549 BUG_ON(!prev->free); 550 if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) { 551 to_free = false; 552 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 553 "%d: merge free, buffer %pK share page with %pK\n", 554 alloc->pid, buffer->user_data, 555 prev->user_data); 556 } 557 558 if (!list_is_last(&buffer->entry, &alloc->buffers)) { 559 next = binder_buffer_next(buffer); 560 if (buffer_start_page(next) == buffer_start_page(buffer)) { 561 to_free = false; 562 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 563 "%d: merge free, buffer %pK share page with %pK\n", 564 alloc->pid, 565 buffer->user_data, 566 next->user_data); 567 } 568 } 569 570 if (PAGE_ALIGNED(buffer->user_data)) { 571 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 572 "%d: merge free, buffer start %pK is page aligned\n", 573 alloc->pid, buffer->user_data); 574 to_free = false; 575 } 576 577 if (to_free) { 578 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 579 "%d: merge free, buffer %pK do not share page with %pK or %pK\n", 580 alloc->pid, buffer->user_data, 581 prev->user_data, 582 next ? next->user_data : NULL); 583 binder_update_page_range(alloc, 0, buffer_start_page(buffer), 584 buffer_start_page(buffer) + PAGE_SIZE); 585 } 586 list_del(&buffer->entry); 587 kfree(buffer); 588 } 589 590 static void binder_free_buf_locked(struct binder_alloc *alloc, 591 struct binder_buffer *buffer) 592 { 593 size_t size, buffer_size; 594 595 buffer_size = binder_alloc_buffer_size(alloc, buffer); 596 597 size = ALIGN(buffer->data_size, sizeof(void *)) + 598 ALIGN(buffer->offsets_size, sizeof(void *)) + 599 ALIGN(buffer->extra_buffers_size, sizeof(void *)); 600 601 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 602 "%d: binder_free_buf %pK size %zd buffer_size %zd\n", 603 alloc->pid, buffer, size, buffer_size); 604 605 BUG_ON(buffer->free); 606 BUG_ON(size > buffer_size); 607 BUG_ON(buffer->transaction != NULL); 608 BUG_ON(buffer->user_data < alloc->buffer); 609 BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); 610 611 if (buffer->async_transaction) { 612 alloc->free_async_space += size + sizeof(struct binder_buffer); 613 614 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 615 "%d: binder_free_buf size %zd async free %zd\n", 616 alloc->pid, size, alloc->free_async_space); 617 } 618 619 binder_update_page_range(alloc, 0, 620 (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data), 621 (void __user *)(((uintptr_t) 622 buffer->user_data + buffer_size) & PAGE_MASK)); 623 624 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); 625 buffer->free = 1; 626 if (!list_is_last(&buffer->entry, &alloc->buffers)) { 627 struct binder_buffer *next = binder_buffer_next(buffer); 628 629 if (next->free) { 630 rb_erase(&next->rb_node, &alloc->free_buffers); 631 binder_delete_free_buffer(alloc, next); 632 } 633 } 634 if (alloc->buffers.next != &buffer->entry) { 635 struct binder_buffer *prev = binder_buffer_prev(buffer); 636 637 if (prev->free) { 638 binder_delete_free_buffer(alloc, buffer); 639 rb_erase(&prev->rb_node, &alloc->free_buffers); 640 buffer = prev; 641 } 642 } 643 binder_insert_free_buffer(alloc, buffer); 644 } 645 646 /** 647 * binder_alloc_free_buf() - free a binder buffer 648 * @alloc: binder_alloc for this proc 649 * @buffer: kernel pointer to buffer 650 * 651 * Free the buffer allocated via binder_alloc_new_buffer() 652 */ 653 void binder_alloc_free_buf(struct binder_alloc *alloc, 654 struct binder_buffer *buffer) 655 { 656 mutex_lock(&alloc->mutex); 657 binder_free_buf_locked(alloc, buffer); 658 mutex_unlock(&alloc->mutex); 659 } 660 661 /** 662 * binder_alloc_mmap_handler() - map virtual address space for proc 663 * @alloc: alloc structure for this proc 664 * @vma: vma passed to mmap() 665 * 666 * Called by binder_mmap() to initialize the space specified in 667 * vma for allocating binder buffers 668 * 669 * Return: 670 * 0 = success 671 * -EBUSY = address space already mapped 672 * -ENOMEM = failed to map memory to given address space 673 */ 674 int binder_alloc_mmap_handler(struct binder_alloc *alloc, 675 struct vm_area_struct *vma) 676 { 677 int ret; 678 const char *failure_string; 679 struct binder_buffer *buffer; 680 681 mutex_lock(&binder_alloc_mmap_lock); 682 if (alloc->buffer) { 683 ret = -EBUSY; 684 failure_string = "already mapped"; 685 goto err_already_mapped; 686 } 687 688 alloc->buffer = (void __user *)vma->vm_start; 689 mutex_unlock(&binder_alloc_mmap_lock); 690 691 alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE, 692 sizeof(alloc->pages[0]), 693 GFP_KERNEL); 694 if (alloc->pages == NULL) { 695 ret = -ENOMEM; 696 failure_string = "alloc page array"; 697 goto err_alloc_pages_failed; 698 } 699 alloc->buffer_size = vma->vm_end - vma->vm_start; 700 701 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); 702 if (!buffer) { 703 ret = -ENOMEM; 704 failure_string = "alloc buffer struct"; 705 goto err_alloc_buf_struct_failed; 706 } 707 708 buffer->user_data = alloc->buffer; 709 list_add(&buffer->entry, &alloc->buffers); 710 buffer->free = 1; 711 binder_insert_free_buffer(alloc, buffer); 712 alloc->free_async_space = alloc->buffer_size / 2; 713 binder_alloc_set_vma(alloc, vma); 714 mmgrab(alloc->vma_vm_mm); 715 716 return 0; 717 718 err_alloc_buf_struct_failed: 719 kfree(alloc->pages); 720 alloc->pages = NULL; 721 err_alloc_pages_failed: 722 mutex_lock(&binder_alloc_mmap_lock); 723 alloc->buffer = NULL; 724 err_already_mapped: 725 mutex_unlock(&binder_alloc_mmap_lock); 726 binder_alloc_debug(BINDER_DEBUG_USER_ERROR, 727 "%s: %d %lx-%lx %s failed %d\n", __func__, 728 alloc->pid, vma->vm_start, vma->vm_end, 729 failure_string, ret); 730 return ret; 731 } 732 733 734 void binder_alloc_deferred_release(struct binder_alloc *alloc) 735 { 736 struct rb_node *n; 737 int buffers, page_count; 738 struct binder_buffer *buffer; 739 740 buffers = 0; 741 mutex_lock(&alloc->mutex); 742 BUG_ON(alloc->vma); 743 744 while ((n = rb_first(&alloc->allocated_buffers))) { 745 buffer = rb_entry(n, struct binder_buffer, rb_node); 746 747 /* Transaction should already have been freed */ 748 BUG_ON(buffer->transaction); 749 750 binder_free_buf_locked(alloc, buffer); 751 buffers++; 752 } 753 754 while (!list_empty(&alloc->buffers)) { 755 buffer = list_first_entry(&alloc->buffers, 756 struct binder_buffer, entry); 757 WARN_ON(!buffer->free); 758 759 list_del(&buffer->entry); 760 WARN_ON_ONCE(!list_empty(&alloc->buffers)); 761 kfree(buffer); 762 } 763 764 page_count = 0; 765 if (alloc->pages) { 766 int i; 767 768 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { 769 void __user *page_addr; 770 bool on_lru; 771 772 if (!alloc->pages[i].page_ptr) 773 continue; 774 775 on_lru = list_lru_del(&binder_alloc_lru, 776 &alloc->pages[i].lru); 777 page_addr = alloc->buffer + i * PAGE_SIZE; 778 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 779 "%s: %d: page %d at %pK %s\n", 780 __func__, alloc->pid, i, page_addr, 781 on_lru ? "on lru" : "active"); 782 __free_page(alloc->pages[i].page_ptr); 783 page_count++; 784 } 785 kfree(alloc->pages); 786 } 787 mutex_unlock(&alloc->mutex); 788 if (alloc->vma_vm_mm) 789 mmdrop(alloc->vma_vm_mm); 790 791 binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE, 792 "%s: %d buffers %d, pages %d\n", 793 __func__, alloc->pid, buffers, page_count); 794 } 795 796 static void print_binder_buffer(struct seq_file *m, const char *prefix, 797 struct binder_buffer *buffer) 798 { 799 seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n", 800 prefix, buffer->debug_id, buffer->user_data, 801 buffer->data_size, buffer->offsets_size, 802 buffer->extra_buffers_size, 803 buffer->transaction ? "active" : "delivered"); 804 } 805 806 /** 807 * binder_alloc_print_allocated() - print buffer info 808 * @m: seq_file for output via seq_printf() 809 * @alloc: binder_alloc for this proc 810 * 811 * Prints information about every buffer associated with 812 * the binder_alloc state to the given seq_file 813 */ 814 void binder_alloc_print_allocated(struct seq_file *m, 815 struct binder_alloc *alloc) 816 { 817 struct rb_node *n; 818 819 mutex_lock(&alloc->mutex); 820 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) 821 print_binder_buffer(m, " buffer", 822 rb_entry(n, struct binder_buffer, rb_node)); 823 mutex_unlock(&alloc->mutex); 824 } 825 826 /** 827 * binder_alloc_print_pages() - print page usage 828 * @m: seq_file for output via seq_printf() 829 * @alloc: binder_alloc for this proc 830 */ 831 void binder_alloc_print_pages(struct seq_file *m, 832 struct binder_alloc *alloc) 833 { 834 struct binder_lru_page *page; 835 int i; 836 int active = 0; 837 int lru = 0; 838 int free = 0; 839 840 mutex_lock(&alloc->mutex); 841 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { 842 page = &alloc->pages[i]; 843 if (!page->page_ptr) 844 free++; 845 else if (list_empty(&page->lru)) 846 active++; 847 else 848 lru++; 849 } 850 mutex_unlock(&alloc->mutex); 851 seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); 852 seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); 853 } 854 855 /** 856 * binder_alloc_get_allocated_count() - return count of buffers 857 * @alloc: binder_alloc for this proc 858 * 859 * Return: count of allocated buffers 860 */ 861 int binder_alloc_get_allocated_count(struct binder_alloc *alloc) 862 { 863 struct rb_node *n; 864 int count = 0; 865 866 mutex_lock(&alloc->mutex); 867 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) 868 count++; 869 mutex_unlock(&alloc->mutex); 870 return count; 871 } 872 873 874 /** 875 * binder_alloc_vma_close() - invalidate address space 876 * @alloc: binder_alloc for this proc 877 * 878 * Called from binder_vma_close() when releasing address space. 879 * Clears alloc->vma to prevent new incoming transactions from 880 * allocating more buffers. 881 */ 882 void binder_alloc_vma_close(struct binder_alloc *alloc) 883 { 884 binder_alloc_set_vma(alloc, NULL); 885 } 886 887 /** 888 * binder_alloc_free_page() - shrinker callback to free pages 889 * @item: item to free 890 * @lock: lock protecting the item 891 * @cb_arg: callback argument 892 * 893 * Called from list_lru_walk() in binder_shrink_scan() to free 894 * up pages when the system is under memory pressure. 895 */ 896 enum lru_status binder_alloc_free_page(struct list_head *item, 897 struct list_lru_one *lru, 898 spinlock_t *lock, 899 void *cb_arg) 900 __must_hold(lock) 901 { 902 struct mm_struct *mm = NULL; 903 struct binder_lru_page *page = container_of(item, 904 struct binder_lru_page, 905 lru); 906 struct binder_alloc *alloc; 907 uintptr_t page_addr; 908 size_t index; 909 struct vm_area_struct *vma; 910 911 alloc = page->alloc; 912 if (!mutex_trylock(&alloc->mutex)) 913 goto err_get_alloc_mutex_failed; 914 915 if (!page->page_ptr) 916 goto err_page_already_freed; 917 918 index = page - alloc->pages; 919 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; 920 921 mm = alloc->vma_vm_mm; 922 if (!mmget_not_zero(mm)) 923 goto err_mmget; 924 if (!down_read_trylock(&mm->mmap_sem)) 925 goto err_down_read_mmap_sem_failed; 926 vma = binder_alloc_get_vma(alloc); 927 928 list_lru_isolate(lru, item); 929 spin_unlock(lock); 930 931 if (vma) { 932 trace_binder_unmap_user_start(alloc, index); 933 934 zap_page_range(vma, page_addr, PAGE_SIZE); 935 936 trace_binder_unmap_user_end(alloc, index); 937 } 938 up_read(&mm->mmap_sem); 939 mmput(mm); 940 941 trace_binder_unmap_kernel_start(alloc, index); 942 943 __free_page(page->page_ptr); 944 page->page_ptr = NULL; 945 946 trace_binder_unmap_kernel_end(alloc, index); 947 948 spin_lock(lock); 949 mutex_unlock(&alloc->mutex); 950 return LRU_REMOVED_RETRY; 951 952 err_down_read_mmap_sem_failed: 953 mmput_async(mm); 954 err_mmget: 955 err_page_already_freed: 956 mutex_unlock(&alloc->mutex); 957 err_get_alloc_mutex_failed: 958 return LRU_SKIP; 959 } 960 961 static unsigned long 962 binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 963 { 964 unsigned long ret = list_lru_count(&binder_alloc_lru); 965 return ret; 966 } 967 968 static unsigned long 969 binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) 970 { 971 unsigned long ret; 972 973 ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page, 974 NULL, sc->nr_to_scan); 975 return ret; 976 } 977 978 static struct shrinker binder_shrinker = { 979 .count_objects = binder_shrink_count, 980 .scan_objects = binder_shrink_scan, 981 .seeks = DEFAULT_SEEKS, 982 }; 983 984 /** 985 * binder_alloc_init() - called by binder_open() for per-proc initialization 986 * @alloc: binder_alloc for this proc 987 * 988 * Called from binder_open() to initialize binder_alloc fields for 989 * new binder proc 990 */ 991 void binder_alloc_init(struct binder_alloc *alloc) 992 { 993 alloc->pid = current->group_leader->pid; 994 mutex_init(&alloc->mutex); 995 INIT_LIST_HEAD(&alloc->buffers); 996 } 997 998 int binder_alloc_shrinker_init(void) 999 { 1000 int ret = list_lru_init(&binder_alloc_lru); 1001 1002 if (ret == 0) { 1003 ret = register_shrinker(&binder_shrinker); 1004 if (ret) 1005 list_lru_destroy(&binder_alloc_lru); 1006 } 1007 return ret; 1008 } 1009 1010 /** 1011 * check_buffer() - verify that buffer/offset is safe to access 1012 * @alloc: binder_alloc for this proc 1013 * @buffer: binder buffer to be accessed 1014 * @offset: offset into @buffer data 1015 * @bytes: bytes to access from offset 1016 * 1017 * Check that the @offset/@bytes are within the size of the given 1018 * @buffer and that the buffer is currently active and not freeable. 1019 * Offsets must also be multiples of sizeof(u32). The kernel is 1020 * allowed to touch the buffer in two cases: 1021 * 1022 * 1) when the buffer is being created: 1023 * (buffer->free == 0 && buffer->allow_user_free == 0) 1024 * 2) when the buffer is being torn down: 1025 * (buffer->free == 0 && buffer->transaction == NULL). 1026 * 1027 * Return: true if the buffer is safe to access 1028 */ 1029 static inline bool check_buffer(struct binder_alloc *alloc, 1030 struct binder_buffer *buffer, 1031 binder_size_t offset, size_t bytes) 1032 { 1033 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); 1034 1035 return buffer_size >= bytes && 1036 offset <= buffer_size - bytes && 1037 IS_ALIGNED(offset, sizeof(u32)) && 1038 !buffer->free && 1039 (!buffer->allow_user_free || !buffer->transaction); 1040 } 1041 1042 /** 1043 * binder_alloc_get_page() - get kernel pointer for given buffer offset 1044 * @alloc: binder_alloc for this proc 1045 * @buffer: binder buffer to be accessed 1046 * @buffer_offset: offset into @buffer data 1047 * @pgoffp: address to copy final page offset to 1048 * 1049 * Lookup the struct page corresponding to the address 1050 * at @buffer_offset into @buffer->user_data. If @pgoffp is not 1051 * NULL, the byte-offset into the page is written there. 1052 * 1053 * The caller is responsible to ensure that the offset points 1054 * to a valid address within the @buffer and that @buffer is 1055 * not freeable by the user. Since it can't be freed, we are 1056 * guaranteed that the corresponding elements of @alloc->pages[] 1057 * cannot change. 1058 * 1059 * Return: struct page 1060 */ 1061 static struct page *binder_alloc_get_page(struct binder_alloc *alloc, 1062 struct binder_buffer *buffer, 1063 binder_size_t buffer_offset, 1064 pgoff_t *pgoffp) 1065 { 1066 binder_size_t buffer_space_offset = buffer_offset + 1067 (buffer->user_data - alloc->buffer); 1068 pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK; 1069 size_t index = buffer_space_offset >> PAGE_SHIFT; 1070 struct binder_lru_page *lru_page; 1071 1072 lru_page = &alloc->pages[index]; 1073 *pgoffp = pgoff; 1074 return lru_page->page_ptr; 1075 } 1076 1077 /** 1078 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user 1079 * @alloc: binder_alloc for this proc 1080 * @buffer: binder buffer to be accessed 1081 * @buffer_offset: offset into @buffer data 1082 * @from: userspace pointer to source buffer 1083 * @bytes: bytes to copy 1084 * 1085 * Copy bytes from source userspace to target buffer. 1086 * 1087 * Return: bytes remaining to be copied 1088 */ 1089 unsigned long 1090 binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, 1091 struct binder_buffer *buffer, 1092 binder_size_t buffer_offset, 1093 const void __user *from, 1094 size_t bytes) 1095 { 1096 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) 1097 return bytes; 1098 1099 while (bytes) { 1100 unsigned long size; 1101 unsigned long ret; 1102 struct page *page; 1103 pgoff_t pgoff; 1104 void *kptr; 1105 1106 page = binder_alloc_get_page(alloc, buffer, 1107 buffer_offset, &pgoff); 1108 size = min_t(size_t, bytes, PAGE_SIZE - pgoff); 1109 kptr = kmap(page) + pgoff; 1110 ret = copy_from_user(kptr, from, size); 1111 kunmap(page); 1112 if (ret) 1113 return bytes - size + ret; 1114 bytes -= size; 1115 from += size; 1116 buffer_offset += size; 1117 } 1118 return 0; 1119 } 1120 1121 static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc, 1122 bool to_buffer, 1123 struct binder_buffer *buffer, 1124 binder_size_t buffer_offset, 1125 void *ptr, 1126 size_t bytes) 1127 { 1128 /* All copies must be 32-bit aligned and 32-bit size */ 1129 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) 1130 return -EINVAL; 1131 1132 while (bytes) { 1133 unsigned long size; 1134 struct page *page; 1135 pgoff_t pgoff; 1136 void *tmpptr; 1137 void *base_ptr; 1138 1139 page = binder_alloc_get_page(alloc, buffer, 1140 buffer_offset, &pgoff); 1141 size = min_t(size_t, bytes, PAGE_SIZE - pgoff); 1142 base_ptr = kmap_atomic(page); 1143 tmpptr = base_ptr + pgoff; 1144 if (to_buffer) 1145 memcpy(tmpptr, ptr, size); 1146 else 1147 memcpy(ptr, tmpptr, size); 1148 /* 1149 * kunmap_atomic() takes care of flushing the cache 1150 * if this device has VIVT cache arch 1151 */ 1152 kunmap_atomic(base_ptr); 1153 bytes -= size; 1154 pgoff = 0; 1155 ptr = ptr + size; 1156 buffer_offset += size; 1157 } 1158 return 0; 1159 } 1160 1161 int binder_alloc_copy_to_buffer(struct binder_alloc *alloc, 1162 struct binder_buffer *buffer, 1163 binder_size_t buffer_offset, 1164 void *src, 1165 size_t bytes) 1166 { 1167 return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset, 1168 src, bytes); 1169 } 1170 1171 int binder_alloc_copy_from_buffer(struct binder_alloc *alloc, 1172 void *dest, 1173 struct binder_buffer *buffer, 1174 binder_size_t buffer_offset, 1175 size_t bytes) 1176 { 1177 return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, 1178 dest, bytes); 1179 } 1180 1181