/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <[email protected]>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"

/**
 * DOC: amdgpu_object
 *
 * This defines the interfaces to operate on an &amdgpu_bo buffer object which
 * represents memory used by the driver (VRAM, system memory, etc.). The
 * driver provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these
 * interfaces to create/destroy/set buffer objects which are then managed by
 * the kernel TTM memory manager.
 * The interfaces are also used internally by kernel clients, including gfx,
 * uvd, etc. for kernel managed allocations used by the GPU.
 */
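
/*
 * Illustrative sketch (not part of the driver): the typical kernel-client
 * lifecycle described above pairs amdgpu_bo_create_kernel() with
 * amdgpu_bo_free_kernel(). The surrounding context (adev, error handling)
 * is assumed.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *
 *	... use cpu_ptr for CPU access, gpu_addr in command buffers ...
 *
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */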

static bool amdgpu_need_backup(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	if (amdgpu_gpu_recovery == 0 ||
	    (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))
		return false;

	return true;
}

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->kfd_bo)
		amdgpu_amdkfd_unreserve_system_memory_limit(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->gem_base.import_attach)
		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);
}

/**
 * amdgpu_ttm_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_ttm_bo_destroy)
		return true;
	return false;
}

/**
 * amdgpu_ttm_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to requested domain and the buffer's
 * flags.
 */
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		if (flags & AMDGPU_GEM_CREATE_SHADOW)
			places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
		else
			places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}
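
/*
 * Illustrative sketch (not part of the driver): callers normally combine
 * amdgpu_ttm_placement_from_domain() with ttm_bo_validate() to move a
 * buffer into the requested domain; ctx and abo below are assumed to exist
 * in the caller:
 *
 *	struct ttm_operation_ctx ctx = { false, false };
 *	int r;
 *
 *	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
 *	r = ttm_bo_validate(&abo->tbo, &abo->placement, &ctx);
 */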

/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is created only if *bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unreserve;
		}
	}

	return 0;

error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}
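
/*
 * Illustrative sketch (not part of the driver): because the BO is returned
 * still reserved, a caller can safely initialize its contents before
 * unreserving; the names below are assumptions:
 *
 *	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
 *				      AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				      &gpu_addr, &cpu_ptr);
 *	if (!r) {
 *		memset(cpu_ptr, 0, size);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */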

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Note: a new BO is created only if *bo_ptr is NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}

/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps and unpins a BO for kernel internal use.
 */
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}

/* Validate that the BO size fits within the total memory of the requested
 * domain */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
				    unsigned long size, u32 domain)
{
	struct ttm_mem_type_manager *man = NULL;

	/*
	 * If GTT is part of requested domains the check must succeed to
	 * allow fall back to GTT
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = &adev->mman.bdev.man[TTM_PL_TT];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = &adev->mman.bdev.man[TTM_PL_VRAM];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}

static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = false,
		.resv = bp->resv,
		.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	size_t acc_size;
	int r;

	page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	INIT_LIST_HEAD(&bo->va);
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif

	bo->tbo.bdev = &adev->mman.bdev;
	amdgpu_ttm_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
				 NULL, bp->resv, &amdgpu_ttm_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		ww_mutex_unlock(&bo->tbo.resv->lock);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size, int byte_align,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	if (bo->shadow)
		return 0;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = byte_align;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.resv;

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}

/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object and, if requested, also creates a
 * shadow object.
 * The shadow object is used to back up the original buffer object, and is
 * always placed in GTT.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
		if (!bp->resv)
			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
							NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr));

		if (!bp->resv)
			reservation_object_unlock((*bo_ptr)->tbo.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
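
/*
 * Illustrative sketch (not part of the driver): a caller fills an
 * &amdgpu_bo_param before calling amdgpu_bo_create(); the flag choices
 * below are examples only:
 *
 *	struct amdgpu_bo_param bp;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = ALIGN(size, PAGE_SIZE);
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *	bp.flags = AMDGPU_GEM_CREATE_VRAM_CLEARED | AMDGPU_GEM_CREATE_SHADOW;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */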

/**
 * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @ring: amdgpu_ring for the engine handling the buffer operations
 * @bo: &amdgpu_bo buffer to be backed up
 * @resv: reservation object with embedded fence
 * @fence: dma_fence associated with the operation
 * @direct: whether to submit the job directly
 *
 * Copies an &amdgpu_bo buffer object to its shadow object.
 * Not used for now.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct reservation_object *resv,
			       struct dma_fence **fence,
			       bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Sets placement according to domain; and changes placement and caching
 * policy of the buffer object according to the placement.
 * This is used for validating shadow bos. It calls ttm_bo_validate() to
 * make sure the buffer is resident where it needs to be.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_ttm_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}
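
/*
 * Illustrative sketch (not part of the driver), loosely following what GPU
 * reset recovery does: make the shadow resident again, then copy it back
 * with amdgpu_bo_restore_from_shadow() (defined below); ring and fence
 * handling are assumed:
 *
 *	r = amdgpu_bo_validate(bo->shadow);
 *	if (!r)
 *		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
 *						  bo->tbo.resv, &fence, true);
 */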

/**
 * amdgpu_bo_restore_from_shadow - restore an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @ring: amdgpu_ring for the engine handling the buffer operations
 * @bo: &amdgpu_bo buffer to be restored
 * @resv: reservation object with embedded fence
 * @fence: dma_fence associated with the operation
 * @direct: whether to submit the job directly
 *
 * Copies a buffer object's shadow content back to the object.
 * This is used for recovering a buffer from its shadow in case of a gpu
 * reset where vram context may be lost.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct reservation_object *resv,
				  struct dma_fence **fence,
				  bool direct)
{
	struct amdgpu_bo *shadow = bo->shadow;
	uint64_t bo_addr, shadow_addr;
	int r;

	if (!shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto err;

	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
			       amdgpu_bo_size(bo), resv, fence,
			       direct, false);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);

err:
	return r;
}

/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 * amdgpu_bo_kptr() to get the kernel virtual address.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
						MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}

/**
 * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
 *
 * Returns:
 * the virtual address of a buffer object area.
 */
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}

/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}
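
/*
 * Illustrative sketch (not part of the driver): kernel CPU access brackets
 * the mapping with amdgpu_bo_kmap()/amdgpu_bo_kunmap(); the BO is assumed
 * to be reserved by the caller, and data/size are assumptions:
 *
 *	void *ptr;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (!r) {
 *		memcpy(ptr, data, size);
 *		amdgpu_bo_kunmap(bo);
 *	}
 */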

/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * References the contained &ttm_buffer_object.
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object.
 */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Unreferences the contained &ttm_buffer_object and clears the pointer
 */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}
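
/*
 * Illustrative sketch (not part of the driver): taking and dropping a
 * reference; note that amdgpu_bo_unref() clears the caller's pointer:
 *
 *	struct amdgpu_bo *tmp = amdgpu_bo_ref(bo);
 *
 *	... tmp keeps the BO alive across this scope ...
 *
 *	amdgpu_bo_unref(&tmp);	(tmp is NULL from here on)
 */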

/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
 *
 * Pins the buffer object according to requested domain and address range. If
 * the memory is unbound GART memory, binds the pages into the GART table.
 * Adjusts pin_count and pin_size accordingly.
 *
 * Pinning means to lock pages in memory along with keeping them at a fixed
 * offset. It is required when a buffer cannot be moved, for example, when
 * a display buffer is being scanned out.
 *
 * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 * where to pin a buffer if there are specific restrictions on where a buffer
 * must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;

			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;
	if (gpu_addr != NULL)
		*gpu_addr = amdgpu_bo_gpu_offset(bo);

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		adev->vram_pin_size += amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			adev->invisible_pin_size += amdgpu_bo_size(bo);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		adev->gart_pin_size += amdgpu_bo_size(bo);
	}

error:
	return r;
}

/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
 * on where a buffer must be located.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}
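
/*
 * Illustrative sketch (not part of the driver): pinning a scanout buffer
 * into VRAM; the BO must be reserved around the pin, and every successful
 * pin is balanced by amdgpu_bo_unpin() (defined below):
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (!r) {
 *		r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */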

/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 * Changes placement and pin size accordingly.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (!bo->pin_count) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
		goto error;
	}

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		adev->vram_pin_size -= amdgpu_bo_size(bo);
		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
			adev->invisible_pin_size -= amdgpu_bo_size(bo);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		adev->gart_pin_size -= amdgpu_bo_size(bo);
	}

error:
	return r;
}

/**
 * amdgpu_bo_evict_vram - evict VRAM buffers
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the lru list of the memory type.
 * Mainly used for evicting vram at suspend time.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
};

/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				   adev->gmc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
					       adev->gmc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}

/**
 * amdgpu_bo_late_init - late init
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_late_init() to free resources used earlier during
 * initialization.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_late_init(struct amdgpu_device *adev)
{
	amdgpu_ttm_late_init(adev);

	return 0;
}

/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down memory manager.
 */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->gmc.vram_mtrr);
	arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}

/**
 * amdgpu_bo_fbdev_mmap - mmap fbdev memory
 * @bo: &amdgpu_bo buffer object
 * @vma: vma as input from the fbdev mmap method
 *
 * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
 * kernel driver to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
 * get the tiling flags on a buffer.
 */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}
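
/*
 * Illustrative sketch (not part of the driver): setting and reading back
 * tiling flags; TILE_SPLIT is used here only as an example field, and the
 * BO is assumed to be reserved (see the lockdep assertion above):
 *
 *	u64 flags;
 *
 *	r = amdgpu_bo_set_tiling_flags(bo, AMDGPU_TILING_SET(TILE_SPLIT, 2));
 *	if (!r)
 *		amdgpu_bo_get_tiling_flags(bo, &flags);
 */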

/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets buffer object's metadata, its size and flags.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets buffer object's metadata, its size and flags. buffer_size shall not be
 * less than metadata_size.
 * Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}
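
/*
 * Illustrative sketch (not part of the driver): storing opaque metadata on
 * a BO and reading it back; the sizes and buffers below are assumptions:
 *
 *	uint32_t size;
 *	uint64_t flags;
 *	char buf[64];
 *
 *	r = amdgpu_bo_set_metadata(bo, data, data_size, 0);
 *	if (!r)
 *		r = amdgpu_bo_get_metadata(bo, buf, sizeof(buf),
 *					   &size, &flags);
 */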

/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
 * bookkeeping.
 * TTM driver callback which is called when ttm moves a buffer.
 */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}

/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping.
 * TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = ttm_to_amdgpu_bo(bo);

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r != 0))
		return r;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->gmc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}

/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Note: the object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
		     !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
		     !bo->pin_count);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return bo->tbo.offset;
}

/**
 * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
 * @adev: amdgpu device object
 * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
 *
 * Returns:
 * Which of the allowed domains is preferred for pinning the BO for scanout.
 */
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain)
{
	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}
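
/*
 * Illustrative sketch (not part of the driver): display code can pass its
 * supported domains through this helper before pinning, so that parts with
 * little VRAM scan out from GTT instead:
 *
 *	domain = amdgpu_bo_get_preferred_pin_domain(adev,
 *			amdgpu_display_supported_domains(adev));
 *	r = amdgpu_bo_pin(bo, domain, &gpu_addr);
 */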